Hello,
I am looking for help with creating/migrating an alert rule via the REST API.
- What Grafana version and what operating system are you using?
Grafana: 10.1.2 OSS (latest)
OS: Fedora 36
- What are you trying to achieve?
I am trying to simulate a migration of an alert rule via the REST API.
- How are you trying to achieve it?
Here are the steps:
- Create an alert rule in the Grafana GUI (as usual).
- Export the alert rule in JSON format via the REST API and save it to a file on the Grafana server.
curl -s -X 'GET' -u admin:grafana 'http://hostname:3000/api/v1/provisioning/alert-rules/b5015786-869b-4f7f-8064-6bf02065305d/export' -H 'accept: application/json' | jq --sort-keys '.' > process_memory_copy.json
Content of the process_memory_copy.json file:
{
"apiVersion": 1,
"groups": [
{
"folder": "Processes",
"interval": "30s",
"name": "Processes_Memory",
"orgId": 1,
"rules": [
{
"annotations": {
"description": "Moj opis alerta 24.9.2023.",
"summary": "Processes Memory usage."
},
"condition": "C",
"data": [
{
"datasourceUid": "H8fJ02_4k",
"model": {
"intervalMs": 1000,
"maxDataPoints": 43200,
"query": "from(bucket: \"processes\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"procstat\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"memory_usage\")\r\n |> filter(fn: (r) => r[\"cmdline\"] == \"/usr/bin/influxd\" or r[\"cmdline\"] == \"/usr/pgsql-15/bin/postmaster -D /var/lib/pgsql/15/data/\")\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"mean\")",
"refId": "A"
},
"refId": "A",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"B"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "A",
"intervalMs": 1000,
"maxDataPoints": 43200,
"reducer": "last",
"refId": "B",
"settings": {
"mode": "dropNN"
},
"type": "reduce"
},
"refId": "B",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [
0
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"C"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "B",
"intervalMs": 1000,
"maxDataPoints": 43200,
"refId": "C",
"type": "threshold"
},
"refId": "C",
"relativeTimeRange": {
"from": 600,
"to": 0
}
}
],
"execErrState": "OK",
"for": "1m",
"isPaused": false,
"labels": {
"contact": "webhook"
},
"noDataState": "OK",
"title": "Processes_Memory backup (copy)",
"uid": "b5015786-869b-4f7f-8064-6bf02065305d"
}
]
}
]
}
- Keep only the part of the JSON that is required for the POST (i.e. .groups[].rules[]).
curl -s -X 'GET' -u admin:grafana 'http://hostname:3000/api/v1/provisioning/alert-rules/b5015786-869b-4f7f-8064-6bf02065305d/export' -H 'accept: application/json' | jq --sort-keys '.groups[].rules[]' > process_memory_copy_group_rules.json
Content of the process_memory_copy_group_rules.json file:
{
"annotations": {
"description": "Moj opis alerta 24.9.2023.",
"summary": "Processes Memory usage."
},
"condition": "C",
"data": [
{
"datasourceUid": "H8fJ02_4k",
"model": {
"intervalMs": 1000,
"maxDataPoints": 43200,
"query": "from(bucket: \"processes\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"procstat\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"memory_usage\")\r\n |> filter(fn: (r) => r[\"cmdline\"] == \"/usr/bin/influxd\" or r[\"cmdline\"] == \"/usr/pgsql-15/bin/postmaster -D /var/lib/pgsql/15/data/\")\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"mean\")",
"refId": "A"
},
"refId": "A",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"B"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "A",
"intervalMs": 1000,
"maxDataPoints": 43200,
"reducer": "last",
"refId": "B",
"settings": {
"mode": "dropNN"
},
"type": "reduce"
},
"refId": "B",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [
0
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"C"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "B",
"intervalMs": 1000,
"maxDataPoints": 43200,
"refId": "C",
"type": "threshold"
},
"refId": "C",
"relativeTimeRange": {
"from": 600,
"to": 0
}
}
],
"execErrState": "OK",
"for": "1m",
"isPaused": false,
"labels": {
"contact": "webhook"
},
"noDataState": "OK",
"title": "Processes_Memory backup (copy)",
"uid": "b5015786-869b-4f7f-8064-6bf02065305d"
}
- Add the missing JSON fields: folderUID, orgID, and ruleGroup.
In process_memory_copy_group_rules.json I found that folderUID, orgID, and ruleGroup are missing, so I added them manually (a jq-based alternative is sketched below).
cp -p process_memory_copy_group_rules.json process_memory_copy_group_rules_required.json
vim process_memory_copy_group_rules_required.json
Add content:
"folderUID": "Ox7_0h_Vz",
"orgID": 1,
"ruleGroup": "Processes_Memory",
- Delete the alert rule in the Grafana GUI (the folder and rule group are not deleted).
- Create the same alert rule via the REST API.
curl -s -X 'POST' -u admin:grafana 'http://hostname:3000/api/v1/provisioning/alert-rules' -H 'accept: application/json' -d @/home/ldrascic/grafana/alert_rules/process_memory_copy_group_rules_required.json
Finally, the content of the file used to create the alert rule (process_memory_copy_group_rules_required.json):
{
"annotations": {
"description": "Moj opis alerta 24.9.2023.",
"summary": "Processes Memory usage."
},
"condition": "C",
"data": [
{
"datasourceUid": "H8fJ02_4k",
"model": {
"intervalMs": 1000,
"maxDataPoints": 43200,
"query": "from(bucket: \"processes\")\r\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\r\n |> filter(fn: (r) => r[\"_measurement\"] == \"procstat\")\r\n |> filter(fn: (r) => r[\"_field\"] == \"memory_usage\")\r\n |> filter(fn: (r) => r[\"cmdline\"] == \"/usr/bin/influxd\" or r[\"cmdline\"] == \"/usr/pgsql-15/bin/postmaster -D /var/lib/pgsql/15/data/\")\r\n |> aggregateWindow(every: v.windowPeriod, fn: mean, createEmpty: false)\r\n |> yield(name: \"mean\")",
"refId": "A"
},
"refId": "A",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"B"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "A",
"intervalMs": 1000,
"maxDataPoints": 43200,
"reducer": "last",
"refId": "B",
"settings": {
"mode": "dropNN"
},
"type": "reduce"
},
"refId": "B",
"relativeTimeRange": {
"from": 600,
"to": 0
}
},
{
"datasourceUid": "__expr__",
"model": {
"conditions": [
{
"evaluator": {
"params": [
0
],
"type": "gt"
},
"operator": {
"type": "and"
},
"query": {
"params": [
"C"
]
},
"reducer": {
"params": [],
"type": "last"
},
"type": "query"
}
],
"datasource": {
"type": "__expr__",
"uid": "__expr__"
},
"expression": "B",
"intervalMs": 1000,
"maxDataPoints": 43200,
"refId": "C",
"type": "threshold"
},
"refId": "C",
"relativeTimeRange": {
"from": 600,
"to": 0
}
}
],
"execErrState": "OK",
"folderUID": "Ox7_0h_Vz",
"for": "1m",
"isPaused": false,
"labels": {
"contact": "webhook"
},
"noDataState": "OK",
"orgID": 1,
"ruleGroup": "Processes_Memory",
"title": "Processes_Memory backup (copy)",
"uid": "b5015786-869b-4f7f-8064-6bf02065305d"
}
- What happened?
I got an error message and the alert rule was not created.
- What did you expect to happen?
I expected the alert rule to be created.
- Can you copy/paste the configuration(s) that you are having problems with?
curl -s -X 'POST' -u admin:grafana 'http://hostname:3000/api/v1/provisioning/alert-rules' -H 'accept: application/json' -d @/home/ldrascic/grafana/alert_rules/process_memory_copy_group_rules_required.json
The content of process_memory_copy_group_rules_required.json used in this request is shown in full above.
- Did you receive any errors in the Grafana UI or in related logs? If so, please tell us exactly what they were.
On the Grafana server CLI I got:
{"message":"bad request data","traceID":""}
- Did you follow any online instructions? If so, what is the URL?
I followed the Grafana "Post Alert Rule" and "Provisioned Alert Rule" documentation for the required JSON fields.
Best regards,
ldrascic