I had to add the namespace field manually to the manifest downloaded from https://raw.githubusercontent.com/grafana/pdc-agent/main/production/kubernetes/pdc-agent-deployment.yaml; without it, the deployment failed because no namespace was set.
Having a Terraform example and better documentation or support for cloud environments would greatly simplify implementations like this.
From my perspective, grouping agents by namespace would be a more efficient approach. Currently, I need to create a separate token for each group of agents, which adds complexity. Providing a way to manage tokens at the namespace level, or to group agents, would streamline deployments and make it easier to manage resources in large environments. The Terraform configuration I used is below, followed by the templated manifest.
data "local_file" "pdc_agent_deployment" {
filename = "${path.module}/manifest/pdc-agent-deployment.yaml"
}
resource "kubernetes_manifest" "pdc_agent_deployment" {
manifest = yamldecode(
templatefile(
"${path.module}/manifest/pdc-agent-deployment.yaml",
{
namespace = kubernetes_namespace.metrics.id
}
)
)
}
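The deployment below reads its credentials from a Secret named grafana-pdc-agent with token, cluster, and hosted-grafana-id keys. A minimal sketch of creating that Secret in the same namespace from Terraform, assuming the values are passed in through hypothetical variables (pdc_token, pdc_cluster, hosted_grafana_id), could look like this:

# Sketch only: the variable names below are assumptions, not part of the original setup.
variable "pdc_token" {
  type      = string
  sensitive = true
}

variable "pdc_cluster" {
  type = string
}

variable "hosted_grafana_id" {
  type = string
}

resource "kubernetes_secret" "grafana_pdc_agent" {
  metadata {
    name      = "grafana-pdc-agent"
    namespace = kubernetes_namespace.metrics.id
  }

  # Keys must match the secretKeyRef entries in the deployment manifest.
  data = {
    token               = var.pdc_token
    cluster             = var.pdc_cluster
    "hosted-grafana-id" = var.hosted_grafana_id
  }
}

Because each namespace gets its own Secret this way, a per-group token maps naturally onto a per-namespace deployment, which is the grouping described above.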
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: ${namespace}
  labels:
    app: grafana-pdc-agent
    name: grafana-pdc-agent
  name: grafana-pdc-agent
spec:
  replicas: 1
  selector:
    matchLabels:
      name: grafana-pdc-agent
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        name: grafana-pdc-agent
    spec:
      containers:
        - name: grafana-pdc-agent
          env:
            - name: TOKEN
              valueFrom:
                secretKeyRef:
                  key: token
                  name: grafana-pdc-agent
            - name: CLUSTER
              valueFrom:
                secretKeyRef:
                  key: cluster
                  name: grafana-pdc-agent
            - name: HOSTED_GRAFANA_ID
              valueFrom:
                secretKeyRef:
                  key: hosted-grafana-id
                  name: grafana-pdc-agent
          args:
            - -cluster
            - $(CLUSTER)
            - -token
            - $(TOKEN)
            - -gcloud-hosted-grafana-id
            - "$(HOSTED_GRAFANA_ID)"
          image: grafana/pdc-agent:latest
          imagePullPolicy: Always
          resources:
            limits:
              memory: 1Gi
            requests:
              cpu: 1
              memory: 1Gi
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
            runAsNonRoot: true
            capabilities:
              drop:
                - all
      securityContext:
        runAsUser: 30000
        runAsGroup: 30000
        fsGroup: 30000