In this example we are going to collect Golang application metrics with Prometheus in a Kubernetes cluster, then check what Prometheus has scraped.


There are two important points in this example. The first is that Prometheus will automatically discover any new application that exposes a /metrics endpoint; you don't have to change the Prometheus config. The second hasn't been implemented, so you need to add it yourself: metrics are currently not persistent. If you restart your pods, you will lose them, which is not an ideal scenario. This is a "must have" feature, not a "nice to have"! (A sketch of persistent storage is shown after the Prometheus deployment below.)


Golang


Structure


├── Makefile
├── deploy
│   └── k8s
│       ├── deployment.yaml
│       └── service.yaml
├── docker
│   └── Dockerfile
└── main.go

Files


deployment.yaml

apiVersion: apps/v1
kind: Deployment

metadata:
  name: football-deployment
  namespace: default
  labels:
    app: football

spec:
  replicas: 1
  selector:
    matchLabels:
      app: football
  template:
    metadata:
      labels:
        app: football
    spec:
      containers:
        - name: golang
          image: you/football:latest
          ports:
            - containerPort: 3000

service.yaml

apiVersion: v1
kind: Service

metadata:
  name: football-service
  namespace: default

spec:
  type: NodePort
  selector:
    app: football
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000

Dockerfile

FROM golang:1.15-alpine3.12 as build

WORKDIR /source
COPY . .

RUN CGO_ENABLED=0 go build -ldflags "-s -w" -o bin/main main.go

FROM alpine:3.12

COPY --from=build /source/bin/main /main

ENTRYPOINT ["/main"]

main.go

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	rtr := http.NewServeMux()
	rtr.HandleFunc("/metrics", promhttp.Handler().ServeHTTP)
	rtr.HandleFunc("/api/v1/leagues", league)

	if err := http.ListenAndServe(":3000", rtr); err != nil && err != http.ErrServerClosed {
		log.Fatalln(err)
	}
}

func league(w http.ResponseWriter, _ *http.Request) {
	counter.Inc()
	_, _ = w.Write([]byte("FIFA"))
}

// Auto register counter metrics.
var counter = promauto.NewCounter(prometheus.CounterOpts{
	Namespace: "football",
	Name:      "leagues_request_counter",
	Help:      "Number of requests",
})

Makefile

## LOCAL -----------------------------------------------------------------------

.PHONY: run
run:
	go build -race -ldflags "-s -w" -o bin/main main.go
	bin/main

.PHONY: test
test:
	curl --request GET http://localhost:3000/api/v1/leagues

## DOCKER ----------------------------------------------------------------------

.PHONY: docker-push
docker-push:
	docker build -t you/football:latest -f ./docker/Dockerfile .
	docker push you/football:latest
	docker rmi you/football:latest
	docker system prune --volumes --force

## KUBE ------------------------------------------------------------------------

.PHONY: kube-deploy
kube-deploy:
	kubectl apply -f deploy/k8s/deployment.yaml
	kubectl apply -f deploy/k8s/service.yaml

.PHONY: kube-test
kube-test:
	curl --request GET $(shell minikube service football-service --url)/api/v1/leagues

Monitoring


Structure


├── Makefile
└── deploy
    └── k8s
        ├── monitor
        │   ├── cluster-role-binding.yaml
        │   ├── cluster-role.yaml
        │   └── service-account.yaml
        └── prometheus
            ├── config-map.yaml
            ├── deployment.yaml
            └── service.yaml

Files


cluster-role-binding.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding

metadata:
  name: monitor-cluster-role-binding

roleRef:
  kind: ClusterRole
  name: monitor-cluster-role
  apiGroup: rbac.authorization.k8s.io

subjects:
  - kind: ServiceAccount
    name: monitor-service-account
    namespace: default

# Grant permissions defined in a cluster role to user(s) in the whole cluster.

cluster-role.yaml

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole

metadata:
  name: monitor-cluster-role

rules:
  - apiGroups: [""]
    resources: ["services", "pods", "endpoints"]
    verbs: ["get", "watch", "list"]
  - nonResourceURLs: ["/metrics"]
    verbs: ["get"]

# Grant read access to all services, pods, endpoints and /metrics URLs in the
# whole cluster. Run the commands below to confirm the role permissions. You
# should get "yes" for each.

# kubectl auth can-i get services --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i list services --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i watch services --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i get pods --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i list pods --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i watch pods --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i get endpoints --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i list endpoints --as=system:serviceaccount:default:monitor-service-account -n default
# kubectl auth can-i watch endpoints --as=system:serviceaccount:default:monitor-service-account -n default

service-account.yaml

apiVersion: v1
kind: ServiceAccount

metadata:
  name: monitor-service-account
  namespace: default

# Provides Prometheus with an identity in the cluster.

config-map.yaml

Check this example for more detailed configuration options.


apiVersion: v1
kind: ConfigMap

metadata:
  name: prometheus-config-map
  namespace: default

data:
  prometheus.yml: |
    global:
      scrape_interval: 15s
      scrape_timeout: 15s
    scrape_configs:
      - job_name: application-metrics
        metrics_path: /metrics
        kubernetes_sd_configs:
          - role: endpoints
        relabel_configs:
          - action: labelmap
            regex: __meta_kubernetes_service_label_(.+)
          - source_labels: [__meta_kubernetes_namespace]
            action: replace
            target_label: namespace
          - source_labels: [__meta_kubernetes_service_name]
            action: replace
            target_label: service

# Given that we exposed our applications as Services, the Kubernetes
# auto-discovery mechanism kubernetes_sd_configs watches for any service
# endpoints shipping Prometheus-format metrics. If you want to ignore other
# services that also expose metrics via a /metrics endpoint, one workaround is
# to change your application's metrics path to something else, e.g.
# /app/metrics. An annotation-based alternative is sketched below.
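
Another option, which this example does not use, is to scrape only the Services you explicitly annotate. Below is a minimal sketch, assuming you add a prometheus.io/scrape: "true" annotation to football-service; the annotation name is just a common convention, not something Kubernetes or Prometheus enforces.

# Hypothetical variation of the scrape_configs above: keep only endpoints whose
# Service carries the prometheus.io/scrape: "true" annotation.
scrape_configs:
  - job_name: application-metrics
    metrics_path: /metrics
    kubernetes_sd_configs:
      - role: endpoints
    relabel_configs:
      - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
        action: keep
        regex: "true"
      - action: labelmap
        regex: __meta_kubernetes_service_label_(.+)

# The football Service would then need the matching annotation in its metadata:
#
#   metadata:
#     name: football-service
#     annotations:
#       prometheus.io/scrape: "true"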

deployment.yaml

apiVersion: apps/v1
kind: Deployment

metadata:
  name: prometheus-deployment
  namespace: default
  labels:
    app: prometheus

spec:
  replicas: 1
  selector:
    matchLabels:
      app: prometheus
  template:
    metadata:
      labels:
        app: prometheus
    spec:
      serviceAccountName: monitor-service-account
      containers:
        - name: prometheus
          image: prom/prometheus:latest
          resources:
            limits:
              cpu: "1000m" # 1 core
              memory: "1024Mi" # 1 GB
            requests:
              cpu: "500m" # 0.5 core
              memory: "512Mi" # 0.5 GB
          ports:
            - name: http
              protocol: TCP
              containerPort: 9090
          volumeMounts:
            - name: config
              mountPath: /etc/prometheus/
            - name: storage
              mountPath: /prometheus/
      volumes:
        - name: config
          configMap:
            name: prometheus-config-map
        - name: storage
          emptyDir: {}
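
Note that the storage volume above is an emptyDir, which is exactly why metrics do not survive a pod restart (the missing persistence mentioned at the top). A minimal sketch of a PersistentVolumeClaim-backed alternative follows; the prometheus-pvc name and the 5Gi size are assumptions, and your cluster needs a StorageClass that can satisfy the claim.

# Hypothetical PVC (not part of this example) to persist Prometheus data.
apiVersion: v1
kind: PersistentVolumeClaim

metadata:
  name: prometheus-pvc
  namespace: default

spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi

# Then swap the emptyDir volume in the Deployment above for the claim:
#
#   volumes:
#     - name: storage
#       persistentVolumeClaim:
#         claimName: prometheus-pvc

Depending on your cluster, the Prometheus container may also need a securityContext (for example an fsGroup) so it can write to the mounted volume.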

service.yaml

apiVersion: v1
kind: Service

metadata:
  name: prometheus-service
  namespace: default

spec:
  type: NodePort
  selector:
    app: prometheus
  ports:
    - name: http
      protocol: TCP
      port: 80
      targetPort: 9090

Makefile

.PHONY: deploy-roles
deploy-roles:
	kubectl apply -f deploy/k8s/monitor/cluster-role.yaml
	kubectl apply -f deploy/k8s/monitor/service-account.yaml
	kubectl apply -f deploy/k8s/monitor/cluster-role-binding.yaml

.PHONY: deploy-prometheus
deploy-prometheus:
	kubectl apply -f deploy/k8s/prometheus/config-map.yaml
	kubectl apply -f deploy/k8s/prometheus/deployment.yaml
	kubectl apply -f deploy/k8s/prometheus/service.yaml

Test


I am using Minikube, so I have to start it with RBAC enabled.


$ minikube start --vm-driver=virtualbox --extra-config=apiserver.Authorization.Mode=RBAC

Deploy Golang


I assume you have already pushed the application image to Docker Hub with the football$ make docker-push command. Run the command below to deploy it.


football$ make kube-deploy
kubectl apply -f deploy/k8s/deployment.yaml
deployment.apps/football-deployment created
kubectl apply -f deploy/k8s/service.yaml
service/football-service created

Deploy Monitoring


monitoring$ make deploy-roles
kubectl apply -f deploy/k8s/monitor/cluster-role.yaml
clusterrole.rbac.authorization.k8s.io/monitor-cluster-role created
kubectl apply -f deploy/k8s/monitor/service-account.yaml
serviceaccount/monitor-service-account created
kubectl apply -f deploy/k8s/monitor/cluster-role-binding.yaml
clusterrolebinding.rbac.authorization.k8s.io/monitor-cluster-role-binding created

monitoring$ make deploy-prometheus
kubectl apply -f deploy/k8s/prometheus/config-map.yaml
configmap/prometheus-config-map created
kubectl apply -f deploy/k8s/prometheus/deployment.yaml
deployment.apps/prometheus-deployment created
kubectl apply -f deploy/k8s/prometheus/service.yaml
service/prometheus-service created

Obtain the Prometheus service URL for the UI.


$ minikube service prometheus-service --url --namespace=default
http://192.168.99.101:30566

When you visit http://192.168.99.101:30566, you will see many metrics, including football_leagues_request_counter. It will look like the example below.


football_leagues_request_counter{instance="172.17.0.5:3000",job="application-metrics",namespace="default",service="football-service"} 0

If you run football$ make kube-test three times, the metric will change as below.


football_leagues_request_counter{instance="172.17.0.5:3000",job="application-metrics",namespace="default",service="football-service"} 3