In this example we are going to demonstrate how we can gracefully shut down a Golang application. This example is well known by many by now. However, what's not well known is whether this solution works in a Kubernetes environment the same way it does in a local environment. It all depends on the terminationGracePeriodSeconds option in the Kubernetes pod/deployment configuration. Long story short, make sure the context timeout value is less than the value of the terminationGracePeriodSeconds option. By default, if you don't configure it, it is set to 30 seconds, so your context timeout value should be n < 30.


Our application allows long-running requests/connections 40 seconds to complete. We then set terminationGracePeriodSeconds to 60. This will prevent our application from being prematurely killed by Kubernetes. Normally it is okay not to configure this value and to set the context timeout to something like 10 seconds. You will see the logs below.


Structure


├── Makefile
├── deploy
│   └── k8s
│   ├── deployment.yaml
│   └── service.yaml
├── docker
│   └── dev
│   └── Dockerfile
└── main.go

Files


Makefile


# Run the application locally with the race detector enabled.
.PHONY: run
run:
go run -race main.go

# Build the Docker image, push it to the registry, then remove the local
# copy and prune dangling resources to keep the dev machine clean.
.PHONY: docker-push
docker-push:
docker build -t you/api:latest -f ./docker/dev/Dockerfile .
docker push you/api:latest
docker rmi you/api:latest
docker system prune --volumes --force

# Apply the Kubernetes deployment and service manifests.
.PHONY: k8s-deploy
k8s-deploy:
kubectl apply -f deploy/k8s/deployment.yaml
kubectl apply -f deploy/k8s/service.yaml

Dockerfile


# --- Build stage: compile the Go binary ---
FROM golang:1.15-alpine3.12 as build

WORKDIR /source
COPY . .

# CGO_ENABLED=0 yields a statically linked binary that runs on plain alpine;
# -s -w strips symbol and DWARF debug info to shrink the binary.
RUN CGO_ENABLED=0 go build -ldflags "-s -w" -o bin/main main.go

# --- Run stage: minimal image containing only the compiled binary ---
FROM alpine:3.12 as run

COPY --from=build /source/bin/main /main

# The run stage's default working directory is /, so ./main resolves to /main.
ENTRYPOINT ["./main"]

main.go


package main

import (
"context"
"log"
"net/http"
"os"
"os/signal"
"syscall"
"time"
)

// main starts an HTTP server on :8080 and shuts it down gracefully when a
// SIGINT/SIGTERM arrives, giving in-flight requests up to 40 seconds to
// complete. In Kubernetes, terminationGracePeriodSeconds must be larger
// than this 40s window, otherwise the kubelet kills the pod mid-drain.
func main() {
	// Create HTTP router.
	rtr := http.NewServeMux()
	rtr.HandleFunc("/", home)

	// Create HTTP server.
	srv := &http.Server{
		Addr:    ":8080",
		Handler: rtr,
	}

	// Start HTTP server. After Shutdown is called below, ListenAndServe
	// returns http.ErrServerClosed; the log line for it is expected.
	go func() {
		if err := srv.ListenAndServe(); err != nil {
			log.Println("http server: listen and serve:", err)
		}
	}()
	log.Println("app: started")

	// Block until an application shutdown signal arrives.
	listener := make(chan os.Signal, 1)
	signal.Notify(listener, os.Interrupt, syscall.SIGTERM)
	log.Println("app: received a shutdown signal:", <-listener)

	// Allow live connections a set period of time to complete.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*40)
	defer cancel()

	// Shutdown HTTP server. Shutdown never returns ErrServerClosed (only
	// ListenAndServe does), so the previous `err != http.ErrServerClosed`
	// guard here was dead code. Any non-nil error is a real failure,
	// typically context.DeadlineExceeded when draining exceeds 40s.
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("app: dirty shutdown:", err)
		return
	}

	log.Println("app: clean shutdown")
}

// A dummy endpoint that simulates a long running request/connection/process.
func home(w http.ResponseWriter, _ *http.Request) {
log.Println("sleeping!")
time.Sleep(time.Second * 50)
log.Println("woke up!")

_, _ = w.Write([]byte("welcome home"))
}

service.yaml


apiVersion: v1
kind: Service

metadata:
name: api-service

spec:
# NodePort exposes the service on each node's IP for easy testing.
type: NodePort
# Route traffic to pods labelled app: api (matches the deployment template).
selector:
app: api
ports:
- protocol: TCP
# The service listens on 80 and forwards to the container's 8080.
port: 80
targetPort: 8080

deployment.yaml


apiVersion: apps/v1
kind: Deployment

metadata:
name: api-deployment
labels:
app: api

spec:
replicas: 1
selector:
matchLabels:
app: api
template:
metadata:
labels:
app: api
spec:
containers:
- name: golang
image: you/api:latest
ports:
- containerPort: 8080
# Must exceed the application's 40s shutdown context timeout; otherwise
# the kubelet SIGKILLs the pod before in-flight requests finish draining.
terminationGracePeriodSeconds: 60

Test


When there are no live connections/requests, or there are some but they finished early, the pod logs will look like the ones below after restarting the deployment. The application has been shut down nicely and all requests have been served.


$ kubectl logs -f pod/api-deployment-66cb684477-7tjrf
2021/02/14 20:33:59 app: started
2021/02/14 20:34:17 sleeping!
2021/02/14 20:34:34 app: received a shutdown signal: terminated
2021/02/14 20:34:34 http server: listen and serve: http: Server closed
2021/02/14 20:35:07 woke up!
2021/02/14 20:35:08 app: clean shutdown

When there is a long-running connection/request, the pod logs will look like the ones below after restarting the deployment. The application has been interrupted and the long-running request hasn't been served.


$ kubectl logs -f pod/api-deployment-6f6d994c98-vv8jt
2021/02/14 20:26:13 app: started
2021/02/14 20:28:01 sleeping!
2021/02/14 20:28:10 app: received a shutdown signal: terminated
2021/02/14 20:28:10 http server: listen and serve: http: Server closed
2021/02/14 20:28:50 app: dirty shutdown: context deadline exceeded

When there is a long-running connection/request and the terminationGracePeriodSeconds value is less than 40 seconds, which is the context timeout, the pod logs will look like the ones below after restarting the deployment. The pod has been killed, so there isn't even a "shutdown" log!


$ kubectl logs -f pod/api-deployment-b8d5dc94c-wqb6s
2021/02/14 20:14:55 app: started
2021/02/14 20:16:26 sleeping!
2021/02/14 20:16:32 app: received a shutdown signal: terminated
2021/02/14 20:16:32 http server: listen and serve: http: Server closed