#yyds干货盘点#--k8s controller: nginx hot reload

Of the myriad things in life, knowledge is the worthiest. This article walks through a small Kubernetes controller that hot-reloads nginx configuration: it watches a ConfigMap and, whenever the ConfigMap changes, renames the ConfigMap volume (and its mount) in the nginx Deployment's pod template. Changing the pod template triggers a rolling update, so the new pods come up with the updated nginx.conf.
controller.go

package main

import (
"context"
"fmt"
"strconv"
"time"

corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
)

// version is appended to the ConfigMap volume name on every sync, so the
// Deployment's pod template changes each time and a rolling update is triggered.
var version int = 1

const controllerAgentName = "nginx-reload-controller"
const (
    // SuccessSynced is used as part of the Event reason when a controller-cm is synced
    SuccessSynced = "Synced"
    // ErrResourceExists is used as part of the Event reason when a controller-cm fails
    // to sync due to a Deployment of the same name already existing.
    ErrResourceExists = "ErrResourceExists"

    // MessageResourceExists is the message used for Events when a resource
    // fails to sync due to a Deployment already existing
    MessageResourceExists = "Resource %q already exists and is not managed by controller-cm"
    // MessageResourceSynced is the message used for an Event fired when a controller-cm
    // is synced successfully
    MessageResourceSynced = "controller-cm synced successfully"
)

// Controller is the controller implementation for controller-cm resources
type Controller struct {
    // kubeclientset is a standard kubernetes clientset
    kubeclientset kubernetes.Interface

    deploymentsLister appslisters.DeploymentLister
    deploymentsSynced cache.InformerSynced
    configmapsLister  corelisters.ConfigMapLister
    configmapsSynced  cache.InformerSynced

    workqueue workqueue.RateLimitingInterface
    // recorder is an event recorder for recording Event resources to the
    // Kubernetes API.
    recorder record.EventRecorder
}

// NewController returns a new sample controller
func NewController(
    kubeclientset kubernetes.Interface,
    deploymentInformer appsinformers.DeploymentInformer,
    configmapInformer coreinformers.ConfigMapInformer) *Controller {

    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartStructuredLogging(0)
    eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
    recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})

    controller := &Controller{
        kubeclientset:     kubeclientset,
        deploymentsLister: deploymentInformer.Lister(),
        deploymentsSynced: deploymentInformer.Informer().HasSynced,
        configmapsLister:  configmapInformer.Lister(),
        configmapsSynced:  configmapInformer.Informer().HasSynced,
        workqueue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "controller-cm"),
        recorder:          recorder,
    }

    klog.Info("Setting up event handlers")

    // Watch ConfigMap updates; only real changes reach handleObject.
    configmapInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        //AddFunc: controller.handleObject,
        UpdateFunc: func(old, new interface{}) {
            newCm := new.(*corev1.ConfigMap)
            oldCm := old.(*corev1.ConfigMap)
            if newCm.ResourceVersion == oldCm.ResourceVersion {
                // Periodic resync will send update events for all known ConfigMaps.
                // Two different versions of the same ConfigMap will always have different RVs.
                return
            }
            controller.handleObject(new)
        },
    })

    return controller
}

// Run waits for the informer caches to sync, then launches worker goroutines
// and blocks until stopCh is closed.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
    defer utilruntime.HandleCrash()
    defer c.workqueue.ShutDown()

    klog.Info("Starting CM controller")

    // Wait for the caches to be synced before starting workers
    klog.Info("Waiting for informer caches to sync")
    if ok := cache.WaitForCacheSync(stopCh, c.configmapsSynced); !ok {
        return fmt.Errorf("failed to wait for caches to sync")
    }

    klog.Info("Starting workers")
    // Launch workers to process controller-cm resources
    for i := 0; i < threadiness; i++ {
        go wait.Until(c.runWorker, time.Second, stopCh)
    }

    klog.Info("Started workers")
    <-stopCh
    klog.Info("Shutting down workers")

    return nil
}

// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker() {
    for c.processNextWorkItem() {
    }
}

// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem() bool {
    obj, shutdown := c.workqueue.Get()

    if shutdown {
        return false
    }

    // We wrap this block in a func so we can defer c.workqueue.Done.
    err := func(obj interface{}) error {
        defer c.workqueue.Done(obj)
        var key string
        var ok bool

        if key, ok = obj.(string); !ok {
            // As the item in the workqueue is actually invalid, we call
            // Forget here else we'd go into a loop of attempting to
            // process a work item that is invalid.
            c.workqueue.Forget(obj)
            utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
            return nil
        }

        // Run the syncHandler, passing it the namespace/name string of the
        // ConfigMap to be synced.
        if err := c.syncHandler(key); err != nil {
            // Put the item back on the workqueue to handle any transient errors.
            c.workqueue.AddRateLimited(key)
            return fmt.Errorf("error syncing %s: %s, requeuing", key, err.Error())
        }

        c.workqueue.Forget(obj)
        klog.Infof("Successfully synced %s", key)
        return nil
    }(obj)

    if err != nil {
        utilruntime.HandleError(err)
        return true
    }

    return true
}

// syncHandler reacts to a changed ConfigMap: it renames the ConfigMap volume
// (and its mount) in the nginx Deployment, which changes the pod template and
// triggers a rolling update, so the new pods pick up the updated nginx.conf.
func (c *Controller) syncHandler(key string) error {
    // Convert the namespace/name string into a distinct namespace and name
    namespace, name, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
        return nil
    }

    configmap, err := c.configmapsLister.ConfigMaps(namespace).Get(name)
    if err != nil {
        return err
    }

    version++
    deployment, err := c.deploymentsLister.Deployments(namespace).Get("nginx-deployment-reload")
    if err != nil {
        return err
    }

    // Never mutate objects returned by the lister cache; work on a copy.
    deployment = deployment.DeepCopy()
    deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name = "configmapversion" + strconv.Itoa(version)
    deployment.Spec.Template.Spec.Volumes[0].Name = "configmapversion" + strconv.Itoa(version)
    deployment, err = c.kubeclientset.AppsV1().Deployments(namespace).Update(context.TODO(), deployment, metav1.UpdateOptions{})
    if err != nil {
        return err
    }

    klog.V(4).Info(deployment.Name)
    // Finally, record an event on the ConfigMap to reflect the current state of the world.
    c.recorder.Event(configmap, corev1.EventTypeNormal, SuccessSynced, MessageResourceSynced)
    return nil
}

// enqueueConfigMap converts a ConfigMap into a namespace/name key and puts it
// onto the work queue.
func (c *Controller) enqueueConfigMap(obj interface{}) {
    var key string
    var err error
    if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
        utilruntime.HandleError(err)
        return
    }
    c.workqueue.Add(key)
}

// handleObject inspects an object (handling tombstones for deletions) and, if
// it is owned by a "controller-cm" resource, enqueues the owning ConfigMap.
func (c *Controller) handleObject(obj interface{}) {
    var object metav1.Object
    var ok bool
    if object, ok = obj.(metav1.Object); !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
            return
        }
        object, ok = tombstone.Obj.(metav1.Object)
        if !ok {
            utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
            return
        }
        klog.V(4).Infof("Recovered deleted object %s from tombstone", object.GetName())
    }

    klog.V(4).Infof("Processing object: %s", object.GetName())
    if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
        // If this object is not owned by a controller-cm, we should not do anything more
        // with it.
        if ownerRef.Kind != "controller-cm" {
            return
        }
        configmap, err := c.configmapsLister.ConfigMaps(object.GetNamespace()).Get(ownerRef.Name)
        if err != nil {
            klog.V(4).Infof("ignoring orphaned object %s of nginx-cm %s", object.GetSelfLink(), ownerRef.Name)
            return
        }
        c.enqueueConfigMap(configmap)
        return
    }
}

main.go
package main

import (
"flag"
"time"

kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"k8s.io/sample-controller/pkg/signals"
)

var (
    masterURL  string
    kubeconfig string
)

func main() {
    klog.InitFlags(nil)
    flag.Parse()

    // set up signals so we handle the first shutdown signal gracefully
    stopCh := signals.SetupSignalHandler()

    cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
    if err != nil {
        klog.Fatalf("Error building kubeconfig: %s", err.Error())
    }

    kubeClient, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
    }

    kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)

    controller := NewController(kubeClient,
        kubeInformerFactory.Apps().V1().Deployments(),
        kubeInformerFactory.Core().V1().ConfigMaps())

    // notice that there is no need to run Start in a separate goroutine (i.e. go kubeInformerFactory.Start(stopCh)):
    // Start is non-blocking and runs each registered informer in its own goroutine.
    kubeInformerFactory.Start(stopCh)

    if err = controller.Run(2, stopCh); err != nil {
        klog.Fatalf("Error running controller: %s", err.Error())
    }
}

func init() {
    flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
    flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
}
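
For local testing the controller can also run outside the cluster against a kubeconfig; a sketch using the flags registered above (the kubeconfig path is only an example):

./nginx-controller --kubeconfig=$HOME/.kube/config -v=4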


Build the controller
go build -o nginx-controller .
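
The Dockerfile below copies this binary into a Linux image, so when building on a non-Linux workstation a static Linux build is the usual approach; a sketch with standard Go cross-compile settings, not from the original article:

CGO_ENABLED=0 GOOS=linux go build -o nginx-controller .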

dockerfile
FROM golang:latest

COPY nginx-controller nginx-controller
CMD ["./nginx-controller"]
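
The controller Deployment below references the image by the name nginx-reload-controller with imagePullPolicy: IfNotPresent, so a local build of that name is enough on a single-node test cluster; a sketch (how the image reaches your nodes depends on the cluster):

docker build -t nginx-reload-controller .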

Deploy the controller
# controller-deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-reload-controller
spec:
  replicas: 1
  selector:                      # label selector
    matchLabels:                 # must match the pod template labels
      app: nginx-reload-controller
  template:
    metadata:
      labels:
        app: nginx-reload-controller
    spec:
      containers:
      - name: nginx-controller
        image: nginx-reload-controller
        imagePullPolicy: IfNotPresent
      serviceAccount: nginx-reload-controller
      serviceAccountName: nginx-reload-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: controller-role
rules:
- apiGroups:
  - ""      # configmaps and events live in the core group
  - apps    # deployments live in the apps group
  resources:
  - configmaps
  - deployments
  - events
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: controller-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: controller-role
subjects:
- kind: ServiceAccount
  name: nginx-reload-controller
  namespace: default    # must match the ServiceAccount's namespace below
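
Once the ClusterRole and binding are applied, the granted permissions can be spot-checked with kubectl impersonation; a sketch, assuming the ServiceAccount below in the default namespace:

kubectl auth can-i update deployments --as=system:serviceaccount:default:nginx-reload-controller
kubectl auth can-i watch configmaps --as=system:serviceaccount:default:nginx-reload-controller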

SA
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-reload-controller
  namespace: default
secrets:
- name: nginx-reload-controller-token-kkrq8

configmap
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-cm
  namespace: default
  ownerReferences:
  - apiVersion: samplecontroller.k8s.io/v1alpha1
    blockOwnerDeletion: true
    controller: true
    kind: controller-cm
    name: nginx-cm
    uid: 834c0ab3-e4bf-4eaf-b928-a428ae39b617
  resourceVersion: "818333"
  uid: 9e6f1bed-30d9-41ba-bca7-a3225ea25dba
data:
  nginx.conf: |
    user nginx;
    worker_processes auto;
    error_log /etc/nginx/error.log;
    pid /run/nginx.pid;
    include /usr/share/nginx/modules/*.conf;

    events {
        worker_connections 1024;
    }

    http {
        log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                          '$status $body_bytes_sent "$http_referer" '
                          '"$http_user_agent" "$http_x_forwarded_for"';

        server_tokens       off;
        access_log          /usr/share/nginx/html/access.log  main;
        sendfile            on;
        tcp_nopush          on;
        tcp_nodelay         on;
        keepalive_timeout   65;
        types_hash_max_size 2048;

        include             /etc/nginx/mime.types;
        default_type        application/octet-stream;

        include /etc/nginx/conf.d/*.conf;

        server {
            listen       80 default_server;
            listen       [::]:80 default_server;
            server_name  _;
            root         /usr/share/nginx/html;

            include /etc/nginx/default.d/*.conf;

            location / {
            }

            error_page 404 /404.html;
            location = /40x.html {
            }

            error_page 500 502 503 504 /50x.html;
            location = /50x.html {
            }
        }
    }

deployment
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment-reload
  annotations:
    configmap: nginx-cm
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx-deployment-reload
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  template:
    metadata:
      labels:
        app: nginx-deployment-reload
    spec:
      containers:
      - name: nginx
        image: nginx:latest
        imagePullPolicy: IfNotPresent
        ports:
        - name: http
          containerPort: 80
        volumeMounts:
        - name: cmv1
          mountPath: /etc/nginx/nginx.conf
          subPath: nginx.conf
      volumes:
      - name: cmv1
        configMap:
          name: nginx-cm
          items:
          - key: nginx.conf
            path: nginx.conf
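
To see the hot reload end to end: apply the manifests, edit nginx-cm, and watch nginx-deployment-reload roll. A sketch; the file names are placeholders for the manifests above:

kubectl apply -f serviceaccount.yaml -f rbac.yaml -f controller-deployment.yaml
kubectl apply -f nginx-cm.yaml -f nginx-deployment.yaml
kubectl edit configmap nginx-cm                              # change nginx.conf
kubectl rollout status deployment/nginx-deployment-reload
kubectl get pods -l app=nginx-deployment-reload -w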


