32,k8s jenkins 之微服務自動化部署
阿新 • • 發佈:2020-08-14
1,先安裝好K8S叢集。 [root@centos7 ~]# [root@centos7 ~]# kubectl get pods -n kube-system NAME READY STATUS RESTARTS AGE coredns-575bd6d498-9pdkk 1/1 Running 0 53s kube-flannel-ds-amd64-444h9 1/1 Running 0 52s kube-flannel-ds-amd64-pn568 1/1 Running 0 52s kube-flannel-ds-amd64-x6hcv 1/1 Running 0 52s [root@centos7 ~]# [root@centos7 ~]# [root@centos7 ~]# kubectl get pods -n ingress-nginx NAME READY STATUS RESTARTS AGE nginx-ingress-controller-64d8d5c8d7-cmhdg 1/1 Running 0 72s [root@centos7 ~]# [root@centos7 ~]# [root@centos7 ~]# kubectl get node NAME STATUS ROLES AGE VERSION k8s-master1 Ready <none> 100s v1.16.0 k8s-node1 Ready <none> 101s v1.16.0 k8s-node2 Ready <none> 100s v1.16.0 [root@centos7 ~]# 2,先準備一臺NFS伺服器為K8S提供儲存支援。 yum install -y nfs-utils vi /etc/exports /ifs/kubernetes *(rw,no_root_squash) systemctl start nfs systemctl enable nfs 並且每個node上安裝nfs-utils包,用於mount掛載用。 3,由於K8S不支援NFS動態供給,還需要先安裝上圖中的nfs-client-provisioner外掛;(外掛作用:建立PV) [root@centos7 nfs-client]# mkdir /root/nfs-client -p [root@centos7 nfs-client]# [root@centos7 nfs-client]# cat class.yaml apiVersion: storage.k8s.io/v1 kind: StorageClass metadata: name: managed-nfs-storage provisioner: fuseim.pri/ifs # or choose another name, must match deployment's env PROVISIONER_NAME' parameters: archiveOnDelete: "true" [root@centos7 nfs-client]# [root@centos7 nfs-client]# cat deployment.yaml apiVersion: v1 kind: ServiceAccount metadata: name: nfs-client-provisioner --- kind: Deployment apiVersion: apps/v1 metadata: name: nfs-client-provisioner spec: replicas: 1 strategy: type: Recreate selector: matchLabels: app: nfs-client-provisioner template: metadata: labels: app: nfs-client-provisioner spec: serviceAccountName: nfs-client-provisioner containers: - name: nfs-client-provisioner image: lizhenliang/nfs-client-provisioner:latest volumeMounts: - name: nfs-client-root mountPath: /persistentvolumes env: - name: PROVISIONER_NAME value: fuseim.pri/ifs - name: NFS_SERVER value: 192.168.0.13 - name: NFS_PATH value: 
/ifs/kubernetes volumes: - name: nfs-client-root nfs: server: 192.168.0.13 path: /ifs/kubernetes [root@centos7 nfs-client]# [root@centos7 nfs-client]# [root@centos7 nfs-client]# cat rbac.yaml kind: ServiceAccount apiVersion: v1 metadata: name: nfs-client-provisioner --- kind: ClusterRole apiVersion: rbac.authorization.k8s.io/v1 metadata: name: nfs-client-provisioner-runner rules: - apiGroups: [""] resources: ["persistentvolumes"] verbs: ["get", "list", "watch", "create", "delete"] - apiGroups: [""] resources: ["persistentvolumeclaims"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["storage.k8s.io"] resources: ["storageclasses"] verbs: ["get", "list", "watch"] - apiGroups: [""] resources: ["events"] verbs: ["create", "update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: run-nfs-client-provisioner subjects: - kind: ServiceAccount name: nfs-client-provisioner namespace: default roleRef: kind: ClusterRole name: nfs-client-provisioner-runner apiGroup: rbac.authorization.k8s.io --- kind: Role apiVersion: rbac.authorization.k8s.io/v1 metadata: name: leader-locking-nfs-client-provisioner rules: - apiGroups: [""] resources: ["endpoints"] verbs: ["get", "list", "watch", "create", "update", "patch"] --- kind: RoleBinding apiVersion: rbac.authorization.k8s.io/v1 metadata: name: leader-locking-nfs-client-provisioner subjects: - kind: ServiceAccount name: nfs-client-provisioner # replace with namespace where provisioner is deployed namespace: default roleRef: kind: Role name: leader-locking-nfs-client-provisioner apiGroup: rbac.authorization.k8s.io [root@centos7 nfs-client]# [root@centos7 nfs-client]# kubectl apply -f . 
[root@centos7 nfs-client]# [root@centos7 nfs-client]# kubectl get pods NAME READY STATUS RESTARTS AGE nfs-client-provisioner-6dcbb9f588-ckffj 1/1 Running 0 48s [root@centos7 nfs-client]# 4,安裝Helm工具 [root@centos7 ~]# wget https://get.helm.sh/helm-v3.0.0-linux-amd64.tar.gz [root@centos7 ~]# tar xf helm-v3.0.0-linux-amd64.tar.gz [root@centos7 ~]# mv linux-amd64/helm /usr/bin/ 5,配置國內Chart倉庫 [root@centos7 ~]# helm repo add stable http://mirror.azure.cn/kubernetes/charts [root@centos7 ~]# helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts [root@centos7 ~]# helm repo list 6,安裝push外掛 [root@centos7 ~]# helm plugin install https://github.com/chartmuseum/helm-push 如果網路下載不了,也可以直接解壓課件裡包: # tar zxvf helm-push_0.7.1_linux_amd64.tar.gz # mkdir -p /root/.local/share/helm/plugins/helm-push # chmod +x bin/* # mv bin plugin.yaml /root/.local/share/helm/plugins/helm-push 7,新增repo [root@centos7 ~]# helm repo add --username admin --password Harbor12345 myrepo http://192.168.31.70/chartrepo/library 8,微服務資料庫 [root@centos7 ~]# yum install -y mariadb* [root@centos7 ~]# systemctl start mariadb [root@centos7 ~]# mysqladmin -uroot password '123456' 9,安裝程式碼版本倉庫gitlab docker run -d \ --name gitlab \ -p 8443:443 \ -p 9999:80 \ -p 9998:22 \ -v $PWD/config:/etc/gitlab \ -v $PWD/logs:/var/log/gitlab \ -v $PWD/data:/var/opt/gitlab \ -v /etc/localtime:/etc/localtime \ lizhenliang/gitlab-ce-zh:latest 訪問地址:http://IP:9999 初次先設定管理員密碼,然後登陸,預設管理員使用者名稱root。 10,安裝Harbor倉庫 harbor 啟動依賴 docker-compose 和 docker [root@centos7 ~]# tar xf harbor-offline-installer-v1.2.0.tgz [root@centos7 ~]# cd harbor [root@centos7 ~]# vim harbor.cfg hostname = 192.168.0.14 [root@centos7 ~]# ./prepare [root@centos7 ~]# ./install.sh --with-chartmuseum [root@centos7 ~]# docker-compose ps # --with-chartmuseum 表示啟用chart 儲存功能(注意:--with-chartmuseum 需要 Harbor v1.6.0 及以上版本的離線安裝包,請確認下載的安裝包版本)