SpringCloud 微服务 RuoYi-Cloud 部署文档(DevOps版)(2023-10-18)

argo-rollouts + istio(金丝雀发布)(渐进式交付)

基础集群组件

0、k8s集群(k8s-1.23.17)

1、helm、kubens、kubectl补全

2、ingress-nginx

3、istio

4、argocd

5、Argo Rollouts

6、nfs-subdir-external-provisioner

7、metrics-server

8、gitlab

9、harbor

10、jenkins

RuoYi-Cloud 业务组件

0、mysql-8.0.28

1、nacos-2.1.0

2、redis-7.2

argocd.huanghuanhui.cloud

gitlab.huanghuanhui.cloud

harbor.huanghuanhui.cloud

jenkins-prod.huanghuanhui.cloud

www.huanghuanhui.cloud/nacos

一、基础集群组件

0、k8s集群(k8s-1.23.17)

1、helm、kubens、kubectl补全

2、ingress-nginx

3、istio

4、argocd

5、Argo Rollouts

6、nfs-subdir-external-provisioner

7、metrics-server

8、gitlab

9、harbor

10、jenkins

0、k8s集群(k8s-1.23.17)

docker-24.0.2 + k8s-1.23.17(最新)(kubeadm方式)(docker版)

kubeadm 方式安装 “最新版” k8s-1.23.17(docker容器运行时)

这里的 “最新版” 是指1.24之前的版本 !!!

k8s-1.23.17

docker-24.0.2

0、环境准备(centos7 环境配置+调优)

# 颜色

echo "PS1='\[\033[35m\][\[\033[00m\]\[\033[31m\]\u\[\033[33m\]\[\033[33m\]@\[\033[03m\]\[\033[35m\]\h\[\033[00m\] \[\033[5;32m\]\w\[\033[00m\]\[\033[35m\]]\[\033[00m\]\[\033[5;31m\]\\$\[\033[00m\] '" >> ~/.bashrc && source ~/.bashrc

# 0、centos7 环境配置

# 安装 vim

yum -y install vim wget net-tools

# 行号

echo "set nu" >> /root/.vimrc

# 搜索关键字高亮

sed -i "8calias grep='grep --color'" /root/.bashrc

# 腾讯源

wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo

wget -O /etc/yum.repos.d/CentOS-Epel.repo http://mirrors.cloud.tencent.com/repo/epel-7.repo

yum clean all

yum makecache

# 1、设置主机名

hostnamectl set-hostname k8s-master && su -

hostnamectl set-hostname k8s-node1 && su -

hostnamectl set-hostname k8s-node2 && su -

# 2、添加hosts解析

cat >> /etc/hosts << EOF

192.168.1.201 k8s-master

192.168.1.202 k8s-node1

192.168.1.203 k8s-node2

EOF

# 3、同步时间

yum -y install ntp

systemctl enable ntpd --now

# 4、永久关闭seLinux(需重启系统生效)

setenforce 0

sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config

# 5、永久关闭swap(需重启系统生效)

swapoff -a # 临时关闭

sed -i 's/.*swap.*/#&/g' /etc/fstab # 永久关闭

# 6、升级内核为5.4版本(需重启系统生效)

# https://elrepo.org/tiki/kernel-lt

# https://elrepo.org/linux/kernel/el7/x86_64/RPMS/

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-6.el7.elrepo.noarch.rpm

yum --disablerepo="*" --enablerepo="elrepo-kernel" list available

yum --enablerepo=elrepo-kernel install -y kernel-lt

grub2-set-default 0

#这里先重启再继续

reboot

# 7、关闭防火墙、清空iptables规则

systemctl disable firewalld && systemctl stop firewalld

iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X && iptables -P FORWARD ACCEPT && service iptables save

# 8、关闭 NetworkManager

systemctl disable NetworkManager && systemctl stop NetworkManager

# 9、加载IPVS模块

yum -y install ipset ipvsadm

cat > /etc/sysconfig/modules/ipvs.modules << EOF
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

# 10、开启br_netfilter、ipv4 路由转发

cat << EOF | sudo tee /etc/modules-load.d/k8s.conf
overlay
br_netfilter
EOF

sudo modprobe overlay

sudo modprobe br_netfilter

# 设置所需的 sysctl 参数,参数在重新启动后保持不变

cat << EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables  = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward                 = 1
EOF

# 应用 sysctl 参数而不重新启动

sudo sysctl --system

# 查看是否生效

lsmod | grep br_netfilter

lsmod | grep overlay

sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward

# 11、内核调优

cat > /etc/sysctl.d/99-sysctl.conf << 'EOF'

# sysctl settings are defined through files in

# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.

#

# Vendors settings live in /usr/lib/sysctl.d/.

# To override a whole file, create a new file with the same name in

# /etc/sysctl.d/ and put new settings there. To override

# only specific settings, add a file with a lexically later

# name in /etc/sysctl.d/ and put new settings there.

#

# For more information, see sysctl.conf(5) and sysctl.d(5).

# Controls IP packet forwarding

# Controls source route verification

net.ipv4.conf.default.rp_filter = 1

# Do not accept source routing

net.ipv4.conf.default.accept_source_route = 0

# Controls the System Request debugging functionality of the kernel

# Controls whether core dumps will append the PID to the core filename.

# Useful for debugging multi-threaded applications.

kernel.core_uses_pid = 1

# Controls the use of TCP syncookies

net.ipv4.tcp_syncookies = 1

# Controls the maximum size of a message, in bytes

kernel.msgmnb = 65536

# Controls the default maximum size of a message queue

kernel.msgmax = 65536

net.ipv4.conf.all.promote_secondaries = 1

net.ipv4.conf.default.promote_secondaries = 1

net.ipv6.neigh.default.gc_thresh3 = 4096

kernel.sysrq = 1

net.ipv6.conf.all.disable_ipv6=0

net.ipv6.conf.default.disable_ipv6=0

net.ipv6.conf.lo.disable_ipv6=0

kernel.numa_balancing = 0

kernel.shmmax = 68719476736

kernel.printk = 5

net.core.rps_sock_flow_entries=8192

net.bridge.bridge-nf-call-ip6tables=1

net.ipv4.ip_local_reserved_ports=60001,60002

net.core.rmem_max=16777216

fs.inotify.max_user_watches=524288

kernel.core_pattern=core

net.core.dev_weight_tx_bias=1

net.ipv4.tcp_max_orphans=32768

kernel.pid_max=4194304

kernel.softlockup_panic=1

fs.file-max=3355443

net.core.bpf_jit_harden=1

net.ipv4.tcp_max_tw_buckets=32768

fs.inotify.max_user_instances=8192

net.core.bpf_jit_kallsyms=1

vm.max_map_count=262144

kernel.threads-max=262144

net.core.bpf_jit_enable=1

net.ipv4.tcp_keepalive_time=600

net.ipv4.tcp_wmem=4096 12582912 16777216

net.core.wmem_max=16777216

net.ipv4.neigh.default.gc_thresh1=2048

net.core.somaxconn=32768

net.ipv4.neigh.default.gc_thresh3=8192

net.ipv4.ip_forward=1

net.ipv4.neigh.default.gc_thresh2=4096

net.ipv4.tcp_max_syn_backlog=8096

net.bridge.bridge-nf-call-iptables=1

net.ipv4.tcp_rmem=4096 12582912 16777216

EOF

# 应用 sysctl 参数而不重新启动

sudo sysctl --system

# 12、设置资源配置文件

cat >> /etc/security/limits.conf << 'EOF'

* soft nofile 100001

* hard nofile 100002

root soft nofile 100001

root hard nofile 100002

* soft memlock unlimited

* hard memlock unlimited

* soft nproc 254554

* hard nproc 254554

* soft sigpending 254554

* hard sigpending 254554

EOF

grep -vE "^\s*#" /etc/security/limits.conf

ulimit -a

1、安装 docker-24.0.2(腾讯源)

wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

sed -i 's+download.docker.com+mirrors.cloud.tencent.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

yum makecache fast

yum list docker-ce --showduplicates | sort -r

yum list docker-ce-cli --showduplicates | sort -r

yum list containerd.io --showduplicates | sort -r

yum -y install docker-ce-24.0.2-1.el7 docker-ce-cli-24.0.2-1.el7 containerd.io-1.6.9-3.1.el7

systemctl enable docker --now

cat > /etc/docker/daemon.json << 'EOF'

{

"registry-mirrors": ["https://mirror.ccs.tencentyun.com"],

"insecure-registries": ["https://harbor.huanghuanhui.cloud"],

"exec-opts": ["native.cgroupdriver=systemd"],

"log-driver": "json-file",

"log-opts": {

"max-size": "100m"

},

"storage-driver": "overlay2",

"data-root": "/var/lib/docker"

}

EOF

systemctl daemon-reload && systemctl restart docker
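A quick sanity check that the daemon picked up the new settings (a sketch; the exact output formatting varies slightly between Docker versions):

docker info | grep -i 'cgroup driver'        # expect: Cgroup Driver: systemd
docker info | grep -iA1 'registry mirrors'   # expect https://mirror.ccs.tencentyun.com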

2、安装k8s(kubeadm-1.23.17、kubelet-1.23.17、kubectl-1.23.17)(清华源)

cat > /etc/yum.repos.d/kubernetes.repo << EOF

[kubernetes]

name=kubernetes

baseurl=https://mirrors.tuna.tsinghua.edu.cn/kubernetes/yum/repos/kubernetes-el7-x86_64/

enabled=1

gpgcheck=0

EOF

yum -y install kubeadm-1.23.17-0 kubelet-1.23.17-0 kubectl-1.23.17-0

systemctl enable --now kubelet

3、初始化 k8s-1.23.17 集群

mkdir ~/kubeadm_init && cd ~/kubeadm_init

kubeadm config print init-defaults > kubeadm-init.yaml

cat > ~/kubeadm_init/kubeadm-init.yaml << EOF

apiVersion: kubeadm.k8s.io/v1beta2

bootstrapTokens:

- groups:

- system:bootstrappers:kubeadm:default-node-token

token: abcdef.0123456789abcdef

ttl: 24h0m0s

usages:

- signing

- authentication

kind: InitConfiguration

localAPIEndpoint:

advertiseAddress: 192.168.1.201 # 修改自己的ip

bindPort: 6443

nodeRegistration:

criSocket: /var/run/dockershim.sock

name: k8s-master

taints:

- effect: "NoSchedule"

key: "node-role.kubernetes.io/k8s-master"

---

apiServer:

timeoutForControlPlane: 4m0s

apiVersion: kubeadm.k8s.io/v1beta2

certificatesDir: /etc/kubernetes/pki

clusterName: kubernetes

controllerManager: {}

dns:

type: CoreDNS

etcd:

local:

dataDir: /var/lib/etcd

imageRepository: registry.aliyuncs.com/google_containers

kind: ClusterConfiguration

kubernetesVersion: v1.23.17

networking:

dnsDomain: cluster.local

serviceSubnet: 10.96.0.0/12

podSubnet: 10.244.0.0/16

scheduler: {}

---

apiVersion: kubeproxy.config.k8s.io/v1alpha1

kind: KubeProxyConfiguration

mode: ipvs

---

apiVersion: kubelet.config.k8s.io/v1beta1

kind: KubeletConfiguration

cgroupDriver: systemd

EOF

# 预拉取镜像

kubeadm config images pull --config kubeadm-init.yaml

# 初始化

kubeadm init --config=kubeadm-init.yaml | tee kubeadm-init.log

mkdir -p $HOME/.kube

sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config

sudo chown $(id -u):$(id -g) $HOME/.kube/config
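The worker nodes still have to join the cluster at this point. The full join command (including the CA cert hash) is printed at the end of kubeadm-init.log; its shape is shown below, where the sha256 hash is a placeholder to be copied from that log (or regenerated on the master with `kubeadm token create --print-join-command`):

# run on k8s-node1 and k8s-node2; take the exact values from kubeadm-init.log
kubeadm join 192.168.1.201:6443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:<hash-from-kubeadm-init.log>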

4、安装 k8s 集群网络(calico)

查看calico与k8s的版本对应关系

https://docs.tigera.io/archive/v3.25/getting-started/kubernetes/requirements

这里k8s-1.23.17,所以使用calico-v3.25.1版本(版本对应很关键)

mkdir ~/calico-yml

cd ~/calico-yml && wget https://kgithub.com/projectcalico/calico/raw/v3.25.1/manifests/calico.yaml

1 修改CIDR

- name: CALICO_IPV4POOL_CIDR

value: "10.244.0.0/16"

2 指定网卡

# Cluster type to identify the deployment type

- name: CLUSTER_TYPE

value: "k8s,bgp"

# 下面添加

- name: IP_AUTODETECTION_METHOD

value: "interface=ens33"

# ens33为本地网卡名字(自己机器啥网卡就改啥)

kubectl apply -f ~/calico-yml/calico.yaml

5、coredns 解析测试是否正常

[root@k8s-master ~]# kubectl run -it --rm dns-test --image=busybox:1.28.4 sh

If you don't see a command prompt, try pressing enter.

/ # nslookup kubernetes

Server: 10.96.0.10

Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local # 看到这个说明dns解析正常

Name: kubernetes

Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

/ #

1、helm、kubens、kubectl补全

helm

cd && wget https://repo.huaweicloud.com/helm/v3.12.3/helm-v3.12.3-linux-amd64.tar.gz

tar xf ~/helm-v3.12.3-linux-amd64.tar.gz

cp ~/linux-amd64/helm /usr/local/sbin/helm

rm -rf ~/helm-v3.12.3-linux-amd64.tar.gz && rm -rf ~/linux-amd64

helm version

kubectx、kubens

wget -O /usr/local/sbin/kubens https://kgithub.com/ahmetb/kubectx/raw/v0.9.5/kubens

chmod +x /usr/local/sbin/kubens

wget -O /usr/local/sbin/kubectx https://kgithub.com/ahmetb/kubectx/raw/v0.9.5/kubectx

chmod +x /usr/local/sbin/kubectx

kubectl 补全

yum -y install bash-completion

source /etc/profile.d/bash_completion.sh

echo "source <(crictl completion bash)" >> ~/.bashrc

echo "source <(kubectl completion bash)" >> ~/.bashrc

echo "source <(helm completion bash)" >> ~/.bashrc

source ~/.bashrc && su -

别名

cat >> ~/.bashrc << 'EOF'

alias pod='kubectl get pod'

alias svc='kubectl get svc'

alias ns='kubectl get ns'

alias pvc='kubectl get pvc'

alias pv='kubectl get pv'

alias sc='kubectl get sc'

alias ingress='kubectl get ingress'

alias all='kubectl get all'

EOF

source ~/.bashrc

2、ingress-nginx

helm安装 ingress-nginx(k8s-master边缘节点)

master(ingress-nginx边缘节点)

chart version:4.5.2 (k8s:1.26, 1.25, 1.24, 1.23)

当前版本:k8s-v1.23.17

https://kgithub.com/kubernetes/ingress-nginx

helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx

helm repo update

helm search repo ingress-nginx/ingress-nginx

helm pull ingress-nginx/ingress-nginx --version 4.5.2 --untar

cat > ~/ingress-nginx/values-prod.yaml << EOF

controller:

name: controller

image:

registry: dyrnq

image: controller

tag: "v1.6.4"

digest:

pullPolicy: IfNotPresent

dnsPolicy: ClusterFirstWithHostNet

hostNetwork: true

publishService: # hostNetwork 模式下设置为false,通过节点IP地址上报ingress status数据

enabled: false

kind: DaemonSet

tolerations: # kubeadm 安装的集群默认情况下 k8s-master 是有污点,需要容忍这个污点才可以部署

- key: "node-role.kubernetes.io/k8s-master"

operator: "Equal"

effect: "NoSchedule"

nodeSelector: # 固定到k8s-master节点(自己master啥名字就写啥)

kubernetes.io/hostname: "k8s-master"

service: # HostNetwork 模式不需要创建service

enabled: false

admissionWebhooks: # 强烈建议开启 admission webhook

enabled: true

patch:

enabled: true

image:

registry: dyrnq

image: kube-webhook-certgen

tag: v20220916-gd32f8c343

digest:

pullPolicy: IfNotPresent

defaultBackend:

enabled: true

name: defaultbackend

image:

registry: dyrnq

image: defaultbackend-amd64

tag: "1.5"

digest:

pullPolicy: IfNotPresent

EOF

kubectl create ns ingress-nginx

cd ~/ingress-nginx && helm upgrade --install --namespace ingress-nginx ingress-nginx -f ./values-prod.yaml .

卸载

[root@k8s-master ~/ingress-nginx]# helm delete ingress-nginx -n ingress-nginx

[root@k8s-master ~/ingress-nginx]# helm ls -n ingress-nginx

[root@k8s-master ~/ingress-nginx]# kubens ingress-nginx

Context "kubernetes-admin@kubernetes" modified.

Active namespace is "ingress-nginx".

[root@k8s-master ~/ingress-nginx]# kubectl get po -owide

NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES

ingress-nginx-controller-j6dp4 1/1 Running 0 2m53s 192.168.1.201 k8s-master

ingress-nginx-defaultbackend-59d67dfdb9-z58j4 1/1 Running 0 3m 10.244.235.247 k8s-master

[root@k8s-master ~/ingress-nginx]# kubectl get svc

NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE

ingress-nginx-controller-admission ClusterIP 10.100.59.127 443/TCP 23m

ingress-nginx-defaultbackend ClusterIP 10.107.38.196 80/TCP 23m

[root@k8s-master ~/ingress-nginx]# netstat -tnlp |grep 443

tcp 0 0 0.0.0.0:443 0.0.0.0:* LISTEN 2306361/nginx: mast

tcp6 0 0 :::6443 :::* LISTEN 2360/kube-apiserver

tcp6 0 0 :::8443 :::* LISTEN 2306341/nginx-ingre

tcp6 0 0 :::443 :::* LISTEN 2306361/nginx: mast

[root@k8s-master ~/ingress-nginx]# netstat -tnlp |grep 80

tcp 0 0 192.168.1.201:2380 0.0.0.0:* LISTEN 2331/etcd

tcp 0 0 0.0.0.0:80 0.0.0.0:* LISTEN 2306361/nginx: mast

tcp6 0 0 :::80 :::* LISTEN 2306361/nginx: mast

[root@k8s-master ~/ingress-nginx]#

3、istio

cd && wget https://kgithub.com/istio/istio/releases/download/1.18.2/istio-1.18.2-linux-amd64.tar.gz

tar xf istio-1.18.2-linux-amd64.tar.gz

cp ~/istio-1.18.2/bin/istioctl /usr/bin/istioctl

# istioctl version

no ready Istio pods in "istio-system"

1.18.2

istioctl install --set profile=demo -y

# istioctl version

client version: 1.18.2

control plane version: 1.18.2

data plane version: 1.18.2 (2 proxies)

istioctl 命令补全

yum -y install bash-completion

source /etc/profile.d/bash_completion.sh

cp ~/istio-1.18.2/tools/istioctl.bash ~/.istioctl.bash

source ~/.istioctl.bash
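Istio is only installed here; sidecar injection is opt-in per namespace. Since the Jenkins pipeline at the end of this document deploys the RuoYi services into a `ruoyi` namespace, a minimal sketch of enabling automatic injection for it (the namespace name is an assumption taken from the pipeline's `-n ruoyi` flag):

kubectl create ns ruoyi
kubectl label namespace ruoyi istio-injection=enabled --overwrite
kubectl get ns ruoyi --show-labels   # expect istio-injection=enabled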

4、argocd

https://github.com/argoproj/argo-cd/releases

mkdir -p ~/argocd-yml

kubectl create ns argocd

cd ~/argocd-yml && wget https://kgithub.com/argoproj/argo-cd/raw/v2.8.0/manifests/install.yaml

kubectl apply -f ~/argocd-yml/install.yaml -n argocd

cat > ~/argocd-yml/argocd-Ingress.yml << EOF

apiVersion: networking.k8s.io/v1

kind: Ingress

metadata:

name: argocd-ingress

namespace: argocd

annotations:

kubernetes.io/tls-acme: "true"

nginx.ingress.kubernetes.io/ssl-redirect: "true"

nginx.ingress.kubernetes.io/ssl-passthrough: "true"

nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"

nginx.ingress.kubernetes.io/proxy-body-size: '4G'

spec:

ingressClassName: nginx

rules:

- host: argocd.huanghuanhui.cloud

http:

paths:

- path: /

pathType: Prefix

backend:

service:

name: argocd-server # 将所有请求发送到 argocd 服务的 80 端口

port:

number: 80

tls:

- hosts:

- argocd.huanghuanhui.cloud

secretName: argocd-ingress-tls

EOF

kubectl create secret -n argocd \

tls argocd-ingress-tls \

--key=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud.key \

--cert=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud_bundle.crt

kubectl apply -f ~/argocd-yml/argocd-Ingress.yml

# kubectl edit deploy argocd-server -n argocd

spec:

template:

spec:

containers:

- name: argocd-server

command:

- argocd-server

- --staticassets

- /shared/app

- --repo-server

- argocd-repo-server:8081

- --insecure # 禁用 TLS

访问:argocd.huanghuanhui.cloud

账号:admin

# 获取密码方式如下

1、

# echo $(kubectl get secret -n argocd argocd-initial-admin-secret -o yaml | grep password | awk -F: '{print $2}') | base64 -d

2、

# kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d && echo

用户:admin

密码:xzeXgS0aSIcIq-x5
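The generated password can be rotated after the first login. A sketch using the argocd CLI (assumes the CLI binary has been installed separately from the same releases page; --grpc-web is needed because the API is exposed through the nginx ingress, and the new password below is only an example):

argocd login argocd.huanghuanhui.cloud --username admin --password 'xzeXgS0aSIcIq-x5' --grpc-web
argocd account update-password --current-password 'xzeXgS0aSIcIq-x5' --new-password 'Admin@2023'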

5、Argo Rollouts

mkdir -p ~/argo-rollouts-yml

kubectl create ns argo-rollouts

cd ~/argo-rollouts-yml && wget https://github.com/argoproj/argo-rollouts/releases/latest/download/install.yaml

cd ~/argo-rollouts-yml && wget https://github.com/argoproj/argo-rollouts/releases/latest/download/dashboard-install.yaml

kubectl apply -n argo-rollouts -f ~/argo-rollouts-yml/install.yaml

kubectl apply -n argo-rollouts -f ~/argo-rollouts-yml/dashboard-install.yaml

curl -LO https://github.com/argoproj/argo-rollouts/releases/latest/download/kubectl-argo-rollouts-linux-amd64

chmod +x ./kubectl-argo-rollouts-linux-amd64

mv ./kubectl-argo-rollouts-linux-amd64 /usr/local/bin/kubectl-argo-rollouts

kubectl argo rollouts version
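The Jenkins pipeline later promotes images with `kubectl argo rollouts set image <AppName> ... -n ruoyi`, which assumes a Rollout object of the same name already exists in the `ruoyi` namespace. A minimal sketch of such a Rollout using the istio canary strategy is below; the stable/canary Service names, the VirtualService name, the route name, the weights and the placeholder image tag are illustrative assumptions, not values taken from the RuoYi repo:

apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-gateway
  namespace: ruoyi
spec:
  replicas: 3
  selector:
    matchLabels:
      app: ruoyi-gateway
  template:
    metadata:
      labels:
        app: ruoyi-gateway
    spec:
      containers:
      - name: ruoyi-gateway
        image: harbor.huanghuanhui.cloud/ruoyi-gateway/ruoyi-gateway:init   # placeholder tag, replaced by the pipeline
        ports:
        - containerPort: 8080
  strategy:
    canary:
      stableService: ruoyi-gateway-stable      # plain Service selecting app=ruoyi-gateway
      canaryService: ruoyi-gateway-canary      # second Service with the same selector
      trafficRouting:
        istio:
          virtualService:
            name: ruoyi-gateway-vsvc           # Istio VirtualService holding two weighted routes
            routes:
            - primary
      steps:
      - setWeight: 20
      - pause: {}                              # resume manually: kubectl argo rollouts promote ruoyi-gateway -n ruoyi
      - setWeight: 50
      - pause: {duration: 10m}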

6、nfs-subdir-external-provisioner

k8s(pv 与 pvc)动态存储 StorageClass

k8s-1.23.17 持久化存储(nfs动态存储)

1、部署nfs

nfs 服务端(k8s-master)

# 所有服务端节点安装nfs

yum -y install nfs-utils

systemctl enable nfs-server rpcbind --now

# 创建nfs共享目录、授权

mkdir -p /data/k8s && chmod -R 777 /data/k8s

# 写入exports

cat > /etc/exports << EOF

/data/k8s 192.168.1.0/24(rw,sync,no_root_squash)

EOF

systemctl reload nfs-server

使用如下命令进行验证

# showmount -e 192.168.1.201

Export list for 192.168.1.201:

/data/k8s 192.168.1.0/24

nfs 客户端(k8s-node)

yum -y install nfs-utils

systemctl enable rpcbind --now

使用如下命令进行验证

# showmount -e 192.168.1.201

Export list for 192.168.1.201:

/data/k8s 192.168.1.0/24

备份

mkdir -p /data/k8s && chmod -R 777 /data/k8s

rsync -avzP /data/k8s root@192.168.1.203:/data

00 2 * * * rsync -avz /data/k8s root@192.168.1.203:/data &>/dev/null
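One way to install that entry into root's crontab non-interactively (a sketch; editing with `crontab -e` works just as well):

(crontab -l 2>/dev/null; echo '00 2 * * * rsync -avz /data/k8s root@192.168.1.203:/data &>/dev/null') | crontab -
crontab -l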

2、动态创建 NFS存储(动态存储)

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner

mkdir ~/nfs-subdir-external-provisioner-4.0.18 && cd ~/nfs-subdir-external-provisioner-4.0.18

版本:nfs-subdir-external-provisioner-4.0.18

https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner/tree/nfs-subdir-external-provisioner-4.0.18/deploy

wget https://kgithub.com/kubernetes-sigs/nfs-subdir-external-provisioner/raw/nfs-subdir-external-provisioner-4.0.18/deploy/deployment.yaml

wget https://kgithub.com/kubernetes-sigs/nfs-subdir-external-provisioner/raw/nfs-subdir-external-provisioner-4.0.18/deploy/rbac.yaml

wget https://kgithub.com/kubernetes-sigs/nfs-subdir-external-provisioner/raw/nfs-subdir-external-provisioner-4.0.18/deploy/class.yaml

# 1、修改镜像(默认谷歌k8s.gcr.io)

sed -i 's/registry.k8s.io\/sig-storage/dyrnq/g' deployment.yaml

# 2、修改nfs服务端地址

sed -i 's/10.3.243.101/192.168.1.201/g' deployment.yaml

# 3、修改存储地址(/data/k8s)

sed -i 's#\/ifs\/kubernetes#\/data\/k8s#g' deployment.yaml

sed -i 's#nfs-client#nfs-storage#g' class.yaml

使用这个镜像:dyrnq/nfs-subdir-external-provisioner:v4.0.2

dockerhub 地址:https://hub.docker.com/r/dyrnq/nfs-subdir-external-provisioner/tags

kubectl apply -f .

kubectl get pods -n default -l app=nfs-client-provisioner

kubectl get storageclass
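A throwaway PVC is an easy way to confirm dynamic provisioning actually works; it should reach Bound within a few seconds (the PVC name below is only for this test):

cat > ~/nfs-subdir-external-provisioner-4.0.18/test-pvc.yml << 'EOF'
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: test-nfs-pvc
spec:
  storageClassName: nfs-storage
  accessModes: ["ReadWriteOnce"]
  resources:
    requests:
      storage: 1Gi
EOF
kubectl apply -f ~/nfs-subdir-external-provisioner-4.0.18/test-pvc.yml
kubectl get pvc test-nfs-pvc      # expect STATUS: Bound
kubectl delete -f ~/nfs-subdir-external-provisioner-4.0.18/test-pvc.yml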

7、metrics-server

https://github.com/kubernetes-sigs/metrics-server

版本:v0.6.4

k8s-v1.23.17

| Metrics Server | Metrics API group/version | Supported Kubernetes version |
| -------------- | ------------------------- | ---------------------------- |
| 0.6.x          | metrics.k8s.io/v1beta1    | 1.19+                        |
| 0.5.x          | metrics.k8s.io/v1beta1    | *1.8+                        |
| 0.4.x          | metrics.k8s.io/v1beta1    | *1.8+                        |
| 0.3.x          | metrics.k8s.io/v1beta1    | 1.8-1.21                     |

mkdir -p ~/metrics-server

wget -O ~/metrics-server/components.yaml https://kgithub.com/kubernetes-sigs/metrics-server/releases/download/v0.6.4/components.yaml

#1、添加"- --kubelet-insecure-tls"参数(匹配行后)

sed -i '/15s/a\        - --kubelet-insecure-tls' ~/metrics-server/components.yaml

#2、 修改镜像(默认谷歌k8s.gcr.io)

sed -i 's/registry.k8s.io\/metrics-server/dyrnq/g' ~/metrics-server/components.yaml

kubectl apply -f ~/metrics-server/components.yaml

kubectl get pods -n kube-system -l k8s-app=metrics-server

[root@k8s-master ~/metrics-server]# kubectl top node

NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%

k8s-master 211m 5% 1882Mi 24%

k8s-node1 155m 3% 985Mi 12%

k8s-node2 164m 4% 1249Mi 15%

[root@k8s-master ~/metrics-server]# kubectl top pod

NAME CPU(cores) MEMORY(bytes)

calico-kube-controllers-646b6595d5-5fgj9 2m 28Mi

calico-node-c8pfd 33m 137Mi

calico-node-ck4kt 36m 137Mi

calico-node-gw7xs 37m 138Mi

coredns-6d8c4cb4d-mk5f2 4m 22Mi

coredns-6d8c4cb4d-r7xfv 4m 22Mi

etcd-k8s-master 17m 86Mi

kube-apiserver-k8s-master 52m 422Mi

kube-controller-manager-k8s-master 20m 73Mi

kube-proxy-fzpcp 8m 30Mi

kube-proxy-l6jhz 4m 32Mi

kube-proxy-m6s7s 10m 30Mi

kube-scheduler-k8s-master 3m 25Mi

metrics-server-848b755f94-jv6mq 4m 21Mi

[root@k8s-master ~/metrics-server]# kubectl get node

NAME STATUS ROLES AGE VERSION

k8s-master Ready control-plane,master 43d v1.23.17

k8s-node1 Ready 43d v1.23.17

k8s-node2 Ready 43d v1.23.17

[root@k8s-master ~/metrics-server]#

8、gitlab

4c8g、100g

docker安装gitlab(使用k8s的ingress暴露)

版本:https://gitlab.com/gitlab-org/gitlab-foss/-/tags

官方docker仓库:https://hub.docker.com/r/gitlab/gitlab-ce/tags

docker pull gitlab/gitlab-ce:16.2.4-ce.0

cd && mkdir gitlab && cd gitlab && export GITLAB_HOME=/root/gitlab

docker run -d \

--name gitlab \

--hostname 'gitlab.huanghuanhui.cloud' \

--restart always \

--privileged=true \

-p 9797:80 \

-v $GITLAB_HOME/config:/etc/gitlab \

-v $GITLAB_HOME/logs:/var/log/gitlab \

-v $GITLAB_HOME/data:/var/opt/gitlab \

-e TIME_ZONE='Asia/Shanghai' \

gitlab/gitlab-ce:16.2.4-ce.0

初始化默认密码:

docker exec -it gitlab grep 'Password:' /etc/gitlab/initial_root_password

使用k8s的ingress暴露

mkdir -p ~/gitlab-yml

kubectl create ns gitlab

cat > ~/gitlab-yml/gitlab-endpoints.yml << 'EOF'

apiVersion: v1

kind: Endpoints

metadata:

name: gitlab-service

namespace: gitlab

subsets:

- addresses:

- ip: 192.168.1.201

ports:

- port: 9797

EOF

kubectl apply -f ~/gitlab-yml/gitlab-endpoints.yml

cat > ~/gitlab-yml/gitlab-Service.yml << 'EOF'

apiVersion: v1

kind: Service

metadata:

name: gitlab-service

namespace: gitlab

spec:

ports:

- protocol: TCP

port: 80

targetPort: 9797

EOF

kubectl apply -f ~/gitlab-yml/gitlab-Service.yml

cat > ~/gitlab-yml/gitlab-Ingress.yml << 'EOF'

apiVersion: networking.k8s.io/v1

kind: Ingress

metadata:

name: gitlab-ingress

namespace: gitlab

annotations:

nginx.ingress.kubernetes.io/ssl-redirect: 'true'

nginx.ingress.kubernetes.io/proxy-body-size: '4G'

spec:

ingressClassName: nginx

rules:

- host: gitlab.huanghuanhui.cloud

http:

paths:

- path: /

pathType: Prefix

backend:

service:

name: gitlab-service

port:

number: 80

tls:

- hosts:

- gitlab.huanghuanhui.cloud

secretName: gitlab-ingress-tls

EOF

kubectl create secret -n gitlab \

tls gitlab-ingress-tls \

--key=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud.key \

--cert=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud_bundle.crt

kubectl apply -f ~/gitlab-yml/gitlab-Ingress.yml

访问地址:gitlab.huanghuanhui.cloud

管理员账号:root(初始密码用上面的 initial_root_password 命令查看,登录后可改为 Admin@2023)

9、harbor

2c4g、400g

docker-compose安装harbor-v2.8.4

1、安装 docker

腾讯源

wget -O /etc/yum.repos.d/docker-ce.repo https://download.docker.com/linux/centos/docker-ce.repo

sudo sed -i 's+download.docker.com+mirrors.cloud.tencent.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

yum -y install docker-ce

2、安装 docker-compose

官方文档:https://docs.docker.com/compose/install/

github:https://github.com/docker/compose/releases/

wget -O /usr/local/sbin/docker-compose https://kgithub.com/docker/compose/releases/download/v2.20.3/docker-compose-linux-x86_64

chmod +x /usr/local/sbin/docker-compose

3、安装 harbor

https://github.com/goharbor/harbor/releases (离线下载上传)

wget https://github.com/goharbor/harbor/releases/download/v2.8.4/harbor-offline-installer-v2.8.4.tgz

tar xf harbor-offline-installer-v2.8.4.tgz -C /usr/local/

# 1、改成本机ip

sed -i.bak 's/reg\.mydomain\.com/harbor.huanghuanhui.cloud/g' /usr/local/harbor/harbor.yml

# 2、修改https协议证书位置

sed -i 's#certificate: .*#certificate: /root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud_bundle.crt#g' /usr/local/harbor/harbor.yml

sed -i 's#private_key: .*#private_key: /root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud.key#g' /usr/local/harbor/harbor.yml

# 3、修改登录密码(生产环境一定要修改)

sed -i 's/Harbor12345/Admin@2023/g' /usr/local/harbor/harbor.yml

# ./install.sh(执行安装脚本)

/usr/local/harbor/install.sh
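When the installer finishes, all Harbor containers should report a healthy state (a quick check, run from the install directory):

cd /usr/local/harbor && docker-compose ps    # every component should be Up (healthy)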

# 如果harbor未配置https,还需要在docker配置可信任(这里有https)

cat > /etc/docker/daemon.json << 'EOF'

{

"registry-mirrors": [

"https://mirror.ccs.tencentyun.com"

],

"insecure-registries": [

"https://harbor.huanghuanhui.cloud"

],

"exec-opts": [

"native.cgroupdriver=systemd"

],

"log-driver": "json-file",

"log-opts": {

"max-size": "100m"

},

"storage-driver": "overlay2",

"data-root": "/var/lib/docker"

}

EOF

systemctl daemon-reload && systemctl restart docker

访问地址:harbor.huanghuanhui.cloud

账号密码:admin、Admin@2023

10、jenkins

k8s手撕yml方式部署最新版 Jenkins 2.427(jdk-21 版)(jenkins-prod)

mkdir -p ~/jenkins-prod-yml

kubectl create ns jenkins-prod

kubectl label node k8s-node1 jenkins=jenkins

cat > ~/jenkins-prod-yml/Jenkins-prod-rbac.yml << 'EOF'

apiVersion: v1

kind: Namespace

metadata:

name: jenkins-prod

---

apiVersion: v1

kind: ServiceAccount

metadata:

name: jenkins-prod

namespace: jenkins-prod

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRole

metadata:

annotations:

rbac.authorization.kubernetes.io/autoupdate: "true"

labels:

kubernetes.io/bootstrapping: rbac-defaults

name: jenkins-prod

rules:

- apiGroups:

- '*'

resources:

- statefulsets

- services

- replicationcontrollers

- replicasets

- podtemplates

- podsecuritypolicies

- pods

- pods/log

- pods/exec

- podpreset

- poddisruptionbudget

- persistentvolumes

- persistentvolumeclaims

- jobs

- endpoints

- deployments

- deployments/scale

- daemonsets

- cronjobs

- configmaps

- namespaces

- events

- secrets

verbs:

- create

- get

- watch

- delete

- list

- patch

- update

- apiGroups:

- ""

resources:

- nodes

verbs:

- get

- list

- watch

- update

---

apiVersion: rbac.authorization.k8s.io/v1

kind: ClusterRoleBinding

metadata:

annotations:

rbac.authorization.kubernetes.io/autoupdate: "true"

labels:

kubernetes.io/bootstrapping: rbac-defaults

name: jenkins-prod

roleRef:

apiGroup: rbac.authorization.k8s.io

kind: ClusterRole

name: jenkins-prod

subjects:

- apiGroup: rbac.authorization.k8s.io

kind: Group

name: system:serviceaccounts:jenkins-prod

EOF

kubectl apply -f ~/jenkins-prod-yml/Jenkins-prod-rbac.yml

cat > ~/jenkins-prod-yml/Jenkins-prod-Service.yml << 'EOF'

apiVersion: v1

kind: Service

metadata:

name: jenkins-prod

namespace: jenkins-prod

labels:

app: jenkins-prod

spec:

selector:

app: jenkins-prod

type: NodePort

ports:

- name: web

nodePort: 30456

port: 8080

targetPort: web

- name: agent

nodePort: 30789

port: 50000

targetPort: agent

EOF

kubectl apply -f ~/jenkins-prod-yml/Jenkins-prod-Service.yml

cat > ~/jenkins-prod-yml/Jenkins-prod-Deployment.yml << 'EOF'

apiVersion: apps/v1

kind: Deployment

metadata:

name: jenkins-prod

namespace: jenkins-prod

labels:

app: jenkins-prod

spec:

replicas: 1

selector:

matchLabels:

app: jenkins-prod

template:

metadata:

labels:

app: jenkins-prod

spec:

tolerations:

- effect: NoSchedule

key: no-pod

operator: Exists

nodeSelector:

jenkins: jenkins

containers:

- name: jenkins-prod

image: jenkins/jenkins:2.427-jdk21

securityContext:

runAsUser: 0

ports:

- containerPort: 8080

name: web

protocol: TCP

- containerPort: 50000

name: agent

protocol: TCP

env:

- name: LIMITS_MEMORY

valueFrom:

resourceFieldRef:

resource: limits.memory

divisor: 1Mi

- name: JAVA_OPTS

value: -Dhudson.security.csrf.GlobalCrumbIssuerConfiguration.DISABLE_CSRF_PROTECTION=true

volumeMounts:

- name: jenkins-home-prod

mountPath: /var/jenkins_home

- mountPath: /etc/localtime

name: localtime

volumes:

- name: jenkins-home-prod

persistentVolumeClaim:

claimName: jenkins-home-prod

- name: localtime

hostPath:

path: /etc/localtime

---

apiVersion: v1

kind: PersistentVolumeClaim

metadata:

name: jenkins-home-prod

namespace: jenkins-prod

spec:

storageClassName: "nfs-storage"

accessModes: [ReadWriteOnce]

resources:

requests:

storage: 2Ti

EOF

kubectl apply -f ~/jenkins-prod-yml/Jenkins-prod-Deployment.yml

cat > ~/jenkins-prod-yml/Jenkins-prod-Ingress.yml << 'EOF'

apiVersion: networking.k8s.io/v1

kind: Ingress

metadata:

name: jenkins-prod-ingress

namespace: jenkins-prod

annotations:

nginx.ingress.kubernetes.io/ssl-redirect: 'true'

nginx.ingress.kubernetes.io/proxy-body-size: '4G'

spec:

ingressClassName: nginx

rules:

- host: jenkins-prod.huanghuanhui.cloud

http:

paths:

- path: /

pathType: Prefix

backend:

service:

name: jenkins-prod # 将所有请求发送到 jenkins-prod 服务的 8080 端口

port:

number: 8080

tls:

- hosts:

- jenkins-prod.huanghuanhui.cloud

secretName: jenkins-prod-ingress-tls

EOF

kubectl create secret -n jenkins-prod \

tls jenkins-prod-ingress-tls \

--key=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud.key \

--cert=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud_bundle.crt

kubectl apply -f ~/jenkins-prod-yml/Jenkins-prod-Ingress.yml

访问地址:jenkins-prod.huanghuanhui.cloud

设置账号密码为:admin、Admin@2023
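Before that account can be created, Jenkins' first-run unlock screen asks for the initial admin password; it can be read straight from the pod (a sketch, assuming the default /var/jenkins_home location):

kubectl exec -n jenkins-prod deploy/jenkins-prod -- cat /var/jenkins_home/secrets/initialAdminPassword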

# 插件

1、Localization: Chinese (Simplified)

2、Pipeline

3、Kubernetes

4、Git

5、Git Parameter

6、GitLab # webhook钩子(触发构建)

7、Config File Provider # 连接远程k8s集群

8、Extended Choice Parameter

9、SSH Pipeline Steps # Pipeline通过ssh远程执行命令

10、Pipeline: Stage View

11、Role-based Authorization Strategy

http://jenkins-prod.jenkins-prod:8080

cat > ~/jenkins-prod-yml/Jenkins-prod-slave-maven-cache.yml << 'EOF'

apiVersion: v1

kind: PersistentVolumeClaim

metadata:

name: jenkins-prod-slave-maven-cache

namespace: jenkins-prod

spec:

storageClassName: "nfs-storage"

accessModes: [ReadWriteOnce]

resources:

requests:

storage: 2Ti

EOF

kubectl apply -f ~/jenkins-prod-yml/Jenkins-prod-slave-maven-cache.yml

二、RuoYi-Cloud 业务组件

0、mysql-8.0.28

1、nacos-2.1.0

2、redis-7.2

0、mysql-8.0.28

(阿里模板)

wget https://cdn.mysql.com/archives/mysql-8.0/mysql-8.0.28-linux-glibc2.12-x86_64.tar.xz

yum -y install expect

cat > ~/install-mysql-8.0.28.sh << 'eof'

useradd mysql -r -s /sbin/nologin

tar xf ~/mysql-8.0.28-linux-glibc2.12-x86_64.tar.xz

mv ~/mysql-8.0.28-linux-glibc2.12-x86_64 /usr/local/mysql

cd /usr/local/mysql

# 阿里云模板(8.0.28)

cat > my.cnf << 'EOF'

[client]

port=3306

socket=/usr/local/mysql/mysql.sock

[mysql]

socket=/usr/local/mysql/mysql.sock

[mysqld]

user=mysql

port=3306

basedir=/usr/local/mysql

datadir=/usr/local/mysql/data

socket=/usr/local/mysql/mysql.sock

pid-file=/usr/local/mysql/mysqld.pid

admin_address='127.0.0.1'

admin_port=33062

innodb_flush_log_at_trx_commit=2

loose_recycle_scheduler=OFF

innodb_buffer_pool_load_at_startup=ON

loose_performance_schema_max_index_stat=0

bulk_insert_buffer_size=4194304

show_old_temporals=OFF

ft_query_expansion_limit=20

innodb_old_blocks_time=1000

loose_ccl_queue_hot_delete=OFF

loose_rds_audit_log_event_buffer_size=8192

thread_stack=1048576

loose_performance_schema_max_digest_sample_age=0

innodb_thread_concurrency=0

loose_innodb_rds_flashback_task_enabled=OFF

default_time_zone=+8:00

loose_performance_schema_max_digest_length=0

loose_recycle_bin=OFF

optimizer_search_depth=62

max_sort_length=1024

max_binlog_cache_size=18446744073709547520

init_connect=''

innodb_adaptive_max_sleep_delay=150000

innodb_purge_rseg_truncate_frequency=128

innodb_lock_wait_timeout=50

loose_json_document_max_depth=100

innodb_compression_pad_pct_max=50

max_connections=2520

loose_binlog_parallel_flush=OFF

#opt_tablestat=OFF

max_execution_time=0

event_scheduler=ON

innodb_flush_method=O_DIRECT

loose_performance_schema_accounts_size=0

loose_optimizer_trace_features=greedy_search=on,range_optimizer=on,dynamic_range=on,repeated_subselect=on

innodb_purge_batch_size=300

loose_performance_schema_events_statements_history_size=0

avoid_temporal_upgrade=OFF

loose_group_replication_flow_control_member_quota_percent=0

innodb_sync_array_size=1

binlog_transaction_dependency_history_size=500000

net_read_timeout=30

end_markers_in_json=OFF

loose_performance_schema_hosts_size=0

loose_innodb_numa_interleave=ON

loose_performance_schema_max_cond_instances=0

max_binlog_stmt_cache_size=18446744073709547520

innodb_checksum_algorithm=crc32

loose_performance_schema_events_waits_history_long_size=0

innodb_ft_enable_stopword=ON

loose_innodb_undo_retention=0

#opt_indexstat=OFF

disconnect_on_expired_password=ON

default_storage_engine=InnoDB

loose_group_replication_flow_control_min_quota=0

loose_performance_schema_session_connect_attrs_size=0

#innodb_data_file_purge_max_size=128

innodb_ft_result_cache_limit=2000000000

explicit_defaults_for_timestamp=OFF

ft_max_word_len=84

innodb_autoextend_increment=64

sql_mode=ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION

innodb_stats_transient_sample_pages=8

# table_open_cache={LEAST(DBInstanceClassMemory/1073741824*512, 8192)}

loose_performance_schema_max_rwlock_classes=0

range_optimizer_max_mem_size=8388608

loose_innodb_rds_faster_ddl=ON

innodb_status_output=OFF

innodb_log_compressed_pages=OFF

slave_net_timeout=60

max_points_in_geometry=65536

max_prepared_stmt_count=16382

wait_timeout=86400

loose_group_replication_flow_control_mode=DISABLED

innodb_print_all_deadlocks=OFF

loose_thread_pool_size=1

binlog_stmt_cache_size=32768

transaction_isolation=READ-COMMITTED

optimizer_trace_limit=1

innodb_max_purge_lag=0

innodb_buffer_pool_dump_pct=25

max_sp_recursion_depth=0

updatable_views_with_limit=YES

local_infile=ON

loose_opt_rds_last_error_gtid=ON

innodb_ft_max_token_size=84

loose_thread_pool_enabled=ON

innodb_adaptive_hash_index=OFF

net_write_timeout=60

flush_time=0

character_set_filesystem=binary

loose_performance_schema_max_statement_classes=0

key_cache_division_limit=100

#innodb_data_file_purge=ON

innodb_read_ahead_threshold=56

loose_optimizer_switch=index_merge=on,index_merge_union=on,index_merge_sort_union=on,index_merge_intersection=on,engine_condition_pushdown=on,index_condition_pushdown=on,mrr=on,mrr_cost_based=on,block_nested_loop=on,batched_key_access=off,materialization=on,semijoin=on,loosescan=on,firstmatch=on,subquery_materialization_cost_based=on,use_index_extensions=on

loose_performance_schema_max_socket_classes=0

innodb_monitor_disable=

loose_performance_schema_max_program_instances=0

innodb_adaptive_flushing_lwm=10

innodb_log_checksums=ON

innodb_ft_sort_pll_degree=2

log_slow_admin_statements=OFF

innodb_stats_on_metadata=OFF

stored_program_cache=256

group_concat_max_len=1024

innodb_rollback_segments=128

loose_information_schema_stats_expiry=86400

innodb_commit_concurrency=0

# table_definition_cache={LEAST(DBInstanceClassMemory/1073741824*512, 8192)}

auto_increment_increment=1

max_seeks_for_key=18446744073709500000

#performance_point_iostat_volume_size=10000

loose_persist_binlog_to_redo=OFF

loose_ccl_queue_hot_update=OFF

back_log=3000

binlog_transaction_dependency_tracking=WRITESET

loose_recycle_bin_retention=604800

innodb_io_capacity_max=40000

loose_performance_schema_events_transactions_history_size=0

min_examined_row_limit=0

loose_performance_schema_events_transactions_history_long_size=0

sync_relay_log_info=10000

innodb_stats_auto_recalc=ON

max_connect_errors=100

loose_performance_schema_max_file_classes=0

innodb_change_buffering=all

loose_opt_rds_enable_show_slave_lag=ON

loose_group_replication_flow_control_min_recovery_quota=0

loose_performance_schema_max_statement_stack=0

max_join_size=18446744073709551615

loose_validate_password_length=8

innodb_max_purge_lag_delay=0

loose_optimizer_trace=enabled=off,one_line=off

default_week_format=0

innodb_cmp_per_index_enabled=OFF

host_cache_size=644

auto_increment_offset=1

ft_min_word_len=4

default_authentication_plugin=mysql_native_password

loose_performance_schema_max_sql_text_length=0

slave_type_conversions=

loose_group_replication_flow_control_certifier_threshold=25000

optimizer_trace_offset=-1

loose_force_memory_to_innodb=OFF

character_set_server=utf8

innodb_adaptive_flushing=ON

#performance_point_iostat_interval=2

innodb_monitor_enable=

loose_group_replication_flow_control_applier_threshold=25000

table_open_cache_instances=16

innodb_buffer_pool_instances=8

loose_multi_blocks_ddl_count=0

loose_performance_schema_max_table_instances=0

loose_group_replication_flow_control_release_percent=50

loose_innodb_undo_space_reserved_size=0

innodb_log_file_size=1500M

lc_time_names=en_US

sync_master_info=10000

innodb_compression_level=6

loose_innodb_log_optimize_ddl=OFF

loose_performance_schema_max_prepared_statements_instances=0

loose_innodb_log_write_ahead_size=4096

loose_performance_schema_max_mutex_classes=0

innodb_online_alter_log_max_size=134217728

key_cache_block_size=1024

mysql_native_password_proxy_users=OFF

loose_innodb_rds_chunk_flush_interval=100

query_alloc_block_size=8192

loose_performance_schema_max_socket_instances=0

#innodb_purge_threads={LEAST(DBInstanceClassMemory/1073741824, 8)}

loose_group_replication_transaction_size_limit=150000000

innodb_compression_failure_threshold_pct=5

loose_performance_schema_error_size=0

binlog_rows_query_log_events=OFF

loose_innodb_undo_space_supremum_size=10240

innodb_stats_persistent_sample_pages=20

innodb_ft_total_cache_size=640000000

eq_range_index_dive_limit=100

loose_sql_safe_updates=OFF

loose_performance_schema_events_stages_history_long_size=0

connect_timeout=10

div_precision_increment=4

#performance_point_lock_rwlock_enabled=ON

sync_binlog=1000

innodb_stats_method=nulls_equal

lock_wait_timeout=31536000

innodb_deadlock_detect=ON

innodb_write_io_threads=4

loose_ccl_queue_bucket_count=4

ngram_token_size=2

loose_performance_schema_max_table_lock_stat=0

loose_performance_schema_max_table_handles=0

loose_performance_schema_max_memory_classes=0

loose_ignore_index_hint_error=OFF

loose_innodb_rds_free_resize=ON

innodb_ft_enable_diag_print=OFF

innodb_io_capacity=20000

slow_launch_time=2

innodb_table_locks=ON

loose_performance_schema_events_stages_history_size=0

innodb_stats_persistent=ON

tmp_table_size=2097152

loose_performance_schema_max_thread_classes=0

net_retry_count=10

innodb_ft_cache_size=8000000

binlog_cache_size=1M

innodb_max_dirty_pages_pct=75

innodb_disable_sort_file_cache=OFF

# innodb_lru_scan_depth={LEAST(DBInstanceClassMemory/1048576/8, 8192)}

loose_performance_schema_max_mutex_instances=0

long_query_time=1

interactive_timeout=7200

innodb_read_io_threads=4

transaction_prealloc_size=4096

open_files_limit=655350

loose_performance_schema_max_metadata_locks=0

temptable_max_ram=1073741824

# innodb_open_files={LEAST(DBInstanceClassCPU*500, 8000)}

max_heap_table_size=67108864

loose_performance_schema_digests_size=0

automatic_sp_privileges=ON

max_user_connections=2000

innodb_random_read_ahead=OFF

loose_group_replication_flow_control_max_commit_quota=0

delay_key_write=ON

general_log=OFF

log_bin_use_v1_row_events=1

loose_performance_schema_setup_actors_size=0

#innodb_data_file_purge_interval=100

innodb_buffer_pool_dump_at_shutdown=ON

query_prealloc_size=8192

key_cache_age_threshold=300

loose_performance_schema_setup_objects_size=0

transaction_alloc_block_size=8192

optimizer_prune_level=1

loose_performance_schema_max_file_instances=0

innodb_max_dirty_pages_pct_lwm=0

innodb_status_output_locks=OFF

binlog_row_image=full

innodb_change_buffer_max_size=25

innodb_optimize_fulltext_only=OFF

loose_performance_schema_max_file_handles=0

loose_performance_schema_users_size=0

innodb_max_undo_log_size=1073741824

slave_parallel_type=LOGICAL_CLOCK

innodb_sync_spin_loops=30

loose_group_replication_flow_control_period=1

loose_internal_tmp_mem_storage_engine=MEMORY

lower_case_table_names=0

sha256_password_proxy_users=OFF

innodb_flush_sync=ON

#tls_version=TLSv1,TLSv1.1,TLSv1.2

loose_performance_schema_max_rwlock_instances=0

delayed_insert_timeout=300

preload_buffer_size=32768

concurrent_insert=1

block_encryption_mode="aes-128-ecb"

slow_query_log=ON

net_buffer_length=16384

#innodb_buffer_pool_size={DBInstanceClassMemory*3/4}

delayed_insert_limit=100

delayed_queue_size=1000

session_track_gtids=OFF

innodb_thread_sleep_delay=10000

sql_require_primary_key=OFF

innodb_old_blocks_pct=37

innodb_sort_buffer_size=1048576

innodb_page_cleaners=8

loose_innodb_parallel_read_threads=1

innodb_spin_wait_delay=6

myisam_sort_buffer_size=262144

innodb_concurrency_tickets=5000

loose_performance_schema_max_cond_classes=0

loose_innodb_doublewrite_pages=64

transaction_write_set_extraction=XXHASH64

binlog_checksum=CRC32

loose_performance_schema_max_stage_classes=0

loose_performance_schema_events_statements_history_long_size=0

loose_ccl_queue_bucket_size=64

max_length_for_sort_data=1024

max_error_count=64

innodb_strict_mode=OFF

binlog_order_commits=OFF

#performance_schema={LEAST(DBInstanceClassMemory/8589934592, 1)}

innodb_ft_min_token_size=3

join_buffer_size=1M

optimizer_trace_max_mem_size=16384

innodb_autoinc_lock_mode=2

innodb_rollback_on_timeout=OFF

loose_performance_schema_max_thread_instances=0

max_write_lock_count=102400

loose_innodb_trx_resurrect_table_lock_accelerate=OFF

master_verify_checksum=OFF

innodb_ft_num_word_optimize=2000

log_error_verbosity=3

log_throttle_queries_not_using_indexes=0

loose_group_replication_flow_control_hold_percent=10

low_priority_updates=0

range_alloc_block_size=4096

sort_buffer_size=2M

max_allowed_packet=1073741824

read_buffer_size=1M

thread_cache_size=100

loose_performance_schema_events_waits_history_size=0

loose_thread_pool_oversubscribe=32

log_queries_not_using_indexes=OFF

innodb_flush_neighbors=0

EOF

chown -R mysql.mysql /usr/local/mysql

./bin/mysqld --defaults-file=/usr/local/mysql/my.cnf --initialize --user=mysql 2>&1 | tee password.txt

mysql_password=`awk '/A temporary password/{print $NF}' /usr/local/mysql/password.txt`

bin/mysql_ssl_rsa_setup --datadir=/usr/local/mysql/data

cat > /usr/lib/systemd/system/mysqld.service << 'EOF'

[Unit]

Description=MySQL Server

After=network.target

After=syslog.target

[Service]

User=mysql

Group=mysql

Type=notify

TimeoutSec=0

PermissionsStartOnly=true

# 修改这里的 ExecStart 为指定的 my.cnf 文件路径

ExecStart=/usr/local/mysql/bin/mysqld --defaults-file=/usr/local/mysql/my.cnf $MYSQLD_OPTS

EnvironmentFile=-/etc/sysconfig/mysql

LimitNOFILE = 10000

Restart=on-failure

RestartPreventExitStatus=1

Environment=MYSQLD_PARENT_PID=1

PrivateTmp=false

[Install]

WantedBy=multi-user.target

EOF

systemctl daemon-reload

systemctl enable mysqld

systemctl start mysqld

./bin/mysqladmin -S /usr/local/mysql/mysql.sock -uroot password 'Admin@2023' -p$mysql_password

ln -sv /usr/local/mysql/bin/* /usr/bin/ &> /dev/null

expect &> /dev/null << EOF

spawn ./bin/mysql_secure_installation -S /usr/local/mysql/mysql.sock

expect {

"Enter password" { send "Admin@2023\n";exp_continue }

"Press y" { send "n\n";exp_continue }

"Change the password" { send "n\n";exp_continue }

"Remove anonymous users" { send "y\n";exp_continue }

"Disallow root login" { send "n\n";exp_continue }

"Remove test database" { send "y\n";exp_continue }

"Reload privilege" { send "y\n" }

}

EOF

mysql -S /usr/local/mysql/mysql.sock -pAdmin@2023 -e "update mysql.user set host = '%' where user = 'root';"

mysql -S /usr/local/mysql/mysql.sock -pAdmin@2023 -e "flush privileges;"

mysql -S /usr/local/mysql/mysql.sock -pAdmin@2023 -e "select host,user from mysql.user;"

systemctl stop mysqld && systemctl start mysqld

echo "数据库安装成功"

eof

sh -x ~/install-mysql-8.0.28.sh

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 -e "select host,user from mysql.user;"

# 创建 RuoYi-Cloud 数据库并且导入数据

cd && git clone https://gitee.com/y_project/RuoYi-Cloud.git

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 -e "create database \`ry-cloud\`;"

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 ry-cloud < ~/RuoYi-Cloud/sql/ry_20230706.sql

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 -e "create database \`ry-config\`;"

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 ry-config < ~/RuoYi-Cloud/sql/ry_config_20220929.sql

mysql -h 192.168.1.201 -u root -P 3306 -pAdmin@2023 -e "show databases;"

1、nacos-2.1.0

mkdir -p ~/nacos-yml

kubectl create ns nacos

cat > ~/nacos-yml/nacos-mysql.yml << 'EOF'

apiVersion: apps/v1

kind: StatefulSet

metadata:

name: mysql

namespace: nacos

spec:

serviceName: mysql-headless

replicas: 1

selector:

matchLabels:

app: mysql

template:

metadata:

labels:

app: mysql

spec:

containers:

- name: mysql

image: mysql:5.7.40

imagePullPolicy: IfNotPresent

resources:

limits:

cpu: "2"

memory: "4Gi"

requests:

cpu: "1"

memory: "2Gi"

ports:

- name: mysql

containerPort: 3306

env:

- name: MYSQL_ROOT_PASSWORD

value: "Admin@2023"

- name: MYSQL_DATABASE

value: "nacos"

- name: MYSQL_USER

value: "nacos"

- name: MYSQL_PASSWORD

value: "nacos@2023"

volumeMounts:

- name: nacos-mysql-data-pvc

mountPath: /var/lib/mysql

- mountPath: /etc/localtime

name: localtime

volumes:

- name: localtime

hostPath:

path: /etc/localtime

volumeClaimTemplates:

- metadata:

name: nacos-mysql-data-pvc

spec:

accessModes: ["ReadWriteOnce"]

storageClassName: nfs-storage

resources:

requests:

storage: 10Gi

---

apiVersion: v1

kind: Service

metadata:

name: mysql-headless

namespace: nacos

labels:

app: mysql

spec:

clusterIP: None

ports:

- port: 3306

name: mysql

targetPort: 3306

selector:

app: mysql

EOF

kubectl apply -f ~/nacos-yml/nacos-mysql.yml

https://github.com/alibaba/nacos/blob/2.1.0/config/src/main/resources/META-INF/nacos-db.sql(sql地址)

cd ~/nacos-yml && wget https://github.com/alibaba/nacos/raw/2.1.0/config/src/main/resources/META-INF/nacos-db.sql

kubectl cp nacos-db.sql mysql-0:/ -n nacos

kubectl exec -n nacos mysql-0 -- mysql -pAdmin@2023 -e "use nacos;source /nacos-db.sql;"

kubectl exec -n nacos mysql-0 -- mysql -pAdmin@2023 -e "use nacos;show tables;"

cat > ~/nacos-yml/nacos-v2.1.0-yml << 'EOF'

apiVersion: v1

kind: Service

metadata:

name: nacos-headless

namespace: nacos

labels:

app: nacos

spec:

clusterIP: None

ports:

- port: 8848

name: server

targetPort: 8848

- port: 9848

name: client-rpc

targetPort: 9848

- port: 9849

name: raft-rpc

targetPort: 9849

## 兼容1.4.x版本的选举端口

- port: 7848

name: old-raft-rpc

targetPort: 7848

selector:

app: nacos

---

apiVersion: v1

kind: Service

metadata:

name: nacos

namespace: nacos

labels:

app: nacos

spec:

type: NodePort

ports:

- port: 8848

name: server

targetPort: 8848

nodePort: 31000

- port: 9848

name: client-rpc

targetPort: 9848

nodePort: 32000

- port: 9849

name: raft-rpc

nodePort: 32001

## 兼容1.4.x版本的选举端口

- port: 7848

name: old-raft-rpc

targetPort: 7848

nodePort: 30000

selector:

app: nacos

---

apiVersion: v1

kind: ConfigMap

metadata:

name: nacos-cm

namespace: nacos

data:

mysql.host: "mysql-headless.nacos.svc.cluster.local"

mysql.db.name: "nacos"

mysql.port: "3306"

mysql.user: "nacos"

mysql.password: "nacos@2023"

---

apiVersion: apps/v1

kind: StatefulSet

metadata:

name: nacos

namespace: nacos

spec:

serviceName: nacos-headless

replicas: 3

template:

metadata:

labels:

app: nacos

annotations:

pod.alpha.kubernetes.io/initialized: "true"

spec:

affinity:

podAntiAffinity:

requiredDuringSchedulingIgnoredDuringExecution:

- labelSelector:

matchExpressions:

- key: "app"

operator: In

values:

- nacos

topologyKey: "kubernetes.io/hostname"

containers:

- name: k8snacos

image: nacos/nacos-server:v2.1.0

imagePullPolicy: IfNotPresent

resources:

limits:

cpu: 8

memory: 8Gi

requests:

cpu: 2

memory: 2Gi

ports:

- containerPort: 8848

name: client

- containerPort: 9848

name: client-rpc

- containerPort: 9849

name: raft-rpc

- containerPort: 7848

name: old-raft-rpc

env:

- name: NACOS_REPLICAS

value: "3"

- name: MYSQL_SERVICE_HOST

valueFrom:

configMapKeyRef:

name: nacos-cm

key: mysql.host

- name: MYSQL_SERVICE_DB_NAME

valueFrom:

configMapKeyRef:

name: nacos-cm

key: mysql.db.name

- name: MYSQL_SERVICE_PORT

valueFrom:

configMapKeyRef:

name: nacos-cm

key: mysql.port

- name: MYSQL_SERVICE_USER

valueFrom:

configMapKeyRef:

name: nacos-cm

key: mysql.user

- name: MYSQL_SERVICE_PASSWORD

valueFrom:

configMapKeyRef:

name: nacos-cm

key: mysql.password

- name: SPRING_DATASOURCE_PLATFORM

value: "mysql"

- name: MODE

value: "cluster"

- name: NACOS_SERVER_PORT

value: "8848"

- name: PREFER_HOST_MODE

value: "hostname"

- name: NACOS_SERVERS

value: "nacos-0.nacos-headless.nacos.svc.cluster.local:8848 nacos-1.nacos-headless.nacos.svc.cluster.local:8848 nacos-2.nacos-headless.nacos.svc.cluster.local:8848"

selector:

matchLabels:

app: nacos

EOF

kubectl apply -f ~/nacos-yml/nacos-v2.1.0-yml

cat > ~/nacos-yml/nacos-Ingress.yml << 'EOF'

apiVersion: networking.k8s.io/v1

kind: Ingress

metadata:

name: nacos-ingress

namespace: nacos

annotations:

nginx.ingress.kubernetes.io/ssl-redirect: 'true'

nginx.ingress.kubernetes.io/proxy-body-size: '4G'

spec:

ingressClassName: nginx

rules:

- host: www.huanghuanhui.cloud

http:

paths:

- path: /nacos

pathType: Prefix

backend:

service:

name: nacos-headless

port:

number: 8848

tls:

- hosts:

- www.huanghuanhui.cloud

secretName: nacos-ingress-tls

EOF

kubectl create secret -n nacos \

tls nacos-ingress-tls \

--key=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud.key \

--cert=/root/ssl/huanghuanhui.cloud_nginx/huanghuanhui.cloud_bundle.crt

kubectl apply -f ~/nacos-yml/nacos-Ingress.yml

kubectl exec -it nacos-0 -n nacos -- bash

# 进容器里面执行

curl -X POST 'http://nacos-headless.nacos.svc.cluster.local:8848/nacos/v1/ns/instance?serviceName=nacos.naming.serviceName&ip=20.18.7.10&port=8080'

curl -X POST 'http://192.168.1.201:31000/nacos/v1/ns/instance?serviceName=nacos.naming.serviceName&ip=20.18.7.10&port=8080'

代码连接地址:nacos-headless.nacos.svc.cluster.local:8848

访问地址ip:192.168.1.201:31000/nacos、

访问地址域名:https://www.huanghuanhui.cloud/nacos/#/login

默认用户密码:nacos、nacos

用户密码:nacos、nacos@2023

导入配置文件

1、ruoyi-gateway-dev.yml

spring:

redis:

host: 192.168.1.201

port: 30078

password: Admin@2023

cloud:

gateway:

discovery:

locator:

lowerCaseServiceId: true

enabled: true

routes:

- id: ruoyi-auth

uri: lb://ruoyi-auth

predicates:

- Path=/auth/**

filters:

- CacheRequestFilter

- ValidateCodeFilter

- StripPrefix=1

- id: ruoyi-gen

uri: lb://ruoyi-gen

predicates:

- Path=/code/**

filters:

- StripPrefix=1

- id: ruoyi-job

uri: lb://ruoyi-job

predicates:

- Path=/schedule/**

filters:

- StripPrefix=1

- id: ruoyi-system

uri: lb://ruoyi-system

predicates:

- Path=/system/**

filters:

- StripPrefix=1

- id: ruoyi-file

uri: lb://ruoyi-file

predicates:

- Path=/file/**

filters:

- StripPrefix=1

security:

captcha:

enabled: true

type: math

xss:

enabled: true

excludeUrls:

- /system/notice

ignore:

whites:

- /auth/logout

- /auth/login

- /auth/register

- /*/v2/api-docs

- /csrf

2、ruoyi-auth-dev.yml

spring:

redis:

host: 192.168.1.201

port: 30078

password: Admin@2023

3、 ruoyi-system-dev.yml

spring:

redis:

host: 192.168.1.201

port: 30078

password: Admin@2023

datasource:

druid:

stat-view-servlet:

enabled: true

loginUsername: admin

loginPassword: 123456

dynamic:

druid:

initial-size: 5

min-idle: 5

maxActive: 20

maxWait: 60000

timeBetweenEvictionRunsMillis: 60000

minEvictableIdleTimeMillis: 300000

validationQuery: SELECT 1 FROM DUAL

testWhileIdle: true

testOnBorrow: false

testOnReturn: false

poolPreparedStatements: true

maxPoolPreparedStatementPerConnectionSize: 20

filters: stat,slf4j

connectionProperties: druid.stat.mergeSql=true;druid.stat.slowSqlMillis=5000

datasource:

master:

driver-class-name: com.mysql.cj.jdbc.Driver

url: jdbc:mysql://192.168.1.201:3306/ry-cloud?useUnicode=true&characterEncoding=utf8&zeroDateTimeBehavior=convertToNull&useSSL=true&serverTimezone=GMT%2B8

username: root

password: Admin@2023

mybatis:

typeAliasesPackage: com.ruoyi.system

mapperLocations: classpath:mapper/**/*.xml

swagger:

title: 系统模块接口文档

license: Powered By ruoyi

licenseUrl: https://ruoyi.vip

2、redis-7.2

mkdir -p ~/redis-yml

kubectl create ns redis

cat > ~/redis-yml/redis-ConfigMap.yml << 'EOF'

kind: ConfigMap

apiVersion: v1

metadata:

name: redis-cm

namespace: redis

labels:

app: redis

data:

redis.conf: |-

dir /data

port 6379

bind 0.0.0.0

appendonly yes

protected-mode no

requirepass Admin@2023

pidfile /data/redis-6379.pid

EOF

kubectl apply -f ~/redis-yml/redis-ConfigMap.yml

cat > ~/redis-yml/redis-StatefulSet.yml << 'EOF'

apiVersion: apps/v1

kind: StatefulSet

metadata:

name: redis

namespace: redis

spec:

replicas: 1

serviceName: redis

selector:

matchLabels:

app: redis

template:

metadata:

name: redis

labels:

app: redis

spec:

affinity:

podAntiAffinity:

requiredDuringSchedulingIgnoredDuringExecution:

- labelSelector:

matchLabels:

app: redis

topologyKey: kubernetes.io/hostname

containers:

- name: redis

image: redis:7.2-rc3-alpine

imagePullPolicy: IfNotPresent

env:

- name: TZ

value: Asia/Shanghai

command:

- "sh"

- "-c"

- "redis-server /etc/redis/redis.conf"

ports:

- containerPort: 6379

name: tcp-redis

protocol: TCP

resources:

limits:

cpu: "2"

memory: "4Gi"

requests:

cpu: "1"

memory: "2Gi"

volumeMounts:

- name: redis-data

mountPath: /data

- name: config

mountPath: /etc/redis/redis.conf

subPath: redis.conf

volumes:

- name: config

configMap:

name: redis-cm

volumeClaimTemplates:

- metadata:

name: redis-data

spec:

storageClassName: "nfs-storage"

accessModes: [ "ReadWriteOnce" ]

resources:

requests:

storage: 2Ti

EOF

kubectl apply -f ~/redis-yml/redis-StatefulSet.yml

cat > ~/redis-yml/redis-Service.yml << 'EOF'

apiVersion: v1

kind: Service

metadata:

name: redis

namespace: redis

spec:

type: NodePort

ports:

- name: redis

port: 6379

targetPort: 6379

protocol: TCP

nodePort: 30078

selector:

app: redis

EOF

kubectl apply -f ~/redis-yml/redis-Service.yml

访问地址:192.168.1.201(NodePort 端口30078)

密码:Admin@2023
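A quick connectivity check from any host that has redis-cli installed (a sketch; the NodePort is reachable on any node IP):

redis-cli -h 192.168.1.201 -p 30078 -a 'Admin@2023' ping    # expect: PONG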

三、gitlab(操作)配置webhook钩子(触发构建)

https://gitlab.huanghuanhui.cloud/admin/application_settings/network

Outbound requests

https://gitlab.huanghuanhui.cloud/admin/application_settings/general

Import and export settings

Visibility and access controls

# 导入gitlab

https://gitee.com/y_project/RuoYi-Cloud.git

# 配置webhook钩子(触发构建)

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-gateway/build?token=11198baeb81a20a9b734b9ece849dcb541

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-gateway/build?token=11b9d19a186e96c83a73efce6e74ae4552

http://192.168.1.201:30456/job/ruoyi-gateway/build?token=11b9d19a186e96c83a73efce6e74ae4552

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-gateway/build?token=1182880a9d149b5ab5111104e043b4820d

https://jenkins-prod.huanghuanhui.cloud/view/ruoyi-gateway/build?token=1182880a9d149b5ab5111104e043b4820d

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-auth/build?token=1182880a9d149b5ab5111104e043b4820d

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-system/build?token=1182880a9d149b5ab5111104e043b4820d

https://jenkins-prod.huanghuanhui.cloud/job/ruoyi-vue/build?token=0a134decb6a7cfb09d06776ece709c6b

四、harbor(操作)

# 创建仓库

ruoyi-gateway

ruoyi-auth

ruoyi-system

ruoyi-vue

openjdk

docker login https://harbor.huanghuanhui.cloud --username=admin

Admin@2023

docker pull openjdk:8-jre

docker tag openjdk:8-jre harbor.huanghuanhui.cloud/openjdk/openjdk:8-jre

docker push harbor.huanghuanhui.cloud/openjdk/openjdk:8-jre

五、jenkins-podTemplate(操作)

1、创建 RuoYi-Cloud RuoYi-Vue

2、创建 git_auth、harbor_auth 秘钥

3、创建 kubeconfig

4、创建 token (webhook自动触发)

webhook 11198baeb81a20a9b734b9ece849dcb541

5、匿名用户具有可读权限(webhook需要用到)

https://jenkins-prod.huanghuanhui.cloud/manage/configureSecurity/

后端(podTemplate)

1、ruoyi-gateway

# 参数化构建

AppName

服务名称

ruoyi-gateway

harbor_url

镜像仓库地址

harbor.huanghuanhui.cloud

NacosServer

Nacos地址

192.168.1.201:31000

JAVA_OPTS

jar 运行时的参数配置

-Xms1024M -Xmx1024M -Xmn256M -Dspring.config.location=app.yml -Dserver.tomcat.max-threads=800

podTemplate(yaml: '''

apiVersion: v1

kind: Pod

spec:

volumes:

- name: docker-socket

emptyDir: {}

- name: maven-cache

persistentVolumeClaim:

claimName: jenkins-prod-slave-maven-cache

containers:

- name: docker

image: docker:24.0.6

readinessProbe:

exec:

command: [sh, -c, "ls -S /var/run/docker.sock"]

command:

- sleep

args:

- 99d

env:

- name: AppName

value: "$AppName"

- name: harbor_url

value: "$harbor_url"

- name: JAVA_OPTS

value: "$JAVA_OPTS"

- name: NacosServer

value: "$NacosServer"

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: docker-daemon

image: docker:24.0.6-dind

securityContext:

privileged: true

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: maven

image: maven:3.8.1-jdk-8

command:

- sleep

args:

- 99d

volumeMounts:

- name: maven-cache

mountPath: /root/.m2/repository

- name: kubectl

image: kostiscodefresh/kubectl-argo-rollouts:v1.6.0

command:

- sleep

args:

- 99d

''') {

node(POD_LABEL) {

stage('拉取代码') {

git credentialsId: '77066368-e8a8-4edb-afaf-53aaf90c31a9', url: 'http://gitlab.huanghuanhui.cloud/root/RuoYi-Cloud.git'

container('maven') {

stage('代码编译') {

sh 'mvn -U clean install -Dmaven.test.skip=true && GIT_COMMIT=`git log --abbrev-commit --pretty=format:"%h" -1` && echo "GIT_COMMIT=$GIT_COMMIT" >> /home/jenkins/agent/env.txt'

}

}

}

container('docker') {

stage('打包镜像') {

sh '''cat > entrypoint.sh << EOF

#!/bin/bash -e

env

java $JAVA_OPTS -jar ./*.jar

EOF

cat > app.yml << EOF

# Tomcat

server:

port: 8080

# Spring

spring:

application:

# 应用名称

name: ${AppName}

profiles:

# 环境配置

active: dev

cloud:

nacos:

discovery:

# 服务注册地址

server-addr: ${NacosServer}

config:

# 配置中心地址

server-addr: ${NacosServer}

# 配置文件格式

file-extension: yml

# 共享配置

shared-configs:

- application

sentinel:

# 取消控制台懒加载

eager: true

transport:

# 控制台地址

dashboard: 127.0.0.1:8718

# nacos配置持久化

datasource:

ds1:

nacos:

server-addr: 127.0.0.1:8848

dataId: sentinel-ruoyi-gateway

groupId: DEFAULT_GROUP

data-type: json

rule-type: gw-flow

EOF

cat > Dockerfile << EOF

FROM harbor.huanghuanhui.cloud/openjdk/openjdk:8-jre

WORKDIR /usr/local/src/

ADD ./ruoyi-gateway/target/ruoyi-gateway.jar /usr/local/src/ruoyi-gateway.jar

ADD app.yml .

ADD entrypoint.sh .

ENTRYPOINT ["sh","./entrypoint.sh"]

EOF'''

sh '. /home/jenkins/agent/env.txt && docker build -t ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} . && docker images && echo ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} > /home/jenkins/agent/docker.txt && cat /home/jenkins/agent/docker.txt'

}

}

container('docker') {

stage('推送镜像') {

withCredentials([usernamePassword(credentialsId: '9c10572f-c324-422f-b0c0-1b80d2ddb857', passwordVariable: 'password', usernameVariable: 'username')]) {

sh """

docker login -u ${username} -p '${password}' harbor.huanghuanhui.cloud

docker push `cat /home/jenkins/agent/docker.txt`

"""

}

}

}

container('kubectl') {

stage('argo-rollouts + istio(金丝雀发布)(渐进式交付)') {

configFileProvider([configFile(fileId: 'c26898c2-92c3-4c19-8490-9cf8ff7918ef', variable: 'kubeconfig')]) {

sh """

mkdir -p ~/.kube && cp ${kubeconfig} ~/.kube/config

/app/kubectl-argo-rollouts-linux-amd64 set image ${AppName} "*=`cat /home/jenkins/agent/docker.txt`" -n ruoyi

"""

}

}

}

}

}
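The pipeline only updates the Rollout's image; because the canary steps defined in section 六 contain manual pause gates, the release still has to be promoted (or aborted) by hand. A sketch of the follow-up commands once the ruoyi-gateway Rollout exists — the same commands apply to ruoyi-auth, ruoyi-system and ruoyi-vue below:

kubectl argo rollouts get rollout ruoyi-gateway -n ruoyi --watch   # watch the canary progress

kubectl argo rollouts promote ruoyi-gateway -n ruoyi               # pass a manual gate

kubectl argo rollouts abort ruoyi-gateway -n ruoyi                 # abandon the canary and fall back to stable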

2、ruoyi-auth

podTemplate(yaml: '''

apiVersion: v1

kind: Pod

spec:

volumes:

- name: docker-socket

emptyDir: {}

- name: maven-cache

persistentVolumeClaim:

claimName: jenkins-prod-slave-maven-cache

containers:

- name: docker

image: docker:24.0.6

readinessProbe:

exec:

command: [sh, -c, "ls -S /var/run/docker.sock"]

command:

- sleep

args:

- 99d

env:

- name: AppName

value: "$AppName"

- name: harbor_url

value: "$harbor_url"

- name: JAVA_OPTS

value: "$JAVA_OPTS"

- name: NacosServer

value: "$NacosServer"

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: docker-daemon

image: docker:24.0.6-dind

securityContext:

privileged: true

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: maven

image: maven:3.8.1-jdk-8

command:

- sleep

args:

- 99d

volumeMounts:

- name: maven-cache

mountPath: /root/.m2/repository

- name: kubectl

image: kostiscodefresh/kubectl-argo-rollouts:v1.6.0

command:

- sleep

args:

- 99d

''') {

node(POD_LABEL) {

stage('拉取代码') {

git credentialsId: '77066368-e8a8-4edb-afaf-53aaf90c31a9', url: 'http://gitlab.huanghuanhui.cloud/root/RuoYi-Cloud.git'

container('maven') {

stage('代码编译') {

sh 'mvn -U clean install -Dmaven.test.skip=true && GIT_COMMIT=`git log --abbrev-commit --pretty=format:"%h" -1` && echo "GIT_COMMIT=$GIT_COMMIT" >> /home/jenkins/agent/env.txt'

}

}

}

container('docker') {

stage('打包镜像') {

sh '''cat > entrypoint.sh << EOF

#!/bin/bash -e

env

java $JAVA_OPTS -jar ./*.jar

EOF

cat > app.yml << EOF

# Tomcat

server:

port: 9200

# Spring

spring:

application:

# 应用名称

name: ruoyi-auth

profiles:

# 环境配置

active: dev

cloud:

nacos:

discovery:

# 服务注册地址

server-addr: ${NacosServer}

config:

# 配置中心地址

server-addr: ${NacosServer}

# 配置文件格式

file-extension: yml

# 共享配置

shared-configs:

- application

EOF

cat > Dockerfile << EOF

FROM harbor.huanghuanhui.cloud/openjdk/openjdk:8-jre

WORKDIR /usr/local/src/

ADD ./ruoyi-auth/target/ruoyi-auth.jar /usr/local/src/ruoyi-auth.jar

ADD app.yml .

ADD entrypoint.sh .

ENTRYPOINT ["sh","./entrypoint.sh"]

EOF'''

sh '. /home/jenkins/agent/env.txt && docker build -t ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} . && docker images && echo ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} > /home/jenkins/agent/docker.txt && cat /home/jenkins/agent/docker.txt'

}

}

container('docker') {

stage('推送镜像') {

withCredentials([usernamePassword(credentialsId: '9c10572f-c324-422f-b0c0-1b80d2ddb857', passwordVariable: 'password', usernameVariable: 'username')]) {

sh """

docker login -u ${username} -p '${password}' harbor.huanghuanhui.cloud

docker push `cat /home/jenkins/agent/docker.txt`

"""

}

}

}

container('kubectl') {

stage('argo-rollouts + istio(金丝雀发布)(渐进式交付)') {

configFileProvider([configFile(fileId: 'c26898c2-92c3-4c19-8490-9cf8ff7918ef', variable: 'kubeconfig')]) {

sh """

mkdir -p ~/.kube && cp ${kubeconfig} ~/.kube/config

/app/kubectl-argo-rollouts-linux-amd64 set image ${AppName} "*=`cat /home/jenkins/agent/docker.txt`" -n ruoyi

"""

}

}

}

}

}

3、ruoyi-system

podTemplate(yaml: '''

apiVersion: v1

kind: Pod

spec:

volumes:

- name: docker-socket

emptyDir: {}

- name: maven-cache

persistentVolumeClaim:

claimName: jenkins-prod-slave-maven-cache

containers:

- name: docker

image: docker:24.0.6

readinessProbe:

exec:

command: [sh, -c, "ls -S /var/run/docker.sock"]

command:

- sleep

args:

- 99d

env:

- name: AppName

value: "$AppName"

- name: harbor_url

value: "$harbor_url"

- name: JAVA_OPTS

value: "$JAVA_OPTS"

- name: NacosServer

value: "$NacosServer"

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: docker-daemon

image: docker:24.0.6-dind

securityContext:

privileged: true

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: maven

image: maven:3.8.1-jdk-8

command:

- sleep

args:

- 99d

volumeMounts:

- name: maven-cache

mountPath: /root/.m2/repository

- name: kubectl

image: kostiscodefresh/kubectl-argo-rollouts:v1.6.0

command:

- sleep

args:

- 99d

''') {

node(POD_LABEL) {

stage('拉取代码') {

git credentialsId: '77066368-e8a8-4edb-afaf-53aaf90c31a9', url: 'http://gitlab.huanghuanhui.cloud/root/RuoYi-Cloud.git'

container('maven') {

stage('代码编译') {

sh 'mvn -U clean install -Dmaven.test.skip=true && GIT_COMMIT=`git log --abbrev-commit --pretty=format:"%h" -1` && echo "GIT_COMMIT=$GIT_COMMIT" >> /home/jenkins/agent/env.txt'

}

}

}

container('docker') {

stage('打包镜像') {

sh '''cat > entrypoint.sh << EOF

#!/bin/bash -e

env

java $JAVA_OPTS -jar ./*.jar

EOF

cat > app.yml << EOF

# Tomcat

server:

port: 9201

# Spring

spring:

application:

# 应用名称

name: $AppName

profiles:

# 环境配置

active: dev

cloud:

nacos:

discovery:

# 服务注册地址

server-addr: ${NacosServer}

config:

# 配置中心地址

server-addr: ${NacosServer}

# 配置文件格式

file-extension: yml

# 共享配置

shared-configs:

- application

EOF

cat > Dockerfile << EOF

FROM harbor.huanghuanhui.cloud/openjdk/openjdk:8-jre

WORKDIR /usr/local/src/

ADD ./ruoyi-modules/ruoyi-system/target/ruoyi-modules-system.jar /usr/local/src/ruoyi-modules-system.jar

ADD app.yml .

ADD entrypoint.sh .

ENTRYPOINT ["sh","./entrypoint.sh"]

EOF'''

sh '. /home/jenkins/agent/env.txt && docker build -t ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} . && docker images && echo ${harbor_url}/${AppName}/${AppName}:$GIT_COMMIT-${BUILD_ID} > /home/jenkins/agent/docker.txt && cat /home/jenkins/agent/docker.txt'

}

}

container('docker') {

stage('推送镜像') {

withCredentials([usernamePassword(credentialsId: '9c10572f-c324-422f-b0c0-1b80d2ddb857', passwordVariable: 'password', usernameVariable: 'username')]) {

sh """

docker login -u ${username} -p '${password}' harbor.huanghuanhui.cloud

docker push `cat /home/jenkins/agent/docker.txt`

"""

}

}

}

container('kubectl') {

stage('argo-rollouts + istio(金丝雀发布)(渐进式交付)') {

configFileProvider([configFile(fileId: 'c26898c2-92c3-4c19-8490-9cf8ff7918ef', variable: 'kubeconfig')]) {

sh """

mkdir -p ~/.kube && cp ${kubeconfig} ~/.kube/config

/app/kubectl-argo-rollouts-linux-amd64 set image ${AppName} "*=`cat /home/jenkins/agent/docker.txt`" -n ruoyi

"""

}

}

}

}

}

前端(podTemplate)

4、ruoyi-vue

# 参数化构建(参数名 / 说明 / 默认值)

AppName       服务名称        ruoyi-vue

harbor_url    镜像仓库地址    harbor.huanghuanhui.cloud

podTemplate(yaml: '''

apiVersion: v1

kind: Pod

spec:

volumes:

- name: docker-socket

emptyDir: {}

containers:

- name: docker

image: docker:24.0.6

readinessProbe:

exec:

command: [sh, -c, "ls -S /var/run/docker.sock"]

command:

- sleep

args:

- 99d

env:

- name: AppName

value: "$AppName"

- name: harbor_url

value: "$harbor_url"

- name: JAVA_OPTS

value: "$JAVA_OPTS"

- name: NacosServer

value: "$NacosServer"

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: docker-daemon

image: docker:24.0.6-dind

securityContext:

privileged: true

volumeMounts:

- name: docker-socket

mountPath: /var/run

- name: node

image: node:16.17.0-alpine

command:

- sleep

args:

- 99d

- name: kubectl

image: kostiscodefresh/kubectl-argo-rollouts:v1.6.0

command:

- sleep

args:

- 99d

''') {

node(POD_LABEL) {

stage('拉取代码') {

git credentialsId: '77066368-e8a8-4edb-afaf-53aaf90c31a9', url: 'http://gitlab.huanghuanhui.cloud/root/RuoYi-Cloud.git'

container('node') {

stage('代码编译 ') {

sh 'cd ruoyi-ui && sed -i \'s/localhost/ruoyi-gateway-svc/g\' vue.config.js && npm install --registry=https://registry.npmmirror.com && npm run build:prod && ls -la'

}

}

}

container('docker') {

stage('打包镜像') {

sh '''cat > nginx.conf << 'EOF'

worker_processes auto;

events {

worker_connections 10240;

}

http {

include mime.types;

default_type application/octet-stream;

sendfile on;

keepalive_timeout 65;

server {

listen 80;

server_name localhost;

location / {

root /usr/share/nginx/html;

try_files $uri $uri/ /index.html;

index index.html index.htm;

}

location /prod-api/{

proxy_pass http://ruoyi-gateway-svc:8080/;

proxy_set_header Host $http_host;

proxy_set_header X-Real-IP $remote_addr;

proxy_set_header REMOTE-HOST $remote_addr;

proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

proxy_http_version 1.1;

}

# 避免actuator暴露

if ($request_uri ~ "/actuator") {

return 403;

}

error_page 500 502 503 504 /50x.html;

location = /50x.html {

root html;

}

}

}

EOF

cat > Dockerfile << 'EOF'

FROM nginx:1.25.1-alpine

WORKDIR /usr/share/nginx/html

COPY nginx.conf /etc/nginx/nginx.conf

COPY ./ruoyi-ui/dist /usr/share/nginx/html

EOF'''

sh 'docker build -t ${harbor_url}/${AppName}/${AppName}:${BUILD_ID} . && docker images'

}

}

container('docker') {

stage('推送镜像') {

withCredentials([usernamePassword(credentialsId: '9c10572f-c324-422f-b0c0-1b80d2ddb857', passwordVariable: 'password', usernameVariable: 'username')]) {

sh """

docker login -u ${username} -p '${password}' harbor.huanghuanhui.cloud

docker push ${harbor_url}/${AppName}/${AppName}:${BUILD_ID}

"""

}

}

}

container('kubectl') {

stage('argo-rollouts + istio(金丝雀发布)(渐进式交付)') {

configFileProvider([configFile(fileId: 'c26898c2-92c3-4c19-8490-9cf8ff7918ef', variable: 'kubeconfig')]) {

sh """

mkdir -p ~/.kube && cp ${kubeconfig} ~/.kube/config

/app/kubectl-argo-rollouts-linux-amd64 set image ${AppName} "*=${harbor_url}/${AppName}/${AppName}:${BUILD_ID}" -n ruoyi

"""

}

}

}

}

}

六、Argo Rollouts + istio 部署前后端服务

mkdir -p ~/RuoYi-Cloud-rollout-yml

cd ~/RuoYi-Cloud-rollout-yml

kubectl create namespace ruoyi

kubectl label namespace ruoyi istio-injection=enabled
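A quick check that automatic sidecar injection is enabled for the namespace:

kubectl get ns ruoyi --show-labels   # should include istio-injection=enabled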

1、ruoyi-gateway

cat > ruoyi-gateway-rollout.yml << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-gateway
  namespace: ruoyi
spec:
  replicas: 3
  strategy:
    canary:
      steps:
      - setWeight: 20
      - pause: {} # 人工卡点
      - setWeight: 40
      - pause: {duration: 10}
      - setWeight: 60
      - pause: {duration: 10}
      - setWeight: 80
      - pause: {duration: 10}
      - setWeight: 100
      - pause: {} # 人工卡点
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: ruoyi-gateway
  template:
    metadata:
      labels:
        app: ruoyi-gateway
    spec:
      containers:
      - name: ruoyi-gateway
        image: harbor.huanghuanhui.cloud/ruoyi-gateway/ruoyi-gateway:dc121ff-3
        ports:
        - name: http
          containerPort: 8080
          protocol: TCP
EOF

cat > ruoyi-gateway-svc.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: ruoyi-gateway-svc
  namespace: ruoyi
  labels:
    app: ruoyi-gateway
spec:
  type: ClusterIP
  ports:
  - port: 8080
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: ruoyi-gateway
EOF
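A sketch of applying the two manifests above and checking the result (assuming the current directory is still ~/RuoYi-Cloud-rollout-yml):

kubectl apply -f ruoyi-gateway-rollout.yml -f ruoyi-gateway-svc.yml

kubectl argo rollouts get rollout ruoyi-gateway -n ruoyi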

2、ruoyi-auth

cat > ruoyi-auth-rollout.yml << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-auth
  namespace: ruoyi
spec:
  replicas: 3
  strategy:
    canary:
      steps:
      - setWeight: 20
      - pause: {} # 人工卡点
      - setWeight: 40
      - pause: {duration: 10}
      - setWeight: 60
      - pause: {duration: 10}
      - setWeight: 80
      - pause: {duration: 10}
      - setWeight: 100
      - pause: {} # 人工卡点
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: ruoyi-auth
  template:
    metadata:
      labels:
        app: ruoyi-auth
    spec:
      containers:
      - name: ruoyi-auth
        image: harbor.huanghuanhui.cloud/ruoyi-auth/ruoyi-auth:dc121ff-3
        ports:
        - name: http
          containerPort: 9200
          protocol: TCP
EOF

3、ruoyi-system

cat > ruoyi-system-rollout.yml << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-system
  namespace: ruoyi
spec:
  replicas: 3
  strategy:
    canary:
      steps:
      - setWeight: 20
      - pause: {} # 人工卡点
      - setWeight: 40
      - pause: {duration: 10}
      - setWeight: 60
      - pause: {duration: 10}
      - setWeight: 80
      - pause: {duration: 10}
      - setWeight: 100
      - pause: {} # 人工卡点
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: ruoyi-system
  template:
    metadata:
      labels:
        app: ruoyi-system
    spec:
      containers:
      - name: ruoyi-system
        image: harbor.huanghuanhui.cloud/ruoyi-system/ruoyi-system:dc121ff-5
        ports:
        - name: http
          containerPort: 9201
          protocol: TCP
EOF
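No Service manifests are defined for ruoyi-auth / ruoyi-system here — they register in Nacos and are reached through the gateway — so only the Rollouts need to be applied (a sketch, same working directory as above):

kubectl apply -f ruoyi-auth-rollout.yml -f ruoyi-system-rollout.yml

kubectl -n ruoyi get rollouts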

4、ruoyi-vue

cat > ruoyi-vue-rollout.yml << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-vue
  namespace: ruoyi
spec:
  replicas: 3
  strategy:
    canary:
      steps:
      - setWeight: 20
      - pause: {} # 人工卡点
      - setWeight: 40
      - pause: {duration: 10}
      - setWeight: 60
      - pause: {duration: 10}
      - setWeight: 80
      - pause: {duration: 10}
      - setWeight: 100
      - pause: {} # 人工卡点
  revisionHistoryLimit: 2
  selector:
    matchLabels:
      app: ruoyi-vue
  template:
    metadata:
      labels:
        app: ruoyi-vue
    spec:
      containers:
      - name: ruoyi-vue
        image: harbor.huanghuanhui.cloud/ruoyi-vue/ruoyi-vue:3
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
EOF

cat > ruoyi-vue-svc.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: ruoyi-vue-svc
  namespace: ruoyi
  labels:
    app: ruoyi-vue
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: ruoyi-vue
EOF
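A sketch of applying the frontend Rollout and its NodePort Service, then finding the assigned port (no nodePort is pinned above, so Kubernetes picks one):

kubectl apply -f ruoyi-vue-rollout.yml -f ruoyi-vue-svc.yml

kubectl -n ruoyi get svc ruoyi-vue-svc   # the UI is then reachable at http://<node-ip>:<nodePort>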

七、istio

前端:ruoyi-vue

1、ruoyi-vue

cat > ~/RuoYi-Cloud-rollout-yml/ruoyi-vue-rollout-istio.yml << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Rollout
metadata:
  name: ruoyi-vue
  namespace: ruoyi
spec:
  replicas: 3
  strategy:
    canary:
      canaryService: ruoyi-vue-svc-canary   # 关联 canary Service
      stableService: ruoyi-vue-svc-stable   # 关联 stable Service
      trafficRouting:
        istio:
          virtualServices:
          - name: ruoyi-vue-vsvc            # 关联的 Istio virtualService
            routes:
            - primary
      steps:
      - setWeight: 20
      - pause: {} # 人工卡点
      - setWeight: 40
      - pause: {duration: 10}
      - setWeight: 60
      - pause: {duration: 10}
      - setWeight: 80
      - pause: {duration: 10}
      - setWeight: 100
      - pause: {}
  revisionHistoryLimit: 5
  selector:
    matchLabels:
      app: ruoyi-vue
  template:
    metadata:
      labels:
        app: ruoyi-vue
        istio-injection: enabled
    spec:
      containers:
      - name: ruoyi-vue
        image: harbor.huanghuanhui.cloud/ruoyi-vue/ruoyi-vue:3
        ports:
        - name: http
          containerPort: 80
          protocol: TCP
EOF

kubectl delete -f ruoyi-vue-rollout.yml

kubectl apply -f ruoyi-vue-rollout-istio.yml

cat > ruoyi-vue-rollout-istio-svc.yml << 'EOF'
apiVersion: v1
kind: Service
metadata:
  name: ruoyi-vue-svc-canary
  namespace: ruoyi
  labels:
    app: ruoyi-vue
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: ruoyi-vue
    # This selector will be updated with the pod-template-hash of the canary ReplicaSet. e.g.:
    # rollouts-pod-template-hash: 7bf84f9696
---
apiVersion: v1
kind: Service
metadata:
  name: ruoyi-vue-svc-stable
  namespace: ruoyi
  labels:
    app: ruoyi-vue
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: ruoyi-vue
    # This selector will be updated with the pod-template-hash of the stable ReplicaSet. e.g.:
    # rollouts-pod-template-hash: 789746c88d
EOF

kubectl delete -f ruoyi-vue-svc.yml

kubectl apply -f ruoyi-vue-rollout-istio-svc.yml

# 通过请求头控制版本:带 x-canary: test-user 请求头的流量进入金丝雀版本,其余流量走 stable 版本

cat > ruoyi-vue-vsvc.yml << 'EOF'
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: ruoyi-vue-vsvc
  namespace: ruoyi
spec:
  gateways:
  - ruoyi-vue-gateway
  hosts:
  - "*"
  http:
  - name: primary
    match:
    - headers:
        x-canary:
          exact: test-user
      uri:
        prefix: /
    route:
    - destination:
        host: ruoyi-vue-svc-stable
      weight: 0
    - destination:
        host: ruoyi-vue-svc-canary
      weight: 100
  - route:
    - destination:
        host: ruoyi-vue-svc-stable
      weight: 100
EOF

kubectl apply -f ruoyi-vue-vsvc.yml

cat > ruoyi-vue-gateway.yml << 'EOF'
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: ruoyi-vue-gateway
  namespace: ruoyi
spec:
  selector:
    istio: ingressgateway # 默认创建的 istio ingressgateway pod 有这个 Label
  servers:
  - port:
      number: 80
      name: http
      protocol: HTTP
    hosts:
    - "ruoyi.huanghuanhui.cloud" # 匹配 host
EOF

kubectl apply -f ruoyi-vue-gateway.yml

kubectl argo rollouts get rollout ruoyi-vue

kubectl describe vs ruoyi-vue-vsvc
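A sketch of exercising the header-based split once a new image has been set on the rollout; it assumes the istio ingressgateway Service in istio-system is exposed via NodePort (adjust the address and port to your environment):

kubectl -n istio-system get svc istio-ingressgateway   # find the HTTP entry point

# requests carrying the x-canary header are routed to ruoyi-vue-svc-canary
curl -I -H 'Host: ruoyi.huanghuanhui.cloud' -H 'x-canary: test-user' http://<node-ip>:<http-nodePort>/

# requests without the header stay on ruoyi-vue-svc-stable
curl -I -H 'Host: ruoyi.huanghuanhui.cloud' http://<node-ip>:<http-nodePort>/

# promote or abort the canary as needed
kubectl argo rollouts promote ruoyi-vue -n ruoyi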

b站对应视频地址:https://www.bilibili.com/video/BV1UN411s7v7/?vd_source=e80df0948ea63a29c41e3714877011a4
