首次提交:初始化项目

fei committed 2026-02-05 00:11:05 +08:00
commit 26eaf8110b
171 changed files with 17105 additions and 0 deletions


@@ -0,0 +1,29 @@
# Traefik Middleware - CORS 配置
apiVersion: traefik.io/v1alpha1
kind: Middleware
metadata:
name: cors-headers
namespace: registry-system
spec:
headers:
accessControlAllowMethods:
- "GET"
- "HEAD"
- "POST"
- "PUT"
- "DELETE"
- "OPTIONS"
accessControlAllowOriginList:
- "http://registry.u6.net3w.com"
- "https://registry.u6.net3w.com"
accessControlAllowCredentials: true
accessControlAllowHeaders:
- "Authorization"
- "Content-Type"
- "Accept"
- "Cache-Control"
accessControlExposeHeaders:
- "Docker-Content-Digest"
- "WWW-Authenticate"
accessControlMaxAge: 100
addVaryHeader: true
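这个 Middleware 只有被 Ingress 的注解引用才会生效(格式为 `<命名空间>-<名称>@kubernetescrd`,见下文 registry-ingress 的注解)。下面是一段验证用的示意命令,假设 kubectl 已指向本集群:

```bash
# 确认 Middleware 已创建(Traefik CRD,k3s 自带)
kubectl -n registry-system get middleware cors-headers
# 确认 registry-ingress 的注解里引用了它
kubectl -n registry-system describe ingress registry-ingress | grep -i middlewares
```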


@@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: registry-auth-secret
namespace: registry-system
type: Opaque
stringData:
# ▼▼▼ 重点:这是 123456 的 bcrypt 加密,直接复制不要改 ▼▼▼
htpasswd: |
admin:$2y$05$WSu.LllzUnHQcNPgklqqqum3o69unaC6lCUNz.rRmmq3YhowL99RW


@@ -0,0 +1,27 @@
root@98-hk:~/k3s/registry# docker run --rm --entrypoint htpasswd httpd:alpine -Bbn admin 123456
Unable to find image 'httpd:alpine' locally
alpine: Pulling from library/httpd
1074353eec0d: Pull complete
0bd765d2a2cb: Pull complete
0c4ffdba1e9e: Pull complete
4f4fb700ef54: Pull complete
0c51c0b07eae: Pull complete
e626d5c4ed2c: Pull complete
988cd7d09a31: Pull complete
Digest: sha256:6b7535d8a33c42b0f0f48ff0067765d518503e465b1bf6b1629230b62a466a87
Status: Downloaded newer image for httpd:alpine
admin:$2y$05$yYEah4y9O9F/5TumcJSHAuytQko2MAyFM1MuqgAafDED7Fmiyzzse
root@98-hk:~/k3s/registry# # 注意:两边要有单引号 ' '
kubectl create secret generic registry-auth-secret \
--from-literal=htpasswd='admin:$2y$05$yYEah4y9O9F/5TumcJSHAuytQko2MAyFM1MuqgAafDED7Fmiyzzse' \
--namespace registry-system
secret/registry-auth-secret created
root@98-hk:~/k3s/registry# # 重新部署应用
kubectl apply -f registry-stack.yaml
namespace/registry-system unchanged
persistentvolumeclaim/registry-pvc unchanged
deployment.apps/registry created
service/registry-service unchanged
ingress.networking.k8s.io/registry-ingress unchanged
root@98-hk:~/k3s/registry#
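Secret 与 Deployment 就绪后,可以用 curl 粗略验证 Basic Auth 是否生效。以下为示意命令,域名与账号密码沿用上文,假设 DNS 解析和证书均已就绪:

```bash
# 不带凭证应返回 401,并带有 WWW-Authenticate 响应头
curl -i https://registry.u6.net3w.com/v2/
# 带上 admin/123456 应返回 200
curl -i -u admin:123456 https://registry.u6.net3w.com/v2/
# 列出已推送的镜像仓库
curl -s -u admin:123456 https://registry.u6.net3w.com/v2/_catalog
```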


@@ -0,0 +1,131 @@
# 1. 创建独立的命名空间
apiVersion: v1
kind: Namespace
metadata:
name: registry-system
---
# 2. 密码 Secret 已拆分到单独的文件 (hardcode-secret.yaml) 中维护,此处不再内联
---
# 3. 申请硬盘空间 (存放镜像文件)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: registry-pvc
namespace: registry-system
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 20Gi # 给仓库 20G 空间,不够随时可以扩
---
# 4. 部署 Registry 应用
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry
namespace: registry-system
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: registry
template:
metadata:
labels:
app: registry
spec:
containers:
- name: registry
image: registry:2
ports:
- containerPort: 5000
env:
# --- 开启认证 ---
- name: REGISTRY_AUTH
value: "htpasswd"
- name: REGISTRY_AUTH_HTPASSWD_REALM
value: "Registry Realm"
- name: REGISTRY_AUTH_HTPASSWD_PATH
value: "/auth/htpasswd"
# --- 存储路径 ---
- name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
value: "/var/lib/registry"
volumeMounts:
- name: data-volume
mountPath: /var/lib/registry
- name: auth-volume
mountPath: /auth
volumes:
- name: data-volume
persistentVolumeClaim:
claimName: registry-pvc
- name: auth-volume
secret:
secretName: registry-auth-secret
---
# 5. 内部服务
apiVersion: v1
kind: Service
metadata:
name: registry-service
namespace: registry-system
spec:
selector:
app: registry
ports:
- protocol: TCP
port: 80
targetPort: 5000
---
# 6. 暴露 HTTPS 域名
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: registry-ingress
namespace: registry-system
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# 增加上传大小限制 (Docker 镜像层可能很大)
ingress.kubernetes.io/proxy-body-size: "0"
nginx.ingress.kubernetes.io/proxy-body-size: "0"
# CORS 配置 (允许 UI 访问 Registry API)
traefik.ingress.kubernetes.io/router.middlewares: registry-system-cors-headers@kubernetescrd
spec:
rules:
- host: registry.u6.net3w.com
http:
paths:
# Registry API 路径 (优先级高,必须放在前面)
- path: /v2
pathType: Prefix
backend:
service:
name: registry-service
port:
number: 80
# UI 显示在根路径
- path: /
pathType: Prefix
backend:
service:
name: registry-ui-service
port:
number: 80
tls:
- hosts:
- registry.u6.net3w.com
secretName: registry-tls-secret


@@ -0,0 +1,84 @@
# Joxit Docker Registry UI - 轻量级 Web 界面
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: registry-ui
namespace: registry-system
spec:
replicas: 1
selector:
matchLabels:
app: registry-ui
template:
metadata:
labels:
app: registry-ui
spec:
containers:
- name: registry-ui
image: joxit/docker-registry-ui:latest
ports:
- containerPort: 80
env:
# Registry API 地址(通过 nginx 代理,避免混合内容问题)
- name: NGINX_PROXY_PASS_URL
value: "http://registry-service.registry-system.svc.cluster.local"
# 允许删除镜像
- name: DELETE_IMAGES
value: "true"
# 显示内容摘要
- name: SHOW_CONTENT_DIGEST
value: "true"
# 单个 registry 模式
- name: SINGLE_REGISTRY
value: "true"
# Registry 标题
- name: REGISTRY_TITLE
value: "U9 Docker Registry"
# 目录一次加载的条目上限 (设大一些便于搜索)
- name: CATALOG_ELEMENTS_LIMIT
value: "1000"
---
# UI 服务
apiVersion: v1
kind: Service
metadata:
name: registry-ui-service
namespace: registry-system
spec:
selector:
app: registry-ui
ports:
- protocol: TCP
port: 80
targetPort: 80
---
# 暴露 UI 到外网
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: registry-ui-ingress
namespace: registry-system
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
rules:
- host: registry-ui.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: registry-ui-service
port:
number: 80
tls:
- hosts:
- registry-ui.u6.net3w.com
secretName: registry-ui-tls-secret


@@ -0,0 +1,72 @@
# 01-mysql.yaml (新版)
# --- 第一部分:申请一张硬盘券 (PVC) ---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: mysql-pvc # 记住这个券的名字
namespace: demo-space
spec:
accessModes:
- ReadWriteOnce # 只能被一个节点读写
storageClassName: longhorn # 使用 Longhorn 分布式存储 (K3s 自带的默认 StorageClass 是 local-path),利用 VPS 本地硬盘
resources:
requests:
storage: 2Gi # 申请 2GB 大小
---
# --- 第二部分:数据库服务 (不变) ---
apiVersion: v1
kind: Service
metadata:
name: mysql-service
namespace: demo-space
spec:
ports:
- port: 3306
selector:
app: wordpress-mysql
---
# --- 第三部分:部署数据库 (挂载硬盘) ---
apiVersion: apps/v1
kind: Deployment
metadata:
name: wordpress-mysql
namespace: demo-space
spec:
selector:
matchLabels:
app: wordpress-mysql
strategy:
type: Recreate # 有状态应用建议用 Recreate (先关旧的再开新的)
template:
metadata:
labels:
app: wordpress-mysql
spec:
containers:
- image: mariadb:10.6.4-focal
name: mysql
env:
- name: MYSQL_ROOT_PASSWORD
value: "password123"
- name: MYSQL_DATABASE
value: "wordpress"
- name: MYSQL_USER
value: "wordpress"
- name: MYSQL_PASSWORD
value: "wordpress"
ports:
- containerPort: 3306
name: mysql
# ▼▼▼ 重点变化在这里 ▼▼▼
volumeMounts:
- name: mysql-store
mountPath: /var/lib/mysql # 容器里数据库存文件的位置
volumes:
- name: mysql-store
persistentVolumeClaim:
claimName: mysql-pvc # 使用上面定义的那张券
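部署后可以用下面的示意命令确认 PVC 已绑定、数据库可以正常登录(账号密码取自上面的环境变量):

```bash
# PVC 状态应为 Bound
kubectl -n demo-space get pvc mysql-pvc
# 进入容器,用 wordpress 账号验证数据库
kubectl -n demo-space exec -it deploy/wordpress-mysql -- \
  mysql -uwordpress -pwordpress -e 'SHOW DATABASES;'
```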


@@ -0,0 +1,64 @@
# 02-wordpress.yaml
apiVersion: v1
kind: Service
metadata:
name: wordpress-service
namespace: demo-space
spec:
sessionAffinity: ClientIP
sessionAffinityConfig:
clientIP:
timeoutSeconds: 10800 # 3 hours
ports:
- port: 80
selector:
app: wordpress
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: wordpress
namespace: demo-space
spec:
replicas: 2 # 我们启动 2 个 WordPress 前台
selector:
matchLabels:
app: wordpress
template:
metadata:
labels:
app: wordpress
spec:
containers:
- image: wordpress:latest
name: wordpress
env:
- name: WORDPRESS_DB_HOST
value: "mysql-service" # 魔法所在!直接填名字
- name: WORDPRESS_DB_USER
value: "wordpress"
- name: WORDPRESS_DB_PASSWORD
value: "wordpress"
- name: WORDPRESS_DB_NAME
value: "wordpress"
- name: WORDPRESS_CONFIG_EXTRA
value: |
/* HTTPS behind reverse proxy - Complete configuration */
if (isset($_SERVER['HTTP_X_FORWARDED_PROTO']) && $_SERVER['HTTP_X_FORWARDED_PROTO'] === 'https') {
$_SERVER['HTTPS'] = 'on';
}
if (isset($_SERVER['HTTP_X_FORWARDED_HOST'])) {
$_SERVER['HTTP_HOST'] = $_SERVER['HTTP_X_FORWARDED_HOST'];
}
/* Force SSL for admin */
define('FORCE_SSL_ADMIN', true);
/* Redis session storage for multi-replica support */
@ini_set('session.save_handler', 'redis');
@ini_set('session.save_path', 'tcp://redis-service:6379');
/* Fix cookie issues */
@ini_set('session.cookie_httponly', true);
@ini_set('session.cookie_secure', true);
@ini_set('session.use_only_cookies', true);
ports:
- containerPort: 80
name: wordpress


@@ -0,0 +1,31 @@
# 03-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: wordpress-ingress
namespace: demo-space
annotations:
# ▼▼▼ 关键注解:我要申请证书 ▼▼▼
cert-manager.io/cluster-issuer: letsencrypt-prod
# ▼▼▼ Traefik sticky session 配置 ▼▼▼
traefik.ingress.kubernetes.io/affinity: "true"
traefik.ingress.kubernetes.io/session-cookie-name: "wordpress-session"
spec:
rules:
- host: blog.u6.net3w.com # 您的域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: wordpress-service
port:
number: 80
# ▼▼▼ 关键配置:证书存放在这个 Secret 里 ▼▼▼
tls:
- hosts:
- blog.u6.net3w.com
secretName: blog-tls-secret # K3s 会自动创建这个 secret 并填入证书


@@ -0,0 +1,40 @@
# 04-redis.yaml - Redis for WordPress session storage
apiVersion: v1
kind: Service
metadata:
name: redis-service
namespace: demo-space
spec:
ports:
- port: 6379
targetPort: 6379
selector:
app: redis
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis
namespace: demo-space
spec:
replicas: 1
selector:
matchLabels:
app: redis
template:
metadata:
labels:
app: redis
spec:
containers:
- name: redis
image: redis:7-alpine
ports:
- containerPort: 6379
resources:
requests:
memory: "64Mi"
cpu: "100m"
limits:
memory: "128Mi"
cpu: "200m"


@@ -0,0 +1,8 @@
# Custom WordPress image with Redis PHP extension
FROM wordpress:latest
# Install Redis PHP extension
RUN pecl install redis && docker-php-ext-enable redis
# Verify installation
RUN php -m | grep redis
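这个镜像需要自行构建并推送(比如推到上文搭建的私有 Registry),再让 WordPress 的 Deployment 改用它,前面的 Redis 会话配置才能真正生效。下面是一个示例流程,镜像标签 `wordpress-redis:latest` 只是演示用的假设:

```bash
# 构建并推送自定义 WordPress 镜像(标签为示例)
docker build -t registry.u6.net3w.com/wordpress-redis:latest .
docker login registry.u6.net3w.com        # admin / 123456,见 Registry 一节
docker push registry.u6.net3w.com/wordpress-redis:latest
# 让 Deployment 切换到新镜像
kubectl -n demo-space set image deployment/wordpress \
  wordpress=registry.u6.net3w.com/wordpress-redis:latest
```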


@@ -0,0 +1,30 @@
# 1. 定义一个“虚假”的服务,作为 K8s 内部的入口
#
# external-app.yaml (修正版)
apiVersion: v1
kind: Service
metadata:
name: host-app-service
namespace: demo-space
spec:
ports:
- name: http # <--- Service 这里叫 http
protocol: TCP
port: 80
targetPort: 3100
---
apiVersion: v1
kind: Endpoints
metadata:
name: host-app-service
namespace: demo-space
subsets:
- addresses:
- ip: 85.137.244.98
ports:
- port: 3100
name: http # <--- 【关键修改】这里必须也叫 http才能配对成功
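Service 与手写 Endpoints 是否配对成功,直接看 Endpoints 对象里有没有出现外部地址即可;下面是示意命令,临时 Pod 仅用于从集群内部测试转发:

```bash
# 配对成功时,ENDPOINTS 列应显示 85.137.244.98:3100
kubectl -n demo-space get endpoints host-app-service
# 从集群内部发起一次请求验证转发(临时 Pod,结束后自动删除)
kubectl -n demo-space run svc-test --rm -it --restart=Never --image=busybox -- \
  wget -qO- http://host-app-service
```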


@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: host-app-ingress
namespace: demo-space
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# ▼▼▼ 核心修复:添加这一行 ▼▼▼
ingress.kubernetes.io/custom-response-headers: "Content-Security-Policy: upgrade-insecure-requests"
spec:
rules:
- host: wt.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: host-app-service
port:
number: 80
tls:
- hosts:
- wt.u6.net3w.com
secretName: wt-tls-secret


@@ -0,0 +1,16 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# Let's Encrypt 的生产环境接口
server: https://acme-v02.api.letsencrypt.org/directory
# 填您的真实邮箱,证书过期前会发邮件提醒(虽然它会自动续期)
email: fszy2021@gmail.com
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: traefik
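ClusterIssuer 建好后,可以用下面的示意命令确认它已就绪,并跟踪各 Ingress 触发的证书签发进度(以博客证书为例):

```bash
# READY 为 True 表示与 Let's Encrypt 注册成功
kubectl get clusterissuer letsencrypt-prod
# 查看各命名空间的证书签发状态
kubectl get certificate -A
# 以博客证书为例查看详细事件
kubectl -n demo-space describe certificate blog-tls-secret
```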


@@ -0,0 +1,27 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: longhorn-ingress
namespace: longhorn-system # 注意:Longhorn 安装在这个命名空间
annotations:
# 1. 告诉 Cert-Manager:请用这个发证机构给我发证
cert-manager.io/cluster-issuer: letsencrypt-prod
# (可选) 强制 Traefik 使用 HTTPS 入口,但这行通常不需要,Traefik 会自动识别 TLS
# traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
rules:
- host: storage.u6.net3w.com # 您的域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: longhorn-frontend
port:
number: 80
# 2. 告诉 K3s:证书下载下来后存在哪里
tls:
- hosts:
- storage.u6.net3w.com
secretName: longhorn-tls-secret # 证书会自动保存在这个 Secret 里


@@ -0,0 +1,37 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: php-apache
namespace: demo-space
spec:
selector:
matchLabels:
run: php-apache
replicas: 1
template:
metadata:
labels:
run: php-apache
spec:
containers:
- name: php-apache
image: registry.k8s.io/hpa-example
ports:
- containerPort: 80
resources:
# 必须设置 CPU requests,HPA 才能按百分比计算
limits:
cpu: 500m
requests:
cpu: 200m
---
apiVersion: v1
kind: Service
metadata:
name: php-apache
namespace: demo-space
spec:
ports:
- port: 80
selector:
run: php-apache
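这是 HPA 的官方示例应用;有了上面的 CPU requests,就可以按利用率自动扩缩容。下面是创建 HPA 和压测的示意命令(需要集群里有 metrics-server,k3s 默认自带):

```bash
# 按 50% CPU 利用率,在 1~5 个副本之间自动伸缩
kubectl -n demo-space autoscale deployment php-apache --cpu-percent=50 --min=1 --max=5
# 制造持续负载(临时 Pod),另开一个终端观察 HPA 变化
kubectl -n demo-space run load-generator --rm -it --restart=Never --image=busybox -- \
  /bin/sh -c "while true; do wget -q -O- http://php-apache; done"
kubectl -n demo-space get hpa php-apache -w
```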


@@ -0,0 +1,120 @@
# 1. 独立的命名空间
apiVersion: v1
kind: Namespace
metadata:
name: n8n-system
---
# 2. 数据持久化 (保存工作流和账号信息)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: n8n-pvc
namespace: n8n-system
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 5Gi
---
# 3. 核心应用
apiVersion: apps/v1
kind: Deployment
metadata:
name: n8n
namespace: n8n-system
labels:
app: n8n
spec:
replicas: 1
selector:
matchLabels:
app: n8n
template:
metadata:
labels:
app: n8n
spec:
securityContext:
fsGroup: 1000
containers:
- name: n8n
image: n8nio/n8n:latest
securityContext:
runAsUser: 1000
runAsGroup: 1000
ports:
- containerPort: 5678
env:
# ▼▼▼ 关键配置 ▼▼▼
- name: N8N_HOST
value: "n8n.u6.net3w.com"
- name: N8N_PORT
value: "5678"
- name: N8N_PROTOCOL
value: "https"
- name: WEBHOOK_URL
value: "https://n8n.u6.net3w.com/"
# 时区设置 (方便定时任务)
- name: GENERIC_TIMEZONE
value: "Asia/Shanghai"
- name: TZ
value: "Asia/Shanghai"
# 禁用 n8n 的一些统计收集
- name: N8N_DIAGNOSTICS_ENABLED
value: "false"
volumeMounts:
- name: data
mountPath: /home/node/.n8n
volumes:
- name: data
persistentVolumeClaim:
claimName: n8n-pvc
---
# 4. 服务暴露
apiVersion: v1
kind: Service
metadata:
name: n8n-service
namespace: n8n-system
spec:
selector:
app: n8n
ports:
- protocol: TCP
port: 80
targetPort: 5678
---
# 5. Ingress (自动 HTTPS)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: n8n-ingress
namespace: n8n-system
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
tls:
- hosts:
- n8n.u6.net3w.com
secretName: n8n-tls
rules:
- host: n8n.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: n8n-service
port:
number: 80


@@ -0,0 +1,109 @@
# 1. 命名空间
apiVersion: v1
kind: Namespace
metadata:
name: gitea-system
---
# 2. 数据持久化 (存放代码仓库和数据库)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: gitea-data-pvc
namespace: gitea-system
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn # 沿用你的 Longhorn
resources:
requests:
storage: 10Gi
---
# 3. 部署 Gitea 应用
apiVersion: apps/v1
kind: Deployment
metadata:
name: gitea
namespace: gitea-system
spec:
replicas: 1
selector:
matchLabels:
app: gitea
template:
metadata:
labels:
app: gitea
spec:
containers:
- name: gitea
image: gitea/gitea:latest
ports:
- containerPort: 3000
name: http
- containerPort: 22
name: ssh
volumeMounts:
- name: gitea-data
mountPath: /data
env:
# 初始设置,避免手动改配置文件
- name: GITEA__server__DOMAIN
value: "git.u6.net3w.com"
- name: GITEA__server__ROOT_URL
value: "https://git.u6.net3w.com/"
- name: GITEA__server__SSH_PORT
value: "22" # 注意:通过 Ingress 访问时通常用 HTTPSSSH 需要额外配置 NodePort暂时先设为标准
volumes:
- name: gitea-data
persistentVolumeClaim:
claimName: gitea-data-pvc
---
# 4. Service (内部网络)
apiVersion: v1
kind: Service
metadata:
name: gitea-service
namespace: gitea-system
spec:
selector:
app: gitea
ports:
- protocol: TCP
port: 80
targetPort: 3000
name: http
- protocol: TCP
port: 2222 # 如果未来要用 SSH,可以映射这个端口
targetPort: 22
name: ssh
---
# 5. Ingress (暴露 HTTPS 域名)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: gitea-ingress
namespace: gitea-system
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# 允许大文件上传 (Git push 可能很大)
nginx.ingress.kubernetes.io/proxy-body-size: "0"
spec:
rules:
- host: git.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: gitea-service
port:
number: 80
tls:
- hosts:
- git.u6.net3w.com
secretName: gitea-tls-secret
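部署后先确认 Pod 与证书都就绪,再打开域名完成 Gitea 的安装向导;之后即可通过 HTTPS 推送代码。以下为示意命令,用户名和仓库名只是占位:

```bash
kubectl -n gitea-system get pods,svc,ingress
kubectl -n gitea-system get certificate
# 初始化完成后,通过 HTTPS 推送(<用户名>/<仓库名> 为示例占位)
git remote add origin https://git.u6.net3w.com/<用户名>/<仓库名>.git
git push -u origin master
```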


@@ -0,0 +1,97 @@
# 1. 创建一个独立的命名空间,保持整洁
apiVersion: v1
kind: Namespace
metadata:
name: monitoring
---
# 2. 申请一块 2GB 的硬盘 (使用 Longhorn)
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: kuma-pvc
namespace: monitoring
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 2Gi
---
# 3. 部署应用 (StatefulSet 也可以,单实例用 Deployment 足够)
apiVersion: apps/v1
kind: Deployment
metadata:
name: uptime-kuma
namespace: monitoring
spec:
replicas: 1
selector:
matchLabels:
app: uptime-kuma
strategy:
type: Recreate
template:
metadata:
labels:
app: uptime-kuma
spec:
containers:
- name: uptime-kuma
image: louislam/uptime-kuma:1
ports:
- containerPort: 3001
volumeMounts:
- name: data
mountPath: /app/data
volumes:
- name: data
persistentVolumeClaim:
claimName: kuma-pvc
---
# 4. 创建内部服务
apiVersion: v1
kind: Service
metadata:
name: kuma-service
namespace: monitoring
spec:
selector:
app: uptime-kuma
ports:
- protocol: TCP
port: 80
targetPort: 3001
---
# 5. 暴露到外网 (HTTPS + 域名)
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kuma-ingress
namespace: monitoring
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
rules:
- host: status.u6.net3w.com # <--- 您的新域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kuma-service
port:
number: 80
tls:
- hosts:
- status.u6.net3w.com
secretName: status-tls-secret


@@ -0,0 +1,62 @@
apiVersion: v1
kind: Namespace
metadata:
name: navigation
---
# ▼▼▼ 核心知识点:ConfigMap ▼▼▼
apiVersion: v1
kind: ConfigMap
metadata:
name: homepage-config
namespace: navigation
data:
# 配置文件 1: 定义小组件 (显示时间、搜索框、资源占用)
widgets.yaml: |
- search:
provider: google
target: _blank
- resources:
cpu: true
memory: true
disk: true
- datetime:
text_size: xl
format:
timeStyle: short
# 配置文件 2: 定义您的服务链接 (请注意看下面的 icon 和 href)
services.yaml: |
- 我的应用:
- 个人博客:
icon: wordpress.png
href: https://blog.u6.net3w.com
description: 我的数字花园
- 远程桌面:
icon: linux.png
href: https://wt.u6.net3w.com
description: K8s 外部反代测试
- 基础设施:
- 状态监控:
icon: uptime-kuma.png
href: https://status.u6.net3w.com
description: Uptime Kuma
widget:
type: uptimekuma
url: http://kuma-service.monitoring.svc.cluster.local # ▼ 重点:K8s 内部 DNS
slug: my-wordpress-blog # (高级玩法:稍后填这个)
- 存储管理:
icon: longhorn.png
href: https://storage.u6.net3w.com
description: 分布式存储面板
widget:
type: longhorn
url: http://longhorn-frontend.longhorn-system.svc.cluster.local
# 配置文件 3: 基础设置
settings.yaml: |
title: K3s 指挥中心
background: https://images.unsplash.com/photo-1519681393784-d120267933ba?auto=format&fit=crop&w=1920&q=80
theme: dark
color: slate


@@ -0,0 +1,71 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: homepage
namespace: navigation
spec:
replicas: 1
selector:
matchLabels:
app: homepage
template:
metadata:
labels:
app: homepage
spec:
containers:
- name: homepage
image: ghcr.io/gethomepage/homepage:latest
ports:
- containerPort: 3000
# ▼▼▼ 关键动作:把 ConfigMap 挂载成文件 ▼▼▼
volumeMounts:
- name: config-volume
mountPath: /app/config # 容器里的配置目录
volumes:
- name: config-volume
configMap:
name: homepage-config # 引用上面的 ConfigMap
---
apiVersion: v1
kind: Service
metadata:
name: homepage-service
namespace: navigation
spec:
selector:
app: homepage
ports:
- protocol: TCP
port: 80
targetPort: 3000
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: homepage-ingress
namespace: navigation
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
# 开启这个可以允许跨域调用 (可选)
nginx.ingress.kubernetes.io/enable-cors: "true"
spec:
rules:
- host: nav.u6.net3w.com # <--- 您的新域名
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: homepage-service
port:
number: 80
tls:
- hosts:
- nav.u6.net3w.com
secretName: nav-tls-secret
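ConfigMap 是以文件形式挂载进容器的,修改 homepage-config 之后稳妥的做法是滚动重启一次,让 homepage 重新读取配置。以下为示意命令,配置文件名按你的实际拆分方式替换:

```bash
# 更新配置(文件名为示例)并滚动重启
kubectl -n navigation apply -f homepage-config.yaml
kubectl -n navigation rollout restart deployment homepage
kubectl -n navigation rollout status deployment homepage
```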


@@ -0,0 +1,33 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: k3s-apps
namespace: argocd
spec:
project: default
# Git 仓库配置
source:
repoURL: https://git.u6.net3w.com/admin/k3s-configs.git
targetRevision: HEAD
path: k3s
# 目标集群配置
destination:
server: https://kubernetes.default.svc
namespace: default
# 自动同步配置
syncPolicy:
automated:
prune: true # 自动删除 Git 中不存在的资源
selfHeal: true # 自动修复被手动修改的资源
allowEmpty: false
syncOptions:
- CreateNamespace=true # 自动创建命名空间
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m
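Application 创建后,ArgoCD 会按上面的 syncPolicy 自动同步;可以先用 kubectl 查看同步与健康状态(也可以用 argocd CLI 或网页)。以下为示意命令:

```bash
kubectl apply -f argocd-app.yaml
kubectl -n argocd get applications
# 查看同步状态与健康状态
kubectl -n argocd get application k3s-apps \
  -o jsonpath='{.status.sync.status}{"  "}{.status.health.status}{"\n"}'
```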


@@ -0,0 +1,24 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: argocd-server-ingress
namespace: argocd
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
tls:
- hosts:
- argocd.u6.net3w.com
secretName: argocd-tls-secret
rules:
- host: argocd.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: argocd-server
port:
name: http


@@ -0,0 +1,25 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: rancher
namespace: cattle-system
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
ingressClassName: traefik
tls:
- hosts:
- rancher.u6.net3w.com
secretName: rancher-tls
rules:
- host: rancher.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: rancher
port:
number: 80


@@ -0,0 +1,23 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portainer-ingress
namespace: portainer
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
tls:
- hosts:
- portainer.u6.net3w.com # <--- 换个新子域名
secretName: portainer-tls
rules:
- host: portainer.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portainer
port:
number: 9000


@@ -0,0 +1,148 @@
这是一个非常棒的想法!天天面对黑底白字的命令行确实容易累,而且在查看日志流、多 Pod 资源占用时,图形化界面(GUI)有着无法比拟的优势。
对于 K3s,我有 **三个最强推荐**,分别对应不同的使用场景。
---
### 方案一:最强桌面端神器 —— OpenLens (推荐开发使用)
如果您希望在**自己的电脑(Windows/Mac)**上管理远程的 K3s,而不占用 VPS 的资源,Lens 是绝对的王者。它就像是一个专门用来操作 K8s 的“远程桌面”。
**优点:**
* **零资源占用**:运行在您自己电脑上,不消耗 VPS 内存。
* **功能极其强大**:查看日志、进入终端、编辑 YAML、查看监控,点击即达。
* **体验流畅**:原生应用,比网页版更丝滑。
#### 🚀 如何连接 (只需 3 步)
要让本地的 Lens 连上远程的 K3s,您需要把 VPS 上的“钥匙”拿回来。
1. **在 VPS 上获取 Kubeconfig**
执行命令查看配置文件内容:
```bash
cat /etc/rancher/k3s/k3s.yaml
```
*(复制这一大段内容)*
2. **在本地电脑创建配置文件**
在您的电脑上新建一个文件(例如 `my-k3s-config`),粘贴刚才的内容。
**⚠️ 关键修改**:找到 `server: https://127.0.0.1:6443` 这一行,把 `127.0.0.1` 改成您的 **域名**(比如 `dev.u6.net3w.com`,前提是 6443 端口已开放)或者 **VPS 公网 IP**(一条命令的改法见本节末尾的示例)。
3. **导入 Lens**
下载并安装 **OpenLens** (免费开源版) 或 **Lens Desktop**
打开软件 -> File -> Add Cluster -> 粘贴刚才修改后的配置内容。
**瞬间,您的 K3s 集群全貌就会展现在眼前!**
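顺带一提,第 2 步的改地址也可以在 VPS 上用一条命令完成(示例,`<VPS公网IP>` 请替换为您的实际地址):

```bash
# 把 kubeconfig 里的 127.0.0.1 换成公网 IP,输出为新文件
sudo sed 's/127.0.0.1/<VPS公网IP>/' /etc/rancher/k3s/k3s.yaml > my-k3s-config
```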
---
### 方案二:最强网页版 —— Portainer (推荐运维/监控)
如果您希望打开浏览器就能管理,而且您习惯了 Docker 的管理方式,那么 **Portainer** 是不二之选。它对 K8s 的支持非常友好,界面简洁漂亮。
**优点:**
* **随时随地访问**:有浏览器就能管。
* **可视化极佳**:有漂亮的仪表盘,能看到 CPU/内存使用率。
* **部署简单**:它自己就是 K3s 里的一个 Pod。
#### 🚀 如何安装 (30秒搞定)
直接在您的 VPS 上执行:
1. **安装 Portainer**
```bash
kubectl apply -n portainer -f https://raw.githubusercontent.com/portainer/k8s/master/deploy/manifests/portainer/portainer.yaml
```
*(这一步会自动创建命名空间和所有资源)*
2. **暴露端口 (我们要用 NodePort 访问)**
默认 Portainer 用的是 LoadBalancer,我们改一下,让它能直接通过端口访问。
```bash
# 编辑服务配置
kubectl edit service portainer -n portainer
```
找到 `type: LoadBalancer` 改为 `type: NodePort`
或者,既然您已经玩得很溜了,**建议直接写一个 Ingress** 给它!
**创建 `portainer-ingress.yaml`**
```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: portainer-ingress
namespace: portainer
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
tls:
- hosts:
- portainer.u6.net3w.com # <--- 换个新子域名
secretName: portainer-tls
rules:
- host: portainer.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: portainer
port:
number: 9000
```
3. **应用并访问**
`kubectl apply -f portainer-ingress.yaml`
稍等片刻,访问 `https://portainer.u6.net3w.com`,设置管理员密码即可。
---
### 方案三:官方全家桶 —— Rancher (企业级)
K3s 是 Rancher Labs 开发的,Rancher 是他们的旗舰管理平台。
**优点:** 功能最全,企业级标准,可以管理成百上千个集群。
**缺点:** **非常吃内存**。Rancher 本身启动就需要占用 2G+ 内存。如果您的 VPS 只有 4G 或更少,**强烈不推荐**,它会把您的业务挤垮。
---
### 💡 极客的选择K9s (终端里的 GUI)
如果您还是离不开终端,但想要图形化的便利,**K9s** 是必备神器。它是一个运行在终端里的图形界面。
**安装:**
```bash
# 在 VPS 上安装 Linuxbrew 后安装,或者直接下载二进制文件
curl -sS https://webinstall.dev/k9s | bash
```
**启动:**
输入 `k9s`
您可以像打游戏一样:
*`0` 显示所有 Pod。
*`l` 看日志。
*`s` 进入 Shell。
*`ctrl-d` 删除 Pod。
* 全键盘操作,酷炫到没朋友。
---
### 我的建议
1. **首选方案**:先在本地安装 **OpenLens**。不用在 VPS 上装任何东西,无负担,体验最好。
2. **次选方案**:如果经常需要在不同电脑上管理,在 VPS 上部署一个 **Portainer**
**您想先试试哪一个?如果是 OpenLens我可以教您怎么把 VPS 的 6443 端口安全地放行。**


@@ -0,0 +1,98 @@
**有眼光!** 既然您的 VPS 配置不错(之前提到有 16GB 内存),跑 Rancher 是**绝对最爽**的选择。它就像给您的 Kubernetes 集群装了一个“Windows 操作系统”,所有操作都能点点鼠标完成。
虽然您喜欢用 YAML 硬编码(这很好),但在安装 Rancher 这种巨型软件时,**Helm**(Kubernetes 的包管理器,类似于 Python 的 pip 或 Linux 的 apt)是唯一标准且靠谱的途径。手动写 YAML 安装 Rancher 会涉及到几百个文件,极易出错。
请按照以下步骤把这头“牛”(Rancher 的 Logo)牵进您的集群。
---
### 🚀 第一步:安装 Helm (如果您还没有)
Helm 是安装 Rancher 的必备工具。在 Node 1 上执行:
```bash
curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash
```
### 第二步:添加 Rancher 仓库
告诉 Helm 去哪里下载 Rancher。
```bash
# 1. 添加仓库
helm repo add rancher-latest https://releases.rancher.com/server-charts/latest
# 2. 创建 Rancher 专属的命名空间
kubectl create namespace cattle-system
```
### 第三步:一键安装 Rancher (关键)
我们要安装 Rancher并让它**自动利用**您集群里已经存在的 `cert-manager` 来申请 `rancher.u6.net3w.com` 的 HTTPS 证书。
请**直接复制**下面这整段命令(我已经帮您把参数配好了):
* **域名**: `rancher.u6.net3w.com`
* **初始密码**: `admin123456` (进去后强制要求改)
* **副本数**: `1` (为了节省资源,生产环境通常用 3,但 1 个也够用)
* **证书**: 使用 Let's Encrypt 自动生成
```bash
helm install rancher rancher-latest/rancher \
--namespace cattle-system \
--set hostname=rancher.u6.net3w.com \
--set bootstrapPassword=admin123456 \
--set replicas=1 \
--set ingress.tls.source=letsEncrypt \
--set letsEncrypt.email=admin@net3w.com \
--set letsEncrypt.ingress.class=traefik
```
---
### 第四步:等待“巨兽”苏醒
Rancher 是一个庞大的 Java/Go 应用,启动需要一点时间(通常 2-5 分钟)。
您可以像看电影一样监控它的启动过程:
```bash
watch kubectl get pods -n cattle-system
```
* **阶段 1**: 您会看到 `rancher-webhook` 先启动。
* **阶段 2**: 接着 `rancher-xxxx` 主 Pod 开始 `ContainerCreating`
* **阶段 3**: 当 `rancher-xxxx` 状态变为 **`Running`** 且 **`READY 1/1`** 时,说明启动成功。
*(如果 Ready 很久都是 0/1,不要急,那是它在做数据库初始化,只要不报错 CrashLoopBackOff 就等着)*
---
### 第五步:登录并接管一切
一旦 Pod 跑起来、证书也签发好(Ingress 生效)后:
1. 打开浏览器访问:**https://rancher.u6.net3w.com**
2. **密码**:输入刚才设置的 `admin123456`
3. **接管**
* 登录后,它会提示您设置一个新的复杂密码。
* 然后您会看到一个名为 **"local"** 的集群 —— **这就是您的 K3s 集群!**
---
### 🎉 您现在可以用图形界面做什么?
进入 "local" 集群后,您可以尝试以下操作,体验一下为什么我说它是“神器”:
1. **看工作负载**:点击左侧菜单 **Workload (工作负载)**,您会看到之前的 `my-python-app`, `registry`, `wordpress` 全都在列表里,CPU/内存占用一目了然。
2. **查看日志**:点进 `my-python-app` -> 点击 Pod 名字 -> 右上角三个点 -> **View Logs**。界面非常舒服,支持实时滚动。
3. **进入终端**:同上,点击 **Execute Shell**,直接在浏览器里获得一个 root 终端,再也不用 SSH 连来连去了。
4. **拖拽扩容**:找到 Deployment,点一下 `+` 号,瞬间从 1 个 Pod 变成 5 个 Pod,感受 Kubernetes 的弹性。
**快去试试吧!如果安装过程中卡在 Pulling Image 或者 Pending,请告诉我 `kubectl get pods -n cattle-system` 的截图。**


@@ -0,0 +1,76 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: kube-system
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: traefik
namespace: kube-system
labels:
app: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
serviceAccountName: traefik-ingress-controller
containers:
- name: traefik
image: traefik:v2.10
args:
- --api.insecure=true
- --accesslog
- --entrypoints.web.Address=:80
- --entrypoints.websecure.Address=:443
- --providers.kubernetescrd
- --providers.kubernetesingress
- --providers.kubernetesingress.ingressclass=traefik
- --entrypoints.websecure.http.tls=true
- --certificatesresolvers.default.acme.tlschallenge
- --certificatesresolvers.default.acme.email=admin@example.com
- --certificatesresolvers.default.acme.storage=/data/acme.json
ports:
- name: web
containerPort: 80
- name: websecure
containerPort: 443
- name: admin
containerPort: 8080
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
emptyDir: {}
---
apiVersion: v1
kind: Service
metadata:
name: traefik
namespace: kube-system
spec:
type: LoadBalancer
selector:
app: traefik
ports:
- protocol: TCP
port: 80
name: web
targetPort: 80
- protocol: TCP
port: 443
name: websecure
targetPort: 443
- protocol: TCP
port: 8080
name: admin
targetPort: 8080


@@ -0,0 +1,9 @@
---
apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
name: traefik
annotations:
ingressclass.kubernetes.io/is-default-class: "true"
spec:
controller: traefik.io/ingress-controller
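可以顺手确认这个 IngressClass 已注册并被标记为默认(示意命令):

```bash
# traefik 一行应带有 (default) 标记
kubectl get ingressclass
```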


@@ -0,0 +1,63 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.io
- traefik.containo.us
resources:
- middlewares
- middlewaretcps
- ingressroutes
- traefikservices
- ingressroutetcps
- ingressrouteudps
- tlsoptions
- tlsstores
- serverstransports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: kube-system


@@ -0,0 +1,19 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: kuboard-v3
namespace: kuboard
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: kuboard.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: kuboard-v3
port:
number: 80


@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: kuboard-v3
namespace: kuboard
labels:
app: kuboard-v3
spec:
type: ClusterIP
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
selector:
app: kuboard-v3


@@ -0,0 +1,303 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: kuboard
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kuboard-v3-config
namespace: kuboard
data:
# 关于如下参数的解释,请参考文档 https://kuboard.cn/install/v3/install-built-in.html
# [common]
KUBOARD_SERVER_NODE_PORT: '30080'
KUBOARD_AGENT_SERVER_UDP_PORT: '30081'
KUBOARD_AGENT_SERVER_TCP_PORT: '30081'
KUBOARD_SERVER_LOGRUS_LEVEL: info # error / debug / trace
# KUBOARD_AGENT_KEY 是 Agent 与 Kuboard 通信时的密钥,请修改为一个任意的包含字母、数字的 32 位字符串,此密钥变更后需要删除 Kuboard Agent 重新导入。
KUBOARD_AGENT_KEY: 32b7d6572c6255211b4eec9009e4a816
KUBOARD_AGENT_IMAG: eipwork/kuboard-agent
KUBOARD_QUESTDB_IMAGE: questdb/questdb:6.0.5
KUBOARD_DISABLE_AUDIT: 'false' # 如果要禁用 Kuboard 审计功能,将此参数的值设置为 'true',必须带引号。
# 关于如下参数的解释,请参考文档 https://kuboard.cn/install/v3/install-gitlab.html
# [gitlab login]
# KUBOARD_LOGIN_TYPE: "gitlab"
# KUBOARD_ROOT_USER: "your-user-name-in-gitlab"
# GITLAB_BASE_URL: "http://gitlab.mycompany.com"
# GITLAB_APPLICATION_ID: "7c10882aa46810a0402d17c66103894ac5e43d6130b81c17f7f2d8ae182040b5"
# GITLAB_CLIENT_SECRET: "77c149bd3a4b6870bffa1a1afaf37cba28a1817f4cf518699065f5a8fe958889"
# 关于如下参数的解释,请参考文档 https://kuboard.cn/install/v3/install-github.html
# [github login]
# KUBOARD_LOGIN_TYPE: "github"
# KUBOARD_ROOT_USER: "your-user-name-in-github"
# GITHUB_CLIENT_ID: "17577d45e4de7dad88e0"
# GITHUB_CLIENT_SECRET: "ff738553a8c7e9ad39569c8d02c1d85ec19115a7"
# 关于如下参数的解释,请参考文档 https://kuboard.cn/install/v3/install-ldap.html
# [ldap login]
# KUBOARD_LOGIN_TYPE: "ldap"
# KUBOARD_ROOT_USER: "your-user-name-in-ldap"
# LDAP_HOST: "ldap-ip-address:389"
# LDAP_BIND_DN: "cn=admin,dc=example,dc=org"
# LDAP_BIND_PASSWORD: "admin"
# LDAP_BASE_DN: "dc=example,dc=org"
# LDAP_FILTER: "(objectClass=posixAccount)"
# LDAP_ID_ATTRIBUTE: "uid"
# LDAP_USER_NAME_ATTRIBUTE: "uid"
# LDAP_EMAIL_ATTRIBUTE: "mail"
# LDAP_DISPLAY_NAME_ATTRIBUTE: "cn"
# LDAP_GROUP_SEARCH_BASE_DN: "dc=example,dc=org"
# LDAP_GROUP_SEARCH_FILTER: "(objectClass=posixGroup)"
# LDAP_USER_MACHER_USER_ATTRIBUTE: "gidNumber"
# LDAP_USER_MACHER_GROUP_ATTRIBUTE: "gidNumber"
# LDAP_GROUP_NAME_ATTRIBUTE: "cn"
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kuboard-boostrap
namespace: kuboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kuboard-boostrap-crb
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kuboard-boostrap
namespace: kuboard
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
labels:
k8s.kuboard.cn/name: kuboard-etcd
name: kuboard-etcd
namespace: kuboard
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
k8s.kuboard.cn/name: kuboard-etcd
template:
metadata:
labels:
k8s.kuboard.cn/name: kuboard-etcd
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
- matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
- matchExpressions:
- key: k8s.kuboard.cn/role
operator: In
values:
- etcd
containers:
- env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: HOSTIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
image: 'eipwork/etcd-host:3.4.16-2'
imagePullPolicy: Always
name: etcd
ports:
- containerPort: 2381
hostPort: 2381
name: server
protocol: TCP
- containerPort: 2382
hostPort: 2382
name: peer
protocol: TCP
livenessProbe:
failureThreshold: 3
httpGet:
path: /health
port: 2381
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
volumeMounts:
- mountPath: /data
name: data
dnsPolicy: ClusterFirst
hostNetwork: true
restartPolicy: Always
serviceAccount: kuboard-boostrap
serviceAccountName: kuboard-boostrap
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
- key: node-role.kubernetes.io/control-plane
operator: Exists
volumes:
- hostPath:
path: /usr/share/kuboard/etcd
name: data
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: {}
labels:
k8s.kuboard.cn/name: kuboard-v3
name: kuboard-v3
namespace: kuboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s.kuboard.cn/name: kuboard-v3
template:
metadata:
labels:
k8s.kuboard.cn/name: kuboard-v3
spec:
affinity:
nodeAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- preference:
matchExpressions:
- key: node-role.kubernetes.io/master
operator: Exists
weight: 100
- preference:
matchExpressions:
- key: node-role.kubernetes.io/control-plane
operator: Exists
weight: 100
containers:
- env:
- name: HOSTIP
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: status.hostIP
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
envFrom:
- configMapRef:
name: kuboard-v3-config
image: 'eipwork/kuboard:v3'
imagePullPolicy: Always
livenessProbe:
failureThreshold: 3
httpGet:
path: /kuboard-resources/version.json
port: 80
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
name: kuboard
ports:
- containerPort: 80
name: web
protocol: TCP
- containerPort: 443
name: https
protocol: TCP
- containerPort: 10081
name: peer
protocol: TCP
- containerPort: 10081
name: peer-u
protocol: UDP
readinessProbe:
failureThreshold: 3
httpGet:
path: /kuboard-resources/version.json
port: 80
scheme: HTTP
initialDelaySeconds: 30
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
resources: {}
# startupProbe:
# failureThreshold: 20
# httpGet:
# path: /kuboard-resources/version.json
# port: 80
# scheme: HTTP
# initialDelaySeconds: 5
# periodSeconds: 10
# successThreshold: 1
# timeoutSeconds: 1
dnsPolicy: ClusterFirst
restartPolicy: Always
serviceAccount: kuboard-boostrap
serviceAccountName: kuboard-boostrap
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
---
apiVersion: v1
kind: Service
metadata:
annotations: {}
labels:
k8s.kuboard.cn/name: kuboard-v3
name: kuboard-v3
namespace: kuboard
spec:
ports:
- name: web
nodePort: 30080
port: 80
protocol: TCP
targetPort: 80
- name: tcp
nodePort: 30081
port: 10081
protocol: TCP
targetPort: 10081
- name: udp
nodePort: 30081
port: 10081
protocol: UDP
targetPort: 10081
selector:
k8s.kuboard.cn/name: kuboard-v3
sessionAffinity: None
type: NodePort


@@ -0,0 +1,59 @@
---
apiVersion: v1
kind: Service
metadata:
name: localhost-15001
namespace: default
spec:
ports:
- protocol: TCP
port: 80
targetPort: 15001
---
apiVersion: v1
kind: Endpoints
metadata:
name: localhost-15001
namespace: default
subsets:
- addresses:
- ip: 134.195.210.237
ports:
- port: 15001
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: 1go-proxy
namespace: default
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
traefik.ingress.kubernetes.io/router.entrypoints: websecure
spec:
ingressClassName: traefik
tls:
- hosts:
- 1go.u6.net3w.com
- gl.u6.net3w.com
secretName: 1go-proxy-tls
rules:
- host: 1go.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: localhost-15001
port:
number: 80
- host: gl.u6.net3w.com
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: localhost-15001
port:
number: 80


@@ -0,0 +1,84 @@
#!/bin/bash
#
# 节点健康检查脚本
# 使用方法: bash check-node-health.sh
#
# 颜色输出
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
echo -e "${BLUE}================================${NC}"
echo -e "${BLUE}K3s 集群健康检查${NC}"
echo -e "${BLUE}================================${NC}"
echo ""
# 1. 检查节点状态
echo -e "${YELLOW}[1/8] 检查节点状态...${NC}"
kubectl get nodes -o wide
echo ""
# 2. 检查节点资源
echo -e "${YELLOW}[2/8] 检查节点资源使用...${NC}"
kubectl top nodes 2>/dev/null || echo -e "${YELLOW}⚠ metrics-server 未就绪${NC}"
echo ""
# 3. 检查系统 Pods
echo -e "${YELLOW}[3/8] 检查系统组件...${NC}"
kubectl get pods -n kube-system
echo ""
# 4. 检查 Longhorn
echo -e "${YELLOW}[4/8] 检查 Longhorn 存储...${NC}"
kubectl get pods -n longhorn-system | head -10
echo ""
# 5. 检查 PVC
echo -e "${YELLOW}[5/8] 检查持久化存储卷...${NC}"
kubectl get pvc -A
echo ""
# 6. 检查应用 Pods
echo -e "${YELLOW}[6/8] 检查应用 Pods...${NC}"
kubectl get pods -A | grep -v "kube-system\|longhorn-system\|cert-manager" | head -20
echo ""
# 7. 检查 Ingress
echo -e "${YELLOW}[7/8] 检查 Ingress 配置...${NC}"
kubectl get ingress -A
echo ""
# 8. 检查证书
echo -e "${YELLOW}[8/8] 检查 SSL 证书...${NC}"
kubectl get certificate -A
echo ""
# 统计信息
echo -e "${BLUE}================================${NC}"
echo -e "${BLUE}集群统计信息${NC}"
echo -e "${BLUE}================================${NC}"
TOTAL_NODES=$(kubectl get nodes --no-headers | wc -l)
READY_NODES=$(kubectl get nodes --no-headers | grep " Ready " | wc -l)
TOTAL_PODS=$(kubectl get pods -A --no-headers | wc -l)
RUNNING_PODS=$(kubectl get pods -A --no-headers | grep "Running" | wc -l)
TOTAL_PVC=$(kubectl get pvc -A --no-headers | wc -l)
BOUND_PVC=$(kubectl get pvc -A --no-headers | grep "Bound" | wc -l)
echo -e "节点总数: ${GREEN}${TOTAL_NODES}${NC} (就绪: ${GREEN}${READY_NODES}${NC})"
echo -e "Pod 总数: ${GREEN}${TOTAL_PODS}${NC} (运行中: ${GREEN}${RUNNING_PODS}${NC})"
echo -e "PVC 总数: ${GREEN}${TOTAL_PVC}${NC} (已绑定: ${GREEN}${BOUND_PVC}${NC})"
echo ""
# 健康评分
if [ $READY_NODES -eq $TOTAL_NODES ] && [ $RUNNING_PODS -gt $((TOTAL_PODS * 80 / 100)) ]; then
echo -e "${GREEN}✓ 集群健康状态: 良好${NC}"
elif [ $READY_NODES -gt $((TOTAL_NODES / 2)) ]; then
echo -e "${YELLOW}⚠ 集群健康状态: 一般${NC}"
else
echo -e "${RED}✗ 集群健康状态: 异常${NC}"
fi
echo ""


@@ -0,0 +1,113 @@
#!/bin/bash
#
# 快速配置脚本生成器
# 为新节点生成定制化的加入脚本
#
# 颜色输出
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'
echo -e "${GREEN}================================${NC}"
echo -e "${GREEN}K3s 节点加入脚本生成器${NC}"
echo -e "${GREEN}================================${NC}"
echo ""
# 获取当前配置
MASTER_IP="134.195.210.237"
NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
echo -e "${YELLOW}当前 Master 节点信息:${NC}"
echo "IP: $MASTER_IP"
echo "Token: ${NODE_TOKEN:0:20}..."
echo ""
# 选择节点类型
echo "请选择要加入的节点类型:"
echo "1) Worker 节点 (推荐用于 2 节点方案)"
echo "2) Master 节点 (用于 HA 高可用方案)"
echo ""
read -p "请输入选项 (1 或 2): " NODE_TYPE
if [ "$NODE_TYPE" == "1" ]; then
SCRIPT_NAME="join-worker-custom.sh"
echo ""
echo -e "${GREEN}生成 Worker 节点加入脚本...${NC}"
cat > $SCRIPT_NAME << 'EOFWORKER'
#!/bin/bash
set -e
# 配置信息
MASTER_IP="134.195.210.237"
NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
echo "开始加入 Worker 节点..."
# 系统准备
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
apt-get update -qq
apt-get install -y curl open-iscsi nfs-common
systemctl enable --now iscsid
# 安装 k3s agent
curl -sfL https://get.k3s.io | K3S_URL=https://${MASTER_IP}:6443 \
K3S_TOKEN=${NODE_TOKEN} sh -
echo "Worker 节点加入完成!"
echo "在 Master 节点执行: kubectl get nodes"
EOFWORKER
chmod +x $SCRIPT_NAME
elif [ "$NODE_TYPE" == "2" ]; then
SCRIPT_NAME="join-master-custom.sh"
echo ""
read -p "请输入负载均衡器 IP: " LB_IP
echo -e "${GREEN}生成 Master 节点加入脚本...${NC}"
cat > $SCRIPT_NAME << EOFMASTER
#!/bin/bash
set -e
# 配置信息
FIRST_MASTER_IP="134.195.210.237"
LB_IP="$LB_IP"
NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
echo "开始加入 Master 节点 (HA 模式)..."
# 系统准备
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
apt-get update -qq
apt-get install -y curl open-iscsi nfs-common
systemctl enable --now iscsid
# 安装 k3s server
curl -sfL https://get.k3s.io | sh -s - server \\
--server https://\${FIRST_MASTER_IP}:6443 \\
--token \${NODE_TOKEN} \\
--tls-san=\${LB_IP} \\
--write-kubeconfig-mode 644
echo "Master 节点加入完成!"
echo "在任意 Master 节点执行: kubectl get nodes"
EOFMASTER
chmod +x $SCRIPT_NAME
else
echo "无效的选项"
exit 1
fi
echo ""
echo -e "${GREEN}✓ 脚本已生成: $SCRIPT_NAME${NC}"
echo ""
echo "使用方法:"
echo "1. 将脚本复制到新节点"
echo "2. 在新节点上执行: sudo bash $SCRIPT_NAME"
echo ""


@@ -0,0 +1,137 @@
#!/bin/bash
#
# K3s Master 节点快速加入脚本 (用于 HA 集群)
# 使用方法: sudo bash join-master.sh
#
set -e
# 颜色输出
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo -e "${GREEN}================================${NC}"
echo -e "${GREEN}K3s Master 节点加入脚本 (HA)${NC}"
echo -e "${GREEN}================================${NC}"
echo ""
# 检查是否为 root
if [ "$EUID" -ne 0 ]; then
echo -e "${RED}错误: 请使用 sudo 运行此脚本${NC}"
exit 1
fi
# 配置信息
FIRST_MASTER_IP="134.195.210.237"
NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
echo -e "${YELLOW}第一个 Master 节点 IP: ${FIRST_MASTER_IP}${NC}"
echo ""
# 获取负载均衡器 IP
read -p "请输入负载均衡器 IP 地址: " LB_IP
if [ -z "$LB_IP" ]; then
echo -e "${RED}错误: 负载均衡器 IP 不能为空${NC}"
exit 1
fi
echo -e "${YELLOW}负载均衡器 IP: ${LB_IP}${NC}"
echo ""
# 1. 检查网络连通性
echo -e "${YELLOW}[1/6] 检查网络连通性...${NC}"
if ping -c 2 ${FIRST_MASTER_IP} > /dev/null 2>&1; then
echo -e "${GREEN}✓ 可以连接到第一个 Master 节点${NC}"
else
echo -e "${RED}✗ 无法连接到第一个 Master 节点 ${FIRST_MASTER_IP}${NC}"
exit 1
fi
if ping -c 2 ${LB_IP} > /dev/null 2>&1; then
echo -e "${GREEN}✓ 可以连接到负载均衡器${NC}"
else
echo -e "${RED}✗ 无法连接到负载均衡器 ${LB_IP}${NC}"
exit 1
fi
# 2. 检查端口
echo -e "${YELLOW}[2/6] 检查端口...${NC}"
if timeout 5 bash -c "cat < /dev/null > /dev/tcp/${FIRST_MASTER_IP}/6443" 2>/dev/null; then
echo -e "${GREEN}✓ Master 节点端口 6443 可访问${NC}"
else
echo -e "${RED}✗ Master 节点端口 6443 无法访问${NC}"
exit 1
fi
# 3. 系统准备
echo -e "${YELLOW}[3/6] 准备系统环境...${NC}"
# 禁用 swap
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
echo -e "${GREEN}✓ 已禁用 swap${NC}"
# 安装依赖
apt-get update -qq
apt-get install -y curl open-iscsi nfs-common > /dev/null 2>&1
systemctl enable --now iscsid > /dev/null 2>&1
echo -e "${GREEN}✓ 已安装必要依赖${NC}"
# 4. 设置主机名
echo -e "${YELLOW}[4/6] 配置主机名...${NC}"
read -p "请输入此节点的主机名 (例如: master-2): " HOSTNAME
if [ -n "$HOSTNAME" ]; then
hostnamectl set-hostname $HOSTNAME
echo -e "${GREEN}✓ 主机名已设置为: $HOSTNAME${NC}"
else
echo -e "${YELLOW}⚠ 跳过主机名设置${NC}"
fi
# 5. 安装 k3s server
echo -e "${YELLOW}[5/6] 安装 k3s server (HA 模式)...${NC}"
echo -e "${YELLOW}这可能需要几分钟时间...${NC}"
curl -sfL https://get.k3s.io | sh -s - server \
--server https://${FIRST_MASTER_IP}:6443 \
--token ${NODE_TOKEN} \
--tls-san=${LB_IP} \
--write-kubeconfig-mode 644 > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo -e "${GREEN}✓ k3s server 安装成功${NC}"
else
echo -e "${RED}✗ k3s server 安装失败${NC}"
exit 1
fi
# 6. 验证安装
echo -e "${YELLOW}[6/6] 验证安装...${NC}"
sleep 15
if systemctl is-active --quiet k3s; then
echo -e "${GREEN}✓ k3s 服务运行正常${NC}"
else
echo -e "${RED}✗ k3s 服务未运行${NC}"
echo -e "${YELLOW}查看日志: sudo journalctl -u k3s -f${NC}"
exit 1
fi
echo ""
echo -e "${GREEN}================================${NC}"
echo -e "${GREEN}✓ Master 节点加入成功!${NC}"
echo -e "${GREEN}================================${NC}"
echo ""
echo -e "${YELLOW}下一步操作:${NC}"
echo -e "1. 在任意 Master 节点执行以下命令查看节点状态:"
echo -e " ${GREEN}kubectl get nodes${NC}"
echo ""
echo -e "2. 检查 etcd 集群状态:"
echo -e " ${GREEN}kubectl get pods -n kube-system | grep etcd${NC}"
echo ""
echo -e "3. 查看节点详细信息:"
echo -e " ${GREEN}kubectl describe node $HOSTNAME${NC}"
echo ""
echo -e "4. 更新负载均衡器配置,添加此节点的 IP"
echo ""


@@ -0,0 +1,116 @@
#!/bin/bash
#
# K3s Worker 节点快速加入脚本
# 使用方法: sudo bash join-worker.sh
#
set -e
# 颜色输出
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
echo -e "${GREEN}================================${NC}"
echo -e "${GREEN}K3s Worker 节点加入脚本${NC}"
echo -e "${GREEN}================================${NC}"
echo ""
# 检查是否为 root
if [ "$EUID" -ne 0 ]; then
echo -e "${RED}错误: 请使用 sudo 运行此脚本${NC}"
exit 1
fi
# 配置信息
MASTER_IP="134.195.210.237"
NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
echo -e "${YELLOW}Master 节点 IP: ${MASTER_IP}${NC}"
echo ""
# 1. 检查网络连通性
echo -e "${YELLOW}[1/6] 检查网络连通性...${NC}"
if ping -c 2 ${MASTER_IP} > /dev/null 2>&1; then
echo -e "${GREEN}✓ 网络连通正常${NC}"
else
echo -e "${RED}✗ 无法连接到 Master 节点 ${MASTER_IP}${NC}"
exit 1
fi
# 2. 检查端口
echo -e "${YELLOW}[2/6] 检查 Master 节点端口 6443...${NC}"
if timeout 5 bash -c "cat < /dev/null > /dev/tcp/${MASTER_IP}/6443" 2>/dev/null; then
echo -e "${GREEN}✓ 端口 6443 可访问${NC}"
else
echo -e "${RED}✗ 端口 6443 无法访问,请检查防火墙${NC}"
exit 1
fi
# 3. 系统准备
echo -e "${YELLOW}[3/6] 准备系统环境...${NC}"
# 禁用 swap
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
echo -e "${GREEN}✓ 已禁用 swap${NC}"
# 安装依赖
apt-get update -qq
apt-get install -y curl open-iscsi nfs-common > /dev/null 2>&1
systemctl enable --now iscsid > /dev/null 2>&1
echo -e "${GREEN}✓ 已安装必要依赖${NC}"
# 4. 设置主机名
echo -e "${YELLOW}[4/6] 配置主机名...${NC}"
read -p "请输入此节点的主机名 (例如: worker-1): " HOSTNAME
if [ -n "$HOSTNAME" ]; then
hostnamectl set-hostname $HOSTNAME
echo -e "${GREEN}✓ 主机名已设置为: $HOSTNAME${NC}"
else
echo -e "${YELLOW}⚠ 跳过主机名设置${NC}"
fi
# 5. 安装 k3s agent
echo -e "${YELLOW}[5/6] 安装 k3s agent...${NC}"
echo -e "${YELLOW}这可能需要几分钟时间...${NC}"
curl -sfL https://get.k3s.io | K3S_URL=https://${MASTER_IP}:6443 \
K3S_TOKEN=${NODE_TOKEN} \
sh - > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo -e "${GREEN}✓ k3s agent 安装成功${NC}"
else
echo -e "${RED}✗ k3s agent 安装失败${NC}"
exit 1
fi
# 6. 验证安装
echo -e "${YELLOW}[6/6] 验证安装...${NC}"
sleep 10
if systemctl is-active --quiet k3s-agent; then
echo -e "${GREEN}✓ k3s-agent 服务运行正常${NC}"
else
echo -e "${RED}✗ k3s-agent 服务未运行${NC}"
echo -e "${YELLOW}查看日志: sudo journalctl -u k3s-agent -f${NC}"
exit 1
fi
echo ""
echo -e "${GREEN}================================${NC}"
echo -e "${GREEN}✓ Worker 节点加入成功!${NC}"
echo -e "${GREEN}================================${NC}"
echo ""
echo -e "${YELLOW}下一步操作:${NC}"
echo -e "1. 在 Master 节点执行以下命令查看节点状态:"
echo -e " ${GREEN}kubectl get nodes${NC}"
echo ""
echo -e "2. 为节点添加标签 (在 Master 节点执行):"
echo -e " ${GREEN}kubectl label nodes $HOSTNAME node-role.kubernetes.io/worker=worker${NC}"
echo ""
echo -e "3. 查看节点详细信息:"
echo -e " ${GREEN}kubectl describe node $HOSTNAME${NC}"
echo ""


@@ -0,0 +1,88 @@
#!/bin/bash
# 项目状态检查脚本
# 扫描仓库并显示项目状态、部署情况、文档完整性等
echo "╔════════════════════════════════════════════════════════════════╗"
echo "║ K3s Monorepo - 项目状态 ║"
echo "╚════════════════════════════════════════════════════════════════╝"
echo ""
# 检查已部署的应用
echo "📦 已部署应用:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
if command -v kubectl &> /dev/null; then
kubectl get deployments -A 2>/dev/null | grep -E "(php-test|go01|wordpress|registry|n8n|gitea)" | \
awk '{printf "  ✅ %-25s %-15s %s replicas\n", $2, $1, $3}' || echo "  ⚠️  无法获取部署信息"
else
echo " ⚠️ kubectl 未安装,无法检查部署状态"
fi
echo ""
echo "📱 应用项目:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
# 检查每个应用目录
for dir in php-test go01 rails/*/ www; do
if [ -d "$dir" ]; then
name=$(basename "$dir")
readme=""
dockerfile=""
k8s=""
[ -f "$dir/README.md" ] && readme="📄" || readme=" "
[ -f "$dir/Dockerfile" ] && dockerfile="🐳" || dockerfile=" "
[ -d "$dir/k8s" ] || [ -f "$dir/k8s-deployment.yaml" ] && k8s="☸️ " || k8s=" "
printf " %-30s %s %s %s\n" "$name" "$readme" "$dockerfile" "$k8s"
fi
done
echo ""
echo "🏗️ 基础设施服务:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
for dir in k3s/*/; do
if [ -d "$dir" ]; then
name=$(basename "$dir")
yaml_count=$(find "$dir" -name "*.yaml" 2>/dev/null | wc -l)
printf " %-30s %2d YAML 文件\n" "$name" "$yaml_count"
fi
done
echo ""
echo "🛠️ 平台工具:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
for dir in traefik kuboard proxy; do
if [ -d "$dir" ]; then
yaml_count=$(find "$dir" -name "*.yaml" 2>/dev/null | wc -l)
printf " %-30s %2d YAML 文件\n" "$dir" "$yaml_count"
fi
done
echo ""
echo "📊 统计信息:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
total_yaml=$(find . -name "*.yaml" -type f 2>/dev/null | wc -l)
total_md=$(find . -name "*.md" -type f 2>/dev/null | wc -l)
total_sh=$(find . -name "*.sh" -type f 2>/dev/null | wc -l)
total_dockerfile=$(find . -name "Dockerfile" -type f 2>/dev/null | wc -l)
echo " YAML 配置文件: $total_yaml"
echo " Markdown 文档: $total_md"
echo " Shell 脚本: $total_sh"
echo " Dockerfile: $total_dockerfile"
echo ""
echo "💡 提示:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " 📄 = 有 README 文档"
echo " 🐳 = 有 Dockerfile"
echo " ☸️ = 有 Kubernetes 配置"
echo ""
echo " 查看详细信息: cat PROJECT-INDEX.md"
echo " 查看目录结构: ./scripts/project-tree.sh"
echo " 查看集群状态: make status"
echo ""


@@ -0,0 +1,59 @@
#!/bin/bash
# 目录树生成脚本
# 生成清晰的项目目录结构,过滤掉不必要的文件
echo "╔════════════════════════════════════════════════════════════════╗"
echo "║ K3s Monorepo - 目录结构 ║"
echo "╚════════════════════════════════════════════════════════════════╝"
echo ""
# 检查 tree 命令是否存在
if ! command -v tree &> /dev/null; then
echo "⚠️ tree 命令未安装"
echo ""
echo "安装方法:"
echo " Ubuntu/Debian: sudo apt-get install tree"
echo " CentOS/RHEL: sudo yum install tree"
echo " macOS: brew install tree"
echo ""
echo "使用 ls 命令替代显示:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
ls -lh --group-directories-first 2>/dev/null || ls -lh
exit 0
fi
echo "📁 根目录结构 (2层):"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
tree -L 2 -I 'node_modules|.git|__pycache__|*.pyc|.claude' \
--dirsfirst \
-F \
/home/fei/k3s
echo ""
echo "📱 应用项目详情:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "php-test/ (PHP 应用):"
tree -L 2 -I 'node_modules|.git' php-test 2>/dev/null || echo " 目录不存在"
echo ""
echo "go01/ (Go 应用):"
tree -L 2 -I 'node_modules|.git' go01 2>/dev/null || echo " 目录不存在"
echo ""
echo "🏗️ 基础设施配置:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "k3s/ (基础设施):"
tree -L 2 -I 'node_modules|.git' k3s 2>/dev/null || echo " 目录不存在"
echo ""
echo "💡 提示:"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo " 查看完整结构: tree -I 'node_modules|.git'"
echo " 查看项目索引: cat PROJECT-INDEX.md"
echo " 查看项目状态: ./scripts/project-status.sh"
echo ""

old/005-docs/notes/biji.md

@@ -0,0 +1,228 @@
k3s集群部署完成!
📊 部署总结
✅ 集群状态
- k3s版本: v1.34.3+k3s1
- 节点状态: Ready (control-plane)
- 运行中的Pods: 35个
- 存储系统: Longhorn (分布式存储)
- 证书管理: cert-manager + Let's Encrypt
🚀 已部署的服务
所有服务都已实现无状态化部署,数据和存储完全解耦:
┌─────────────────┬───────────────────────────────┬────────────┬─────────────┬─────────────────┐
│ 服务 │ 域名 │ 状态 │ 存储 │ HTTPS │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ WordPress博客 │ https://blog.u6.net3w.com │ ✅ Running │ 2Gi (MySQL) │ ✅ │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ Docker Registry │ https://registry.u6.net3w.com │ ✅ Running │ 20Gi │ ⏳ (证书申请中) │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ n8n工作流 │ https://n8n.u6.net3w.com │ ✅ Running │ 5Gi │ ✅ │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ Gitea代码仓库 │ https://git.u6.net3w.com │ ✅ Running │ 10Gi │ ✅ │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ Uptime Kuma监控 │ https://status.u6.net3w.com │ ✅ Running │ 2Gi │ ✅ │
├─────────────────┼───────────────────────────────┼────────────┼─────────────┼─────────────────┤
│ 导航页 │ https://nav.u6.net3w.com │ ✅ Running │ - │ ✅ │
└─────────────────┴───────────────────────────────┴────────────┴─────────────┴─────────────────┘
💾 存储配置
所有数据都使用Longhorn持久化存储,实现了容器与数据的完全解耦:
- MySQL数据库: 2Gi独立存储卷
- Docker镜像仓库: 20Gi独立存储卷
- Gitea代码仓库: 10Gi独立存储卷
- n8n工作流数据: 5Gi独立存储卷
- Uptime Kuma监控数据: 2Gi独立存储卷
🔐 安全配置
- ✅ 自动HTTPS证书(Let's Encrypt)
- ✅ Traefik Ingress Controller
- ✅ Docker Registry认证(用户名: admin, 密码: 123456)
📝 常用命令
# 查看所有服务
kubectl get pods -A
# 查看存储卷
kubectl get pvc -A
# 查看域名和证书
kubectl get ingress -A
kubectl get certificate -A
# 查看Longhorn UI (可选)
kubectl get svc -n longhorn-system
⚠️ 注意事项
1. Registry证书: registry.u6.net3w.com的SSL证书还在申请中,稍等几分钟会自动完成
2. DNS解析: 确保所有*.u6.net3w.com子域名都已正确解析到 134.195.210.237
3. 数据持久化: 所有数据都存储在Longhorn卷中,容器重启不会丢失数据
所有服务已成功实现无状态化部署,可以随时重启、扩容或迁移! 🎊
● ✅ 部署完成!
Registry 账号密码
- 用户名: admin
- 密码: 123456
Web 界面访问地址
🌐 https://registry-ui.u6.net3w.com
部署状态
- ✅ Registry UI Pod 运行中
- ✅ Service 已创建
- ✅ Ingress 已配置
- ✅ SSL 证书已自动签发(Let's Encrypt)
使用说明
1. 访问 Web 界面
- 打开浏览器访问: https://registry-ui.u6.net3w.com
- 现在可以看到漂亮的 Web 界面,不再是空白页面
2. 浏览镜像
- UI 会自动连接到您的 Registry
- 可以查看所有已推送的镜像
- 支持搜索和过滤功能
3. Docker 命令使用
# 登录 Registry
docker login registry.u6.net3w.com
# 用户名: admin
# 密码: 123456
# 推送镜像
docker tag my-image:latest registry.u6.net3w.com/my-image:latest
docker push registry.u6.net3w.com/my-image:latest
# 拉取镜像
docker pull registry.u6.net3w.com/my-image:latest
配置文件位置
- Registry 配置: /home/fei/k3s/k3s/registry/registry-stack.yaml
- UI 配置: /home/fei/k3s/k3s/registry/registry-ui.yaml
- 密码文件: /home/fei/k3s/k3s/registry/hardcode-secret.yaml
现在您可以通过 Web 界面方便地管理 Docker 镜像了!
认证已经成功!
我刚才测试了 docker login,登录成功了:
docker login registry.u6.net3w.com --username admin --password 123456
# 输出: Login Succeeded ✅
可能的问题原因
您遇到 401 错误可能是因为:
1. 密码输入错误 - 在交互式输入时可能有隐藏字符或空格
2. Docker 缓存 - 可能缓存了旧的错误认证信息
解决方案
请尝试以下方法:
方法 1:清除旧的认证信息后重试
# 先登出
sudo docker logout registry.u6.net3w.com
# 再登录
sudo docker login registry.u6.net3w.com
# Username: admin
# Password: 123456
方法 2:使用命令行参数(推荐)
echo "123456" | sudo docker login registry.u6.net3w.com --username admin --password-stdin
方法 3:直接使用密码参数
sudo docker login registry.u6.net3w.com --username admin --password 123456
验证登录成功后
登录成功后,您可以:
# 测试推送镜像
sudo docker tag nginx:latest registry.u6.net3w.com/nginx:test
sudo docker push registry.u6.net3w.com/nginx:test
# 查看 Web UI
# 访问: https://registry-ui.u6.net3w.com
请尝试上述方法之一,应该就能成功登录了!如果还有问题,请告诉我具体的
错误信息。
● Background command "Monitor registry logs in real-time" completed
(exit code 0)
● 我看到后台日志监控任务已完成。让我读取完整的日志输出。
● Read agent output bb664e0
⎿ Read 16 lines
● 完美!从日志中我可以看到 Docker 登录已经成功了!
日志分析
查看日志的第 35-39 行,显示了完整的 Docker 登录流程:
1. 第一次请求(401):
"GET /v2/" HTTP/1.1" 401 87 "" "Docker-Client/28.2.2 (linux)"
1. Docker 客户端先发送不带认证的请求,收到 401 要求认证
2. 第二次请求(200 成功):
level=info msg="authorized request" ... "Docker-Client/28.2.2
(linux)"
"GET /v2/" HTTP/1.1" 200 2 "" "Docker-Client/28.2.2 (linux)"
2. Docker 客户端发送带认证的请求,认证成功!
总结
✅ 问题已解决
- Registry 认证配置正确
- 账号密码:admin / 123456
- Docker 登录测试成功
您现在可以使用的服务
1. Docker Registry API
sudo docker login registry.u6.net3w.com
# Username: admin
# Password: 123456
2. Web UI 界面
- 访问:https://registry-ui.u6.net3w.com
- 可视化管理您的镜像
使用示例
# 推送镜像
sudo docker tag my-app:latest registry.u6.net3w.com/my-app:latest
sudo docker push registry.u6.net3w.com/my-app:latest
# 拉取镜像
sudo docker pull registry.u6.net3w.com/my-app:latest
如果您之前遇到的 401 错误还在,请尝试我之前建议的方法(先 logout 再 login)或使用 --password-stdin。Registry 本身已经完全正常工作了!


@@ -0,0 +1,94 @@
完美!GitOps 自动部署环境已经完全配置好了!
🎉 GitOps 实现条件评估结果
✅ 所有条件已满足!
| Prerequisite       | Status | Notes                                     |
|--------------------|--------|-------------------------------------------|
| k3s cluster        | ✅     | v1.34.3+k3s1 running                      |
| Git repository     | ✅     | Local repo initialized, configs committed |
| Gitea code hosting | ✅     | https://git.u6.net3w.com                  |
| ArgoCD             | ✅     | https://argocd.u6.net3w.com               |
| Auto-sync config   | ✅     | argocd-app.yaml created                   |
| HTTPS certificates | ✅     | Issued automatically by Let's Encrypt     |
🚀 Next steps
1⃣ Create the repository in Gitea
Visit: https://git.u6.net3w.com
1. Register an account (first visit)
2. Create a new repository: k3s-configs
3. Note down the repository URL
2⃣ Push the code to Gitea
# Add the remote (replace with your username)
git remote add origin https://git.u6.net3w.com/<your-username>/k3s-configs.git
# Push the code
git push -u origin master
3⃣ Configure ArgoCD
Visit: https://argocd.u6.net3w.com
Username: admin
Password: R5ALO8NIRi4KoN8Q
Steps:
1. Settings → Repositories → Connect Repo
2. Enter the Gitea repository details
3. Apply the ArgoCD Application (a sketch of a typical manifest follows):
kubectl apply -f k3s/argocd-app.yaml
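The file k3s/argocd-app.yaml in this repo is the authoritative definition. Purely for orientation, an Application with automated sync usually looks roughly like the sketch below; the repo URL placeholder, the path k3s, and the destination namespace are assumptions and must be adjusted before applying:
cat <<'EOF' | kubectl apply -f -
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: k3s-configs
  namespace: argocd
spec:
  project: default
  source:
    repoURL: https://git.u6.net3w.com/<your-username>/k3s-configs.git
    targetRevision: master
    path: k3s                      # directory watched by ArgoCD
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      prune: true                  # delete resources removed from Git
      selfHeal: true               # revert manual drift in the cluster
EOF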
4⃣ Test the automated deployment
# Edit any config file
vim k3s/my-blog/02-wordpress.yaml
# Commit and push
git add .
git commit -m "Test auto-sync"
git push
# ArgoCD will sync the change to the cluster automatically within about 3 minutes
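To watch the sync happen instead of waiting blindly, the Application status can be inspected. A small sketch, assuming the Application is named k3s-configs as in the manifest above:
# List all ArgoCD Applications with their sync state
kubectl get applications -n argocd
# Show sync and health status for this Application
kubectl get application k3s-configs -n argocd \
  -o jsonpath='{.status.sync.status}{" "}{.status.health.status}{"\n"}'
# Or, if the ArgoCD CLI is installed
argocd app get k3s-configs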
📊 Current deployment status
✅ k3s cluster: running (35+ pods)
✅ ArgoCD: https://argocd.u6.net3w.com
✅ Gitea: https://git.u6.net3w.com
✅ WordPress: https://blog.u6.net3w.com
✅ Docker Registry: https://registry.u6.net3w.com
✅ n8n: https://n8n.u6.net3w.com
✅ Uptime Kuma: https://status.u6.net3w.com
✅ Navigation page: https://nav.u6.net3w.com
🎯 GitOps workflow
┌─────────────┐      ┌─────────────┐      ┌─────────────┐
│ Edit YAML   │ ───> │ Git Push    │ ───> │ Gitea       │
└─────────────┘      └─────────────┘      └─────────────┘
┌─────────────┐      ┌─────────────┐      ┌─────────────┐
│ k3s cluster │ <─── │ ArgoCD      │ <─── │ Detects     │
│ updated     │      │ auto-sync   │      │ change      │
└─────────────┘      └─────────────┘      └─────────────┘
All prerequisites are met! You can now:
- ✅ Push YAML changes to Git
- ✅ Have them synced to production automatically
- ✅ Use version control for fast rollbacks (a rollback sketch follows below)
- ✅ Keep a complete audit trail
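Rolling back is just another Git operation; a minimal sketch, again assuming the Application is named k3s-configs:
# Revert the last change in Git; ArgoCD will reconcile the cluster back to the previous state
git revert HEAD
git push
# Optionally trigger the sync immediately instead of waiting for the next poll
argocd app sync k3s-configs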
See README.md for detailed usage instructions! 🎊
View File
@@ -0,0 +1,844 @@
# K3s 集群扩展指南
## 📋 目录
- [当前集群状态](#当前集群状态)
- [前置条件](#前置条件)
- [架构设计方案](#架构设计方案)
- [2节点集群1 Master + 2 Worker](#2节点集群1-master--2-worker)
- [4节点集群3 Master + 4 Worker](#4节点集群3-master--4-worker)
- [6节点集群3 Master + 6 Worker](#6节点集群3-master--6-worker)
- [节点加入步骤](#节点加入步骤)
- [高可用配置](#高可用配置)
- [存储配置](#存储配置)
- [验证和测试](#验证和测试)
- [故障排查](#故障排查)
---
## 📊 当前集群状态
```
Master 节点: vmus9
IP 地址: 134.195.210.237
k3s 版本: v1.34.3+k3s1
节点令牌: K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d
```
**重要**: 请妥善保管节点令牌,这是其他节点加入集群的凭证!
---
## ✅ 前置条件
### 所有新节点需要满足:
#### 1. 硬件要求
```
最低配置:
- CPU: 2 核
- 内存: 2GB (建议 4GB+)
- 磁盘: 20GB (Longhorn 存储建议 50GB+)
推荐配置:
- CPU: 4 核
- 内存: 8GB
- 磁盘: 100GB SSD
```
#### 2. 操作系统
```bash
# 支持的系统
- Ubuntu 20.04/22.04/24.04
- Debian 10/11/12
- CentOS 7/8
- RHEL 7/8
# 检查系统版本
cat /etc/os-release
```
#### 3. 网络要求
```bash
# 所有节点之间需要能够互相访问
# 需要开放的端口:
Master 节点:
- 6443: Kubernetes API Server
- 10250: Kubelet metrics
- 2379-2380: etcd (仅 HA 模式)
Worker 节点:
- 10250: Kubelet metrics
- 30000-32767: NodePort Services
所有节点:
- 8472: Flannel VXLAN (UDP)
- 51820: Flannel WireGuard (UDP)
```
#### 4. 系统准备
在每个新节点上执行:
```bash
# 1. 更新系统
sudo apt update && sudo apt upgrade -y
# 2. 禁用 swap (k8s 要求)
sudo swapoff -a
sudo sed -i '/ swap / s/^/#/' /etc/fstab
# 3. 配置主机名 (每个节点不同)
sudo hostnamectl set-hostname worker-node-1
# 4. 配置时间同步
sudo apt install -y chrony
sudo systemctl enable --now chrony
# 5. 安装必要工具
sudo apt install -y curl wget git
# 6. 配置防火墙 (如果启用)
# Ubuntu/Debian
sudo ufw allow 6443/tcp
sudo ufw allow 10250/tcp
sudo ufw allow 8472/udp
sudo ufw allow 51820/udp
```
---
## 🏗️ 架构设计方案
### 方案一2节点集群1 Master + 2 Worker
**适用场景**: 开发/测试环境,小型应用
```
┌─────────────────────────────────────────────────┐
│ 负载均衡 (可选) │
│ *.u6.net3w.com (Traefik) │
└─────────────────────────────────────────────────┘
┌─────────────┼─────────────┐
│ │ │
┌───────▼──────┐ ┌────▼─────┐ ┌────▼─────┐
│ Master │ │ Worker-1 │ │ Worker-2 │
│ vmus9 │ │ │ │ │
│ Control Plane│ │ 应用负载 │ │ 应用负载 │
│ + etcd │ │ │ │ │
│ 134.195.x.x │ │ 新节点1 │ │ 新节点2 │
└──────────────┘ └──────────┘ └──────────┘
```
**特点**:
- ✅ 简单易维护
- ✅ 成本低
- ❌ Master 单点故障
- ❌ 不适合生产环境
**资源分配建议**:
- Master: 4C8G (运行控制平面 + 部分应用)
- Worker-1: 4C8G (运行应用负载)
- Worker-2: 4C8G (运行应用负载)
---
### 方案二4节点集群3 Master + 4 Worker
**适用场景**: 生产环境,中等规模应用
```
┌──────────────────────────────────────────────────┐
│ 外部负载均衡 (必需) │
│ HAProxy/Nginx/云厂商 LB │
│ *.u6.net3w.com │
└──────────────────────────────────────────────────┘
┌─────────────┼─────────────┬─────────────┐
│ │ │ │
┌───────▼──────┐ ┌────▼─────┐ ┌────▼─────┐ ┌─────▼────┐
│ Master-1 │ │ Master-2 │ │ Master-3 │ │ Worker-1 │
│ vmus9 │ │ │ │ │ │ │
│ Control Plane│ │ Control │ │ Control │ │ 应用负载 │
│ + etcd │ │ + etcd │ │ + etcd │ │ │
└──────────────┘ └──────────┘ └──────────┘ └──────────┘
┌──────────┐
│ Worker-2 │
│ 应用负载 │
└──────────┘
┌──────────┐
│ Worker-3 │
│ 应用负载 │
└──────────┘
┌──────────┐
│ Worker-4 │
│ 应用负载 │
└──────────┘
```
**特点**:
- ✅ 高可用 (HA)
- ✅ Master 节点冗余
- ✅ 适合生产环境
- ✅ 可承载中等规模应用
- ⚠️ 需要外部负载均衡
**资源分配建议**:
- Master-1/2/3: 4C8G (仅运行控制平面)
- Worker-1/2/3/4: 8C16G (运行应用负载)
**etcd 集群**: 3 个 Master 节点组成 etcd 集群,可容忍 1 个节点故障
---
### 方案三6节点集群3 Master + 6 Worker
**适用场景**: 大规模生产环境,高负载应用
```
┌──────────────────────────────────────────────────┐
│ 外部负载均衡 (必需) │
│ HAProxy/Nginx/云厂商 LB │
│ *.u6.net3w.com │
└──────────────────────────────────────────────────┘
┌─────────────┼─────────────┬─────────────┐
│ │ │ │
┌───────▼──────┐ ┌────▼─────┐ ┌────▼─────┐ │
│ Master-1 │ │ Master-2 │ │ Master-3 │ │
│ vmus9 │ │ │ │ │ │
│ Control Plane│ │ Control │ │ Control │ │
│ + etcd │ │ + etcd │ │ + etcd │ │
└──────────────┘ └──────────┘ └──────────┘ │
┌─────────────┬─────────────┬─────────────┘
│ │ │
┌───────▼──────┐ ┌────▼─────┐ ┌────▼─────┐
│ Worker-1 │ │ Worker-2 │ │ Worker-3 │
│ Web 应用层 │ │ Web 层 │ │ Web 层 │
└──────────────┘ └──────────┘ └──────────┘
┌──────────────┐ ┌──────────┐ ┌──────────┐
│ Worker-4 │ │ Worker-5 │ │ Worker-6 │
│ 数据库层 │ │ 缓存层 │ │ 存储层 │
└──────────────┘ └──────────┘ └──────────┘
```
**特点**:
- ✅ 高可用 + 高性能
- ✅ 可按功能分层部署
- ✅ 支持大规模应用
- ✅ Longhorn 存储性能最佳
- ⚠️ 管理复杂度较高
- ⚠️ 成本较高
**资源分配建议**:
- Master-1/2/3: 4C8G (专用控制平面)
- Worker-1/2/3: 8C16G (Web 应用层)
- Worker-4: 8C32G (数据库层,高内存)
- Worker-5: 8C16G (缓存层)
- Worker-6: 4C8G + 200GB SSD (存储层)
**节点标签策略**:
```bash
# Web 层
kubectl label nodes worker-1 node-role=web
kubectl label nodes worker-2 node-role=web
kubectl label nodes worker-3 node-role=web
# 数据库层
kubectl label nodes worker-4 node-role=database
# 缓存层
kubectl label nodes worker-5 node-role=cache
# 存储层
kubectl label nodes worker-6 node-role=storage
```
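Once nodes carry these labels, workloads are pinned to a tier with a nodeSelector (or node affinity). A minimal sketch using the node-role=web label from above; the Deployment name and image are illustrative:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: demo-web
spec:
  replicas: 3
  selector:
    matchLabels:
      app: demo-web
  template:
    metadata:
      labels:
        app: demo-web
    spec:
      nodeSelector:
        node-role: web        # only schedule onto nodes labeled node-role=web
      containers:
      - name: nginx
        image: nginx:alpine
EOF

# Confirm the pods landed on the web-tier nodes
kubectl get pods -l app=demo-web -o wide
```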
---
## 🚀 节点加入步骤
### 场景 A: 加入 Worker 节点(适用于 2 节点方案)
#### 在新节点上执行:
```bash
# 1. 设置 Master 节点信息
export MASTER_IP="134.195.210.237"
export NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
# 2. 安装 k3s agent (Worker 节点)
curl -sfL https://get.k3s.io | K3S_URL=https://${MASTER_IP}:6443 \
K3S_TOKEN=${NODE_TOKEN} \
sh -
# 3. 验证安装
sudo systemctl status k3s-agent
# 4. 检查节点是否加入
# (在 Master 节点执行)
kubectl get nodes
```
#### 为 Worker 节点添加标签:
```bash
# 在 Master 节点执行
kubectl label nodes <worker-node-name> node-role.kubernetes.io/worker=worker
kubectl label nodes <worker-node-name> workload=application
```
---
### 场景 B: 加入 Master 节点(适用于 4/6 节点 HA 方案)
#### 前提条件:需要外部负载均衡器
##### 1. 配置外部负载均衡器
**选项 1: 使用 HAProxy**
在一台独立服务器上安装 HAProxy
```bash
# 安装 HAProxy
sudo apt install -y haproxy
# 配置 HAProxy
sudo tee /etc/haproxy/haproxy.cfg > /dev/null <<EOF
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin
stats timeout 30s
user haproxy
group haproxy
daemon
defaults
log global
mode tcp
option tcplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
frontend k3s-api
bind *:6443
mode tcp
default_backend k3s-masters
backend k3s-masters
mode tcp
balance roundrobin
option tcp-check
server master-1 134.195.210.237:6443 check fall 3 rise 2
server master-2 <MASTER-2-IP>:6443 check fall 3 rise 2
server master-3 <MASTER-3-IP>:6443 check fall 3 rise 2
EOF
# 重启 HAProxy
sudo systemctl restart haproxy
sudo systemctl enable haproxy
```
**选项 2: 使用 Nginx**
```bash
# 安装 Nginx
sudo apt install -y nginx
# 配置 Nginx Stream
sudo tee /etc/nginx/nginx.conf > /dev/null <<EOF
stream {
upstream k3s_servers {
server 134.195.210.237:6443 max_fails=3 fail_timeout=5s;
server <MASTER-2-IP>:6443 max_fails=3 fail_timeout=5s;
server <MASTER-3-IP>:6443 max_fails=3 fail_timeout=5s;
}
server {
listen 6443;
proxy_pass k3s_servers;
}
}
EOF
# 重启 Nginx
sudo systemctl restart nginx
```
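Before joining additional masters, it is worth confirming that the load balancer actually reaches the API server. A quick check, assuming <LB_IP> is the balancer's address:

```bash
# Any HTTP response (even 401 Unauthorized) proves the TCP path through the LB works
curl -k https://<LB_IP>:6443/version

# Stop k3s on one master and repeat the request to confirm failover to the remaining backends
```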
##### 2. 在第一个 Master 节点(当前节点)启用 HA
```bash
# 在当前 Master 节点执行
export LB_IP="<负载均衡器IP>"
# 重新安装 k3s 为 HA 模式
curl -sfL https://get.k3s.io | sh -s - server \
--cluster-init \
--tls-san=${LB_IP} \
--write-kubeconfig-mode 644
# 获取新的 token
sudo cat /var/lib/rancher/k3s/server/node-token
```
##### 3. 加入第二个 Master 节点
```bash
# 在新的 Master 节点执行
export MASTER_IP="134.195.210.237" # 第一个 Master
export LB_IP="<负载均衡器IP>"
export NODE_TOKEN="<新的 token>"
curl -sfL https://get.k3s.io | sh -s - server \
--server https://${MASTER_IP}:6443 \
--token ${NODE_TOKEN} \
--tls-san=${LB_IP} \
--write-kubeconfig-mode 644
```
##### 4. 加入第三个 Master 节点
```bash
# 在第三个 Master 节点执行(同上)
export MASTER_IP="134.195.210.237"
export LB_IP="<负载均衡器IP>"
export NODE_TOKEN="<token>"
curl -sfL https://get.k3s.io | sh -s - server \
--server https://${MASTER_IP}:6443 \
--token ${NODE_TOKEN} \
--tls-san=${LB_IP} \
--write-kubeconfig-mode 644
```
##### 5. 验证 HA 集群
```bash
# 检查所有 Master 节点
kubectl get nodes
# 检查 etcd 集群状态
kubectl get pods -n kube-system | grep etcd
# Confirm etcd is healthy by taking an on-demand snapshot
sudo k3s etcd-snapshot save
```
---
### 场景 C: 混合加入(先加 Master 再加 Worker
**推荐顺序**:
1. 配置外部负载均衡器
2. 转换第一个节点为 HA 模式
3. 加入第 2、3 个 Master 节点
4. 验证 Master 集群正常
5. 依次加入 Worker 节点
---
## 💾 存储配置
### Longhorn 多节点配置
当集群有 3+ 节点时Longhorn 可以提供分布式存储和数据冗余。
#### 1. 在所有节点安装依赖
```bash
# 在每个节点执行
sudo apt install -y open-iscsi nfs-common
# 启动 iscsid
sudo systemctl enable --now iscsid
```
#### 2. 配置 Longhorn 副本数
```bash
# 在 Master 节点执行
kubectl edit settings.longhorn.io default-replica-count -n longhorn-system
# 修改为:
# value: "3" # 3 副本(需要至少 3 个节点)
# value: "2" # 2 副本(需要至少 2 个节点)
```
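If you prefer a non-interactive command over kubectl edit, the setting can be patched directly. A sketch, assuming the Longhorn Setting CRD exposes the value at the top level (as in recent Longhorn releases); note that default-replica-count only affects newly created volumes, existing volumes keep their replica count:

```bash
# Set the default replica count for new Longhorn volumes to 3
kubectl -n longhorn-system patch settings.longhorn.io default-replica-count \
  --type merge -p '{"value": "3"}'

# Verify the change
kubectl -n longhorn-system get settings.longhorn.io default-replica-count -o yaml
```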
#### 3. 为节点添加存储标签
```bash
# 标记哪些节点用于存储
kubectl label nodes worker-1 node.longhorn.io/create-default-disk=true
kubectl label nodes worker-2 node.longhorn.io/create-default-disk=true
kubectl label nodes worker-3 node.longhorn.io/create-default-disk=true
# 排除某些节点(如纯计算节点)
kubectl label nodes worker-4 node.longhorn.io/create-default-disk=false
```
#### 4. 配置存储路径
```bash
# 在每个存储节点创建目录
sudo mkdir -p /var/lib/longhorn
sudo chmod 700 /var/lib/longhorn
```
#### 5. 访问 Longhorn UI
```bash
# 创建 Ingress (如果还没有)
kubectl apply -f k3s/my-blog/longhorn-ingress.yaml
# 访问: https://longhorn.u6.net3w.com
```
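The file k3s/my-blog/longhorn-ingress.yaml in this repo is the source of truth. For reference only, an Ingress exposing the Longhorn UI usually looks roughly like the sketch below; the cert-manager issuer name and the TLS secret name are assumptions here:

```bash
cat <<'EOF' | kubectl apply -f -
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: longhorn-ingress
  namespace: longhorn-system
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-prod   # assumed issuer name
spec:
  rules:
  - host: longhorn.u6.net3w.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: longhorn-frontend   # Longhorn UI service
            port:
              number: 80
  tls:
  - hosts:
    - longhorn.u6.net3w.com
    secretName: longhorn-tls          # assumed secret name
EOF
```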
---
## ✅ 验证和测试
### 1. 检查节点状态
```bash
# 查看所有节点
kubectl get nodes -o wide
# 查看节点详细信息
kubectl describe node <node-name>
# 查看节点资源使用
kubectl top nodes
```
### 2. 测试 Pod 调度
```bash
# 创建测试 Deployment
kubectl create deployment nginx-test --image=nginx --replicas=6
# 查看 Pod 分布
kubectl get pods -o wide
# 清理测试
kubectl delete deployment nginx-test
```
### 3. 测试存储
```bash
# 创建测试 PVC
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: test-pvc
spec:
accessModes:
- ReadWriteOnce
storageClassName: longhorn
resources:
requests:
storage: 1Gi
EOF
# 检查 PVC 状态
kubectl get pvc test-pvc
# 清理
kubectl delete pvc test-pvc
```
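A Bound PVC only proves provisioning; to confirm the volume is actually writable, attach it to a throwaway pod before cleaning up. The pod name below is illustrative, the claim is the test-pvc created above:

```bash
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: test-pvc-writer
spec:
  containers:
  - name: writer
    image: busybox
    command: ["sh", "-c", "echo longhorn-ok > /data/probe && sleep 3600"]
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    persistentVolumeClaim:
      claimName: test-pvc
EOF

# Confirm the write succeeded
kubectl exec test-pvc-writer -- cat /data/probe

# Remove the pod before deleting the PVC
kubectl delete pod test-pvc-writer
```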
### 4. 测试高可用(仅 HA 集群)
```bash
# 模拟 Master 节点故障
# 在一个 Master 节点执行
sudo systemctl stop k3s
# 在另一个节点检查集群是否正常
kubectl get nodes
# 恢复节点
sudo systemctl start k3s
```
### 5. 测试网络连通性
```bash
# 在 Master 节点创建测试 Pod
kubectl run test-pod --image=busybox --restart=Never -- sleep 3600
# 进入 Pod 测试网络
kubectl exec -it test-pod -- sh
# 在 Pod 内测试
ping 8.8.8.8
nslookup kubernetes.default
# 清理
kubectl delete pod test-pod
```
---
## 🔧 故障排查
### 问题 1: 节点无法加入集群
**症状**: `k3s-agent` 服务启动失败
**排查步骤**:
```bash
# 1. 检查服务状态
sudo systemctl status k3s-agent
# 2. 查看日志
sudo journalctl -u k3s-agent -f
# 3. 检查网络连通性
ping <MASTER_IP>
telnet <MASTER_IP> 6443
# 4. 检查 token 是否正确
echo $NODE_TOKEN
# 5. 检查防火墙
sudo ufw status
```
**解决方案**:
```bash
# 重新安装
sudo /usr/local/bin/k3s-agent-uninstall.sh
curl -sfL https://get.k3s.io | K3S_URL=https://${MASTER_IP}:6443 \
K3S_TOKEN=${NODE_TOKEN} sh -
```
---
### 问题 2: 节点状态为 NotReady
**症状**: `kubectl get nodes` 显示节点 NotReady
**排查步骤**:
```bash
# 1. 检查节点详情
kubectl describe node <node-name>
# 2. 检查 kubelet 日志
# 在问题节点执行
sudo journalctl -u k3s-agent -n 100
# 3. 检查网络插件
kubectl get pods -n kube-system | grep flannel
```
**解决方案**:
```bash
# 重启 k3s 服务
sudo systemctl restart k3s-agent
# 如果是网络问题,检查 CNI 配置
sudo ls -la /etc/cni/net.d/
```
---
### 问题 3: Pod 无法调度到新节点
**症状**: Pod 一直 Pending 或只调度到旧节点
**排查步骤**:
```bash
# 1. 检查节点污点
kubectl describe node <node-name> | grep Taints
# 2. 检查节点标签
kubectl get nodes --show-labels
# 3. 检查 Pod 的调度约束
kubectl describe pod <pod-name>
```
**解决方案**:
```bash
# 移除污点
kubectl taint nodes <node-name> node.kubernetes.io/not-ready:NoSchedule-
# 添加标签
kubectl label nodes <node-name> node-role.kubernetes.io/worker=worker
```
---
### 问题 4: Longhorn 存储无法使用
**症状**: PVC 一直 Pending
**排查步骤**:
```bash
# 1. 检查 Longhorn 组件
kubectl get pods -n longhorn-system
# 2. 检查节点是否满足要求
kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="Ready")].status}'
# 3. 检查 iscsid 服务
sudo systemctl status iscsid
```
**解决方案**:
```bash
# 在新节点安装依赖
sudo apt install -y open-iscsi
sudo systemctl enable --now iscsid
# 重启 Longhorn manager
kubectl rollout restart deployment longhorn-driver-deployer -n longhorn-system
```
---
### 问题 5: etcd 集群不健康HA 模式)
**症状**: Master 节点无法正常工作
**排查步骤**:
```bash
# 1. 检查 etcd 成员
sudo k3s etcd-snapshot ls
# 2. 检查 etcd 日志
sudo journalctl -u k3s -n 100 | grep etcd
# 3. 检查 etcd 端口
sudo netstat -tlnp | grep 2379
```
**解决方案**:
```bash
# 从快照恢复(谨慎操作)
sudo k3s server \
--cluster-reset \
--cluster-reset-restore-path=/var/lib/rancher/k3s/server/db/snapshots/<snapshot-name>
```
---
## 📚 快速参考
### 常用命令
```bash
# 查看集群信息
kubectl cluster-info
kubectl get nodes -o wide
kubectl get pods -A
# 查看节点资源
kubectl top nodes
kubectl describe node <node-name>
# 管理节点
kubectl cordon <node-name> # 标记为不可调度
kubectl drain <node-name> # 驱逐 Pod
kubectl uncordon <node-name> # 恢复调度
# 删除节点
kubectl delete node <node-name>
# 在节点上卸载 k3s
# Worker 节点
sudo /usr/local/bin/k3s-agent-uninstall.sh
# Master 节点
sudo /usr/local/bin/k3s-uninstall.sh
```
### 节点标签示例
```bash
# 角色标签
kubectl label nodes <node> node-role.kubernetes.io/worker=worker
kubectl label nodes <node> node-role.kubernetes.io/master=master
# 功能标签
kubectl label nodes <node> workload=database
kubectl label nodes <node> workload=web
kubectl label nodes <node> workload=cache
# 区域标签
kubectl label nodes <node> topology.kubernetes.io/zone=zone-a
kubectl label nodes <node> topology.kubernetes.io/region=us-east
```
---
## 🎯 最佳实践
### 1. 节点命名规范
```
master-1, master-2, master-3
worker-1, worker-2, worker-3, ...
```
### 2. 逐步扩展
- 先加入 1 个节点测试
- 验证正常后再批量加入
- 避免同时加入多个节点
### 3. 监控和告警
```bash
# Deploy Prometheus + Grafana (kube-prometheus)
# Raw GitHub directory URLs cannot be applied directly; clone the repo and apply the manifests locally
git clone https://github.com/prometheus-operator/kube-prometheus.git
cd kube-prometheus
kubectl apply --server-side -f manifests/setup
kubectl apply -f manifests/
```
### 4. 定期备份
```bash
# 备份 etcd
sudo k3s etcd-snapshot save --name backup-$(date +%Y%m%d-%H%M%S)
# 查看备份
sudo k3s etcd-snapshot ls
```
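On-demand snapshots are easy to forget; k3s can also take them on a schedule through its server configuration. A sketch, assuming the master (server) nodes are configured via /etc/rancher/k3s/config.yaml:

```bash
# Append snapshot schedule and retention settings on each server node
cat <<'EOF' | sudo tee -a /etc/rancher/k3s/config.yaml
etcd-snapshot-schedule-cron: "0 */6 * * *"   # every 6 hours
etcd-snapshot-retention: 10                  # keep the 10 most recent snapshots
EOF
sudo systemctl restart k3s
```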
### 5. 资源预留
```bash
# Limit the total CPU/memory that can be requested in the kube-system namespace
kubectl apply -f - <<EOF
apiVersion: v1
kind: ResourceQuota
metadata:
name: system-quota
namespace: kube-system
spec:
hard:
requests.cpu: "2"
requests.memory: 4Gi
EOF
```
---
## 📞 获取帮助
- k3s 官方文档: https://docs.k3s.io
- Longhorn 文档: https://longhorn.io/docs
- Kubernetes 文档: https://kubernetes.io/docs
---
**文档版本**: v1.0
**最后更新**: 2026-01-21
**适用于**: k3s v1.34.3+k3s1
View File
@@ -0,0 +1,161 @@
# K3s 集群扩展快速参考
## 🚀 快速开始
### 当前集群信息
```
Master IP: 134.195.210.237
Token: K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d
```
### 一键加入脚本
#### Worker 节点(最简单)
```bash
# 在新节点上执行
sudo bash scripts/join-worker.sh
```
#### Master 节点HA 模式)
```bash
# 在新节点上执行
sudo bash scripts/join-master.sh
```
---
## 📊 扩展方案对比
| 方案 | 节点配置 | 适用场景 | 高可用 | 成本 |
|------|---------|---------|--------|------|
| **2节点** | 1M + 2W | 开发/测试 | ❌ | 💰 |
| **4节点** | 3M + 4W | 生产环境 | ✅ | 💰💰💰 |
| **6节点** | 3M + 6W | 大规模生产 | ✅ | 💰💰💰💰 |
M = Master, W = Worker
---
## 🔧 手动加入命令
### Worker 节点
```bash
export MASTER_IP="134.195.210.237"
export NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
curl -sfL https://get.k3s.io | K3S_URL=https://${MASTER_IP}:6443 \
K3S_TOKEN=${NODE_TOKEN} sh -
```
### Master 节点(需要先配置负载均衡器)
```bash
export FIRST_MASTER="134.195.210.237"
export LB_IP="<负载均衡器IP>"
export NODE_TOKEN="K109d35a131f48b4d40b162398a828b766d60735f29dd7b4a37b030c1d1c0e26b23::server:72e04c3a9e3e762cbdefffc96f348a2d"
curl -sfL https://get.k3s.io | sh -s - server \
--server https://${FIRST_MASTER}:6443 \
--token ${NODE_TOKEN} \
--tls-san=${LB_IP} \
--write-kubeconfig-mode 644
```
---
## ✅ 验证命令
```bash
# 查看节点
kubectl get nodes -o wide
# 健康检查
bash scripts/check-node-health.sh
# 查看节点详情
kubectl describe node <node-name>
# 查看资源使用
kubectl top nodes
```
---
## 🏷️ 节点标签
```bash
# Worker 节点
kubectl label nodes <node> node-role.kubernetes.io/worker=worker
# 功能标签
kubectl label nodes <node> workload=web
kubectl label nodes <node> workload=database
kubectl label nodes <node> workload=cache
```
---
## 🔥 常见问题
### 节点无法加入?
```bash
# 检查网络
ping 134.195.210.237
telnet 134.195.210.237 6443
# 查看日志
sudo journalctl -u k3s-agent -f
```
### 节点 NotReady
```bash
# 重启服务
sudo systemctl restart k3s-agent
# 检查详情
kubectl describe node <node-name>
```
### 删除节点
```bash
# 在 Master 节点
kubectl drain <node-name> --ignore-daemonsets
kubectl delete node <node-name>
# 在要删除的节点
sudo /usr/local/bin/k3s-agent-uninstall.sh
```
---
## 📚 详细文档
- 完整扩展指南: [K3S-CLUSTER-EXPANSION.md](K3S-CLUSTER-EXPANSION.md)
- GitOps 自动部署: [README.md](README.md)
---
## 🎯 推荐流程
### 2节点扩展开发环境
1. ✅ 准备 2 台新 VPS
2. ✅ 执行 `join-worker.sh`
3. ✅ 验证节点状态
4. ✅ 测试应用部署
### 4节点扩展生产环境
1. ✅ 配置外部负载均衡器
2. ✅ 转换第一个节点为 HA 模式
3. ✅ 加入 2 个 Master 节点
4. ✅ 加入 4 个 Worker 节点
5. ✅ 配置 Longhorn 多副本
6. ✅ 测试高可用
### 6节点扩展大规模
1. ✅ 同 4 节点方案
2. ✅ 按功能分层部署
3. ✅ 配置节点亲和性
4. ✅ 优化资源分配
---
**快速帮助**: 遇到问题?运行 `bash scripts/check-node-health.sh` 进行诊断
View File
@@ -0,0 +1,39 @@
I have prepared the service file. Now run the following commands to finish the setup:
# 1. Copy the service file into the system directory (requires your password)
sudo cp /tmp/code-server.service /etc/systemd/system/
# 2. Reload the systemd configuration
sudo systemctl daemon-reload
# 3. Enable start on boot
sudo systemctl enable code-server
# 4. Start the service now
sudo systemctl start code-server
# 5. Check the service status
sudo systemctl status code-server
Or run everything in one go:
sudo cp /tmp/code-server.service /etc/systemd/system/ && \
sudo systemctl daemon-reload && \
sudo systemctl enable --now code-server && \
sudo systemctl status code-server
Service configuration summary (a sketch of a matching unit file follows):
- Listen address: 0.0.0.0:8080 (all network interfaces)
- Working directory: /home/fei
- Run as user: fei
- Auto-restart: restarts 10 seconds after a crash
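The file already prepared at /tmp/code-server.service is authoritative. Purely for reference, a unit matching the settings above typically looks like this; the ExecStart path is an assumption and depends on how code-server was installed:
sudo tee /tmp/code-server.service > /dev/null <<'EOF'
[Unit]
Description=code-server (VS Code in the browser)
After=network.target

[Service]
Type=simple
User=fei
WorkingDirectory=/home/fei
ExecStart=/usr/bin/code-server --bind-addr 0.0.0.0:8080
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
EOF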
Common management commands:
- Check status: sudo systemctl status code-server
- Stop the service: sudo systemctl stop code-server
- Restart the service: sudo systemctl restart code-server
- Follow the logs: sudo journalctl -u code-server -f
- Disable start on boot: sudo systemctl disable code-server
To change the configuration (for example the port or other parameters), edit /etc/systemd/system/code-server.service, then run sudo systemctl daemon-reload followed by sudo systemctl restart code-server.