Merge remote-tracking branch 'origin/dev' into dev

This commit is contained in:
西大锐 2024-01-11 10:51:50 +08:00
commit 5f3604a393
71 changed files with 3918 additions and 38 deletions

View File

@ -1,8 +1,8 @@
<p align="center">
<img alt="logo" src="https://oscimg.oschina.net/oscnet/up-b99b286755aef70355a7084753f89cdb7c9.png">
</p>
-<h1 align="center" style="margin: 30px 0 30px; font-weight: bold;">RuoYi v3.6.3</h1>
+<h1 align="center" style="margin: 30px 0 30px; font-weight: bold;">ci4s-cloud 1.0</h1>
-<h4 align="center">A distributed microservice architecture with separated front and back ends, based on Vue/Element UI and Spring Boot/Spring Cloud & Alibaba</h4>
+<h4 align="center">A distributed microservice architecture with separated front and back ends, based on React and Spring Boot/Spring Cloud & Alibaba</h4>
<p align="center">
<a href="https://gitee.com/y_project/RuoYi-Cloud/stargazers"><img src="https://gitee.com/y_project/RuoYi-Cloud/badge/star.svg?theme=dark"></a>
<a href="https://gitee.com/y_project/RuoYi-Cloud"><img src="https://img.shields.io/badge/RuoYi-v3.6.3-brightgreen.svg"></a>
@ -11,19 +11,12 @@
## Platform Overview
-RuoYi is a fully open-source rapid development platform, free for individuals and enterprises to use without reservation.
+ci4s-cloud is a unified management platform for complex intelligent software.
-* Front end and back end are separated; the microservice front end is based on [RuoYi-Vue](https://gitee.com/y_project/RuoYi-Vue).
+* Front end and back end are separated; the microservice front end is based on React.
* The back end uses Spring Boot and Spring Cloud & Alibaba.
* Nacos is used as the registry and configuration center; Redis is used for authentication.
* Sentinel is used for flow control and Seata for distributed transactions.
-* A [Vue3](https://v3.cn.vuejs.org) / [Element Plus](https://element-plus.org/zh-CN) / [Vite](https://cn.vitejs.dev) edition is available at [RuoYi-Cloud-Vue3](https://github.com/yangzongzhuan/RuoYi-Cloud-Vue3) and is kept in sync.
-* For the non-separated application see [RuoYi](https://gitee.com/y_project/RuoYi); for the separated application see [RuoYi-Vue](https://gitee.com/y_project/RuoYi-Vue).
-* Alibaba Cloud discounts: [enter here](http://aly.ruoyi.vip); Tencent Cloud flash sales: [enter here](http://txy.ruoyi.vip)
-* Alibaba Cloud coupons: [claim here](https://www.aliyun.com/minisite/goods?userCode=brki8iof&share_source=copy_link); Tencent Cloud coupons: [claim here](https://cloud.tencent.com/redirect.php?redirect=1025&cps_key=198c8df2ed259157187173bc7f4f32fd&from=console)
-#### Related link: [RuoYi/RuoYi-Cloud](https://gitee.com/zhangmrit/ruoyi-cloud), Ant Design edition.
## System Modules
~~~
@ -47,6 +40,7 @@ com.ruoyi
│ └── ruoyi-gen // Code generation [9202]
│ └── ruoyi-job // Scheduled tasks [9203]
│ └── ruoyi-file // File service [9300]
+│ └── management-platform // Management platform [9300]
├── ruoyi-visual // Visual management module
│ └── ruoyi-visual-monitor // Monitoring center [9100]
├──pom.xml // Common dependencies
@ -76,13 +70,7 @@ com.ruoyi
16. Online form builder: drag form elements to generate the corresponding HTML code.
17. Connection pool monitoring: monitor the current database connection pool status and analyze SQL to locate performance bottlenecks.
-## Online Demo
-- admin/admin123
-- Donations received over time have been used to upgrade the demo server for a better experience. Thank you, everyone.
-Demo: http://ruoyi.vip
-Documentation: http://doc.ruoyi.vip
## Screenshots
@ -126,6 +114,16 @@ com.ruoyi
</table>
-## RuoYi Microservices Discussion Group
+## Installing and Deploying the Complex Intelligent Software Unified Management Platform
-QQ groups: [![Join QQ group](https://img.shields.io/badge/已满-42799195-blue.svg)](https://jq.qq.com/?_wv=1027&k=yqInfq0S) [![Join QQ group](https://img.shields.io/badge/已满-170157040-blue.svg)](https://jq.qq.com/?_wv=1027&k=Oy1mb3p8) [![Join QQ group](https://img.shields.io/badge/已满-130643120-blue.svg)](https://jq.qq.com/?_wv=1027&k=rvxkJtXK) [![Join QQ group](https://img.shields.io/badge/已满-225920371-blue.svg)](https://jq.qq.com/?_wv=1027&k=0Ck3PvTe) [![Join QQ group](https://img.shields.io/badge/已满-201705537-blue.svg)](https://jq.qq.com/?_wv=1027&k=FnHHP4TT) [![Join QQ group](https://img.shields.io/badge/已满-236543183-blue.svg)](https://jq.qq.com/?_wv=1027&k=qdT1Ojpz) [![Join QQ group](https://img.shields.io/badge/已满-213618602-blue.svg)](https://jq.qq.com/?_wv=1027&k=nw3OiyXs) [![Join QQ group](https://img.shields.io/badge/已满-148794840-blue.svg)](https://jq.qq.com/?_wv=1027&k=kiU5WDls) [![Join QQ group](https://img.shields.io/badge/已满-118752664-blue.svg)](https://jq.qq.com/?_wv=1027&k=MtBy6YfT) [![Join QQ group](https://img.shields.io/badge/已满-101038945-blue.svg)](https://jq.qq.com/?_wv=1027&k=FqImHgH2) [![Join QQ group](https://img.shields.io/badge/128355254-blue.svg)](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=G4jZ4EtdT50PhnMBudTnEwgonxkXOscJ&authKey=FkGHYfoTKlGE6wHdKdjH9bVoOgQjtLP9WM%2Fj7pqGY1msoqw9uxDiBo39E2mLgzYg&noverify=0&group_code=128355254) Click a button to join.
+Upload the installation scripts
+#### Step 1: Install MySQL
+helm install mysql . -n ci4s-test
+#### Step 2: Install Redis
+helm install redis . -n ci4s-test
+#### Step 3: Install Nacos
+In the MySQL instance installed in step 1, create the nacos-ci4s-config database (UTF-8) and run the Nacos initialization script
+kubectl create -f k8s-3nacos.yaml
+#### Step 4: Install the RuoYi services
+kubectl create -f *.yaml
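For orientation, here is a hedged end-to-end sketch of the four steps above. The namespace creation, the Redis chart directory name, the deployment/secret names used to fetch the MySQL root password, and the location of the service manifests are assumptions rather than anything this README pins down; only `helm-1mysql` and `k8s-3nacos.yaml` appear in this commit.

```bash
# Namespace used by the steps above (assumed to not exist yet)
kubectl create namespace ci4s-test

# Step 1 / Step 2: install MySQL and Redis from the bundled charts
helm install mysql ./helm-1mysql -n ci4s-test
helm install redis ./helm-2redis -n ci4s-test   # Redis chart directory name assumed

# Step 3: create the Nacos config database (UTF-8), then deploy Nacos
MYSQL_ROOT_PASSWORD=$(kubectl get secret -n ci4s-test mysql \
  -o jsonpath="{.data.mysql-root-password}" | base64 --decode)
kubectl exec -n ci4s-test deploy/mysql -- \
  mysql -uroot -p"$MYSQL_ROOT_PASSWORD" \
  -e "CREATE DATABASE \`nacos-ci4s-config\` DEFAULT CHARACTER SET utf8;"
kubectl create -f k8s-3nacos.yaml

# Step 4: deploy the ruoyi service manifests (run in the directory that holds them)
kubectl create -f .
```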

View File

@ -0,0 +1,15 @@
# Base image
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# Mount point
VOLUME /home/ruoyi
# Create the working directory
RUN mkdir -p /home/ruoyi
# Set the working directory
WORKDIR /home/ruoyi
# Copy the jar into the image
COPY ./jar/ruoyi-auth.jar /home/ruoyi/ruoyi-auth.jar
# Start the auth service
ENTRYPOINT ["java","-jar","ruoyi-auth.jar"]
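As a quick smoke test, this image can be built and run locally. The Dockerfile name below follows the build script included in this commit; the exposed port and the need for a reachable Nacos/Redis are assumptions about the auth module's configuration.

```bash
# Build the auth image from this Dockerfile
docker build -t ci4s-auth:1.0 -f auth-dockerfile .
# Run it locally (port 9200 assumed; the service needs Nacos and Redis reachable to start fully)
docker run --rm -p 9200:9200 ci4s-auth:1.0
```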

View File

@ -0,0 +1,13 @@
#!/bin/bash
# Image tag, taken from the first argument
version=$1
# Print the tag being built
echo "Version: $version"
docker build -t ci4s-gateway:$version -f gateway-dockerfile .
docker build -t ci4s-auth:$version -f auth-dockerfile .
docker build -t ci4s-file:$version -f file-dockerfile .
docker build -t ci4s-gen:$version -f gen-dockerfile .
docker build -t ci4s-job:$version -f job-dockerfile .
docker build -t ci4s-visual:$version -f visual-dockerfile .
docker build -t ci4s-system:$version -f system-dockerfile .
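Hypothetical usage, assuming the script is saved as `build.sh` next to the per-service Dockerfiles and the packaged jars are already under `./jar/`:

```bash
# Build every service image with the tag 1.0.0
bash build.sh 1.0.0
# Confirm the images were produced
docker images | grep '^ci4s-'
```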

View File

@ -0,0 +1,41 @@
worker_processes 1;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
sendfile on;
keepalive_timeout 65;
server {
listen 80;
server_name localhost;
location / {
root /home/ruoyi/projects/ruoyi-ui;
try_files $uri $uri/ /index.html;
index index.html index.htm;
}
location /prod-api/ {
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header REMOTE-HOST $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_pass http://ruoyi-gateway:8080/;
}
# Block access to actuator endpoints
if ($request_uri ~ "/actuator") {
return 403;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
}

View File

@ -0,0 +1,15 @@
# Base image
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# Mount point
VOLUME /home/ruoyi
# Create the working directory
RUN mkdir -p /home/ruoyi
# Set the working directory
WORKDIR /home/ruoyi
# Copy the jar into the image
COPY ./jar/ruoyi-modules-file.jar /home/ruoyi/ruoyi-modules-file.jar
# Start the file service
ENTRYPOINT ["java","-jar","ruoyi-modules-file.jar"]

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/ruoyi-gateway.jar /home/ruoyi/ruoyi-gateway.jar
# 启动网关服务
ENTRYPOINT ["java","-jar","ruoyi-gateway.jar"]

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/ruoyi-modules-gen.jar /home/ruoyi/ruoyi-modules-gen.jar
# 启动代码生成服务
ENTRYPOINT ["java","-jar","ruoyi-modules-gen.jar"]

k8s/dockerfiles/html/dist/readme.txt
View File

@ -0,0 +1 @@
Holds the static files produced by the ruoyi-ui front-end build, served by nginx.

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/ruoyi-modules-job.jar /home/ruoyi/ruoyi-modules-job.jar
# 启动定时任务服务
ENTRYPOINT ["java","-jar","ruoyi-modules-job.jar"]

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/management-platform.jar /home/ruoyi/management-platform.jar
# 启动系统服务
ENTRYPOINT ["java","-jar","management-platform.jar"]

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM nginx
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi/projects/ruoyi-ui
# 创建目录
RUN mkdir -p /home/ruoyi/projects/ruoyi-ui
# 指定路径
WORKDIR /home/ruoyi/projects/ruoyi-ui
# 复制conf文件到路径
COPY ./conf/nginx.conf /etc/nginx/nginx.conf
# 复制html文件到路径
COPY ./html/dist /home/ruoyi/projects/ruoyi-ui

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/ruoyi-modules-system.jar /home/ruoyi/ruoyi-modules-system.jar
# 启动系统服务
ENTRYPOINT ["java","-jar","ruoyi-modules-system.jar"]

View File

@ -0,0 +1,15 @@
# 基础镜像
FROM openjdk:8-jre
# author
MAINTAINER ruoyi
# 挂载目录
VOLUME /home/ruoyi
# 创建目录
RUN mkdir -p /home/ruoyi
# 指定路径
WORKDIR /home/ruoyi
# 复制jar文件到路径
COPY ./jar/ruoyi-visual-monitor.jar /home/ruoyi/ruoyi-visual-monitor.jar
# 启动系统服务
ENTRYPOINT ["java","-jar","ruoyi-visual-monitor.jar"]

View File

@ -0,0 +1,2 @@
.git
OWNERS

View File

@ -0,0 +1,16 @@
apiVersion: v1
appVersion: 5.7.30
deprecated: true
description: DEPRECATED - Fast, reliable, scalable, and easy to use open-source relational
database system.
home: https://www.mysql.com/
icon: https://www.mysql.com/common/logos/logo-mysql-170x115.png
keywords:
- mysql
- database
- sql
name: mysql
sources:
- https://github.com/kubernetes/charts
- https://github.com/docker-library/mysql
version: 1.6.9

k8s/helm-1mysql/README.md
View File

@ -0,0 +1,255 @@
# ⚠️ Repo Archive Notice
As of Nov 13, 2020, charts in this repo will no longer be updated.
For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/).
# MySQL
[MySQL](https://MySQL.org) is one of the most popular database servers in the world. Notable users include Wikipedia, Facebook and Google.
## DEPRECATION NOTICE
This chart is deprecated and no longer supported.
## Introduction
This chart bootstraps a single node MySQL deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
## Prerequisites
- Kubernetes 1.10+ with Beta APIs enabled
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```bash
$ helm install --name my-release stable/mysql
```
The command deploys MySQL on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation.
By default a random password will be generated for the root user. If you'd like to set your own password, change `mysqlRootPassword` in the values.yaml.
You can retrieve your root password by running the following command. Make sure to replace [YOUR_RELEASE_NAME]:
printf $(printf '\%o' `kubectl get secret [YOUR_RELEASE_NAME]-mysql -o jsonpath="{.data.mysql-root-password[*]}"`)
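An equivalent form that simply base64-decodes the secret (the same approach used in this chart's NOTES.txt) may be easier to read:

```bash
kubectl get secret [YOUR_RELEASE_NAME]-mysql \
  -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo
```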
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```bash
$ helm delete --purge my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release completely.
## Configuration
The following table lists the configurable parameters of the MySQL chart and their default values.
| Parameter | Description | Default |
| -------------------------------------------- | -------------------------------------------------------------------------------------------- | ---------------------------------------------------- |
| `args` | Additional arguments to pass to the MySQL container. | `[]` |
| `initContainer.resources` | initContainer resource requests/limits | Memory: `10Mi`, CPU: `10m` |
| `image` | `mysql` image repository. | `mysql` |
| `imageTag` | `mysql` image tag. | `5.7.30` |
| `busybox.image` | `busybox` image repository. | `busybox` |
| `busybox.tag` | `busybox` image tag. | `1.32` |
| `testFramework.enabled` | `test-framework` switch. | `true` |
| `testFramework.image` | `test-framework` image repository. | `bats/bats` |
| `testFramework.tag` | `test-framework` image tag. | `1.2.1` |
| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
| `imagePullPolicy` | Image pull policy | `IfNotPresent` |
| `existingSecret` | Use Existing secret for Password details | `nil` |
| `extraVolumes` | Additional volumes as a string to be passed to the `tpl` function | |
| `extraVolumeMounts` | Additional volumeMounts as a string to be passed to the `tpl` function | |
| `extraInitContainers` | Additional init containers as a string to be passed to the `tpl` function | |
| `extraEnvVars` | Additional environment variables as a string to be passed to the `tpl` function | |
| `mysqlRootPassword` | Password for the `root` user. Ignored if existing secret is provided | Random 10 characters |
| `mysqlUser` | Username of new user to create. | `nil` |
| `mysqlPassword` | Password for the new user. Ignored if existing secret is provided | Random 10 characters |
| `mysqlDatabase` | Name for new database to create. | `nil` |
| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 |
| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
| `readinessProbe.timeoutSeconds` | When the probe times out | 1 |
| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | 1 |
| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 3 |
| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
| `mysqlx.port.enabled` | Boolean to toggle a port for mysqlx `33060` protocol. | false |
| `persistence.enabled` | Create a volume to store data | true |
| `persistence.size` | Size of persistent volume claim | 8Gi RW |
| `persistence.storageClass` | Type of persistent volume claim | nil |
| `persistence.accessMode` | ReadWriteOnce or ReadOnly | ReadWriteOnce |
| `persistence.existingClaim` | Name of existing persistent volume | `nil` |
| `persistence.subPath` | Subdirectory of the volume to mount | `nil` |
| `persistence.annotations` | Persistent Volume annotations | {} |
| `nodeSelector` | Node labels for pod assignment | {} |
| `affinity` | Affinity rules for pod assignment | {} |
| `tolerations` | Pod taint tolerations for deployment | {} |
| `metrics.enabled` | Start a side-car prometheus exporter | `false` |
| `metrics.image` | Exporter image | `prom/mysqld-exporter` |
| `metrics.imageTag` | Exporter image | `v0.10.0` |
| `metrics.imagePullPolicy` | Exporter image pull policy | `IfNotPresent` |
| `metrics.resources` | Exporter resource requests/limit | `nil` |
| `metrics.livenessProbe.initialDelaySeconds` | Delay before metrics liveness probe is initiated | 15 |
| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
| `metrics.readinessProbe.initialDelaySeconds` | Delay before metrics readiness probe is initiated | 5 |
| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 1 |
| `metrics.flags` | Additional flags for the mysql exporter to use | `[]` |
| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
| `configurationFiles` | List of mysql configuration files | `nil` |
| `configurationFilesPath` | Path of mysql configuration files | `/etc/mysql/conf.d/` |
| `securityContext.enabled` | Enable security context (mysql pod) | `false` |
| `securityContext.fsGroup` | Group ID for the container (mysql pod) | 999 |
| `securityContext.runAsUser` | User ID for the container (mysql pod) | 999 |
| `service.annotations` | Kubernetes annotations for mysql | {} |
| `service.type` | Kubernetes service type | ClusterIP |
| `service.loadBalancerIP` | LoadBalancer service IP | `""` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the mysql.fullname template |
| `ssl.enabled` | Setup and use SSL for MySQL connections | `false` |
| `ssl.secret` | Name of the secret containing the SSL certificates | mysql-ssl-certs |
| `ssl.certificates[0].name` | Name of the secret containing the SSL certificates | `nil` |
| `ssl.certificates[0].ca` | CA certificate | `nil` |
| `ssl.certificates[0].cert` | Server certificate (public key) | `nil` |
| `ssl.certificates[0].key` | Server key (private key) | `nil` |
| `imagePullSecrets` | Name of Secret resource containing private registry credentials | `nil` |
| `initializationFiles` | List of SQL files which are run after the container started | `nil` |
| `timezone` | Container and mysqld timezone (TZ env) | `nil` (UTC depending on image) |
| `podAnnotations` | Map of annotations to add to the pods | `{}` |
| `podLabels` | Map of labels to add to the pods | `{}` |
| `priorityClassName` | Set pod priorityClassName | `{}` |
| `deploymentAnnotations` | Map of annotations for deployment | `{}` |
| `strategy` | Update strategy policy | `{type: "Recreate"}` |
Some of the parameters above map to the env variables defined in the [MySQL DockerHub image](https://hub.docker.com/_/mysql/).
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install --name my-release \
--set mysqlRootPassword=secretpassword,mysqlUser=my-user,mysqlPassword=my-password,mysqlDatabase=my-database \
stable/mysql
```
The above command sets the MySQL `root` account password to `secretpassword`. Additionally it creates a standard database user named `my-user`, with the password `my-password`, who has access to a database named `my-database`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install --name my-release -f values.yaml stable/mysql
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Persistence
The [MySQL](https://hub.docker.com/_/mysql/) image stores the MySQL data and configurations at the `/var/lib/mysql` path of the container.
By default a PersistentVolumeClaim is created and mounted into that directory. In order to disable this functionality
you can change the values.yaml to disable persistence and use an emptyDir instead.
> *"An emptyDir volume is first created when a Pod is assigned to a Node, and exists as long as that Pod is running on that node. When a Pod is removed from a node for any reason, the data in the emptyDir is deleted forever."*
**Notice**: You may need to increase the value of `livenessProbe.initialDelaySeconds` when enabling persistence with a PersistentVolumeClaim from a PersistentVolume with varying properties, since the volume's I/O performance affects how long database initialization takes. The default window for initialization is `60` seconds (`livenessProbe.initialDelaySeconds` + `livenessProbe.periodSeconds` * `livenessProbe.failureThreshold`). If initialization takes longer than this limit, the kubelet will restart the database container, interrupting initialization and potentially leaving the persistent data in an unusable state.
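On slower volumes one way to widen that window is to raise the initial delay in a values override; the numbers below are purely illustrative:

```yaml
livenessProbe:
  initialDelaySeconds: 120
  periodSeconds: 10
  failureThreshold: 3
```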
## Custom MySQL configuration files
The [MySQL](https://hub.docker.com/_/mysql/) image accepts custom configuration files at the path `/etc/mysql/conf.d`. If you want to use a customized MySQL configuration, you can create your alternative configuration files by passing the file contents on the `configurationFiles` attribute. Note that according to the MySQL documentation only files ending with `.cnf` are loaded.
```yaml
configurationFiles:
mysql.cnf: |-
[mysqld]
skip-host-cache
skip-name-resolve
sql-mode=STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION
mysql_custom.cnf: |-
[mysqld]
```
## MySQL initialization files
The [MySQL](https://hub.docker.com/_/mysql/) image accepts *.sh, *.sql and *.sql.gz files at the path `/docker-entrypoint-initdb.d`.
These files are being run exactly once for container initialization and ignored on following container restarts.
If you want to use initialization scripts, you can create initialization files by passing the file contents on the `initializationFiles` attribute.
```yaml
initializationFiles:
first-db.sql: |-
CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
second-db.sql: |-
CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
```
## SSL
This chart supports configuring MySQL to use [encrypted connections](https://dev.mysql.com/doc/refman/5.7/en/encrypted-connections.html) with TLS/SSL certificates provided by the user. This is accomplished by storing the required Certificate Authority file, the server public key certificate, and the server private key as a Kubernetes secret. The SSL options for this chart support the following use cases:
* Manage certificate secrets with helm
* Manage certificate secrets outside of helm
## Manage certificate secrets with helm
Include your certificate data in the `ssl.certificates` section. For example:
```
ssl:
enabled: false
secret: mysql-ssl-certs
certificates:
- name: mysql-ssl-certs
ca: |-
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
cert: |-
-----BEGIN CERTIFICATE-----
...
-----END CERTIFICATE-----
key: |-
-----BEGIN RSA PRIVATE KEY-----
...
-----END RSA PRIVATE KEY-----
```
> **Note**: Make sure your certificate data has the correct formatting in the values file.
## Manage certificate secrets outside of helm
1. Ensure the certificate secret exist before installation of this chart.
2. Set the name of the certificate secret in `ssl.secret`.
3. Make sure there are no entries underneath `ssl.certificates`.
To manually create the certificate secret from local files you can execute:
```
kubectl create secret generic mysql-ssl-certs \
--from-file=ca.pem=./ssl/certificate-authority.pem \
--from-file=server-cert.pem=./ssl/server-public-key.pem \
--from-file=server-key.pem=./ssl/server-private-key.pem
```
> **Note**: `ca.pem`, `server-cert.pem`, and `server-key.pem` **must** be used as the key names in this generic secret.
If you are using a certificate, your `configurationFiles` must include the three ssl lines under `[mysqld]`:
```
[mysqld]
ssl-ca=/ssl/ca.pem
ssl-cert=/ssl/server-cert.pem
ssl-key=/ssl/server-key.pem
```

View File

@ -0,0 +1,48 @@
MySQL can be accessed via port 3306 on the following DNS name from within your cluster:
{{ template "mysql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{- if .Values.mysqlx.port.enabled }}
Connection to the X protocol of MySQL can be done via 33060 on the following DNS name from within your cluster:
{{ template "mysql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{- end }}
{{- if .Values.existingSecret }}
If you have not already created the mysql password secret:
kubectl create secret generic {{ .Values.existingSecret }} --namespace {{ .Release.Namespace }} --from-file=./mysql-root-password --from-file=./mysql-password
{{ else }}
To get your root password run:
MYSQL_ROOT_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath="{.data.mysql-root-password}" | base64 --decode; echo)
{{- end }}
To connect to your database:
1. Run an Ubuntu pod that you can use as a client:
kubectl run -i --tty ubuntu --image=ubuntu:16.04 --restart=Never -- bash -il
2. Install the mysql client:
$ apt-get update && apt-get install mysql-client -y
3. Connect using the mysql cli, then provide your password:
$ mysql -h {{ template "mysql.fullname" . }} -p
To connect to your database directly from outside the K8s cluster:
{{- if contains "NodePort" .Values.service.type }}
MYSQL_HOST=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath='{.items[0].status.addresses[0].address}')
MYSQL_PORT=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "mysql.fullname" . }} -o jsonpath='{.spec.ports[0].nodePort}')
{{- else if contains "ClusterIP" .Values.service.type }}
MYSQL_HOST=127.0.0.1
MYSQL_PORT={{ .Values.service.port }}
# Execute the following command to route the connection:
kubectl port-forward svc/{{ template "mysql.fullname" . }} {{ .Values.service.port }}
{{- end }}
mysql -h ${MYSQL_HOST} -P${MYSQL_PORT} -u root -p${MYSQL_ROOT_PASSWORD}

View File

@ -0,0 +1,43 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "mysql.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "mysql.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- printf .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Generate chart secret name
*/}}
{{- define "mysql.secretName" -}}
{{ default (include "mysql.fullname" .) .Values.existingSecret }}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "mysql.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "mysql.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}

View File

@ -0,0 +1,12 @@
{{- if .Values.configurationFiles }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "mysql.fullname" . }}-configuration
namespace: {{ .Release.Namespace }}
data:
{{- range $key, $val := .Values.configurationFiles }}
{{ $key }}: |-
{{ $val | indent 4}}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,259 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ template "mysql.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
{{- with .Values.deploymentAnnotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
spec:
strategy:
{{ toYaml .Values.strategy | indent 4 }}
selector:
matchLabels:
app: {{ template "mysql.fullname" . }}
release: {{ .Release.Name }}
template:
metadata:
labels:
app: {{ template "mysql.fullname" . }}
release: {{ .Release.Name }}
{{- with .Values.podLabels }}
{{ toYaml . | indent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
annotations:
{{ toYaml . | indent 8 }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{ toYaml .Values.imagePullSecrets | indent 8 }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: "{{ .Values.priorityClassName }}"
{{- end }}
{{- if .Values.securityContext.enabled }}
securityContext:
fsGroup: {{ .Values.securityContext.fsGroup }}
runAsUser: {{ .Values.securityContext.runAsUser }}
{{- end }}
serviceAccountName: {{ template "mysql.serviceAccountName" . }}
initContainers:
- name: "remove-lost-found"
image: "{{ .Values.busybox.image}}:{{ .Values.busybox.tag }}"
imagePullPolicy: {{ .Values.imagePullPolicy | quote }}
resources:
{{ toYaml .Values.initContainer.resources | indent 10 }}
command: ["rm", "-fr", "/var/lib/mysql/lost+found"]
volumeMounts:
- name: data
mountPath: /var/lib/mysql
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.extraInitContainers }}
{{ tpl .Values.extraInitContainers . | indent 6 }}
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.affinity }}
affinity:
{{ toYaml .Values.affinity | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
containers:
- name: {{ template "mysql.fullname" . }}
image: "{{ .Values.image }}:{{ .Values.imageTag }}"
imagePullPolicy: {{ .Values.imagePullPolicy | quote }}
{{- with .Values.args }}
args:
{{- range . }}
- {{ . | quote }}
{{- end }}
{{- end }}
resources:
{{ toYaml .Values.resources | indent 10 }}
env:
{{- if .Values.mysqlAllowEmptyPassword }}
- name: MYSQL_ALLOW_EMPTY_PASSWORD
value: "true"
{{- end }}
{{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlRootPassword)) }}
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql.secretName" . }}
key: mysql-root-password
{{- if .Values.mysqlAllowEmptyPassword }}
optional: true
{{- end }}
{{- end }}
{{- if not (and .Values.allowEmptyRootPassword (not .Values.mysqlPassword)) }}
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql.secretName" . }}
key: mysql-password
{{- if or .Values.mysqlAllowEmptyPassword (empty .Values.mysqlUser) }}
optional: true
{{- end }}
{{- end }}
- name: MYSQL_USER
value: {{ default "" .Values.mysqlUser | quote }}
- name: MYSQL_DATABASE
value: {{ default "" .Values.mysqlDatabase | quote }}
{{- if .Values.timezone }}
- name: TZ
value: {{ .Values.timezone }}
{{- end }}
{{- if .Values.extraEnvVars }}
{{ tpl .Values.extraEnvVars . | indent 8 }}
{{- end }}
ports:
- name: mysql
containerPort: 3306
{{- if .Values.mysqlx.port.enabled }}
- name: mysqlx
containerPort: 33060
{{- end }}
livenessProbe:
exec:
command:
{{- if .Values.mysqlAllowEmptyPassword }}
- mysqladmin
- ping
{{- else }}
- sh
- -c
- "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
{{- end }}
initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
successThreshold: {{ .Values.livenessProbe.successThreshold }}
failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
readinessProbe:
exec:
command:
{{- if .Values.mysqlAllowEmptyPassword }}
- mysqladmin
- ping
{{- else }}
- sh
- -c
- "mysqladmin ping -u root -p${MYSQL_ROOT_PASSWORD}"
{{- end }}
initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
successThreshold: {{ .Values.readinessProbe.successThreshold }}
failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
volumeMounts:
- name: data
mountPath: /var/lib/mysql
{{- if .Values.persistence.subPath }}
subPath: {{ .Values.persistence.subPath }}
{{- end }}
{{- if .Values.configurationFiles }}
{{- range $key, $val := .Values.configurationFiles }}
- name: configurations
mountPath: {{ $.Values.configurationFilesPath }}{{ $key }}
subPath: {{ $key }}
{{- end -}}
{{- end }}
{{- if .Values.initializationFiles }}
- name: migrations
mountPath: /docker-entrypoint-initdb.d
{{- end }}
{{- if .Values.ssl.enabled }}
- name: certificates
mountPath: /ssl
{{- end }}
{{- if .Values.extraVolumeMounts }}
{{ tpl .Values.extraVolumeMounts . | indent 8 }}
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
image: "{{ .Values.metrics.image }}:{{ .Values.metrics.imageTag }}"
imagePullPolicy: {{ .Values.metrics.imagePullPolicy | quote }}
{{- if .Values.mysqlAllowEmptyPassword }}
command:
- 'sh'
- '-c'
- 'DATA_SOURCE_NAME="root@(localhost:3306)/" /bin/mysqld_exporter'
{{- else }}
env:
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: {{ template "mysql.secretName" . }}
key: mysql-root-password
command:
- 'sh'
- '-c'
- 'DATA_SOURCE_NAME="root:$MYSQL_ROOT_PASSWORD@(localhost:3306)/" /bin/mysqld_exporter'
{{- end }}
{{- range $f := .Values.metrics.flags }}
- {{ $f | quote }}
{{- end }}
ports:
- name: metrics
containerPort: 9104
livenessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }}
readinessProbe:
httpGet:
path: /
port: metrics
initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }}
timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }}
resources:
{{ toYaml .Values.metrics.resources | indent 10 }}
{{- end }}
volumes:
{{- if .Values.configurationFiles }}
- name: configurations
configMap:
name: {{ template "mysql.fullname" . }}-configuration
{{- end }}
{{- if .Values.initializationFiles }}
- name: migrations
configMap:
name: {{ template "mysql.fullname" . }}-initialization
{{- end }}
{{- if .Values.ssl.enabled }}
- name: certificates
secret:
secretName: {{ .Values.ssl.secret }}
{{- end }}
- name: data
{{- if .Values.persistence.enabled }}
persistentVolumeClaim:
claimName: {{ .Values.persistence.existingClaim | default (include "mysql.fullname" .) }}
{{- else }}
emptyDir: {}
{{- end -}}
{{- if .Values.extraVolumes }}
{{ tpl .Values.extraVolumes . | indent 6 }}
{{- end }}

View File

@ -0,0 +1,12 @@
{{- if .Values.initializationFiles }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "mysql.fullname" . }}-initialization
namespace: {{ .Release.Namespace }}
data:
{{- range $key, $val := .Values.initializationFiles }}
{{ $key }}: |-
{{ $val | indent 4}}
{{- end }}
{{- end -}}

View File

@ -0,0 +1,29 @@
{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }}
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: {{ template "mysql.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- with .Values.persistence.annotations }}
annotations:
{{ toYaml . | indent 4 }}
{{- end }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
spec:
accessModes:
- {{ .Values.persistence.accessMode | quote }}
resources:
requests:
storage: {{ .Values.persistence.size | quote }}
{{- if .Values.persistence.storageClass }}
{{- if (eq "-" .Values.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistence.storageClass }}"
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,51 @@
{{- if not .Values.existingSecret }}
{{- if or (not .Values.allowEmptyRootPassword) (or .Values.mysqlRootPassword .Values.mysqlPassword) }}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "mysql.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
type: Opaque
data:
{{ if .Values.mysqlRootPassword }}
mysql-root-password: {{ .Values.mysqlRootPassword | b64enc | quote }}
{{ else }}
{{ if not .Values.allowEmptyRootPassword }}
mysql-root-password: {{ randAlphaNum 10 | b64enc | quote }}
{{ end }}
{{ end }}
{{ if .Values.mysqlPassword }}
mysql-password: {{ .Values.mysqlPassword | b64enc | quote }}
{{ else }}
{{ if not .Values.allowEmptyRootPassword }}
mysql-password: {{ randAlphaNum 10 | b64enc | quote }}
{{ end }}
{{ end }}
{{ end }}
{{- if .Values.ssl.enabled }}
{{ if .Values.ssl.certificates }}
{{- range .Values.ssl.certificates }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ .name }}
labels:
app: {{ template "mysql.fullname" $ }}
chart: "{{ $.Chart.Name }}-{{ $.Chart.Version }}"
release: "{{ $.Release.Name }}"
heritage: "{{ $.Release.Service }}"
type: Opaque
data:
ca.pem: {{ .ca | b64enc }}
server-cert.pem: {{ .cert | b64enc }}
server-key.pem: {{ .key | b64enc }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View File

@ -0,0 +1,11 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "mysql.serviceAccountName" . }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
{{- end }}

View File

@ -0,0 +1,26 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ include "mysql.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
{{- if .Values.metrics.serviceMonitor.additionalLabels }}
{{ toYaml .Values.metrics.serviceMonitor.additionalLabels | indent 4 }}
{{- end }}
spec:
endpoints:
- port: metrics
interval: 30s
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ include "mysql.fullname" . }}
release: {{ .Release.Name }}
{{- end }}

View File

@ -0,0 +1,42 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "mysql.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
release: "{{ .Release.Name }}"
heritage: "{{ .Release.Service }}"
annotations:
{{- if .Values.service.annotations }}
{{ toYaml .Values.service.annotations | indent 4 }}
{{- end }}
{{- if and (.Values.metrics.enabled) (.Values.metrics.annotations) }}
{{ toYaml .Values.metrics.annotations | indent 4 }}
{{- end }}
spec:
type: {{ .Values.service.type }}
{{- if (and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP))) }}
loadBalancerIP: {{ .Values.service.loadBalancerIP }}
{{- end }}
ports:
- name: mysql
port: {{ .Values.service.port }}
targetPort: mysql
{{- if .Values.service.nodePort }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
{{- if .Values.mysqlx.port.enabled }}
- name: mysqlx
port: 33060
targetPort: mysqlx
protocol: TCP
{{- end }}
{{- if .Values.metrics.enabled }}
- name: metrics
port: 9104
targetPort: metrics
{{- end }}
selector:
app: {{ template "mysql.fullname" . }}

View File

@ -0,0 +1,23 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "mysql.fullname" . }}-test
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
data:
run.sh: |-
{{- if .Values.ssl.enabled | and .Values.mysqlRootPassword }}
@test "Testing SSL MySQL Connection" {
mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} --ssl-cert=/ssl/server-cert.pem --ssl-key=/ssl/server-key.pem -u root -p{{ .Values.mysqlRootPassword }}
}
{{- else if .Values.mysqlRootPassword }}
@test "Testing MySQL Connection" {
mysql --host={{ template "mysql.fullname" . }} --port={{ .Values.service.port | default "3306" }} -u root -p{{ .Values.mysqlRootPassword }}
}
{{- end }}
{{- end }}

View File

@ -0,0 +1,59 @@
{{- if .Values.testFramework.enabled }}
apiVersion: v1
kind: Pod
metadata:
name: {{ template "mysql.fullname" . }}-test
namespace: {{ .Release.Namespace }}
labels:
app: {{ template "mysql.fullname" . }}
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
heritage: "{{ .Release.Service }}"
release: "{{ .Release.Name }}"
annotations:
"helm.sh/hook": test-success
spec:
{{- if .Values.testFramework.securityContext }}
securityContext: {{ toYaml .Values.testFramework.securityContext | nindent 4 }}
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.imagePullSecrets }}
- name: {{ . }}
{{- end}}
{{- end }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{ toYaml . | indent 4 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{ toYaml . | indent 4 }}
{{- end }}
containers:
- name: {{ .Release.Name }}-test
image: "{{ .Values.testFramework.image }}:{{ .Values.testFramework.tag }}"
imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}"
command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"]
volumeMounts:
- mountPath: /tests
name: tests
readOnly: true
{{- if .Values.ssl.enabled }}
- name: certificates
mountPath: /ssl
{{- end }}
volumes:
- name: tests
configMap:
name: {{ template "mysql.fullname" . }}-test
{{- if .Values.ssl.enabled }}
- name: certificates
secret:
secretName: {{ .Values.ssl.secret }}
{{- end }}
restartPolicy: Never
{{- end }}

k8s/helm-1mysql/values.yaml
View File

@ -0,0 +1,246 @@
## mysql image version
## ref: https://hub.docker.com/r/library/mysql/tags/
##
image: "mysql"
imageTag: "5.7.30"
strategy:
type: Recreate
busybox:
image: "busybox"
tag: "1.28"
testFramework:
enabled: true
image: "bats/bats"
tag: "1.2.1"
imagePullPolicy: IfNotPresent
securityContext: {}
## Specify password for root user
##
## Default: random 10 character string
mysqlRootPassword: qazxc123456.
## Create a database user
##
# mysqlUser:
## Default: random 10 character string
# mysqlPassword:
## Allow unauthenticated access, uncomment to enable
##
mysqlAllowEmptyPassword: true
## Create a database
##
# mysqlDatabase:
## Specify an imagePullPolicy (Required)
## It's recommended to change this to 'Always' if the image tag is 'latest'
## ref: http://kubernetes.io/docs/user-guide/images/#updating-images
##
imagePullPolicy: IfNotPresent
## Additional arguments that are passed to the MySQL container.
## For example use --default-authentication-plugin=mysql_native_password if older clients need to
## connect to a MySQL 8 instance.
args: []
extraVolumes: |
# - name: extras
# emptyDir: {}
extraVolumeMounts: |
# - name: extras
# mountPath: /usr/share/extras
# readOnly: true
extraInitContainers: |
# - name: do-something
# image: busybox
# command: ['do', 'something']
## A string to add extra environment variables
# extraEnvVars: |
# - name: EXTRA_VAR
# value: "extra"
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name: myRegistryKeySecretName
## Node selector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
nodeSelector: {}
## Affinity
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
## Tolerations for pod assignment
## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
##
tolerations: []
livenessProbe:
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 3
readinessProbe:
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
## Persist data to a persistent volume
persistence:
enabled: true
## database data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "storage-nfs"
accessMode: ReadWriteOnce
size: 10Gi
annotations: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Security context
securityContext:
enabled: false
runAsUser: 999
fsGroup: 999
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
resources:
requests:
memory: 256Mi
cpu: 100m
# Custom mysql configuration files path
configurationFilesPath: /etc/mysql/conf.d/
# Custom mysql configuration files used to override default mysql settings
configurationFiles: {}
# mysql.cnf: |-
# [mysqld]
# skip-name-resolve
# ssl-ca=/ssl/ca.pem
# ssl-cert=/ssl/server-cert.pem
# ssl-key=/ssl/server-key.pem
# Custom mysql init SQL files used to initialize the database
initializationFiles: {}
# first-db.sql: |-
# CREATE DATABASE IF NOT EXISTS first DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
# second-db.sql: |-
# CREATE DATABASE IF NOT EXISTS second DEFAULT CHARACTER SET utf8 DEFAULT COLLATE utf8_general_ci;
# To enable the mysql X Protocol's port
# .. will expose the port 33060
# .. Note the X Plugin needs installation
# ref: https://dev.mysql.com/doc/refman/8.0/en/x-plugin-checking-installation.html
mysqlx:
port:
enabled: false
metrics:
enabled: false
image: prom/mysqld-exporter
imageTag: v0.10.0
imagePullPolicy: IfNotPresent
resources: {}
annotations: {}
# prometheus.io/scrape: "true"
# prometheus.io/port: "9104"
livenessProbe:
initialDelaySeconds: 15
timeoutSeconds: 5
readinessProbe:
initialDelaySeconds: 5
timeoutSeconds: 1
flags: []
serviceMonitor:
enabled: false
additionalLabels: {}
## Configure the service
## ref: http://kubernetes.io/docs/user-guide/services/
service:
annotations: {}
## Specify a service type
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
type: NodePort
port: 3306
nodePort: 31201
# loadBalancerIP:
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: false
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the mysql.fullname template
# name:
ssl:
enabled: false
secret: mysql-ssl-certs
certificates:
# - name: mysql-ssl-certs
# ca: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# cert: |-
# -----BEGIN CERTIFICATE-----
# ...
# -----END CERTIFICATE-----
# key: |-
# -----BEGIN RSA PRIVATE KEY-----
# ...
# -----END RSA PRIVATE KEY-----
## Populates the 'TZ' system timezone environment variable
## ref: https://dev.mysql.com/doc/refman/5.7/en/time-zone-support.html
##
## Default: nil (mysql will use image's default timezone, normally UTC)
## Example: 'Australia/Sydney'
# timezone:
# Deployment Annotations
deploymentAnnotations: {}
# To be added to the database server pod(s)
podAnnotations: {}
podLabels: {}
## Set pod priorityClassName
# priorityClassName: {}
## Init container resources defaults
initContainer:
resources:
requests:
memory: 10Mi
cpu: 10m

View File

@ -0,0 +1,17 @@
apiVersion: v1
appVersion: 5.0.6
deprecated: true
description: DEPRECATED - Highly available Kubernetes implementation of Redis
engine: gotpl
home: http://redis.io/
icon: https://upload.wikimedia.org/wikipedia/en/thumb/6/6b/Redis_Logo.svg/1200px-Redis_Logo.svg.png
keywords:
- redis
- keyvalue
- database
name: redis-ha
sources:
- https://redis.io/download
- https://github.com/scality/Zenko/tree/development/1.0/kubernetes/zenko/charts/redis-ha
- https://github.com/oliver006/redis_exporter
version: 4.4.6

View File

@ -0,0 +1,244 @@
# ⚠️ Repo Archive Notice
As of Nov 13, 2020, charts in this repo will no longer be updated.
For more information, see the Helm Charts [Deprecation and Archive Notice](https://github.com/helm/charts#%EF%B8%8F-deprecation-and-archive-notice), and [Update](https://helm.sh/blog/charts-repo-deprecation/).
# Redis
----------------------------------------
# Deprecation Warning
*As part of the [deprecation timeline](https://github.com/helm/charts/#deprecation-timeline), we will move this chart to an official repository [here](https://github.com/DandyDeveloper/charts).*
Please make PRs / Issues here from now on
We will keep the changes in sync as best we can, but we will be notifying people to submit PRs here from now on instead. If you have any questions, feel free to get in touch with either of the maintainers.
----------------------------------------
[Redis](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs.
## TL;DR;
```bash
$ helm install stable/redis-ha
```
By default this chart installs 3 pods total:
* one pod containing a redis master and sentinel container (optional prometheus metrics exporter sidecar available)
* two pods, each containing a redis slave and sentinel container (optional prometheus metrics exporter sidecars available)
## Introduction
This chart bootstraps a [Redis](https://redis.io) highly available master/slave statefulset in a [Kubernetes](http://kubernetes.io) cluster using the Helm package manager.
## Prerequisites
- Kubernetes 1.8+ with Beta APIs enabled
- PV provisioner support in the underlying infrastructure
## Upgrading the Chart
Please note that there have been a number of changes simplifying the redis management strategy (for better failover and elections) in the 3.x version of this chart. These changes allow the use of official [redis](https://hub.docker.com/_/redis/) images that do not require special RBAC or ServiceAccount roles. As a result when upgrading from version >=2.0.1 to >=3.0.0 of this chart, `Role`, `RoleBinding`, and `ServiceAccount` resources should be deleted manually.
### Upgrading the chart from 3.x to 4.x
Starting from version `4.x` the HAProxy sidecar prometheus-exporter has been removed and replaced by the embedded [HAProxy metrics endpoint](https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter). As a result, when upgrading from version 3.x to 4.x the `haproxy.exporter` section should be removed and `haproxy.metrics` needs to be configured to fit your needs.
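A sketch of the corresponding values after the upgrade, using the `haproxy.metrics.*` parameters documented in the table below (the port and path shown are the documented defaults):

```yaml
haproxy:
  enabled: true
  metrics:
    enabled: true
    port: 9101
    scrapePath: /metrics
```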
## Installing the Chart
To install the chart:
```bash
$ helm install stable/redis-ha
```
The command deploys Redis on the Kubernetes cluster in the default configuration. By default this chart installs one master pod containing a redis master container and a sentinel container, along with 2 redis slave pods, each with their own sentinel sidecar. The [configuration](#configuration) section lists the parameters that can be configured during installation.
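Once the release is up, the default topology (one master pod plus two slave pods, each with a sentinel sidecar) can be checked with something like the following; the `app=redis-ha` label selector is an assumption about how the chart labels its pods:

```bash
# List releases, then inspect the pods created by the redis-ha chart
helm list
kubectl get pods -l app=redis-ha -o wide
```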
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the deployment:
```bash
$ helm delete <chart-name>
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Configuration
The following table lists the configurable parameters of the Redis chart and their default values.
| Parameter | Description | Default |
|:--------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------|
| `image` | Redis image | `redis` |
| `imagePullSecrets` | Reference to one or more secrets to be used when pulling redis images | [] |
| `tag` | Redis tag | `5.0.6-alpine` |
| `replicas` | Number of redis master/slave pods | `3` |
| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` |
| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the redis-ha.fullname template |
| `rbac.create` | Create and use RBAC resources | `true` |
| `redis.port` | Port to access the redis service | `6379` |
| `redis.masterGroupName` | Redis convention for naming the cluster group: must match `^[\\w-\\.]+$` and can be templated | `mymaster` |
| `redis.config` | Any valid redis config options in this section will be applied to each server (see below) | see values.yaml |
| `redis.customConfig` | Allows for custom redis.conf files to be applied. If this is used then `redis.config` is ignored | `` |
| `redis.resources` | CPU/Memory for master/slave nodes resource requests/limits | `{}` |
| `sentinel.port` | Port to access the sentinel service | `26379` |
| `sentinel.quorum` | Minimum number of servers necessary to maintain quorum | `2` |
| `sentinel.config` | Valid sentinel config options in this section will be applied as config options to each sentinel (see below) | see values.yaml |
| `sentinel.customConfig` | Allows for custom sentinel.conf files to be applied. If this is used then `sentinel.config` is ignored | `` |
| `sentinel.resources` | CPU/Memory for sentinel node resource requests/limits | `{}` |
| `init.resources` | CPU/Memory for init Container node resource requests/limits | `{}` |
| `auth` | Enables or disables redis AUTH (Requires `redisPassword` to be set) | `false` |
| `redisPassword` | A password that configures a `requirepass` and `masterauth` in the conf parameters (Requires `auth: enabled`) | `` |
| `authKey` | The key holding the redis password in an existing secret. | `auth` |
| `existingSecret` | An existing secret containing a key defined by `authKey` that configures `requirepass` and `masterauth` in the conf parameters (Requires `auth: enabled`, cannot be used in conjunction with `.Values.redisPassword`) | `` |
| `nodeSelector` | Node labels for pod assignment | `{}` |
| `tolerations` | Toleration labels for pod assignment | `[]` |
| `hardAntiAffinity` | Whether the Redis server pods should be forced to run on separate nodes. | `true` |
| `additionalAffinities` | Additional affinities to add to the Redis server pods. | `{}` |
| `securityContext` | Security context to be added to the Redis server pods. | `{runAsUser: 1000, fsGroup: 1000, runAsNonRoot: true}` |
| `affinity` | Override all other affinity settings with a string. | `""` |
| `persistentVolume.size` | Size for the volume | 10Gi |
| `persistentVolume.annotations` | Annotations for the volume | `{}` |
| `persistentVolume.reclaimPolicy` | Method used to reclaim an obsoleted volume. `Delete` or `Retain` | `""` |
| `emptyDir` | Configuration of `emptyDir`, used only if persistentVolume is disabled and no hostPath specified | `{}` |
| `exporter.enabled` | If `true`, the prometheus exporter sidecar is enabled | `false` |
| `exporter.image` | Exporter image | `oliver006/redis_exporter` |
| `exporter.tag` | Exporter tag | `v0.31.0` |
| `exporter.port` | Exporter port | `9121` |
| `exporter.annotations` | Prometheus scrape annotations | `{prometheus.io/path: /metrics, prometheus.io/port: "9121", prometheus.io/scrape: "true"}` |
| `exporter.extraArgs` | Additional args for the exporter | `{}` |
| `exporter.script` | A custom Lua script that will be mounted to exporter for collection of custom metrics. Creates a ConfigMap and sets env var `REDIS_EXPORTER_SCRIPT`. | |
| `exporter.serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` |
| `exporter.serviceMonitor.namespace` | Namespace the service monitor is created in | `default` |
| `exporter.serviceMonitor.interval` | Scrape interval, If not set, the Prometheus default scrape interval is used | `nil` |
| `exporter.serviceMonitor.telemetryPath` | Path to redis-exporter telemetry-path | `/metrics` |
| `exporter.serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` |
| `exporter.serviceMonitor.timeout` | How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used | `nil` |
| `haproxy.enabled` | Enabled HAProxy LoadBalancing/Proxy | `false` |
| `haproxy.replicas` | Number of HAProxy instances | `3` |
| `haproxy.image.repository`| HAProxy Image Repository | `haproxy` |
| `haproxy.image.tag` | HAProxy Image Tag | `2.0.1` |
| `haproxy.image.pullPolicy`| HAProxy Image PullPolicy | `IfNotPresent` |
| `haproxy.imagePullSecrets`| Reference to one or more secrets to be used when pulling haproxy images | [] |
| `haproxy.annotations` | HAProxy template annotations | `{}` |
| `haproxy.customConfig` | Allows for custom config-haproxy.cfg file to be applied. If this is used then default config will be overwritten | `` |
| `haproxy.extraConfig` | Allows to place any additional configuration section to add to the default config-haproxy.cfg | `` |
| `haproxy.resources` | HAProxy resources | `{}` |
| `haproxy.emptyDir` | Configuration of `emptyDir` | `{}` |
| `haproxy.service.type` | HAProxy service type "ClusterIP", "LoadBalancer" or "NodePort" | `ClusterIP` |
| `haproxy.service.nodePort` | HAProxy service nodePort value (haproxy.service.type must be NodePort) | not set |
| `haproxy.service.annotations` | HAProxy service annotations | `{}` |
| `haproxy.stickyBalancing` | HAProxy sticky load balancing to Redis nodes. Helps with connection shutdown. | `false` |
| `haproxy.readOnly.enabled` | Enable a read-only port for redis slaves | `false` |
| `haproxy.readOnly.port` | HAProxy port for read-only redis slaves | `6380` |
| `haproxy.metrics.enabled` | HAProxy enable prometheus metric scraping | `false` |
| `haproxy.metrics.port` | HAProxy prometheus metrics scraping port | `9101` |
| `haproxy.metrics.portName` | HAProxy metrics scraping port name | `exporter-port` |
| `haproxy.metrics.scrapePath` | HAProxy prometheus metrics scraping path | `/metrics` |
| `haproxy.metrics.serviceMonitor.enabled` | Use servicemonitor from prometheus operator for HAProxy metrics | `false` |
| `haproxy.metrics.serviceMonitor.namespace` | Namespace the service monitor for HAProxy metrics is created in | `default` |
| `haproxy.metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
| `haproxy.metrics.serviceMonitor.telemetryPath` | Path to HAProxy metrics telemetry-path | `/metrics` |
| `haproxy.metrics.serviceMonitor.labels` | Labels for the HAProxy metrics servicemonitor passed to Prometheus Operator | `{}` |
| `haproxy.metrics.serviceMonitor.timeout` | How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used | `nil` |
| `haproxy.init.resources` | Extra init resources | `{}` |
| `haproxy.timeout.connect` | haproxy.cfg `timeout connect` setting | `4s` |
| `haproxy.timeout.server` | haproxy.cfg `timeout server` setting | `30s` |
| `haproxy.timeout.client` | haproxy.cfg `timeout client` setting | `30s` |
| `haproxy.timeout.check` | haproxy.cfg `timeout check` setting | `2s` |
| `haproxy.priorityClassName` | priorityClassName for `haproxy` deployment | not set |
| `haproxy.securityContext` | Security context to be added to the HAProxy deployment. | `{runAsUser: 1000, fsGroup: 1000, runAsNonRoot: true}` |
| `haproxy.hardAntiAffinity` | Whether the haproxy pods should be forced to run on separate nodes. | `true` |
| `haproxy.affinity` | Override all other haproxy affinity settings with a string. | `""` |
| `haproxy.additionalAffinities` | Additional affinities to add to the haproxy server pods. | `{}` |
| `podDisruptionBudget` | Pod Disruption Budget rules | `{}` |
| `priorityClassName` | priorityClassName for `redis-ha-statefulset` | not set |
| `hostPath.path` | Use this path on the host for data storage | not set |
| `hostPath.chown` | Run an init-container as root to set ownership on the hostPath | `true` |
| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` |
| `sysctlImage.command` | sysctlImage command to execute | [] |
| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` |
| `sysctlImage.repository` | sysctlImage Init container name | `busybox` |
| `sysctlImage.tag` | sysctlImage Init container tag | `1.31.1` |
| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` |
| `sysctlImage.mountHostSys`| Mount the host `/sys` folder to `/host-sys` | `false` |
| `sysctlImage.resources` | sysctlImage resources | `{}` |
| `schedulerName` | Alternate scheduler name | `nil` |
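For example, the `auth`, `existingSecret`, and `authKey` parameters above can be pointed at a pre-created secret; the key inside the secret must match `authKey` (default `auth`). A sketch, with an illustrative secret name and password:
```bash
# Create a secret whose key name matches authKey (default: auth)
kubectl create secret generic redis-ha-auth --from-literal=auth='S3cretPassw0rd'

# Install the chart against the existing secret instead of setting redisPassword
helm install \
  --set auth=true \
  --set existingSecret=redis-ha-auth \
  stable/redis-ha
```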
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```bash
$ helm install \
--set image=redis \
--set tag=5.0.5-alpine \
stable/redis-ha
```
The above command deploys the Redis server into the `default` namespace with the specified image and tag.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```bash
$ helm install -f values.yaml stable/redis-ha
```
> **Tip**: You can use the default [values.yaml](values.yaml)
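A minimal override file could look like the following sketch; the values are only examples and should be adjusted to your environment:
```yml
# my-values.yaml -- example overrides
replicas: 3
auth: true
redisPassword: changeme
persistentVolume:
  size: 10Gi
haproxy:
  enabled: true
```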
## Custom Redis and Sentinel config options
This chart allows most redis or sentinel config options to be passed as key-value pairs through the `values.yaml` under `redis.config` and `sentinel.config`. See the links below for all available options.
[Example redis.conf](http://download.redis.io/redis-stable/redis.conf)
[Example sentinel.conf](http://download.redis.io/redis-stable/sentinel.conf)
For example, `repl-timeout 60` would be added to the `redis.config` section of the `values.yaml` as:
```yml
repl-timeout: "60"
```
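In context, the option sits under the `redis.config` map of `values.yaml`:
```yml
redis:
  config:
    repl-timeout: "60"
```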
Note:
1. Some config options are renamed between Redis versions, e.g.:
```
# In redis 5.x, see https://raw.githubusercontent.com/antirez/redis/5.0/redis.conf
min-replicas-to-write: 1
min-replicas-max-lag: 5
# In redis 4.x and redis 3.x, see https://raw.githubusercontent.com/antirez/redis/4.0/redis.conf and https://raw.githubusercontent.com/antirez/redis/3.0/redis.conf
min-slaves-to-write 1
min-slaves-max-lag 5
```
Sentinel options supported must be in the `sentinel <option> <master-group-name> <value>` format. For example, `sentinel down-after-milliseconds 30000` would be added to the `sentinel.config` section of the `values.yaml` as:
```yml
down-after-milliseconds: 30000
```
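With the default master group name `mymaster`, the chart renders that entry into `sentinel.conf` roughly as:
```
sentinel down-after-milliseconds mymaster 30000
```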
If more control is needed from either the redis or sentinel config then an entire config can be defined under `redis.customConfig` or `sentinel.customConfig`. Please note that these values will override any configuration options under their respective section. For example, if you define `sentinel.customConfig` then the `sentinel.config` is ignored.
## Host Kernel Settings
Redis may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
```
sysctlImage:
enabled: true
mountHostSys: true
command:
- /bin/sh
- -xc
- |-
sysctl -w net.core.somaxconn=10000
echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
```
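Once the pods are running, the change can be verified from inside a Redis container; the release name in the pod name below is illustrative:
```bash
kubectl exec -it myrelease-redis-ha-server-0 -c redis -- \
  cat /proc/sys/net/core/somaxconn
```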
## HAProxy startup
When HAProxy is enabled, its init container attempts to connect to the announce-service of every redis replica before the main container starts.
It will fail if an announce-service IP is not available quickly enough (at most 10 seconds per announce-service).
Such a case can happen if the orchestrator is still scheduling the redis pods.
The risk is limited because the announce-services use `publishNotReadyAddresses: true`; if it does happen, the HAProxy pod is simply rescheduled afterwards by the orchestrator.
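If the HAProxy pods stay stuck in the `Init` state, the `config-init` init container logs show which announce-service they are still waiting for; the deployment name below is illustrative:
```bash
kubectl logs deploy/myrelease-redis-ha-haproxy -c config-init
```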

@ -0,0 +1,10 @@
---
## Enable HAProxy to manage Load Balancing
haproxy:
enabled: true
annotations:
any.domain/key: "value"
serviceAccount:
create: true
metrics:
enabled: true

@ -0,0 +1,25 @@
Redis can be accessed via port {{ .Values.redis.port }} and Sentinel can be accessed via port {{ .Values.sentinel.port }} on the following DNS name from within your cluster:
{{ template "redis-ha.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
To connect to your Redis server:
{{- if .Values.auth }}
1. To retrieve the redis password:
echo $(kubectl get secret {{ template "redis-ha.fullname" . }} -o "jsonpath={.data['auth']}" | base64 --decode)
2. Connect to the Redis master pod that you can use as a client. By default the {{ template "redis-ha.fullname" . }}-server-0 pod is configured as the master:
kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
3. Connect using the Redis CLI (inside container):
redis-cli -a <REDIS-PASS-FROM-SECRET>
{{- else }}
1. Run a Redis pod that you can use as a client:
kubectl exec -it {{ template "redis-ha.fullname" . }}-server-0 sh -n {{ .Release.Namespace }}
2. Connect using the Redis CLI:
redis-cli -h {{ template "redis-ha.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
{{- end }}

@ -0,0 +1,275 @@
{{/* vim: set filetype=mustache: */}}
{{- define "config-redis.conf" }}
{{- if .Values.redis.customConfig }}
{{ tpl .Values.redis.customConfig . | indent 4 }}
{{- else }}
dir "/data"
port {{ .Values.redis.port }}
{{- range $key, $value := .Values.redis.config }}
{{ $key }} {{ $value }}
{{- end }}
{{- if .Values.auth }}
requirepass replace-default-auth
masterauth replace-default-auth
{{- end }}
{{- end }}
{{- end }}
{{- define "config-sentinel.conf" }}
{{- if .Values.sentinel.customConfig }}
{{ tpl .Values.sentinel.customConfig . | indent 4 }}
{{- else }}
dir "/data"
{{- range $key, $value := .Values.sentinel.config }}
{{- if eq "maxclients" $key }}
{{ $key }} {{ $value }}
{{- else }}
sentinel {{ $key }} {{ template "redis-ha.masterGroupName" $ }} {{ $value }}
{{- end }}
{{- end }}
{{- if .Values.auth }}
sentinel auth-pass {{ template "redis-ha.masterGroupName" . }} replace-default-auth
{{- end }}
{{- end }}
{{- end }}
{{- define "config-init.sh" }}
HOSTNAME="$(hostname)"
INDEX="${HOSTNAME##*-}"
MASTER="$(redis-cli -h {{ template "redis-ha.fullname" . }} -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ template "redis-ha.masterGroupName" . }} | grep -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')"
MASTER_GROUP="{{ template "redis-ha.masterGroupName" . }}"
QUORUM="{{ .Values.sentinel.quorum }}"
REDIS_CONF=/data/conf/redis.conf
REDIS_PORT={{ .Values.redis.port }}
SENTINEL_CONF=/data/conf/sentinel.conf
SENTINEL_PORT={{ .Values.sentinel.port }}
SERVICE={{ template "redis-ha.fullname" . }}
set -eu
sentinel_update() {
echo "Updating sentinel config with master $MASTER"
eval MY_SENTINEL_ID="\${SENTINEL_ID_$INDEX}"
sed -i "1s/^/sentinel myid $MY_SENTINEL_ID\\n/" "$SENTINEL_CONF"
sed -i "2s/^/sentinel monitor $MASTER_GROUP $1 $REDIS_PORT $QUORUM \\n/" "$SENTINEL_CONF"
echo "sentinel announce-ip $ANNOUNCE_IP" >> $SENTINEL_CONF
echo "sentinel announce-port $SENTINEL_PORT" >> $SENTINEL_CONF
}
redis_update() {
echo "Updating redis config"
echo "slaveof $1 $REDIS_PORT" >> "$REDIS_CONF"
echo "slave-announce-ip $ANNOUNCE_IP" >> $REDIS_CONF
echo "slave-announce-port $REDIS_PORT" >> $REDIS_CONF
}
copy_config() {
cp /readonly-config/redis.conf "$REDIS_CONF"
cp /readonly-config/sentinel.conf "$SENTINEL_CONF"
}
setup_defaults() {
echo "Setting up defaults"
if [ "$INDEX" = "0" ]; then
echo "Setting this pod as the default master"
redis_update "$ANNOUNCE_IP"
sentinel_update "$ANNOUNCE_IP"
sed -i "s/^.*slaveof.*//" "$REDIS_CONF"
else
DEFAULT_MASTER="$(getent hosts "$SERVICE-announce-0" | awk '{ print $1 }')"
if [ -z "$DEFAULT_MASTER" ]; then
echo "Unable to resolve host"
exit 1
fi
echo "Setting default slave config.."
redis_update "$DEFAULT_MASTER"
sentinel_update "$DEFAULT_MASTER"
fi
}
find_master() {
echo "Attempting to find master"
if [ "$(redis-cli -h "$MASTER"{{ if .Values.auth }} -a "$AUTH"{{ end }} ping)" != "PONG" ]; then
echo "Can't ping master, attempting to force failover"
if redis-cli -h "$SERVICE" -p "$SENTINEL_PORT" sentinel failover "$MASTER_GROUP" | grep -q 'NOGOODSLAVE' ; then
setup_defaults
return 0
fi
sleep 10
MASTER="$(redis-cli -h $SERVICE -p $SENTINEL_PORT sentinel get-master-addr-by-name $MASTER_GROUP | grep -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}')"
if [ "$MASTER" ]; then
sentinel_update "$MASTER"
redis_update "$MASTER"
else
echo "Could not failover, exiting..."
exit 1
fi
else
echo "Found reachable master, updating config"
sentinel_update "$MASTER"
redis_update "$MASTER"
fi
}
mkdir -p /data/conf/
echo "Initializing config.."
copy_config
ANNOUNCE_IP=$(getent hosts "$SERVICE-announce-$INDEX" | awk '{ print $1 }')
if [ -z "$ANNOUNCE_IP" ]; then
    echo "Could not resolve the announce ip for this pod"
exit 1
elif [ "$MASTER" ]; then
find_master
else
setup_defaults
fi
if [ "${AUTH:-}" ]; then
echo "Setting auth values"
ESCAPED_AUTH=$(echo "$AUTH" | sed -e 's/[\/&]/\\&/g');
sed -i "s/replace-default-auth/${ESCAPED_AUTH}/" "$REDIS_CONF" "$SENTINEL_CONF"
fi
echo "Ready..."
{{- end }}
{{- define "config-haproxy.cfg" }}
{{- if .Values.haproxy.customConfig }}
{{ .Values.haproxy.customConfig | indent 4}}
{{- else }}
defaults REDIS
mode tcp
timeout connect {{ .Values.haproxy.timeout.connect }}
timeout server {{ .Values.haproxy.timeout.server }}
timeout client {{ .Values.haproxy.timeout.client }}
timeout check {{ .Values.haproxy.timeout.check }}
listen health_check_http_url
bind :8888
mode http
monitor-uri /healthz
option dontlognull
{{- $root := . }}
{{- $fullName := include "redis-ha.fullname" . }}
{{- $replicas := int (toString .Values.replicas) }}
{{- $masterGroupName := include "redis-ha.masterGroupName" . }}
{{- range $i := until $replicas }}
# Check Sentinel and whether they are nominated master
backend check_if_redis_is_master_{{ $i }}
mode tcp
option tcp-check
tcp-check connect
{{- if $root.auth }}
tcp-check send AUTH\ {{ $root.redisPassword }}\r\n
tcp-check expect string +OK
{{- end }}
tcp-check send PING\r\n
tcp-check expect string +PONG
tcp-check send SENTINEL\ get-master-addr-by-name\ {{ $masterGroupName }}\r\n
tcp-check expect string REPLACE_ANNOUNCE{{ $i }}
tcp-check send QUIT\r\n
tcp-check expect string +OK
{{- range $i := until $replicas }}
server R{{ $i }} {{ $fullName }}-announce-{{ $i }}:26379 check inter 1s
{{- end }}
{{- end }}
# decide redis backend to use
#master
frontend ft_redis_master
bind *:{{ $root.Values.redis.port }}
use_backend bk_redis_master
{{- if .Values.haproxy.readOnly.enabled }}
#slave
frontend ft_redis_slave
bind *:{{ .Values.haproxy.readOnly.port }}
use_backend bk_redis_slave
{{- end }}
# Check all redis servers to see if they think they are master
backend bk_redis_master
{{- if .Values.haproxy.stickyBalancing }}
balance source
hash-type consistent
{{- end }}
mode tcp
option tcp-check
tcp-check connect
{{- if .Values.auth }}
tcp-check send AUTH\ REPLACE_AUTH_SECRET\r\n
tcp-check expect string +OK
{{- end }}
tcp-check send PING\r\n
tcp-check expect string +PONG
tcp-check send info\ replication\r\n
tcp-check expect string role:master
tcp-check send QUIT\r\n
tcp-check expect string +OK
{{- range $i := until $replicas }}
use-server R{{ $i }} if { srv_is_up(R{{ $i }}) } { nbsrv(check_if_redis_is_master_{{ $i }}) ge 2 }
server R{{ $i }} {{ $fullName }}-announce-{{ $i }}:{{ $root.Values.redis.port }} check inter 1s fall 1 rise 1
{{- end }}
{{- if .Values.haproxy.readOnly.enabled }}
backend bk_redis_slave
{{- if .Values.haproxy.stickyBalancing }}
balance source
hash-type consistent
{{- end }}
mode tcp
option tcp-check
tcp-check connect
{{- if .Values.auth }}
tcp-check send AUTH\ REPLACE_AUTH_SECRET\r\n
tcp-check expect string +OK
{{- end }}
tcp-check send PING\r\n
tcp-check expect string +PONG
tcp-check send info\ replication\r\n
tcp-check expect string role:slave
tcp-check send QUIT\r\n
tcp-check expect string +OK
{{- range $i := until $replicas }}
server R{{ $i }} {{ $fullName }}-announce-{{ $i }}:{{ $root.Values.redis.port }} check inter 1s fall 1 rise 1
{{- end }}
{{- end }}
{{- if .Values.haproxy.metrics.enabled }}
frontend metrics
mode http
bind *:{{ .Values.haproxy.metrics.port }}
option http-use-htx
http-request use-service prometheus-exporter if { path {{ .Values.haproxy.metrics.scrapePath }} }
{{- end }}
{{- if .Values.haproxy.extraConfig }}
# Additional configuration
{{ .Values.haproxy.extraConfig | indent 4 }}
{{- end }}
{{- end }}
{{- end }}
{{- define "config-haproxy_init.sh" }}
HAPROXY_CONF=/data/haproxy.cfg
cp /readonly/haproxy.cfg "$HAPROXY_CONF"
{{- $fullName := include "redis-ha.fullname" . }}
{{- $replicas := int (toString .Values.replicas) }}
{{- range $i := until $replicas }}
for loop in $(seq 1 10); do
getent hosts {{ $fullName }}-announce-{{ $i }} && break
echo "Waiting for service {{ $fullName }}-announce-{{ $i }} to be ready ($loop) ..." && sleep 1
done
ANNOUNCE_IP{{ $i }}=$(getent hosts "{{ $fullName }}-announce-{{ $i }}" | awk '{ print $1 }')
if [ -z "$ANNOUNCE_IP{{ $i }}" ]; then
echo "Could not resolve the announce ip for {{ $fullName }}-announce-{{ $i }}"
exit 1
fi
sed -i "s/REPLACE_ANNOUNCE{{ $i }}/$ANNOUNCE_IP{{ $i }}/" "$HAPROXY_CONF"
if [ "${AUTH:-}" ]; then
echo "Setting auth values"
ESCAPED_AUTH=$(echo "$AUTH" | sed -e 's/[\/&]/\\&/g');
sed -i "s/REPLACE_AUTH_SECRET/${ESCAPED_AUTH}/" "$HAPROXY_CONF"
fi
{{- end }}
{{- end }}

@ -0,0 +1,83 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "redis-ha.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "redis-ha.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Return sysctl image
*/}}
{{- define "redis.sysctl.image" -}}
{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}}
{{- $tag := default "latest" .Values.sysctlImage.tag | toString -}}
{{- printf "%s/%s:%s" $registryName .Values.sysctlImage.repository $tag -}}
{{- end -}}
{{- /*
Credit: @technosophos
https://github.com/technosophos/common-chart/
labels.standard prints the standard Helm labels.
The standard labels are frequently used in metadata.
*/ -}}
{{- define "labels.standard" -}}
app: {{ template "redis-ha.name" . }}
heritage: {{ .Release.Service | quote }}
release: {{ .Release.Name | quote }}
chart: {{ template "chartref" . }}
{{- end -}}
{{- /*
Credit: @technosophos
https://github.com/technosophos/common-chart/
chartref prints a chart name and version.
It does minimal escaping for use in Kubernetes labels.
Example output:
zookeeper-1.2.3
wordpress-3.2.1_20170219
*/ -}}
{{- define "chartref" -}}
{{- replace "+" "_" .Chart.Version | printf "%s-%s" .Chart.Name -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "redis-ha.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "redis-ha.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{- define "redis-ha.masterGroupName" -}}
{{- $masterGroupName := tpl ( .Values.redis.masterGroupName | default "") . -}}
{{- $validMasterGroupName := regexMatch "^[\\w-\\.]+$" $masterGroupName -}}
{{- if $validMasterGroupName -}}
{{ $masterGroupName }}
{{- else -}}
{{ required "A valid .Values.redis.masterGroupName entry is required (matching ^[\\w-\\.]+$)" ""}}
{{- end -}}
{{- end -}}

@ -0,0 +1,12 @@
{{- if and .Values.auth (not .Values.existingSecret) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "redis-ha.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
type: Opaque
data:
{{ .Values.authKey }}: {{ .Values.redisPassword | b64enc | quote }}
{{- end -}}

@ -0,0 +1,41 @@
{{- $fullName := include "redis-ha.fullname" . }}
{{- $namespace := .Release.Namespace -}}
{{- $replicas := int (toString .Values.replicas) }}
{{- $root := . }}
{{- range $i := until $replicas }}
---
apiVersion: v1
kind: Service
metadata:
name: {{ $fullName }}-announce-{{ $i }}
namespace: {{ $namespace }}
labels:
{{ include "labels.standard" $root | indent 4 }}
annotations:
service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
{{- if $root.Values.serviceAnnotations }}
{{ toYaml $root.Values.serviceAnnotations | indent 4 }}
{{- end }}
spec:
publishNotReadyAddresses: true
type: ClusterIP
ports:
- name: server
port: {{ $root.Values.redis.port }}
protocol: TCP
targetPort: redis
- name: sentinel
port: {{ $root.Values.sentinel.port }}
protocol: TCP
targetPort: sentinel
{{- if $root.Values.exporter.enabled }}
- name: exporter
port: {{ $root.Values.exporter.port }}
protocol: TCP
targetPort: exporter-port
{{- end }}
selector:
release: {{ $root.Release.Name }}
app: {{ include "redis-ha.name" $root }}
"statefulset.kubernetes.io/pod-name": {{ $fullName }}-server-{{ $i }}
{{- end }}

@ -0,0 +1,25 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "redis-ha.fullname" . }}-configmap
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
app: {{ template "redis-ha.fullname" . }}
data:
redis.conf: |
{{- include "config-redis.conf" . }}
sentinel.conf: |
{{- include "config-sentinel.conf" . }}
init.sh: |
{{- include "config-init.sh" . }}
{{ if .Values.haproxy.enabled }}
haproxy.cfg: |-
{{- include "config-haproxy.cfg" . }}
{{- end }}
haproxy_init.sh: |
{{- include "config-haproxy_init.sh" . }}

@ -0,0 +1,11 @@
{{- if .Values.exporter.script }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "redis-ha.fullname" . }}-exporter-script-configmap
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
data:
script: {{ toYaml .Values.exporter.script | indent 2 }}
{{- end }}

@ -0,0 +1,15 @@
{{- if .Values.podDisruptionBudget -}}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "redis-ha.fullname" . }}-pdb
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
spec:
selector:
matchLabels:
release: {{ .Release.Name }}
app: {{ template "redis-ha.name" . }}
{{ toYaml .Values.podDisruptionBudget | indent 2 }}
{{- end -}}

@ -0,0 +1,19 @@
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: {{ template "redis-ha.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
app: {{ template "redis-ha.fullname" . }}
rules:
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
{{- end }}

@ -0,0 +1,19 @@
{{- if and .Values.serviceAccount.create .Values.rbac.create }}
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "redis-ha.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
app: {{ template "redis-ha.fullname" . }}
subjects:
- kind: ServiceAccount
name: {{ template "redis-ha.serviceAccountName" . }}
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: {{ template "redis-ha.fullname" . }}
{{- end }}

@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "redis-ha.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
{{- if and ( .Values.exporter.enabled ) ( .Values.exporter.serviceMonitor.enabled ) }}
servicemonitor: enabled
{{- end }}
annotations:
{{- if .Values.serviceAnnotations }}
{{ toYaml .Values.serviceAnnotations | indent 4 }}
{{- end }}
spec:
type: NodePort
# clusterIP: None
ports:
- name: server
port: {{ .Values.redis.port }}
protocol: TCP
targetPort: redis
nodePort: 31202
- name: sentinel
port: {{ .Values.sentinel.port }}
protocol: TCP
targetPort: sentinel
{{- if .Values.exporter.enabled }}
- name: exporter-port
port: {{ .Values.exporter.port }}
protocol: TCP
targetPort: exporter-port
{{- end }}
selector:
release: {{ .Release.Name }}
app: {{ template "redis-ha.name" . }}

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "redis-ha.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
app: {{ template "redis-ha.fullname" . }}
{{- end }}

@ -0,0 +1,35 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.exporter.serviceMonitor.enabled ) ( .Values.exporter.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
{{- if .Values.exporter.serviceMonitor.labels }}
labels:
{{ toYaml .Values.exporter.serviceMonitor.labels | indent 4}}
{{- end }}
name: {{ template "redis-ha.fullname" . }}
namespace: {{ .Release.Namespace }}
{{- if .Values.exporter.serviceMonitor.namespace }}
namespace: {{ .Values.exporter.serviceMonitor.namespace }}
{{- end }}
spec:
endpoints:
- targetPort: {{ .Values.exporter.port }}
{{- if .Values.exporter.serviceMonitor.interval }}
interval: {{ .Values.exporter.serviceMonitor.interval }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.telemetryPath }}
path: {{ .Values.exporter.serviceMonitor.telemetryPath }}
{{- end }}
{{- if .Values.exporter.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.exporter.serviceMonitor.timeout }}
{{- end }}
jobLabel: {{ template "redis-ha.fullname" . }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "redis-ha.name" . }}
release: {{ .Release.Name }}
servicemonitor: enabled
{{- end }}

@ -0,0 +1,319 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ template "redis-ha.fullname" . }}-server
namespace: {{ .Release.Namespace }}
labels:
{{ template "redis-ha.fullname" . }}: replica
{{ include "labels.standard" . | indent 4 }}
spec:
selector:
matchLabels:
release: {{ .Release.Name }}
app: {{ template "redis-ha.name" . }}
serviceName: {{ template "redis-ha.fullname" . }}
replicas: {{ .Values.replicas }}
podManagementPolicy: OrderedReady
updateStrategy:
type: RollingUpdate
template:
metadata:
annotations:
checksum/init-config: {{ print (include "config-redis.conf" .) (include "config-init.sh" .) | sha256sum }}
{{- if .Values.podAnnotations }}
{{ toYaml .Values.podAnnotations | indent 8 }}
{{- end }}
{{- if .Values.exporter.enabled }}
prometheus.io/port: "{{ .Values.exporter.port }}"
prometheus.io/scrape: "true"
prometheus.io/path: {{ .Values.exporter.scrapePath }}
{{- end }}
labels:
release: {{ .Release.Name }}
app: {{ template "redis-ha.name" . }}
{{ template "redis-ha.fullname" . }}: replica
{{- range $key, $value := .Values.labels }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
{{- if .Values.schedulerName }}
schedulerName: "{{ .Values.schedulerName }}"
{{- end }}
{{- if .Values.nodeSelector }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
{{- end }}
{{- if .Values.tolerations }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
{{- end }}
affinity:
{{- if .Values.affinity }}
{{- with .Values.affinity }}
{{ tpl . $ | indent 8 }}
{{- end }}
{{- else }}
{{- if .Values.additionalAffinities }}
{{ toYaml .Values.additionalAffinities | indent 8 }}
{{- end }}
podAntiAffinity:
{{- if .Values.hardAntiAffinity }}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}
release: {{ .Release.Name }}
{{ template "redis-ha.fullname" . }}: replica
topologyKey: kubernetes.io/hostname
{{- else }}
preferredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}
release: {{ .Release.Name }}
{{ template "redis-ha.fullname" . }}: replica
topologyKey: kubernetes.io/hostname
{{- end }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}
release: {{ .Release.Name }}
{{ template "redis-ha.fullname" . }}: replica
topologyKey: failure-domain.beta.kubernetes.io/zone
{{- end }}
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 8 }}
{{- end }}
securityContext:
{{ toYaml .Values.securityContext | indent 8 }}
serviceAccountName: {{ template "redis-ha.serviceAccountName" . }}
initContainers:
{{- if .Values.sysctlImage.enabled }}
- name: init-sysctl
image: {{ template "redis.sysctl.image" . }}
imagePullPolicy: {{ .Values.sysctlImage.pullPolicy }}
resources:
{{ toYaml .Values.sysctlImage.resources | indent 10 }}
{{- if .Values.sysctlImage.mountHostSys }}
volumeMounts:
- name: host-sys
mountPath: /host-sys
{{- end }}
command:
{{ toYaml .Values.sysctlImage.command | indent 10 }}
securityContext:
runAsNonRoot: false
privileged: true
runAsUser: 0
{{- end }}
{{- if and .Values.hostPath.path .Values.hostPath.chown }}
- name: hostpath-chown
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
securityContext:
runAsNonRoot: false
runAsUser: 0
command:
- chown
- "{{ .Values.securityContext.runAsUser }}"
- /data
volumeMounts:
- name: data
mountPath: /data
{{- end }}
- name: config-init
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
resources:
{{ toYaml .Values.init.resources | indent 10 }}
command:
- sh
args:
- /readonly-config/init.sh
env:
{{- $replicas := int (toString .Values.replicas) -}}
{{- range $i := until $replicas }}
- name: SENTINEL_ID_{{ $i }}
value: {{ printf "%s\n%s\nindex: %d" (include "redis-ha.name" $) ($.Release.Name) $i | sha1sum }}
{{ end -}}
{{- if .Values.auth }}
- name: AUTH
valueFrom:
secretKeyRef:
{{- if .Values.existingSecret }}
name: {{ .Values.existingSecret }}
{{- else }}
name: {{ template "redis-ha.fullname" . }}
{{- end }}
key: {{ .Values.authKey }}
{{- end }}
volumeMounts:
- name: config
mountPath: /readonly-config
readOnly: true
- name: data
mountPath: /data
containers:
- name: redis
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- redis-server
args:
- /data/conf/redis.conf
{{- if .Values.auth }}
env:
- name: AUTH
valueFrom:
secretKeyRef:
{{- if .Values.existingSecret }}
name: {{ .Values.existingSecret }}
{{- else }}
name: {{ template "redis-ha.fullname" . }}
{{- end }}
key: {{ .Values.authKey }}
{{- end }}
livenessProbe:
tcpSocket:
port: {{ .Values.redis.port }}
initialDelaySeconds: 15
resources:
{{ toYaml .Values.redis.resources | indent 10 }}
ports:
- name: redis
containerPort: {{ .Values.redis.port }}
volumeMounts:
- mountPath: /data
name: data
- name: sentinel
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
imagePullPolicy: {{ .Values.image.pullPolicy }}
command:
- redis-sentinel
args:
- /data/conf/sentinel.conf
{{- if .Values.auth }}
env:
- name: AUTH
valueFrom:
secretKeyRef:
{{- if .Values.existingSecret }}
name: {{ .Values.existingSecret }}
{{- else }}
name: {{ template "redis-ha.fullname" . }}
{{- end }}
key: {{ .Values.authKey }}
{{- end }}
livenessProbe:
tcpSocket:
port: {{ .Values.sentinel.port }}
initialDelaySeconds: 15
resources:
{{ toYaml .Values.sentinel.resources | indent 10 }}
ports:
- name: sentinel
containerPort: {{ .Values.sentinel.port }}
volumeMounts:
- mountPath: /data
name: data
{{- if .Values.exporter.enabled }}
- name: redis-exporter
image: "{{ .Values.exporter.image }}:{{ .Values.exporter.tag }}"
imagePullPolicy: {{ .Values.exporter.pullPolicy }}
args:
{{- range $key, $value := .Values.exporter.extraArgs }}
- --{{ $key }}={{ $value }}
{{- end }}
env:
- name: REDIS_ADDR
value: redis://localhost:{{ .Values.redis.port }}
{{- if .Values.auth }}
- name: REDIS_PASSWORD
valueFrom:
secretKeyRef:
{{- if .Values.existingSecret }}
name: {{ .Values.existingSecret }}
{{- else }}
name: {{ template "redis-ha.fullname" . }}
{{- end }}
key: {{ .Values.authKey }}
{{- end }}
{{- if .Values.exporter.script }}
- name: REDIS_EXPORTER_SCRIPT
value: /script/script.lua
{{- end }}
livenessProbe:
httpGet:
path: {{ .Values.exporter.scrapePath }}
port: {{ .Values.exporter.port }}
initialDelaySeconds: 15
timeoutSeconds: 1
periodSeconds: 15
resources:
{{ toYaml .Values.exporter.resources | indent 10 }}
ports:
- name: exporter-port
containerPort: {{ .Values.exporter.port }}
{{- if .Values.exporter.script }}
volumeMounts:
- mountPath: /script
name: script-mount
{{- end }}
{{- end }}
{{- if .Values.priorityClassName }}
priorityClassName: {{ .Values.priorityClassName }}
{{- end }}
volumes:
- name: config
configMap:
name: {{ template "redis-ha.fullname" . }}-configmap
{{- if .Values.sysctlImage.mountHostSys }}
- name: host-sys
hostPath:
path: /sys
{{- end }}
{{- if .Values.exporter.script }}
- name: script-mount
configMap:
name: {{ template "redis-ha.fullname" . }}-exporter-script-configmap
items:
- key: script
path: script.lua
{{- end }}
{{- if .Values.persistentVolume.enabled }}
volumeClaimTemplates:
- metadata:
name: data
annotations:
{{- range $key, $value := .Values.persistentVolume.annotations }}
{{ $key }}: {{ $value }}
{{- end }}
spec:
accessModes:
{{- range .Values.persistentVolume.accessModes }}
- {{ . | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.persistentVolume.size | quote }}
{{- if .Values.persistentVolume.storageClass }}
{{- if (eq "-" .Values.persistentVolume.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: "{{ .Values.persistentVolume.storageClass }}"
{{- end }}
{{- end }}
{{- if .Values.persistentVolume.reclaimPolicy }}
persistentVolumeReclaimPolicy: "{{ .Values.persistentVolume.reclaimPolicy }}"
{{- end }}
{{- else if .Values.hostPath.path }}
- name: data
hostPath:
path: {{ tpl .Values.hostPath.path .}}
{{- else }}
- name: data
emptyDir:
{{ toYaml .Values.emptyDir | indent 10 }}
{{- end }}

@ -0,0 +1,151 @@
{{- if .Values.haproxy.enabled }}
kind: Deployment
apiVersion: apps/v1
metadata:
name: {{ template "redis-ha.fullname" . }}-haproxy
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
spec:
strategy:
type: RollingUpdate
revisionHistoryLimit: 1
replicas: {{ .Values.haproxy.replicas }}
selector:
matchLabels:
app: {{ template "redis-ha.name" . }}-haproxy
release: {{ .Release.Name }}
template:
metadata:
name: {{ template "redis-ha.fullname" . }}-haproxy
labels:
app: {{ template "redis-ha.name" . }}-haproxy
release: {{ .Release.Name }}
annotations:
{{- if .Values.haproxy.metrics.enabled }}
prometheus.io/port: "{{ .Values.haproxy.metrics.port }}"
prometheus.io/scrape: "true"
prometheus.io/path: "{{ .Values.haproxy.metrics.scrapePath }}"
{{- end }}
checksum/config: {{ print (include "config-haproxy.cfg" .) (include "config-haproxy_init.sh" .) | sha256sum }}
{{- if .Values.haproxy.annotations }}
{{ toYaml .Values.haproxy.annotations | indent 8 }}
{{- end }}
spec:
# Needed when using unmodified rbac-setup.yml
{{ if .Values.haproxy.serviceAccount.create }}
serviceAccountName: {{ template "redis-ha.serviceAccountName" . }}-haproxy
{{ end }}
nodeSelector:
{{ toYaml .Values.nodeSelector | indent 8 }}
tolerations:
{{ toYaml .Values.tolerations | indent 8 }}
affinity:
{{- if .Values.haproxy.affinity }}
{{- with .Values.haproxy.affinity }}
{{ tpl . $ | indent 8 }}
{{- end }}
{{- else }}
{{- if .Values.haproxy.additionalAffinities }}
{{ toYaml .Values.haproxy.additionalAffinities | indent 8 }}
{{- end }}
podAntiAffinity:
{{- if .Values.haproxy.hardAntiAffinity }}
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}-haproxy
release: {{ .Release.Name }}
topologyKey: kubernetes.io/hostname
{{- else }}
preferredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}-haproxy
release: {{ .Release.Name }}
topologyKey: kubernetes.io/hostname
{{- end }}
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchLabels:
app: {{ template "redis-ha.name" . }}-haproxy
release: {{ .Release.Name }}
topologyKey: failure-domain.beta.kubernetes.io/zone
{{- end }}
initContainers:
- name: config-init
image: {{ .Values.haproxy.image.repository }}:{{ .Values.haproxy.image.tag }}
imagePullPolicy: {{ .Values.haproxy.image.pullPolicy }}
resources:
{{ toYaml .Values.haproxy.init.resources | indent 10 }}
command:
- sh
args:
- /readonly/haproxy_init.sh
{{- if .Values.auth }}
env:
- name: AUTH
valueFrom:
secretKeyRef:
{{- if .Values.existingSecret }}
name: {{ .Values.existingSecret }}
{{- else }}
name: {{ template "redis-ha.fullname" . }}
{{- end }}
key: {{ .Values.authKey }}
{{- end }}
volumeMounts:
- name: config-volume
mountPath: /readonly
readOnly: true
- name: data
mountPath: /data
{{- if .Values.haproxy.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.haproxy.imagePullSecrets | nindent 8 }}
{{- end }}
securityContext:
{{ toYaml .Values.haproxy.securityContext | indent 8 }}
containers:
- name: haproxy
image: {{ .Values.haproxy.image.repository }}:{{ .Values.haproxy.image.tag }}
imagePullPolicy: {{ .Values.haproxy.image.pullPolicy }}
livenessProbe:
httpGet:
path: /healthz
port: 8888
initialDelaySeconds: 5
periodSeconds: 3
ports:
- name: redis
containerPort: {{ default "6379" .Values.redis.port }}
{{- if .Values.haproxy.readOnly.enabled }}
- name: readonlyport
containerPort: {{ default "6380" .Values.haproxy.readOnly.port }}
{{- end }}
{{- if .Values.haproxy.metrics.enabled }}
- name: metrics-port
containerPort: {{ default "9101" .Values.haproxy.metrics.port }}
{{- end }}
resources:
{{ toYaml .Values.haproxy.resources | indent 10 }}
volumeMounts:
- name: data
mountPath: /usr/local/etc/haproxy
- name: shared-socket
mountPath: /run/haproxy
{{- if .Values.haproxy.priorityClassName }}
priorityClassName: {{ .Values.haproxy.priorityClassName }}
{{- end }}
volumes:
- name: config-volume
configMap:
name: {{ template "redis-ha.fullname" . }}-configmap
- name: shared-socket
emptyDir:
{{ toYaml .Values.haproxy.emptyDir | indent 10 }}
- name: data
emptyDir:
{{ toYaml .Values.haproxy.emptyDir | indent 10 }}
{{- end }}

@ -0,0 +1,42 @@
{{- if .Values.haproxy.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "redis-ha.fullname" . }}-haproxy
namespace: {{ .Release.Namespace }}
labels:
{{ include "labels.standard" . | indent 4 }}
component: {{ template "redis-ha.fullname" . }}-haproxy
annotations:
{{- if .Values.haproxy.service.annotations }}
{{ toYaml .Values.haproxy.service.annotations | indent 4 }}
{{- end }}
spec:
type: {{ default "ClusterIP" .Values.haproxy.service.type }}
{{- if and (eq .Values.haproxy.service.type "LoadBalancer") .Values.haproxy.service.loadBalancerIP }}
loadBalancerIP: {{ .Values.haproxy.service.loadBalancerIP }}
{{- end }}
ports:
- name: haproxy
port: {{ .Values.redis.port }}
protocol: TCP
targetPort: redis
{{- if and (eq .Values.haproxy.service.type "NodePort") .Values.haproxy.service.nodePort }}
nodePort: {{ .Values.haproxy.service.nodePort }}
{{- end }}
{{- if .Values.haproxy.readOnly.enabled }}
- name: haproxyreadonly
port: {{ .Values.haproxy.readOnly.port }}
protocol: TCP
targetPort: {{ .Values.haproxy.readOnly.port }}
{{- end }}
{{- if .Values.haproxy.metrics.enabled }}
- name: {{ .Values.haproxy.metrics.portName }}
port: {{ .Values.haproxy.metrics.port }}
protocol: TCP
targetPort: metrics-port
{{- end }}
selector:
release: {{ .Release.Name }}
app: {{ template "redis-ha.name" . }}-haproxy
{{- end }}

@ -0,0 +1,12 @@
{{- if and .Values.haproxy.serviceAccount.create .Values.haproxy.enabled }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "redis-ha.serviceAccountName" . }}-haproxy
namespace: {{ .Release.Namespace }}
labels:
heritage: {{ .Release.Service }}
release: {{ .Release.Name }}
chart: {{ .Chart.Name }}-{{ .Chart.Version }}
app: {{ template "redis-ha.fullname" . }}
{{- end }}

@ -0,0 +1,34 @@
{{- if and ( .Capabilities.APIVersions.Has "monitoring.coreos.com/v1" ) ( .Values.haproxy.metrics.serviceMonitor.enabled ) ( .Values.haproxy.metrics.enabled ) }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
{{- with .Values.haproxy.metrics.serviceMonitor.labels }}
labels: {{ toYaml . | nindent 4}}
{{- end }}
name: {{ template "redis-ha.fullname" . }}-haproxy
namespace: {{ .Release.Namespace }}
{{- if .Values.haproxy.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.haproxy.metrics.serviceMonitor.namespace }}
{{- end }}
spec:
endpoints:
- targetPort: {{ .Values.haproxy.metrics.port }}
{{- if .Values.haproxy.metrics.serviceMonitor.interval }}
interval: {{ .Values.haproxy.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.haproxy.metrics.serviceMonitor.telemetryPath }}
path: {{ .Values.haproxy.metrics.serviceMonitor.telemetryPath }}
{{- end }}
{{- if .Values.haproxy.metrics.serviceMonitor.timeout }}
scrapeTimeout: {{ .Values.haproxy.metrics.serviceMonitor.timeout }}
{{- end }}
jobLabel: {{ template "redis-ha.fullname" . }}-haproxy
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
selector:
matchLabels:
app: {{ template "redis-ha.name" . }}
release: {{ .Release.Name }}
component: {{ template "redis-ha.fullname" . }}-haproxy
{{- end }}

@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
name: {{ template "redis-ha.fullname" . }}-configmap-test
labels:
{{ include "labels.standard" . | indent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: check-init
image: koalaman/shellcheck:v0.5.0
args:
- --shell=sh
- /readonly-config/init.sh
volumeMounts:
- name: config
mountPath: /readonly-config
readOnly: true
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 4 }}
{{- end }}
restartPolicy: Never
volumes:
- name: config
configMap:
name: {{ template "redis-ha.fullname" . }}-configmap

@ -0,0 +1,20 @@
apiVersion: v1
kind: Pod
metadata:
name: {{ template "redis-ha.fullname" . }}-service-test
labels:
{{ include "labels.standard" . | indent 4 }}
annotations:
"helm.sh/hook": test-success
spec:
containers:
- name: "{{ .Release.Name }}-service-test"
image: {{ .Values.image.repository }}:{{ .Values.image.tag }}
command:
- sh
- -c
- redis-cli -h {{ template "redis-ha.fullname" . }} -p {{ .Values.redis.port }} info server
{{- if .Values.imagePullSecrets }}
imagePullSecrets: {{ toYaml .Values.imagePullSecrets | nindent 4 }}
{{- end }}
restartPolicy: Never

@ -0,0 +1,362 @@
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
image:
repository: redis
tag: 5.0.6-alpine
pullPolicy: IfNotPresent
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## This imagePullSecrets is only for redis images
##
imagePullSecrets: []
# - name: "image-pull-secret"
## replicas number for each component
replicas: 1
## Kubernetes priorityClass name for the redis-ha-server pod
# priorityClassName: ""
## Custom labels for the redis pod
labels: {}
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the redis-ha.fullname template
# name:
## Enables HAProxy for better load balancing / Sentinel master support. Automatically proxies to the Redis master.
## Recommended for externally exposed Redis clusters.
## ref: https://cbonte.github.io/haproxy-dconv/1.9/intro.html
haproxy:
enabled: false
# Enable if you want a dedicated port in haproxy for redis-slaves
readOnly:
enabled: false
port: 6380
replicas: 3
image:
repository: haproxy
tag: 2.0.4
pullPolicy: IfNotPresent
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
annotations: {}
resources: {}
emptyDir: {}
## Enable sticky sessions to Redis nodes via HAProxy
  ## Very useful for long-lived connections, for example in the case of Sentry
stickyBalancing: false
## Kubernetes priorityClass name for the haproxy pod
# priorityClassName: ""
## Service type for HAProxy
##
service:
type: ClusterIP
loadBalancerIP:
annotations: {}
serviceAccount:
create: true
## Official HAProxy embedded prometheus metrics settings.
## Ref: https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter
##
metrics:
enabled: false
# prometheus port & scrape path
port: 9101
portName: exporter-port
scrapePath: /metrics
serviceMonitor:
# When set true then use a ServiceMonitor to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
      # Set path to redis-exporter telemetry-path
# telemetryPath: /metrics
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels: {}
# Set timeout for scrape
# timeout: 10s
init:
resources: {}
timeout:
connect: 4s
server: 30s
client: 30s
check: 2s
securityContext:
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
## Whether the haproxy pods should be forced to run on separate nodes.
hardAntiAffinity: false
## Additional affinities to add to the haproxy pods.
additionalAffinities: {}
## Override all other affinity settings for the haproxy pods with a string.
affinity: |
## Custom config-haproxy.cfg files used to override default settings. If this file is
## specified then the config-haproxy.cfg above will be ignored.
# customConfig: |-
# Define configuration here
## Place any additional configuration section to add to the default config-haproxy.cfg
# extraConfig: |-
# Define configuration here
## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
create: true
sysctlImage:
enabled: false
command: []
registry: docker.io
repository: busybox
tag: 1.31.1
pullPolicy: Always
mountHostSys: false
resources: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Redis specific configuration options
redis:
port: 6379
  masterGroupName: "mymaster" # must match ^[\\w-\\.]+$ and can be templated
config:
## Additional redis conf options can be added below
## For all available options see http://download.redis.io/redis-stable/redis.conf
min-replicas-to-write: 0
min-replicas-max-lag: 5 # Value in seconds
maxmemory: "0" # Max memory to use for each redis instance. Default is unlimited.
maxmemory-policy: "volatile-lru" # Max memory policy to use for each redis instance. Default is volatile-lru.
# Determines if scheduled RDB backups are created. Default is false.
# Please note that local (on-disk) RDBs will still be created when re-syncing with a new slave. The only way to prevent this is to enable diskless replication.
save: "900 1"
# When enabled, directly sends the RDB over the wire to slaves, without using the disk as intermediate storage. Default is false.
repl-diskless-sync: "yes"
rdbcompression: "yes"
rdbchecksum: "yes"
## Custom redis.conf files used to override default settings. If this file is
## specified then the redis.config above will be ignored.
# customConfig: |-
# Define configuration here
resources: {}
# requests:
# memory: 200Mi
# cpu: 100m
# limits:
# memory: 700Mi
## Sentinel specific configuration options
sentinel:
port: 26379
quorum: 2
config:
## Additional sentinel conf options can be added below. Only options that
    ## are expressed in a format similar to 'sentinel xxx mymaster xxx' will
    ## be properly templated, except for the maxclients option.
## For available options see http://download.redis.io/redis-stable/sentinel.conf
down-after-milliseconds: 10000
## Failover timeout value in milliseconds
failover-timeout: 180000
parallel-syncs: 5
maxclients: 10000
## Custom sentinel.conf files used to override default settings. If this file is
## specified then the sentinel.config above will be ignored.
# customConfig: |-
# Define configuration here
resources: {}
# requests:
# memory: 200Mi
# cpu: 100m
# limits:
# memory: 200Mi
securityContext:
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
## Node labels, affinity, and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
nodeSelector: {}
## Whether the Redis server pods should be forced to run on separate nodes.
## This is accomplished by setting their AntiAffinity with requiredDuringSchedulingIgnoredDuringExecution as opposed to preferred.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature
##
hardAntiAffinity: false
## Additional affinities to add to the Redis server pods.
##
## Example:
## nodeAffinity:
## preferredDuringSchedulingIgnoredDuringExecution:
## - weight: 50
## preference:
## matchExpressions:
## - key: spot
## operator: NotIn
## values:
## - "true"
##
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
additionalAffinities: {}
## Override all other affinity settings for the Redis server pods with a string.
##
## Example:
## affinity: |
## podAntiAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## - labelSelector:
## matchLabels:
## app: {{ template "redis-ha.name" . }}
## release: {{ .Release.Name }}
## topologyKey: kubernetes.io/hostname
## preferredDuringSchedulingIgnoredDuringExecution:
## - weight: 100
## podAffinityTerm:
## labelSelector:
## matchLabels:
## app: {{ template "redis-ha.name" . }}
## release: {{ .Release.Name }}
## topologyKey: failure-domain.beta.kubernetes.io/zone
##
affinity: |
# Prometheus exporter specific configuration options
exporter:
enabled: false
image: oliver006/redis_exporter
tag: v1.3.2
pullPolicy: IfNotPresent
# prometheus port & scrape path
port: 9121
scrapePath: /metrics
# cpu/memory resource limits/requests
resources: {}
# Additional args for redis exporter
extraArgs: {}
# Used to mount a LUA-Script via config map and use it for metrics-collection
# script: |
# -- Example script copied from: https://github.com/oliver006/redis_exporter/blob/master/contrib/sample_collect_script.lua
# -- Example collect script for -script option
# -- This returns a Lua table with alternating keys and values.
# -- Both keys and values must be strings, similar to a HGETALL result.
# -- More info about Redis Lua scripting: https://redis.io/commands/eval
#
# local result = {}
#
# -- Add all keys and values from some hash in db 5
# redis.call("SELECT", 5)
# local r = redis.call("HGETALL", "some-hash-with-stats")
# if r ~= nil then
# for _,v in ipairs(r) do
# table.insert(result, v) -- alternating keys and values
# end
# end
#
# -- Set foo to 42
# table.insert(result, "foo")
# table.insert(result, "42") -- note the string, use tostring() if needed
#
# return result
serviceMonitor:
# When set true then use a ServiceMonitor to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
    # Set path to redis-exporter telemetry-path
# telemetryPath: /metrics
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels: {}
# Set timeout for scrape
# timeout: 10s
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
## Configures redis with AUTH (requirepass & masterauth conf params)
auth: false
# redisPassword:
## Use existing secret containing key `authKey` (ignores redisPassword)
# existingSecret:
## Defines the key holding the redis password in existing secret.
authKey: auth
persistentVolume:
enabled: true
## redis-ha data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "storage-nfs"
accessModes:
- ReadWriteOnce
size: 2Gi
annotations: {}
# reclaimPolicy per https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
reclaimPolicy: ""
init:
resources: {}
# To use a hostPath for data, set persistentVolume.enabled to false
# and define hostPath.path.
# Warning: this might overwrite existing folders on the host system!
hostPath:
## path is evaluated as template so placeholders are replaced
# path: "/data/{{ .Release.Name }}"
# if chown is true, an init-container with root permissions is launched to
# change the owner of the hostPath folder to the user defined in the
# security context
chown: true
emptyDir: {}

k8s/helm-2redis-ha/ (new file, 362 lines)
@ -0,0 +1,362 @@
## Configure resource requests and limits
## ref: http://kubernetes.io/docs/user-guide/compute-resources/
##
image:
repository: redis
tag: 5.0.6-alpine
pullPolicy: IfNotPresent
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
## This imagePullSecrets is only for redis images
##
imagePullSecrets: []
# - name: "image-pull-secret"
## replicas number for each component
replicas: 2
## Kubernetes priorityClass name for the redis-ha-server pod
# priorityClassName: ""
## Custom labels for the redis pod
labels: {}
## Pods Service Account
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
serviceAccount:
## Specifies whether a ServiceAccount should be created
##
create: true
## The name of the ServiceAccount to use.
## If not set and create is true, a name is generated using the redis-ha.fullname template
# name:
## Enables HAProxy for better load balancing / Sentinel master support. Automatically proxies to the Redis master.
## Recommended for externally exposed Redis clusters.
## ref: https://cbonte.github.io/haproxy-dconv/1.9/intro.html
haproxy:
enabled: false
# Enable if you want a dedicated port in haproxy for redis-slaves
readOnly:
enabled: false
port: 6380
replicas: 3
image:
repository: haproxy
tag: 2.0.4
pullPolicy: IfNotPresent
## Reference to one or more secrets to be used when pulling images
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
##
imagePullSecrets: []
# - name: "image-pull-secret"
annotations: {}
resources: {}
emptyDir: {}
## Enable sticky sessions to Redis nodes via HAProxy
  ## Very useful for long-lived connections, for example in the case of Sentry
stickyBalancing: false
## Kubernetes priorityClass name for the haproxy pod
# priorityClassName: ""
## Service type for HAProxy
##
service:
type: ClusterIP
loadBalancerIP:
annotations: {}
serviceAccount:
create: true
## Official HAProxy embedded prometheus metrics settings.
## Ref: https://github.com/haproxy/haproxy/tree/master/contrib/prometheus-exporter
##
metrics:
enabled: false
# prometheus port & scrape path
port: 9101
portName: exporter-port
scrapePath: /metrics
serviceMonitor:
# When set true then use a ServiceMonitor to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
      # Set path to redis-exporter telemetry-path
# telemetryPath: /metrics
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels: {}
# Set timeout for scrape
# timeout: 10s
init:
resources: {}
timeout:
connect: 4s
server: 30s
client: 30s
check: 2s
securityContext:
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
## Whether the haproxy pods should be forced to run on separate nodes.
hardAntiAffinity: false
## Additional affinities to add to the haproxy pods.
additionalAffinities: {}
## Override all other affinity settings for the haproxy pods with a string.
affinity: |
## Custom config-haproxy.cfg files used to override default settings. If this file is
## specified then the config-haproxy.cfg above will be ignored.
# customConfig: |-
# Define configuration here
## Place any additional configuration section to add to the default config-haproxy.cfg
# extraConfig: |-
# Define configuration here
## Role Based Access
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
##
rbac:
create: true
sysctlImage:
enabled: false
command: []
registry: docker.io
repository: busybox
tag: 1.31.1
pullPolicy: Always
mountHostSys: false
resources: {}
## Use an alternate scheduler, e.g. "stork".
## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
##
# schedulerName:
## Redis specific configuration options
redis:
port: 6379
  masterGroupName: "mymaster" # must match ^[\\w-\\.]+$ and can be templated
config:
## Additional redis conf options can be added below
## For all available options see http://download.redis.io/redis-stable/redis.conf
min-replicas-to-write: 1
min-replicas-max-lag: 5 # Value in seconds
maxmemory: "0" # Max memory to use for each redis instance. Default is unlimited.
maxmemory-policy: "volatile-lru" # Max memory policy to use for each redis instance. Default is volatile-lru.
# Determines if scheduled RDB backups are created. Default is false.
# Please note that local (on-disk) RDBs will still be created when re-syncing with a new slave. The only way to prevent this is to enable diskless replication.
save: "900 1"
# When enabled, directly sends the RDB over the wire to slaves, without using the disk as intermediate storage. Default is false.
repl-diskless-sync: "yes"
rdbcompression: "yes"
rdbchecksum: "yes"
## Custom redis.conf files used to override default settings. If this file is
## specified then the redis.config above will be ignored.
# customConfig: |-
# Define configuration here
resources: {}
# requests:
# memory: 200Mi
# cpu: 100m
# limits:
# memory: 700Mi
## Sentinel specific configuration options
sentinel:
port: 26379
quorum: 2
config:
## Additional sentinel conf options can be added below. Only options that
## are expressed in a format similar to 'sentinel xxx mymaster xxx' will
## be properly templated, except the maxclients option.
## For available options see http://download.redis.io/redis-stable/sentinel.conf
down-after-milliseconds: 10000
## Failover timeout value in milliseconds
failover-timeout: 180000
parallel-syncs: 5
maxclients: 10000
## Custom sentinel.conf files used to override default settings. If this file is
## specified then the sentinel.config above will be ignored.
# customConfig: |-
# Define configuration here
resources: {}
# requests:
# memory: 200Mi
# cpu: 100m
# limits:
# memory: 200Mi
securityContext:
runAsUser: 1000
fsGroup: 1000
runAsNonRoot: true
## Node labels, affinity, and tolerations for pod assignment
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
nodeSelector: {}
## Whether the Redis server pods should be forced to run on separate nodes.
## This is accomplished by setting their AntiAffinity with requiredDuringSchedulingIgnoredDuringExecution as opposed to preferred.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity-beta-feature
##
hardAntiAffinity: false
## Additional affinities to add to the Redis server pods.
##
## Example:
## nodeAffinity:
## preferredDuringSchedulingIgnoredDuringExecution:
## - weight: 50
## preference:
## matchExpressions:
## - key: spot
## operator: NotIn
## values:
## - "true"
##
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
##
additionalAffinities: {}
## Override all other affinity settings for the Redis server pods with a string.
##
## Example:
## affinity: |
## podAntiAffinity:
## requiredDuringSchedulingIgnoredDuringExecution:
## - labelSelector:
## matchLabels:
## app: {{ template "redis-ha.name" . }}
## release: {{ .Release.Name }}
## topologyKey: kubernetes.io/hostname
## preferredDuringSchedulingIgnoredDuringExecution:
## - weight: 100
## podAffinityTerm:
## labelSelector:
## matchLabels:
## app: {{ template "redis-ha.name" . }}
## release: {{ .Release.Name }}
## topologyKey: failure-domain.beta.kubernetes.io/zone
##
affinity: |
# Prometheus exporter specific configuration options
exporter:
enabled: false
image: oliver006/redis_exporter
tag: v1.3.2
pullPolicy: IfNotPresent
# prometheus port & scrape path
port: 9121
scrapePath: /metrics
# cpu/memory resource limits/requests
resources: {}
# Additional args for redis exporter
extraArgs: {}
# Used to mount a Lua script via a ConfigMap and use it for metrics collection
# script: |
# -- Example script copied from: https://github.com/oliver006/redis_exporter/blob/master/contrib/sample_collect_script.lua
# -- Example collect script for -script option
# -- This returns a Lua table with alternating keys and values.
# -- Both keys and values must be strings, similar to a HGETALL result.
# -- More info about Redis Lua scripting: https://redis.io/commands/eval
#
# local result = {}
#
# -- Add all keys and values from some hash in db 5
# redis.call("SELECT", 5)
# local r = redis.call("HGETALL", "some-hash-with-stats")
# if r ~= nil then
# for _,v in ipairs(r) do
# table.insert(result, v) -- alternating keys and values
# end
# end
#
# -- Set foo to 42
# table.insert(result, "foo")
# table.insert(result, "42") -- note the string, use tostring() if needed
#
# return result
serviceMonitor:
# When set true then use a ServiceMonitor to configure scraping
enabled: false
# Set the namespace the ServiceMonitor should be deployed
# namespace: monitoring
# Set how frequently Prometheus should scrape
# interval: 30s
# Set path to redis-exporter telemetry-path
# telemetryPath: /metrics
# Set labels for the ServiceMonitor, use this to define your scrape label for Prometheus Operator
# labels: {}
# Set timeout for scrape
# timeout: 10s
podDisruptionBudget: {}
# maxUnavailable: 1
# minAvailable: 1
## Configures redis with AUTH (requirepass & masterauth conf params)
auth: false
# redisPassword:
## Use existing secret containing key `authKey` (ignores redisPassword)
# existingSecret:
## Defines the key holding the redis password in existing secret.
authKey: auth
persistentVolume:
enabled: true
## redis-ha data Persistent Volume Storage Class
## If defined, storageClassName: <storageClass>
## If set to "-", storageClassName: "", which disables dynamic provisioning
## If undefined (the default) or set to null, no storageClassName spec is
## set, choosing the default provisioner. (gp2 on AWS, standard on
## GKE, AWS & OpenStack)
##
storageClass: "storage-nfs"
accessModes:
- ReadWriteOnce
size: 2Gi
annotations: {}
# reclaimPolicy per https://kubernetes.io/docs/concepts/storage/persistent-volumes/#reclaiming
reclaimPolicy: ""
init:
resources: {}
# To use a hostPath for data, set persistentVolume.enabled to false
# and define hostPath.path.
# Warning: this might overwrite existing folders on the host system!
hostPath:
## path is evaluated as template so placeholders are replaced
# path: "/data/{{ .Release.Name }}"
# if chown is true, an init-container with root permissions is launched to
# change the owner of the hostPath folder to the user defined in the
# security context
chown: true
emptyDir: {}
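The values above are the redis-ha chart defaults checked into this repo. If an environment needs to override them at install time, a small values file like the sketch below can be passed with `-f`; note that the Secret name `redis-ha-auth` is a hypothetical example and is not defined anywhere in this commit.

~~~yaml
# override-values.yaml — a minimal sketch, assuming a pre-created Secret `redis-ha-auth`
haproxy:
  enabled: true              # front the Redis/Sentinel pods with HAProxy, as described above
auth: true                   # enables requirepass/masterauth
existingSecret: redis-ha-auth  # hypothetical Secret; password is read from its `auth` key
authKey: auth
persistentVolume:
  storageClass: "storage-nfs"
  size: 2Gi
~~~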
k8s/k8s-10gen.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gen-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gen
  template:
    metadata:
      labels:
        app: ci4s-gen
    spec:
      containers:
      - name: ci4s-gen
        image: ci4s-gen:v1.0
        ports:
        - containerPort: 9202
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gen-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9202
    nodePort: 31211
    protocol: TCP
  selector:
    app: ci4s-gen
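The Deployments in this commit define only an image and a container port. The sketch below shows how resource requests/limits and a TCP readiness probe could be added to the ci4s-gen container; the resource figures and probe timings are illustrative assumptions, not values taken from this commit.

~~~yaml
# Illustrative sketch only — numbers below are assumptions.
spec:
  template:
    spec:
      containers:
      - name: ci4s-gen
        image: ci4s-gen:v1.0
        ports:
        - containerPort: 9202
        resources:
          requests:
            cpu: 250m
            memory: 512Mi
          limits:
            cpu: "1"
            memory: 1Gi
        readinessProbe:
          tcpSocket:
            port: 9202        # same port the Service targets
          initialDelaySeconds: 30
          periodSeconds: 10
~~~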
k8s/k8s-11visual.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-visual-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-visual
  template:
    metadata:
      labels:
        app: ci4s-visual
    spec:
      containers:
      - name: ci4s-visual
        image: ci4s-visual:v1.0
        ports:
        - containerPort: 9100
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-visual-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9100
    nodePort: 31212
    protocol: TCP
  selector:
    app: ci4s-visual
k8s/k8s-3nacos.yaml (new file)
@ -0,0 +1,62 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  namespace: ci4s-test
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nacos-ci4s
  template:
    metadata:
      labels:
        app: nacos-ci4s
    spec:
      containers:
      - name: nacos-ci4s
        image: nacos/nacos-server:v2.2.0
        env:
        - name: SPRING_DATASOURCE_PLATFORM
          value: mysql
        - name: MODE
          value: standalone
        - name: MYSQL_SERVICE_HOST
          value: mysql.ci4s-test.svc
        - name: MYSQL_SERVICE_PORT
          value: "3306"
        - name: MYSQL_SERVICE_DB_NAME
          value: nacos-ci4s-config
        - name: MYSQL_SERVICE_USER
          value: root
        - name: MYSQL_SERVICE_PASSWORD
          value: qazxc123456.
        ports:
        - containerPort: 8848
        - containerPort: 9848
      restartPolicy: Always
---
apiVersion: v1
kind: Service
metadata:
  namespace: ci4s-test
  name: nacos-ci4s
  labels:
    app: nacos-ci4s
spec:
  type: NodePort
  selector:
    app: nacos-ci4s
  ports:
  - port: 8848
    targetPort: 8848
    nodePort: 31203
    name: web
  - port: 9848
    targetPort: 9848
    nodePort: 31204
    name: podsa
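The Nacos Deployment above passes the MySQL password as a plain-text environment value. A common hardening step, sketched below under the assumption of a Secret named `nacos-mysql` (not part of this commit), is to store the password in a Secret and reference it with `secretKeyRef`.

~~~yaml
# Hypothetical Secret — the name `nacos-mysql` and key `password` are assumptions.
apiVersion: v1
kind: Secret
metadata:
  name: nacos-mysql
  namespace: ci4s-test
type: Opaque
stringData:
  password: qazxc123456.   # value currently hard-coded in the Deployment above
---
# Corresponding change in the nacos-ci4s container's env list:
# - name: MYSQL_SERVICE_PASSWORD
#   valueFrom:
#     secretKeyRef:
#       name: nacos-mysql
#       key: password
~~~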
k8s/k8s-4gateway.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-gateway-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-gateway
  template:
    metadata:
      labels:
        app: ci4s-gateway
    spec:
      containers:
      - name: ci4s-gateway
        image: ci4s-gateway:v1.0
        ports:
        - containerPort: 8082
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-gateway-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 8082
    nodePort: 31205
    protocol: TCP
  selector:
    app: ci4s-gateway
k8s/k8s-5auth.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-auth-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-auth
  template:
    metadata:
      labels:
        app: ci4s-auth
    spec:
      containers:
      - name: ci4s-auth
        image: ci4s-auth:v1.0
        ports:
        - containerPort: 9200
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-auth-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9200
    nodePort: 31206
    protocol: TCP
  selector:
    app: ci4s-auth
k8s/k8s-6system.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-system-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-system
  template:
    metadata:
      labels:
        app: ci4s-system
    spec:
      containers:
      - name: ci4s-system
        image: ci4s-system:v1.0
        ports:
        - containerPort: 9201
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-system-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9201
    nodePort: 31207
    protocol: TCP
  selector:
    app: ci4s-system
k8s/k8s-7management.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-management-platform-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-management-platform
  template:
    metadata:
      labels:
        app: ci4s-management-platform
    spec:
      containers:
      - name: ci4s-management-platform
        image: ci4s-managent:20240110
        ports:
        - containerPort: 9300
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-management-platform-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9300
    nodePort: 31208
    protocol: TCP
  selector:
    app: ci4s-management-platform
k8s/k8s-8file.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-file-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-file
  template:
    metadata:
      labels:
        app: ci4s-file
    spec:
      containers:
      - name: ci4s-file
        image: ci4s-file:v1.0
        ports:
        - containerPort: 9300
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-file-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9300
    nodePort: 31209
    protocol: TCP
  selector:
    app: ci4s-file
k8s/k8s-9job.yaml (new file)
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: ci4s-job-deployment
  namespace: ci4s-test
spec:
  replicas: 1
  selector:
    matchLabels:
      app: ci4s-job
  template:
    metadata:
      labels:
        app: ci4s-job
    spec:
      containers:
      - name: ci4s-job
        image: ci4s-job:v1.0
        ports:
        - containerPort: 9203
---
apiVersion: v1
kind: Service
metadata:
  name: ci4s-job-service
  namespace: ci4s-test
spec:
  type: NodePort
  ports:
  - port: 9203
    nodePort: 31210
    protocol: TCP
  selector:
    app: ci4s-job
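All of the manifests above target the ci4s-test namespace and can be applied individually with `kubectl apply -f`. As a convenience, a kustomization.yaml like the sketch below (not part of this commit) would let them be applied in one step with `kubectl apply -k k8s/`; only the files shown in this diff are listed, so any other manifests in the k8s/ directory would need to be added as well.

~~~yaml
# k8s/kustomization.yaml — sketch, listing only the manifests shown above
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: ci4s-test
resources:
- k8s-3nacos.yaml
- k8s-4gateway.yaml
- k8s-5auth.yaml
- k8s-6system.yaml
- k8s-7management.yaml
- k8s-8file.yaml
- k8s-9job.yaml
- k8s-10gen.yaml
- k8s-11visual.yaml
~~~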
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -28,12 +28,12 @@ spring:
       eager: true
       transport:
         # Sentinel dashboard address
-        dashboard: 127.0.0.1:8718
+        dashboard: sentinel-ci4s.ci4s-test.svc:8718
       # Persist Sentinel rules to Nacos
       datasource:
         ds1:
           nacos:
-            server-addr: 172.20.32.181:18848
+            server-addr: nacos-ci4s.ci4s-test.svc:8848
             dataId: sentinel-ruoyi-gateway
             groupId: DEFAULT_GROUP
             data-type: json
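After this change the gateway resolves Nacos and Sentinel through in-cluster DNS names instead of a fixed host IP: `nacos-ci4s.ci4s-test.svc` matches the Service defined in k8s/k8s-3nacos.yaml above, while the `sentinel-ci4s` Service is assumed to be defined elsewhere in the repo. A sketch of the resulting spring.cloud block (surrounding keys omitted):

~~~yaml
spring:
  cloud:
    nacos:
      discovery:
        server-addr: nacos-ci4s.ci4s-test.svc:8848
      config:
        server-addr: nacos-ci4s.ci4s-test.svc:8848
        file-extension: yml
    sentinel:
      transport:
        dashboard: sentinel-ci4s.ci4s-test.svc:8718
      datasource:
        ds1:
          nacos:
            server-addr: nacos-ci4s.ci4s-test.svc:8848
            dataId: sentinel-ruoyi-gateway
            groupId: DEFAULT_GROUP
            data-type: json
~~~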
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration
@ -14,10 +14,10 @@ spring:
     nacos:
       discovery:
         # Service registry address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
       config:
         # Configuration center address
-        server-addr: 172.20.32.181:18848
+        server-addr: nacos-ci4s.ci4s-test.svc:8848
         # Configuration file format
         file-extension: yml
         # Shared configuration