天枢环境搭建

This commit is contained in:
somunslotus 2023-07-27 09:06:19 +08:00
parent 5d08f27534
commit aaf63d86f1
6 changed files with 279 additions and 0 deletions

40
.env.production Normal file
View File

@ -0,0 +1,40 @@
ENV = 'production'
# 默认BASE URL, 后端服务地址
VUE_APP_BASE_API = 'http://173.15.15.70:32023'
# TODO: 目前后端连接位于 8960端口 k8s 服务,需要后端调整后再同步调整
# WebSocket 连接地址
VUE_APP_WS_API = 'ws://173.15.15.70:30960/ws'
# 数据管理
VUE_APP_DATA_API = ''
# 训练可视化
VUE_APP_VISUAL_API = ''
# 用户 minio 访问地址
VUE_APP_MINIO_API = 'http://173.15.15.70:30900/minio'
# atlas 服务,需要单独部署
VUE_APP_ATLAS_HOST = 'http://127.0.0.1'
# 医疗影像 DCM4CHEE 服务访问地址
# 部署文档参考http://docs.dubhe.ai/docs/setup/deploy-algorithm
VUE_APP_DCM_API = 'http://173.15.15.70:30088/dcm4chee/dcm4chee-arc/aets/DCM4CHEE_ADMIN'
# minIO 服务 IP
# 部署文档参考http://docs.dubhe.ai/docs/setup/deploy-minio
VUE_APP_MINIO_ENDPOINT = '173.15.15.70'
# minIO 服务 端口
VUE_APP_MINIO_PORT = '30900'
# 是否开启 SSL
VUE_APP_MINIO_USESSL = 'false'
# bucketName
VUE_APP_MINIO_BUCKETNAME = 'dubhe-prod'
# 文档链接
VUE_APP_DOCS_URL = 'http://docs.tianshu.org.cn/docs/'

View File

@ -1,2 +1,5 @@
# tianshu-env-build
### .env.production 前端启动配置文件
### configmap.yaml 做镜像的配置文件
### config文件 k8s配置文件部署运行operator的参考文件

19
config Normal file
View File

@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM2VENDQWRHZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQ0FYRFRJek1EUXhNakEyTkRNd04xb1lEekl4TWpNd016RTVNRFkwTXpBM1dqQVZNUk13RVFZRApWUVFERXdwcmRXSmxjbTVsZEdWek1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBCnRkN2dPWXk0VHhOQm9yMGZPUmZRd1BOQ1Z2Y2Z5U0VmVFZ5NmE3QjVuWFczZG9WRVI4MkRIQ3ZOMXFyNktMZnYKYnNSR2N0cjZwSTltNFc1eGFITzB0aEc1akU4OXhWak9UMDJPeUNMZnlYcHY1NkQwS3dYeDRudHgvUkdWejRwQQpMSnFmOUhQdXl2amxybFd2MlAvbmxHVlVHeERwUUFLN0FRczM1NGhmVy8xTkk4WHZpbnl4RUQyNTJ5aGVabGNLCm5YRzJmZ1JxbDgzSVJrWjhSNHZlR0lPcExMKzhENURlWGpNcnYxUE56SlNEamk4TG1KeEZ4YXJoOFpGV3BWS1QKSW8yV3dyTlNVVElMMHJ0by9VRlpEV1NCYkRTOGtIaXEycEJOVHZ4bUN1N2JSTXRmRExmMWxKd0ZKdkZ1NjFIYgpvVWZMZUkvc3RJQTFUQ0FRc2J0YnB3SURBUUFCbzBJd1FEQU9CZ05WSFE4QkFmOEVCQU1DQXFRd0R3WURWUjBUCkFRSC9CQVV3QXdFQi96QWRCZ05WSFE0RUZnUVUyVnVpRHEzVW9QWFB2aTdVUEVxVWY2bW4yUm93RFFZSktvWkkKaHZjTkFRRUxCUUFEZ2dFQkFFc0JvQTFNcWlpVmVvbk02a09CVk9uVmIxSkd3azVRTmkrdzNNQkRQd3M4US9WTQpVaFpocGVpRnZxeGxkdlpLY1pSRkNCeFBLSW5vejA3UlFpTXV6UCtxQjZBTGpPT21zWVFJeUxneWk4dzZLY0MwCllaRmxFL1RVZGJHWmg4YUdDVFZtMkhnTTJGcFRQMmtvajc4OXFUQ2N3WmsrTThaZUVGNGNkNDE4bk5IaDd1cW4KZVA0VkZJK0VrOUxrMm43MGdneXhFWHpFVVNJdGNtdHlkcS9vSUI4TTRidm9ZaWZhelcvQW1XYmVSd1lhOTR6MQo2QkdzSVhKcGpYN1BHSG1FR1owK3liZmlXWFpxS2RURys3NHkvQ3RUTU53ZVhlS1ZkUktJTVp6VGswRVRxUWkyCnNDQ3FWRGhYVi82M0hSRlhjcmZkdDc4WnRrUzVFOFg3RTZLSVRMND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://90.1.0.70:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lJTDFjcFhPWjViWU13RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWdGdzB5TXpBME1USXdOalF6TURkYUdBOHlNVEl6TURNeE9UQTJORE14TUZvdwpOREVYTUJVR0ExVUVDaE1PYzNsemRHVnRPbTFoYzNSbGNuTXhHVEFYQmdOVkJBTVRFR3QxWW1WeWJtVjBaWE10CllXUnRhVzR3Z2dFaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRRHBXZ29SbmRnbkwwY2MKSERiQ0FVMGs0QlRCa3YzNWlKTWpENU85Z1hLR1hNTFBBYndubENiTnUxSjBqOCtLbVFjSGRFVDJDei82czVESApUQTlNdDhzc3BPWWRVNW1XUkk3NWVwVmp5WTBBSWJNQWJzU09NSEhIM2pOdjU1QmtZeTE1SmlDRGk1K1laNFppClBEd3NpZ1RUb3UySXZMVnBVbS9FUTNTUURoVXhSbXJ2OUNxMWpiWW5EaVQzZEtOZkcxOS9lUHR1V3c1by9ralIKbXNSSVZGTG9zdWsrZlRnOFg1Mnd6YWxuZTJ5VHdTL3AxZ0dvUHNFc2Fic0hwcjVDTndYNVBDZWlNMlhWRk9tcQpEVGx3ZUNwQ1A3U3AwNW9rQThDcFFmUmNLNzVDamV1eldDZnZqRk9pSGpoeXpCRFhHZFBvNWVaVWozUmdheW9uCjZNejdUNDdaQWdNQkFBR2pTREJHTUE0R0ExVWREd0VCL3dRRUF3SUZvREFUQmdOVkhTVUVEREFLQmdnckJnRUYKQlFjREFqQWZCZ05WSFNNRUdEQVdnQlRaVzZJT3JkU2c5YysrTHRROFNwUi9xYWZaR2pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFsQlNwbHRPcWhaLzl2YzN0dEN1UVMyRHFSWTRrVjNHa25sbDZqQ2ZNMC9iWSswNlVmODFaCnI2b1FjYk8raEthT1kzUkRzeHExZjhvL29oSjNQY2xNMHNKTTQyZmptcHordGtFWnlMcDJaVnhSdXZzTGcrSlYKeGROVHFPcHZIaFQzMXg5Z1NtSzU2elBxbkxYWWVWUnAxN2pwdFBwVE01aDYySXNXL1VxQzVhb2hXNFZhdGg1SApXeFhUc2s3UUZyVUpyWUtXK0RHYWd5Z1gxZWJOUE9GOVFCcXh6eXpld2JVUmhFV3kzSmtKSG03S0dsK0o3VFh5Cjg2SEY2eDJ1d2EzNDREMnBEY05tcEVEMGRnckhxVU9mL1BjSDJHS0FzcjVmOXJlRlJmSHJEb1o0blVBK04zUk8KOTd0NGJaNmx3KzNacGR0cFNUREdlZ0t6ekphUVFzYU9TZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBNlZvS0VaM1lKeTlISEJ3MndnRk5KT0FVd1pMOStZaVRJdytUdllGeWhsekN6d0c4Cko1UW16YnRTZEkvUGlwa0hCM1JFOWdzLytyT1F4MHdQVExmTExLVG1IVk9abGtTTytYcVZZOG1OQUNHekFHN0UKampCeHg5NHpiK2VRWkdNdGVTWWdnNHVmbUdlR1lqdzhMSW9FMDZMdGlMeTFhVkp2eEVOMGtBNFZNVVpxNy9RcQp0WTIySnc0azkzU2pYeHRmZjNqN2Jsc09hUDVJMFpyRVNGUlM2TExwUG4wNFBGK2RzTTJwWjN0c2s4RXY2ZFlCCnFEN0JMR203QjZhK1FqY0YrVHdub2pObDFSVHBxZzA1Y0hncVFqKzBxZE9hSkFQQXFVSDBYQ3UrUW8zcnMxZ24KNzR4VG9oNDRjc3dRMXhuVDZPWG1WSTkwWUdzcUorak0rMCtPMlFJREFRQUJBb0lCQVFDOU1XbytkRHAvTXA2MQoxTTQwcHpkenNWWkN4N213NmlGWmFOVEE3Y1g3MkJOK0lJcjdqK3VTRzlFemZqR256b0JiYzlKQnBrZGVMTjR5Cko2R3o4c1FBMDBWYSs1L25ySGc3SHo4bDQ1QzFUWjFDdVl3Vk9JUFhwM2tPdnJGY0l1a0ljM2dqRHozN0k5eFoKb3pxcVpkQ2FBbHdSNCtZMW1VZlMyVm9WUGNYQzV5ZElIbmlsTGtkRzVXRHZ3K2dFRVBVYjRvc3BwQUZKWHEyawpGQzRuakdMcmJHTXZVRDBFdjRXaHM3VWtydHUyR3BJTTd1S3N0bnArSkVNN2R5RjJBVEZvMzRmZFNrbFdiVlM5Ck13b1JIWGdCeUx4eWxvVnZkS2ExeTc0VWpUN1FHQ0w3Vlgxek1GZTlXRVpXRitUY3RKL3lhS1ZTSVdub1lFejgKNUQrbEd0MEJBb0dCQVAzd25iOVlYQ3lhdWNadWF3N0tTemZrRFhqUFMyc2JqQTFHTjdZRHIyWFhpN2kxNmVCcAplWmVDVkNsTU5DKzZILzN2aGtadkNxK0FjaVNjV0RFWlRLRWMxbGU5ZHY5RDIyaG1YbHhRek52eFgzMWtkbWZGCjdIR2lCakxMMWxqSlhTTlpvNU0wMFR4L3BzWjlHeVhqOFEvbUZBbUVDVlljMURlUzh4TGdiejA1QW9HQkFPcysKcWx1M2xUTFI2OVMzMWFpa3EwTlpkR2lndG51U2VqMXJlVkt4aVVPZDhoR3lDVmpBOVhWNzVac0c5Z3RVN2EzTQo5ZkVVZnJVTFdBTDFZSCtkc3VGRTA0cVpLQlA4R09WL28yYVJpSUZwM09BL2lOWmE1ck12anFUbWphNnVGUGF1Cm1yMGNJL1VkNktURnhDTDc4S0VFc090QUJyYUtSSGtGSk5mMitINmhBb0dBUlA1dy9hSDJPVEE0dFU4ZXl6L3YKTzlvOFJzeWJneGhTN3ZCYkJwcnR0cy9mYUdvVWh5SmhKT2NHRXNwZmkzQVliUmY4OVhvSi96RDQwaldnU0liRgpLU2o2bTVBZ21xNUhHSkJucHRGVk41VFhDdXlXdHc4eGRKWSs5T0lvOUtxUklaK2pMOFd6NjZvTEYvYTV2NHUwCjNVcjN1cllqQzBCOThETiswMndqRFJFQ2dZQVNtcmNnWDhsMTVuSXZOWWE1T3FTc3crTlg3dlhmbWZyKzVvUUcKelRHYzNsM3BsbG4wNGZYSG44L056ZVFoaTByc3h0dmdnQTF4UmY5azVJRUlOckdSN1Q2SnBJa3Q0MjhZbjFxTgozdzZicFlLcFhYejE3dVIxNFZuM29xQXI0MnFYQThJTEdMR2pUZ1U3R3NUak8yZVJaR3lWZmsvSm1lL0dYbWpHCmg2cXBZUUtCZ1FEcHVsZWxORWpZYzR0MFB
xK2w1RGk5MHVmdDNmbU05cmlHTk96WHQ2cGlHNGNSNzFsbDMzYk4KS2ZRUFBEZng2U1RpQzVYNVQvdmFLUUhhSlhzbCtmYVdRRlZ1ZTRGUWtnRzBMRm5sM0VjL1ZBMWFEUXIzc1Z4VApuTVRBckQzbzU5RHh2WDRvRzROS0k1OWh4Ympwa0VxaHhHTzV4TVlDWmh3aUcvdDFjek1lMnc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=

84
configmap.yaml Normal file
View File

@ -0,0 +1,84 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: dubhe-cm
namespace: dubhe-system
data:
# define csi name
# If Dubhe is deployed in one node, then we can config storage.classname as hostpath.
# That means all data will be located in local path defined in "storage.path".
storage.classname: "hostpath"
# Dubhe-Storage
storage.size: "50Gi"
# Local data path when storage.classname is "hostpath"
storage.path: "/home/common/tianshu"
# Harbor config (填入你自己的镜像仓库地址及账号密码,支持阿里云镜像仓库)
harbor.server: "harbor.advanced.com:18443"
harbor.username: "admin"
harbor.password: "Harbor12345"
# 将下面IP替换成你自己服务器的IP地址(内网)
prometheus.server: "10.97.61.244:9090"
elasticsearch.server: "173.15.15.70"
backend.terminal.host: "173.15.15.70"
# If defined, web service will visit node ip instead of services defined in k8s.
#假如您的服务器在公网,请把下面的ip换成公网的IP,如果只有内网IP即填写内网IP
web.proxy.host: "173.15.15.70"
# Dubhe code to compile
code.repo: "https://gitee.com/zhijiangtianshu/Dubhe.git"
code.branch: "master"
# Environment config
dubhe.environment: "dubhe-prod"
# Mysql config
mysql.db.server: "mysql.dubhe-system.svc.cluster.local"
mysql.port: "3306"
mysql.root.password: "root"
# Nacos config
nacos.server: "nacos.dubhe-system.svc.cluster.local"
nacos.port: "8848"
nacos.mysql.db.name: "nacos_dev"
nacos.mysql.user: "nacos"
nacos.mysql.password: "nacos"
# Minio config
minio.server: "minio.dubhe-system.svc.cluster.local"
minio.port: "9000"
minio.accesskey: "admin"
minio.secretkey: "abcdefg123456"
# Redis config
redis.server: "redis.dubhe-system.svc.cluster.local"
redis.password: "Tianshu@123"
# Backend config
backend.server: "backend.dubhe-system.svc.cluster.local"
backend.password: "123456"
backend.k8s.server: "backend-k8s.dubhe-system.svc.cluster.local"
backend.gateway.server: "backend-gateway.dubhe-system.svc.cluster.local"
backend.visual.server: "backend-visual.dubhe-system.svc.cluster.local"
backend.model-measure.server: "backend-model-measure.dubhe-system.svc.cluster.local"
backend.model-convert.server: "backend-model-converter.dubhe-system.svc.cluster.local"
backend.data.server: "backend-data.dubhe-system.svc.cluster.local"
backend.data.dcm.server: "backend-data-dcm.dubhe-system.svc.cluster.local"
backend.dcm4chee.server: "backend-dcm4chee.dubhe-system.svc.cluster.local"
web.server: "web.dubhe-system.svc.cluster.local"
grafana.server: "grafana.kube-system.svc.cluster.local:30006"
# All image can be configured and redeploy.
# Image can be rebuild by "./dubhectl build-image", please refer to "./dubhectl help"
image.imgprocess.algorithm: "registry.cn-hangzhou.aliyuncs.com/enlin/imgprocess:v1"
image.ofrecord.algorithm: "registry.cn-hangzhou.aliyuncs.com/enlin/ofrecord:v1"
image.videosample.algorithm: "registry.cn-hangzhou.aliyuncs.com/enlin/videosample:v1"
image.backend: "registry.cn-hangzhou.aliyuncs.com/enlin/dubhe-java:v1"
image.backend-init: "backend-init:v1"
image.backend-visual: "registry.cn-hangzhou.aliyuncs.com/enlin/visual-server:v1"
image.converter: "registry.cn-hangzhou.aliyuncs.com/enlin/model-converter:v1"
image.measuring: "registry.cn-hangzhou.aliyuncs.com/enlin/model-measuring:v1"
image.optimize: "registry.cn-hangzhou.aliyuncs.com/enlin/oneflow-gpu:base"
image.tadl: "registry.cn-hangzhou.aliyuncs.com/enlin/automl-nas-pytorch17:v1"
image.minio: "minio/minio:RELEASE.2020-04-28T23-56-56Z"
image.minio-init: "minio-init:v1"
image.mysql: "nacos/nacos-mysql:5.7"
image.mysql-init: "mysql-init:v1"
image.nacos: "nacos/nacos-server:2.0.3"
image.nacos-init: "nacos-init:v1"
image.redis: "redis:5.0.7"
image.storage-init: "registry.cn-hangzhou.aliyuncs.com/enlin/storage-init:v1"
# You need build web images separatel. Use command "./dubhectl build-image web"
image.web: "web:v1"

View File

@ -0,0 +1,125 @@
### 天枢后台服务部署
1. 下载一键部署脚本
* 下载部署脚本<br/>
`git clone https://gitee.com/zhijiangtianshu/dubhe-deploy.git`
2. 修改参数配置
* 2.1 进入代码目录<br/>
`cd dubhe-deploy/`
* 2.2 修改 configmap.yaml 文件<br/>
`vim configmap.yaml`
```# 单机部署,请选择 storage.classname 为 "hostpath",并通过 storage.path 指定具体存储目录
# 如果是 K8s 集群部署,则依赖 StorageClass,请将 storage.classname 填写为具体的 StorageClass 名字;单机部署时通过 storage.path 指定存储目录(例:/data)
# 修改镜像仓库仓库 及 对应服务器IP
---
data:
# define csi name
# If Dubhe is deployed in one node, then we can config storage.classname as hostpath.
# That means all data will be located in local path defined in "storage.path".
storage.classname: "hostpath"
# Dubhe-Storage
storage.size: "50Gi"
# Local data path when storage.classname is "hostpath"
storage.path: "/data"
# Harbor config (填入你自己的镜像仓库地址及账号密码,支持阿里云镜像仓库)
harbor.server: "docker.hub"
harbor.username: "admin"
harbor.password: "Harbor12345"
# 将下面IP替换成你自己服务器的IP地址(内网)
# k8s prometheus 服务内网ip和端口
prometheus.server: "127.0.0.1:30003"
# 填服务器的ip地址
elasticsearch.server: "127.0.0.1"
#填服务器的ip地址
backend.terminal.host: "127.0.0.1"
# If defined, web service will visit node ip instead of services defined in k8s.
#假如您的服务器在公网,请把下面的ip换成公网的IP,如果只有内网IP即填写内网IP
web.proxy.host: "127.0.0.1"
```
* 修改证书参数
```
# admin.conf 如果不存在进行创建
vim /etc/kubernetes/admin.conf
# 修改参数 server 将域名修改为 IP, 如果服务有多个ip, 一个ip不行请尝试换另外一个
server: https://{服务器IP}:6443
sudo cp /root/.kube/config /etc/kubernetes/admin.conf
```
3. 将kubeconfig文件添加到kubeconfig.yaml<br/>
```
cd dubhe-deploy/
cat /etc/kubernetes/admin.conf > template/kubeconfig.yaml
```
4. 执行构建镜像操作<br/>
构建耗时与网络状况有关,请耐心等待;若构建失败,可使用 `ps -ef | grep dubhe | grep -v grep | awk '{print $2}' | xargs kill -9` 终止进程后重试
下面的命令后台镜像可以打包好,前端的镜像打包会出错,不用管,后面会单独做前端镜像
```markdown
./dubhectl build-image all
```
5. 安装dubhe服务
```markdown
./dubhectl install
```
6. 修改前端配置文件并部署前端
* 6.1 修改配置文件
```
cd dubhe-deploy
vim Dubhe/webapp/.env.production
ENV = 'production'
# 默认BASE URL, 后端服务backend-gateway的ip:port, 使用 kubectl get svc -n dubhe-system | grep backend-gateway 查看
# 构建出的yaml没有暴露出nodeport, 需要自己修改yaml文件暴露nodeport才可以访问
VUE_APP_BASE_API = 'http://173.15.15.70:32023'
# TODO: 目前后端连接位于 8960端口 k8s 服务,需要后端调整后再同步调整
# WebSocket 连接地址
VUE_APP_WS_API = 'ws://173.15.15.70:30960/ws'
# 数据管理
VUE_APP_DATA_API = ''
# 训练可视化
VUE_APP_VISUAL_API = ''
# 用户 minio 访问地址
VUE_APP_MINIO_API = 'http://173.15.15.70:30900/minio'
# atlas 服务,需要单独部署
VUE_APP_ATLAS_HOST = 'http://127.0.0.1'
# 医疗影像 DCM4CHEE 服务访问地址
# 部署文档参考http://docs.dubhe.ai/docs/setup/deploy-algorithm
VUE_APP_DCM_API = 'http://173.15.15.70:30088/dcm4chee/dcm4chee-arc/aets/DCM4CHEE_ADMIN'
# minIO 服务 IP 服务器的ip地址
# 部署文档参考http://docs.dubhe.ai/docs/setup/deploy-minio
VUE_APP_MINIO_ENDPOINT = '173.15.15.70'
# minIO 服务 端口
VUE_APP_MINIO_PORT = '30900'
# 是否开启 SSL
VUE_APP_MINIO_USESSL = 'false'
# bucketName
VUE_APP_MINIO_BUCKETNAME = 'dubhe-prod'
# 文档链接
VUE_APP_DOCS_URL = 'http://docs.tianshu.org.cn/docs/'
```
* 6.2 前端代码编译
```
cd dubhe-deploy
# 编译前端代码
docker run --rm --entrypoint="" -v $PWD/Dubhe:/Dubhe node:12.22.4 /bin/bash -c 'cd /Dubhe/webapp/ && npm install && npm run build:prod'
```
* 6.3 前端镜像打包
```
cd dubhe-deploy
cp -rf Dubhe/webapp/dist images/web/Dubhe/
cd images/web
docker build -t web:v1 .
```
* 6.4 前端部署
```
./dubhectl reinstall web
```

8
部署训练operator.md Normal file
View File

@ -0,0 +1,8 @@
# 部署训练operator
详情见文档https://docs.tianshu.org.cn/docs/setup/deploy-distribute-train-operator
需要修改的地方:
yaml文件里面
```
value: "distribute-train-operator-1.0.jar --k8s.kubeconfig=/root/config --spring.redis.host=<rdis-ip> --spring.redis.password=<redis-password> --spring.redis.port=<redis-port>"
--k8s.kubeconfig 这个配置不能直接使用 /root/.kube/config 文件(其中的 apiserver.cluster.local 域名不能解析);需要复制一份文件挂载进容器,并将 apiserver.cluster.local 域名改为实际 IP
```