Compare commits

...

114 Commits

Author SHA1 Message Date
opengauss-bot 601f3d2828 !1066 Fix memory leak after enabling slow SQL
Merge pull request !1066 from 杨皓/1.1.0
2021-07-01 02:44:58 +00:00
yanghao 232c049678 fix memory leak in slow query 2021-07-01 09:56:36 +08:00
opengauss-bot 4b77ee957a !1051 Fix deadlock during database shutdown
Merge pull request !1051 from ashnah/my1.1.0
2021-06-29 02:29:34 +00:00
ashnah b9c9a2684a add RESUME_INTERRUPTS before throw 2021-06-28 09:09:27 +08:00
ashnah ae3ee01cf3 modify build error 2021-06-25 17:45:56 +08:00
ashnah 65a2624385 use holdinterrupt instead of block signal 2021-06-25 17:31:38 +08:00
ashnah 383af73b89 Fix the problem of deadlock when shutting down 2021-06-25 11:18:30 +08:00
opengauss-bot 6e883d3ffa !1044 Change the global variable relfilenode_skey to session level to fix a logical replication issue
Merge pull request !1044 from maxiang/1.1.0
2021-06-19 15:35:54 +08:00
maxiang 23f0e5a74c update src/include/knl/knl_session.h. 2021-06-18 16:52:44 +08:00
Li Bingchen c06b45eb5e session 2021-06-18 15:38:53 +08:00
opengauss-bot f80e4b5c06 !948 Fix the "current dirty page list head smaller than redo lsn" error during createRestartpoint on the standby
Merge pull request !948 from cchen676/new1.1
2021-05-12 14:15:55 +08:00
cchen676 a37e711c3a fixes the "current dirty page list head smaller than redo lsn" error during createRestartpoint on the standby 2021-05-06 20:29:16 +08:00
opengauss-bot 7c4a8857c8 !848 fix bug during upgrade
Merge pull request !848 from zhengxue/dev_1_1_0
2021-03-27 16:34:58 +08:00
shirley_zhengx 504a857c47 fix bug during upgrade 2021-03-23 10:24:50 +08:00
opengauss-bot 1cbac06f20 !830 Fix upgrade issues
Merge pull request !830 from 邓旭玥/efix_upgrade_1.1.0_0317
2021-03-19 17:57:23 +08:00
dengxuyue c49d2f3c06 fix LLT after dual removal 2021-03-18 07:52:37 +08:00
dengxuyue 09594e3393 Added pg_catalog.gs_session_memory_context/gs_thread_memory_context 2021-03-17 21:17:33 +08:00
dengxuyue 4b242aa2a3 Removed duplicated table_skewness 2021-03-17 21:14:40 +08:00
dengxuyue e0164f9573 Removed duplicated report_application_error 2021-03-17 21:09:37 +08:00
dengxuyue b3cd15c986 Fix SQL syntax error in upgrade script 2021-03-17 21:07:49 +08:00
dengxuyue d62fbaadfa Removed dual view 2021-03-17 20:58:51 +08:00
opengauss-bot fffb972f4a !753 add SIGUSR1 handler for wal receiver writer thread
Merge pull request !753 from 熊小军/1.1.0_tmp
2021-03-03 16:36:00 +08:00
opengauss-bot 98f79ce2f5 !754 Merge fix for standby I/O write amplification
Merge pull request !754 from 熊小军/1.1.0_tmp_new
2021-03-03 16:35:41 +08:00
MuJinqiang 72d4ada21a fix core restartpoint 2021-03-03 14:59:31 +08:00
MuJinqiang 4b995e942c Standby I/O and autovacuum optimization 2021-03-03 14:59:12 +08:00
LiHeng 58a659fd5e add SIGUSR1 handler for wal receiver writer thread
The SIGUSR1 handler will wake up the wal receiver thread once the wal receiver
thread has invoked SetLatch().
2021-03-03 11:31:35 +08:00
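A minimal, self-contained sketch of the mechanism this commit describes: a SIGUSR1 handler that sets a latch-style flag so a sleeping thread wakes up. Names such as walrcv_latch_is_set are illustrative assumptions, not the openGauss symbols.
```c
#include <signal.h>
#include <unistd.h>

/* Illustrative stand-in for the WAL receiver writer's latch. */
static volatile sig_atomic_t walrcv_latch_is_set = 0;

/* SIGUSR1 handler: only sets the flag; the sleeping thread observes it and
 * wakes up, mirroring the SetLatch()-based wakeup the commit adds. */
static void wal_receiver_sigusr1_handler(int signo)
{
    (void) signo;
    walrcv_latch_is_set = 1;
}

int main(void)
{
    struct sigaction sa = {0};
    sa.sa_handler = wal_receiver_sigusr1_handler;
    sigaction(SIGUSR1, &sa, NULL);

    /* Sleep until SIGUSR1 "sets the latch". */
    while (!walrcv_latch_is_set)
        pause();                /* returns when a signal is delivered */
    return 0;
}
```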
opengauss-bot e130d900b3 !709 fix bug in gs_index_advisor of 1.10
Merge pull request !709 from scarbor_fair/11ad
2021-02-08 17:36:19 +08:00
yupeng d749c3ac10 backport gs_index_advisor patch to 1.10 2021-02-08 16:05:08 +08:00
opengauss-bot 49b024f022 !680 Backport bugfixes from the master branch to 1.1.0
Merge pull request !680 from TotaJ/tmp
2021-02-08 10:43:14 +08:00
opengauss-bot 2d38c78bb8 !695 Kernel adaptation changes to support online scale-out
Merge pull request !695 from 薛蒙恩/1.1.0
2021-02-08 09:39:22 +08:00
opengauss-bot bf2dcfa9e7 !699 Add a blacklist for AlterSystemSet and fix test cases
Merge pull request !699 from gentle_hu/1.1.0
2021-02-08 09:18:00 +08:00
gentle_hu e3accb2ab7 consider mix case and fix test case of alter system set 2021-02-06 23:32:51 +08:00
gentle_hu 448132fb60 add black list for sysadmin in alter system set 2021-02-06 23:31:47 +08:00
薛蒙恩 0c4d74d1da Add function description 2021-02-05 16:30:18 +08:00
TotaJ 030c075a74 Fix memory allocation failure in batch mode. 2021-02-05 16:08:07 +08:00
opengauss-bot d5daa40497 !690 modify gs_ctl --help information & fix gsql core bug when running '\e'
Merge pull request !690 from chenxiaobin/110_bugfix
2021-02-05 15:41:35 +08:00
薛蒙恩 86841daa26 Optimize code 2021-02-05 14:33:00 +08:00
opengauss-bot d3469153ca !694 [WDR] Add instance efficiency and other related statistics to the WDR report
Merge pull request !694 from 宋清怡/wdr
2021-02-05 14:26:10 +08:00
薛蒙恩 9f21d6a58a Fix typos 2021-02-05 11:45:50 +08:00
xue_meng_en 120e056a94 Kernel adaptation changes to support online scale-out 2021-02-05 10:55:52 +08:00
sqyyeah 8f2d05da48 change cpu_to_elapsd to effective_cpu 2021-02-05 10:28:37 +08:00
sqyyeah eaeccaeaad add 4 instance efficiency percentages to wdr 2021-02-05 10:28:24 +08:00
TotaJ 8c186e5103 test 2021-02-04 16:57:56 +08:00
chenxiaobin 918ecfe9b0 fix gsql core bug when running '\e' 2021-02-04 14:48:12 +08:00
chenxiaobin 541d5b3355 modify gs_ctl --help information 2021-02-04 14:48:03 +08:00
TotaJ 1c7b127f4c Fix create procedure failure with multiple parameters. 2021-02-02 19:22:50 +08:00
opengauss-bot 5aab3b6668 !679 fix the issue that 'pg_recvlogical --plugin=XXX' runs abnormally
Merge pull request !679 from cchen676/new1.1
2021-02-02 17:46:43 +08:00
chenc 9f058b9c11 fix the issue that 'pg_recvlogical --plugin=XXX' runs abnormally
Fix the problem that pg_recvlogical --plugin=XXX cannot be executed.
Reproduction: run the pg_recvlogical --plugin=test_encoding command.
Root cause: the plugin parameter is declared as static const char* in the code, so it cannot be freed when the parameter is initialized.
Solution: change the variable type of the plugin parameter to static char*.
2021-02-02 16:50:27 +08:00
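For illustration, a minimal sketch of the change described above: dropping const from the static plugin pointer so the old value can be freed when --plugin overrides it. The default value and helper name are assumptions, not the exact pg_recvlogical source.
```c
#include <stdlib.h>
#include <string.h>

/* Before (sketch): static const char *plugin = "test_decoding";
 * A const pointer to a string literal can never be passed to free(),
 * so re-initializing it for --plugin=XXX fails.
 * After (sketch): a plain heap-owned pointer that can be replaced. */
static char *plugin = NULL;

static void set_plugin_option(const char *value)
{
    free(plugin);              /* safe: plugin is NULL or heap-allocated */
    plugin = strdup(value);    /* take ownership of the new option value */
}

int main(int argc, char **argv)
{
    if (argc > 1)
        set_plugin_option(argv[1]);   /* e.g. "test_encoding" */
    free(plugin);
    return 0;
}
```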
opengauss-bot 0dd12ebd85 !677 fix bug about gs_guc reload for the parameter without assigning value
Merge pull request !677 from shirley_zhengx/1_1_0_dev
2021-02-02 15:40:35 +08:00
zhengxue c020db56d2 fix bug 2021-02-02 14:59:43 +08:00
zhengxue 1a68c72912 fix bug about gs_guc_reload for the parameter without assigning value 2021-02-02 14:53:55 +08:00
opengauss-bot 22fcd86dc3 !655 gs_probackup: add passwd from interaction to connect info
Merge pull request !655 from 吴岳川/gs_probackup_1.1.0
2021-02-02 11:39:44 +08:00
opengauss-bot 53f56bea93 !663 use memcpy_s to copy the return time_str
Merge pull request !663 from 吴岳川/walTimeout
2021-02-02 11:36:46 +08:00
opengauss-bot e3b1f129dc !669 set WalSndCaughtUp to thread context variable
Merge pull request !669 from chenxiaobin/110
2021-02-02 11:35:43 +08:00
opengauss-bot 127f1ae94d !660 Add help information about REFERENCES of column_constraint.
Merge pull request !660 from Yuejia/stable_1_1_0
2021-02-02 11:10:02 +08:00
opengauss-bot 091c5dec87 !664 emit error message for incorrect GAUSSLOG
Merge pull request !664 from liang/1.1.0_log
2021-02-02 11:08:49 +08:00
opengauss-bot 05f96d74ff !670 Primary/standby scenario: when log space is full the database hangs; change the log-writing thread's endless-loop write logic so that it exits when a log write returns failure instead of looping forever
Merge pull request !670 from liuchun/110
2021-02-02 11:07:11 +08:00
opengauss-bot d3b9b3944c !659 Backport bugfixes from the master branch to the 1.1.0 branch
Merge pull request !659 from TotaJ/110/remerge
2021-02-02 10:59:17 +08:00
l00280231 aa8922601a Primary/standby scenario: when log space is full the database hangs; change the log-writing thread's endless-loop write logic so that it exits when a log write returns failure instead of looping forever 2021-01-30 17:33:26 +08:00
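A minimal sketch of the loop change this commit describes: when a log write fails (for example because log space is full), report the failure instead of retrying forever, so the logging thread no longer hangs the database. All names here are hypothetical, not the openGauss logging code.
```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the real write path; returns false when the
 * log filesystem is full or the write otherwise fails. */
static bool try_write_log(FILE *logfile, const char *buf, size_t len)
{
    return fwrite(buf, 1, len, logfile) == len;
}

/* Old behaviour (sketch): while (!try_write_log(...)) ;  -- spin forever.
 * New behaviour (sketch): return the failure so the calling thread can
 * stop writing instead of spinning when space never frees up. */
static bool write_server_log(FILE *logfile, const char *buf, size_t len)
{
    if (!try_write_log(logfile, buf, len))
        return false;           /* give up instead of retrying endlessly */
    return true;
}

int main(void)
{
    const char msg[] = "log line\n";
    return write_server_log(stderr, msg, sizeof(msg) - 1) ? 0 : 1;
}
```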
chenxiaobin 4af9fb96cc set WalSndCaughtUp to thread context variable 2021-01-30 15:17:58 +08:00
liang_-123 43bd119bb4 emit error message for incorrect GAUSSLOG 2021-01-29 14:42:17 +08:00
opengauss-bot b91f261cd6 !614 Fix residual bug from unifying the superuser definition
Merge pull request !614 from Cross-罗/1.1.0
2021-01-29 09:08:44 +08:00
syj 54ecb9fd1f Add help information about REFERENCES of column_constraint. 2021-01-28 18:39:32 +08:00
TotaJ f75d0e5fe4 Release childslot when ARCH thread exit. 2021-01-28 17:20:04 +08:00
TotaJ 3d4621061c Fix create schema with grant all. 2021-01-28 17:19:47 +08:00
TotaJ 1d4815bd7c Validate schema name when create. 2021-01-28 17:19:28 +08:00
opengauss-bot 5a9f118223 !618 [Lightweight PR]: Fix archive alarm messages flooding the log
Merge pull request !618 from TotaJ/N/A
2021-01-28 16:08:08 +08:00
opengauss-bot 0d81a9c599 !622 [SQL time prediction] Zero out sensitive information before freeing it
Merge pull request !622 from 宋清怡/1.1.0
2021-01-28 15:56:42 +08:00
opengauss-bot 71a435d6ff !612 Fix compilation warnings
Merge pull request !612 from 周雄佳/davidzhou2
2021-01-28 15:54:31 +08:00
wuyuechuan abc09a49f6 gs_probackup: add passwd from interaction to connect info 2021-01-26 11:25:15 +08:00
opengauss-bot 381b8434e8 !636 fix xlog could not cleanup with cascade standby
Merge pull request !636 from LiHeng/1.1.0
2021-01-20 11:30:03 +08:00
LiHeng 5c65202cb6 fix xlog could not cleanup with cascade standby 2021-01-17 19:19:44 +08:00
wuyuechuan bbec57c7d7 use memcpy_s to copy the return time_str 2021-01-15 10:01:39 +08:00
songqingyi 989b195d54 AI SAFETY: clean cert info before free 2021-01-14 08:51:44 +08:00
TotaJ 37f0942754 Fix archive alarm messages flooding the log 2021-01-13 10:54:09 +08:00
luozihao 5c4957d054 Fix residual bug from unifying the superuser definition 2021-01-13 10:49:07 +08:00
luozihao 1b214a4799 Merge tag '1.1.0' of https://gitee.com/opengauss/openGauss-server into 1.1.0
1.1.0
2021-01-13 10:42:02 +08:00
opengauss-bot 392c043824 !588 Disabled gs_ktool and removed kmc
Merge pull request !588 from dengxuyue/master
2020-12-31 19:55:42 +08:00
dengxuyue c76d64f14f Disabled gs_ktool and removed kmc 2020-12-31 19:42:37 +08:00
opengauss-bot c681fe0ab4 !587 fix bug guc
Merge pull request !587 from 赵文浩/master
2020-12-31 16:58:23 +08:00
zhaowenhao cd24cc83cf fix bug guc 2020-12-31 16:08:54 +08:00
opengauss-bot b4fd9bf06d !583 Fix missing THR_LOCAL on some static variables
Merge pull request !583 from TotaJ/bugfix/thr_local
2020-12-31 12:45:11 +08:00
TotaJ d87b5257cc Fix bug of missing THR_LOCAL symbol in some files. 2020-12-31 11:42:35 +08:00
opengauss-bot d7dd756a95 !581 Fix errors in the README environment configuration example
Merge pull request !581 from 赵文浩/master
2020-12-31 10:39:01 +08:00
zhaowenhao 8f19af80f3 Update the README environment configuration example 2020-12-31 09:40:34 +08:00
opengauss-bot ba9edb158b !580 Update README information
Merge pull request !580 from 赵文浩/master
2020-12-31 08:21:45 +08:00
zhaowenhao f3d16740b1 Update README 2020-12-31 02:35:52 +08:00
opengauss-bot 4736e1329c !579 Merged commit of community issue fixes
Merge pull request !579 from 赵文浩/master
2020-12-30 23:56:46 +08:00
opengauss-bot dfc6cba5c2 !578 Second sync of source code
Merge pull request !578 from dengxuyue/master
2020-12-30 23:53:05 +08:00
zhaowenhao c6026da3f5 bug fix: pg_get_tabledef() supports displaying the DDL of global temporary tables 2020-12-30 23:35:04 +08:00
zhaowenhao 067e71b6a3 bug fix: pg_get_tabledef() supports displaying the DDL of global temporary tables 2020-12-30 23:26:27 +08:00
xue_meng_en 82f5687df9 Prohibit users from adding tables to system schemas 2020-12-30 23:00:22 +08:00
TotaJ 99a9dcbaf8 Add log for checkpoint. 2020-12-30 22:58:58 +08:00
xiong_xjun 96c73e99d8 modify most_avaible_sync's meaning: the primary is allowed to stop waiting for standby WAL sync when synchronous standby WAL senders are disconnected. 2020-12-30 22:57:22 +08:00
douxin 1dc22834b1 pg_get_tabledef() supports displaying the DDL of global temporary tables 2020-12-30 22:56:18 +08:00
douxin 482df78c44 [bugfix] Fix failure to connect to the database when the libpq.so and gsql versions do not match 2020-12-30 22:54:41 +08:00
syj c9edc3a053 Fix bug of invalid username/password when "SET ROLE role_name PASSWORD 'passwd'" is used in loop. 2020-12-30 22:53:55 +08:00
syj 0f5b0f4bbf Limit the max string length of enum type to NAMEDATALEN-1(64-1=63) 2020-12-30 22:53:39 +08:00
syj bff12037f4 support the usage of "ALTER TYPE name OWNER TO { CURRENT_USER | SESSION_USER }" 2020-12-30 22:53:22 +08:00
@luo_zihao5524 bc6ad8a271 Remove parameter 2020-12-30 22:52:28 +08:00
l00584793 a8c75f8830 bug 2020-12-30 22:51:30 +08:00
gongsiyi150 a0d7b33693 Change the version number to 1.1.0 and update the Chinese README.MD 2020-12-30 22:47:42 +08:00
zhengxue b53aaa8cf2 bug 2020-12-30 22:45:37 +08:00
dengxuyue 0607e9952a rectify jdk version 2020-12-30 22:42:06 +08:00
zhengxue c23967c7f8 fix guc_parameter_value_range and alter system the same parameter 2020-12-30 22:37:52 +08:00
zhengxue 2943edef86 fix bug 2020-12-30 22:34:37 +08:00
chenxiaobin ca44783855 modify default value of max_wal_senders and synchronous_standby_names 2020-12-30 22:05:53 +08:00
zhouxiongjia d687ed5243 Set BASE_GLOBAL_VARIABLE_NUM to 224 for the success of fastcheck 2020-12-30 22:04:33 +08:00
zhouxiongjia 8b4b150031 add "disable_memory_protect" in cluster_guc.conf 2020-12-30 22:04:33 +08:00
liang_-123 22251c5e31 fix unrecognized node type 355 for outer join 2020-12-30 22:02:31 +08:00
zhang_xubo 91b36a7c5a Backport: fix the bug that actual shared memory usage exceeds expectations when the shared buffer is too large (https://gitee.com/opengauss/openGauss-server/pulls/441) 2020-12-30 21:59:46 +08:00
zhang_xubo 15c3b05737 Backport: fix missing IncrBgWriter thread information in the pg_os_threads view (https://gitee.com/opengauss/openGauss-server/pulls/401) 2020-12-30 21:59:46 +08:00
dengxuyue 582f65f9f5 merge master 2020-12-30 21:30:30 +08:00
dengxuyue fb20dc830f 2nd round patches for openGauss 1.1.0 2020-12-30 21:29:17 +08:00
195 changed files with 5330 additions and 42273 deletions

View File

@ -67,7 +67,7 @@ When the Xlog is transferred to the standby node, the standby node flushs the Xl
**MOT Engine (beta release)**
The Memory-Optimized Tables (MOT) storage engine is a transactional rowstore optimized for many-core and large memory and delivering extreme OLTP performance and high resources utilization. With data and indexes stored totally in-memory, a NUMA-aware design, algorithms that eliminate lock and latch contention and query native compilation (JIT), MOT provides low latency data access and more efficient transaction execution. See [MOT Engine documentation](https://opengauss.org/en/docs/1.0.1/docs/Developerguide/mot.html).
The Memory-Optimized Tables (MOT) storage engine is a transactional rowstore optimized for many-core and large memory and delivering extreme OLTP performance and high resources utilization. With data and indexes stored totally in-memory, a NUMA-aware design, algorithms that eliminate lock and latch contention and query native compilation (JIT), MOT provides low latency data access and more efficient transaction execution. See [MOT Engine documentation](https://opengauss.org/en/docs/1.1.0/docs/Developerguide/mot.html).
**Security**
@ -397,7 +397,7 @@ https://opengauss.org/zh/
From the following website, you can obtain the binarylibs we have compiled. Please unzip it and rename to **binarylibs** after you download.
https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.0.1/openGauss-third_party_binarylibs.tar.gz
https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.1.0/openGauss-third_party_binarylibs.tar.gz
Now we have completed openGauss code. For example, we store it in following directories.
@ -572,7 +572,7 @@ Installation package packaging log: **./package/make_package.log**
## Quick Start
See the [Quick Start](https://opengauss.org/en/docs/1.0.1/docs/Quickstart/Quickstart.html).
See the [Quick Start](https://opengauss.org/en/docs/1.1.0/docs/Quickstart/Quickstart.html).
## Docs
@ -596,7 +596,7 @@ Welcome contributions. See our [Contributor](https://opengauss.org/en/contributi
## Release Notes
For the release notes, see our [RELEASE](https://opengauss.org/en/docs/1.0.1/docs/Releasenotes/Releasenotes.html).
For the release notes, see our [RELEASE](https://opengauss.org/en/docs/1.1.0/docs/Releasenotes/Releasenotes.html).
## License

638
README.md
View File

@ -1,299 +1,320 @@
![openGauss Logo](doc/openGauss-logo.png "openGauss logo")
============================================================
- [What Is openGauss?](#What Is openGauss?)
- [Installation](#Installation)
- [Creating a Configuration File](#Creating a Configuration File)
- [Initializing the Installation Environment](#Initializing the Installation Environment)
- [Executing Installation](#Executing Installation)
[Uninstalling the openGauss](#Uninstalling the openGauss)
- [Compilation](#Compilation)
- [Overview](#Overview)
- [OS and Software Dependency Requirements](# OS and Software Dependency Requirements)
- [Downloading openGauss](# Downloading openGauss)
- [Compiling Third-Party Software](#Compiling Third-Party Software)
- [Compiling by build.sh](#Compiling by build.sh)
- [Compiling by Command](#Compiling by Command)
- [Compiling the Installation Package](#Compiling the Installation Package)
- [Quick Start](#Quick Start)
- [Docs](#Docs)
- [Community](#Community)
- [Governance](#Governance)
- [Communication](#Communication)
- [Contribution](#Contribution)
- [Release Notes](#Release Notes)
- [License](#License)
[English](./README.en.md) | 简体中文
## What Is openGauss?
openGauss is an open source relational database management system. It has multi-core high-performance, full link security, intelligent operation and maintenance for enterprise features. openGauss, which is early originated from PostgreSQL, integrates Huawei's core experience in database field for many years. It optimizes the architecture, transaction, storage engine, optimizer and ARM architecture. At the meantime, openGauss as a global database open source community, aims to further advance the development and enrichment of the database software/hardware application ecosystem.
<img src="doc/openGauss-architecture.png" alt="opengauss Architecture" width="600"/>
- [什么是openGauss](#什么是openGauss)
- [安装](#安装)
- [创建配置文件](#创建配置文件)
- [初始化安装环境](#初始化安装环境)
- [执行安装](#执行安装)
- [卸载openGauss](#卸载openGauss)
- [编译](#编译)
- [概述](#概述)
- [操作系统和软件依赖要求](#操作系统和软件依赖要求)
- [下载openGauss](#下载openGauss)
- [编译第三方软件](#编译第三方软件)
- [使用build.sh编译](#使用build编译)
- [使用命令编译](#使用命令编译)
- [编译安装包](#编译安装包)
- [快速入门](#快速入门)
- [文档](#文档)
- [社区](#社区)
- [治理](#治理)
- [交流](#交流)
- [贡献](#贡献)
- [发行说明](#发行说明)
- [许可证](许可证)
**High Performance**
## 什么是openGauss
openGauss breaks through the bottleneck of multi-core CPU, 2-way Kunpeng 128 core 1.5 million TPMC.
openGauss是一款开源的关系型数据库管理系统它具有多核高性能、全链路安全性、智能运维等企业级特性。
openGauss内核早期源自开源数据库PostgreSQL融合了华为在数据库领域多年的内核经验在架构、事务、存储引擎、优化器及ARM架构上进行了适配与优化。作为一个开源数据库期望与广泛的开发者共同构建一个多元化技术的开源数据库社区。
**Partitions**
<img src="doc/openGauss-architecture.png" alt="openGauss架构" width="600"/>
Divide key data structure shared by internal threads into different partitions to reduce lock access conflicts. For example, CLOG uses partition optimization to solve the bottleneck of ClogControlLock.
**高性能**
**NUMA Structure**
openGauss突破了多核CPU的瓶颈实现两路鲲鹏128核150万tpmC内存优化表MOT引擎达350万tpmC。
Malloc key data structures help reduce cross CPU access. The global PGPROC array is divided into several parts according to the number of NUMA nodes, solving the bottleneck of ProcArrayLock.
**数据分区**
**Binding Cores**
内部线程共享的关键数据结构进行数据分区减少加锁访问冲突。比如CLOG就采用分区优化解决ClogControlLock锁瓶颈。
Bind NIC interrupts to different cores and bind cores to different background threads to avoid performance instability due to thread migration between cores.
**NUMA化内核数据结构**
**ARM Optimization**
关键数据结构NUMA化分配减少跨CPU访问。比如全局PGPROC数组按照NUMA Node的数目分为多份分别在对应NUMA Node上申请内存。解决ProcArrayLock锁瓶颈。
Optimize atomic operations based on ARM platform LSE instructions, impletmenting efficient operation of critical sections.
**绑核优化**
**SQL Bypass**
把网络中断绑核和后台业务线程绑核区分开,避免运行线程在核间迁移造成的性能不稳定。
Optimize SQL execution process through SQL bypass, reducing CPU execution overhead.
**ARM指令优化**
**High Reliability**
结合ARM平台的原子操作lse进行优化实现关键互斥变量原子高效操作。
Under normal service loads, the RTO is less than 10 seconds, reducing the service interruption time caused by node failure.
**SQL BY PASS**
**Parallel Recovery**
通过SQL BY PASS优化SQL执行流程简化CPU执行开销。
When the Xlog is transferred to the standby node, the standby node flushs the Xlog to storage medium. At the mean time, the Xlog is sent to the redo recovery dispatch thread. The dispatch thread sends the Xlog to multiple parallel recovery threads to replay. Ensure that the redo speed of the standby node keeps up with the generation speed of the primary host. The standby node is ready in real time, which can be promoted to primary instantly.
**高可靠**
**Security**
正常业务负载情况下RTO小于10秒降低节点故障导致的业务不可用时间。
openGauss supports account management, account authentication, account locking, password complexity check, privilege management and verification, transmission encryption, and operation audit, protecting service data security.
**并行恢复**
**Easy Operation and Maintenance**
主机日志传输到备机时备机日志落盘的同时发送给重做恢复分发线程分发线程根据日志类型和日志操作的数据页发给多个并行恢复线程进行日志重做保证备机的重做速度跟上主机日志的产生速度。这样备机实时处于ready状态从而实现瞬间故障切换。
openGauss integrates AI algorithms into databases, reducing the burden of database maintenance.
- **SQL Prediction**
**MOT引擎Beta发布**
openGauss supports SQL execution time prediction based on collected historical performance data.
内存优化表MOT存储引擎是一个专为多核大内存优化的存储引擎具有极高的联机事务处理OLTP性能和资源利用率。MOT的数据和索引完全存储在内存中通过NUMA感知执行算法消除闩锁争用以及查询JIT本地编译提供低时延数据访问及高效事务执行。更多请参考[MOT引擎文档](https://opengauss.org/zh/docs/1.1.0/docs/Developerguide/%E5%86%85%E5%AD%98%E8%A1%A8%E7%89%B9%E6%80%A7.html)。
- **SQL Diagnoser **
**安全**
openGauss supports the diagnoser for SQL execution statements, finding out slow queries in advance..
openGauss支持账号管理账号认证口令复杂度检查账号锁定权限管理和校验传输加密操作
审计等全方位的数据库安全能力,保护业务满足安全要求。
- **Automatical Parameter Adjustment**
**易运维**
openGauss supports automatically adjusting database parameters, reducing the cost and time of parameter adjustment.
openGauss将AI算法集成到数据库中减少数据库维护的负担。
## Installation
- **SQL预测**
### Creating a Configuration File
openGauss根据收集的历史性能数据进行编码和基于深度学习的训练及预测支持SQL执行时间预测。
Before installing the openGauss, you need to create a configuration file. The configuration file in the XML format contains the information about the server where the openGauss is deployed, installation path, IP address, and port number. This file is used to guide how to deploy the openGauss. You need to configure the configuration file according to the actual deployment requirements.
- **SQL诊断器**
The following describes how to create an XML configuration file based on the deployment solution of one primary node and one standby node.
The information in bold is only an example. You can replace it as required. Each line of information is commented out.
openGauss支持SQL执行语句的诊断器提前发现慢查询。
- **参数自动调整**
openGauss通过机器学习方法自动调整数据库参数提高调参效率降低正确调参成本。
## 安装
### 创建配置文件
在安装openGauss之前需要创建clusterconfig.xml配置文件。XML文件包含部署openGauss的服务器信息、安装路径、IP地址以及端口号等。用于告知openGauss如何部署。用户需根据不同场配置对应的XML文件。
下面以一主一备的部署方案为例说明如何创建XML配置文件。
以下value取值信息仅为示例可自行替换。每行信息均有注释进行说明。
```
<?xml version="1.0" encoding="utf-8"?>
<?xml version="1.0" encoding="UTF-8"?>
<ROOT>
<!-- Overall information -->
<CLUSTER>
<!-- Database name -->
<PARAM name="clusterName" value="Cluster_template" />
<!-- Database node name (hostname) -->
<PARAM name="nodeNames" value="node1_hostname,node2_hostname"/>
<!-- Database installation path -->
<PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" />
<!-- Log directory -->
<PARAM name="gaussdbLogPath" value="/var/log/omm" />
<!-- Temporary file directory -->
<PARAM name="tmpMppdbPath" value="/opt/huawei/tmp"/>
<!-- Database tool directory -->
<PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" />
<!--Directory of the core file of the database -->
<PARAM name="corePath" value="/opt/huawei/corefile"/>
<!-- Node IP addresses corresponding to the node names, respectively -->
<PARAM name="backIp1s" value="192.168.0.1,192.168.0.2"/>
</CLUSTER>
<!-- Information about node deployment on each server -->
<DEVICELIST>
<!-- Information about the node deployment on node1 -->
<DEVICE sn="node1_hostname">
<!-- Host name of node1 -->
<PARAM name="name" value="node1_hostname"/>
<!-- AZ where node1 is located and AZ priority -->
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- IP address of node1. If only one NIC is available for the server, set backIP1 and sshIP1 to the same IP address. -->
<PARAM name="backIp1" value="192.168.0.1"/>
<PARAM name="sshIp1" value="192.168.0.1"/>
<!--DBnode-->
<PARAM name="dataNum" value="1"/>
<!-- Database node port number -->
<PARAM name="dataPortBase" value="15400"/>
<!-- Data directory on the primary database node and data directories of standby nodes -->
<PARAM name="dataNode1" value="/opt/huawei/install/data/dn,node2_hostname,/opt/huawei/install/data/dn"/>
<!-- Number of nodes for which the synchronization mode is set on the database node -->
<PARAM name="dataNode1_syncNum" value="0"/>
</DEVICE>
<!-- Information about the node deployment on node2 -->
<DEVICE sn="node2_hostname">
<!-- Host name of node2 -->
<PARAM name="name" value="node2_hostname"/>
<!-- AZ where node1 is located and AZ priority -->
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- IP address of node1. If only one NIC is available for the server, set backIP1 and sshIP1 to the same IP address. -->
<PARAM name="backIp1" value="192.168.0.2"/>
<PARAM name="sshIp1" value="192.168.0.2"/>
</DEVICE>
</DEVICELIST>
<!-- openGauss整体信息 -->
<CLUSTER>
<!-- 数据库名称 -->
<PARAM name="clusterName" value="dbCluster" />
<!-- 数据库节点名称(hostname) -->
<PARAM name="nodeNames" value="node1,node2" />
<!-- 节点IP与nodeNames一一对应 -->
<PARAM name="backIp1s" value="192.168.0.11,192.168.0.12"/>
<!-- 数据库安装目录-->
<PARAM name="gaussdbAppPath" value="/opt/huawei/install/app" />
<!-- 日志目录-->
<PARAM name="gaussdbLogPath" value="/var/log/omm" />
<!-- 临时文件目录-->
<PARAM name="tmpMppdbPath" value="/opt/huawei/tmp"/>
<!--数据库工具目录-->
<PARAM name="gaussdbToolPath" value="/opt/huawei/install/om" />
<!--数据库core文件目录-->
<PARAM name="corePath" value="/opt/huawei/corefile"/>
<!-- openGauss类型此处示例为单机类型“single-inst”表示单机一主多备部署形态-->
<PARAM name="clusterType" value="single-inst"/>
</CLUSTER>
<!-- 每台服务器上的节点部署信息 -->
<DEVICELIST>
<!-- node1上的节点部署信息 -->
<DEVICE sn="1000001">
<!-- node1的hostname -->
<PARAM name="name" value="node1"/>
<!-- node1所在的AZ及AZ优先级 -->
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- 如果服务器只有一个网卡可用将backIP1和sshIP1配置成同一个IP -->
<PARAM name="backIp1" value="192.168.0.11"/>
<PARAM name="sshIp1" value="192.168.0.11"/>
<!--dbnode-->
<PARAM name="dataNum" value="1"/>
<!--DBnode端口号-->
<PARAM name="dataPortBase" value="26000"/>
<!--DBnode主节点上数据目录及备机数据目录-->
<PARAM name="dataNode1" value="/opt/huawei/install/data/db1,node2,/opt/huawei/install/data/db1"/>
<!--DBnode节点上设定同步模式的节点数-->
<PARAM name="dataNode1_syncNum" value="0"/>
</DEVICE>
<!-- node2上的节点部署信息其中“name”的值配置为主机名称hostname -->
<DEVICE sn="1000002">
<PARAM name="name" value="node2"/>
<PARAM name="azName" value="AZ1"/>
<PARAM name="azPriority" value="1"/>
<!-- 如果服务器只有一个网卡可用将backIP1和sshIP1配置成同一个IP -->
<PARAM name="backIp1" value="192.168.0.12"/>
<PARAM name="sshIp1" value="192.168.0.12"/>
</DEVICE>
</DEVICELIST>
</ROOT>
```
### Initializing the Installation Environment
### 初始化安装环境
After the openGauss configuration file is created, you need to run the gs_preinstall script to prepare the account and environment so that you can perform openGauss installation and management operations with the minimum permission, ensuring system security.
创建完openGauss配置文件后在执行安装前为了后续能以最小权限进行安装及openGauss管理操作保证系统安全性需要运行安装前置脚本gs_preinstall准备好安装用户及环境。
**Precautions**
安装前置脚本gs_preinstall可以协助用户自动完成如下的安装环境准备工作
- You must check the upper-layer directory permissions to ensure that the user has the read, write, and execution permissions on the installation package and configuration file directory.
- The mapping between each host name and IP address in the XML configuration file must be correct.
- Only user root is authorized to run the gs_preinstall command.
- 自动设置Linux内核参数以达到提高服务器负载能力的目的。这些参数直接影响数据库系统的运行状态请仅在确认必要时调整。
- 自动将openGauss配置文件、安装包拷贝到openGauss主机的相同目录下。
- openGauss安装用户、用户组不存在时自动创建安装用户以及用户组。
- 读取openGauss配置文件中的目录信息并创建将目录权限授予安装用户。
**Procedure**
**注意事项**
1. Log in to any host where the openGauss is to be installed as user root and create a directory for storing the installation package as planned.
- 用户需要检查上层目录权限,保证安装用户对安装包和配置文件目录读写执行的权限。
- xml文件中各主机的名称与IP映射配置正确。
- 只能使用root用户执行gs_preinstall命令。
**操作步骤**
1.以root用户登录待安装openGauss的任意主机并按规划创建存放安装包的目录。
```
mkdir -p /opt/software/openGauss
chmod 755 -R /opt/software
mkdir -p /opt/software/openGauss
chmod 755 -R /opt/software
```
> **NOTE:**
> **说明**
>
> - Do not create the directory in the home directory or subdirectory of any openGauss user because you may lack permissions for such directories.
> - The openGauss user must have the read and write permissions on the /opt/software/openGauss directory.
> - 不建议把安装包的存放目录规划到openGauss用户的家目录或其子目录下可能导致权限问题。
> - openGauss用户须具有/opt/software/openGauss目录的读写权限。
2. The release package is used as an example. Upload the installation package openGauss_x.x.x_PACKAGES_RELEASE.tar.gz and the configuration file clusterconfig.xml to the directory created in the previous step.
2.将安装包“openGauss-x.x.x-openEULER-64bit.tar.gz”和配置文件“clusterconfig.xml”都上传至上一步所创建的目录中。
3. Go to the directory for storing the uploaded software package and decompress the package.
3.在安装包所在的目录下解压安装包openGauss-x.x.x-openEULER-64bit.tar.gz。安装包解压后在/opt/software/openGauss目录下自动生成script目录。在script目录下生成gs_preinstall等OM工具脚本。
```
cd /opt/software/openGauss
tar -zxvf openGauss-x.x.x-openEULER-64bit.tar.gz
```
4.进入工具脚本目录。
```
cd /opt/software/openGauss
tar -zxvf openGauss_x.x.x_PACKAGES_RELEASE.tar.gz
cd /opt/software/openGauss/script
```
4. Decompress the openGauss-x.x.x-openEULER-64bit.tar.gz package.
5.如果是openEuler的操作系统执行如下命令打开performance.sh文件用#注释sysctl -w vm.min_free_kbytes=112640 &> /dev/null键入“ESC”键进入指令模式执行**:wq**保存并退出修改。
```
vi /etc/profile.d/performance.sh
```
6.为确保openssl版本正确执行预安装前请加载安装包中lib库。执行命令如下其中*{packagePath}*为用户安装包放置的路径,本示例中为/opt/software/openGauss。
```
tar -zxvf openGauss-x.x.x-openEULER-64bit.tar.gz
```
After the installation package is decompressed, the script subdirectory is automatically generated in /opt/software/openGauss. OM tool scripts such as gs_preinstall are generated in the script subdirectory.
5. Go to the directory for storing tool scripts.
```
cd /opt/software/openGauss/script
```
6. To ensure that the OpenSSL version is correct, load the lib library in the installation package before preinstallation. Run the following command. {packagePath} indicates the path where the installation package is stored. In this example, the path is /opt/software/openGauss.
```
export LD_LIBRARY_PATH={packagePath}/script/gspylib/clib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH={packagePath}/script/gspylib/clib:$LD_LIBRARY_PATH
```
7. To ensure successful installation, check whether the values of hostname and /etc/hostname are the same. During preinstallation, the host name is checked.
7.为确保成功安装,检查 hostname 与 /etc/hostname 是否一致。预安装过程中会对hostname进行检查。
8. Execute gs_preinstall to configure the installation environment. If the shared environment is used, add the --sep-env-file=ENVFILE parameter to separate environment variables to avoid mutual impact with other users. The environment variable separation file path is specified by users.
Execute gs_preinstall in interactive mode. During the execution, the mutual trust between users root and between clusteropenGauss users is automatically established.
8.使用gs_preinstall准备好安装环境。若为共用环境需加入--sep-env-file=ENVFILE参数分离环境变量避免与其他用户相互影响ENVFILE为用户自行指定的环境变量分离文件的路径。
执行如下命令即采用交互模式执行前置并在执行过程中自动创建root用户互信和openGauss用户互信
```
./gs_preinstall -U omm -G dbgrp -X /opt/software/ openGauss/clusterconfig.xml
./gs_preinstall -U omm -G dbgrp -X /opt/software/openGauss/clusterconfig.xml
```
omm is the database administrator (also the OS user running the openGauss), dbgrp is the group name of the OS user running the openGauss, and /opt/software/ openGauss/clusterconfig.xml is the path of the openGauss configuration file. During the execution, you need to determine whether to establish mutual trust as prompted and enter the password of user root or the openGauss user.
omm为数据库管理员用户即运行openGauss的操作系统用户,dbgrp为运行openGauss的操作系统用户的组名/opt/software/ openGauss/clusterconfig.xml为openGauss的配置文件路径。执行过程中需要根据提示选择建立互信并输入root或openGauss用户的密码。
### Executing Installation
### 执行安装
After the openGauss installation environment is prepared by executing the pre-installation script, deploy openGauss based on the installation process.
**Prerequisites**
执行前置脚本准备好openGauss安装环境之后按照启动安装过程部署openGauss。
- You have successfully executed the gs_preinstall script.
- All the server OSs and networks are functioning properly.
- You have checked that the locale parameter for each server is set to the same value.
**前提条件**
**Procedure**
- 已成功执行前置脚本gs_preinstall。
- 所有服务器操作系统和网络均正常运行。
- 用户需确保各个主机上的locale保持一致。
1. (Optional) Check whether the installation package and openGauss configuration file exist in the planned directories. If no such package or file exists, perform the preinstallation again..
**操作步骤**
2. Log in to any host of the openGauss and switch to the omm user.
1.可选检查安装包和openGauss配置文件在规划路径下是否已存在如果没有重新执行预安装确保预安装成功再执行以下步骤。
2.登录到openGauss的主机并切换到omm用户。
```
su - omm
su - omm
```
> **NOTE:**
> **说明**
>
> - omm indicates the user specified by the -U parameter in the gs_preinstall script.
> - You need to execute the gs_install script as user omm specified in the gs_preinstall script. Otherwise, an execution error will be reported.
> - omm为gs_preinstall脚本中-U参数指定的用户。
> - 以上述omm用户执行gs_install脚本。否则会报执行错误。
3. Use gs_install to install the openGauss. If the openGauss is installed in environment variable separation mode, run the source command to obtain the environment variable separation file ENVFILE.
3.使用gs_install安装openGauss。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。
```
gs_install -X /opt/software/ openGauss/clusterconfig.xml
gs_install -X /opt/software/openGauss/clusterconfig.xml
```
The password must meet the following complexity requirements:
/opt/software/openGauss/script/clusterconfig.xml为openGauss配置文件的路径。在执行过程中用户需根据提示输入数据库的密码密码具有一定的复杂度为保证用户正常使用该数据库请记住输入的数据库密码。
- Contain at least eight characters.
- Cannot be the same as the username, the current password (ALTER), or the current password in an inverted sequence.
- Contain at least three of the following: uppercase characters (A to Z), lowercase characters (a to z), digits (0 to 9), and other characters (limited to ~!@#$%^&*()-_=+\|[{}];:,<.>/?).
密码复杂度要求:
4. After the installation is successful, manually delete the trust between users root on the host, that is, delete the mutual trust file on each openGauss database node.
- 长度至少8个字符。
- 不能和用户名、当前密码ALTER、当前密码的倒序相同。
- 以下至少包含三类大写字母A~Z、小写字母a~z、数字0~9、其他字符仅限~!@#$%^&*()-_=+\|[{}];:,<.>/?)。
4.安装执行成功之后需要手动删除主机root用户的互信即删除openGauss数据库各节点上的互信文件。
```
rm rf ~/.ssh
rm -rf ~/.ssh
```
### Uninstalling the openGauss
### 卸载openGauss
The process of uninstalling the openGauss includes uninstalling the openGauss and clearing the environment of the openGauss server.
卸载openGauss的过程包括卸载openGauss和清理openGauss服务器环境。
##### **Executing Uninstallation**
#### **执行卸载**
The openGauss provides an uninstallation script to help users uninstall the openGauss.
openGauss提供了卸载脚本帮助用户卸载openGauss。
**Procedure**
**操作步骤**
1. Log in as the OS user omm to the host where the CN is located.
1.以操作系统用户omm登录数据库主节点。
2. Execute the gs_uninstall script to uninstall the database cluster.
2.使用gs_uninstall卸载openGauss。
```
gs_uninstall --delete-data
gs_uninstall --delete-data
```
Alternatively, execute uninstallation on each openGauss node.
或者在openGauss中每个节点执行本地卸载。
```
gs_uninstall --delete-data -L
gs_uninstall --delete-data -L
```
##### **Deleting openGauss Configurations**
#### **一键式环境清理**
After the openGauss is uninstalled, execute the gs_postuninstall script to delete configurations from all servers in the openGauss if you do not need to re-deploy the openGauss using these configurations. These configurations are made by the gs_preinstall script.
**Prerequisites**
在openGauss卸载完成后如果不需要在环境上重新部署openGauss可以运行脚本gs_postuninstall对openGauss服务器上环境信息做清理。openGauss环境清理是对环境准备脚本gs_preinstall所做设置的清理。
**前提条件**
- The openGauss uninstallation task has been successfully executed.
- User root is trustworthy and available.
- Only user root is authorized to run the gs_postuninstall command.
- openGauss卸载执行成功。
- root用户互信可用。
- 只能使用root用户执行gs_postuninstall命令。
**Procedure**
**操作步骤**
1. Log in to the openGauss server as user root.
1.以root用户登录openGauss服务器。
2. Run the ssh Host name command to check whether mutual trust has been successfully established. Then, enter exit.
2.查看互信是否建成功,可以互相执行**ssh 主机名**。输入exit退出。
```
plat1:~ # ssh plat2
@ -304,268 +325,283 @@ After the openGauss is uninstalled, execute the gs_postuninstall script to delet
plat1:~ #
```
3. Go to the following path:
3.进入script路径下。
```
cd /opt/software/openGauss/script
```
4. Run the gs_postuninstall command to clear the environment. If the openGauss is installed in environment variable separation mode, run the source command to obtain the environment variable separation file ENVFILE.
4.使用gs_postuninstall进行清理。若为环境变量分离的模式安装的集群需要source环境变量分离文件ENVFILE。
```
./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group
```
Alternatively, locally use the gs_postuninstall tool to clear each openGauss node.
或者在openGauss中每个节点执行本地后置清理。
```
./gs_postuninstall -U omm -X /opt/software/openGauss/clusterconfig.xml --delete-user --delete-group -L
```
omm is the name of the OS user who runs the openGauss, and the path of the openGauss configuration file is /opt/software/openGauss/clusterconfig.xml.
If the cluster is installed in environment variable separation mode, delete the environment variable separation parameter ENV obtained by running the source command.
omm为运行openGauss的操作系统用户名/opt/software/openGauss/clusterconfig.xml为openGauss配置文件路径。
```
unset MPPDB_ENV_SEPARATE_PATH
```
若为环境变量分离的模式安装的集群需删除之前source的环境变量分离的env参数unset MPPDB_ENV_SEPARATE_PATH
5. Delete the mutual trust between the users root on each openGauss database node.
5.删除各openGauss数据库节点root用户互信。
## Compilation
## 编译
### Overview
### 概述
To compile openGauss, you need two components: openGauss-server and binarylibs.
编译openGauss需要openGauss-server和binarylibs两个组件。
- openGauss-server: main code of openGauss. You can obtain it from the open source community.
- openGauss-serveropenGauss的主要代码。可以从开源社区获取。
- binarylibs: third party open source software that openGauss depends on. You can obtain it by compiling the openGauss-third_party code or downloading from the open source community on which we have compiled a copy and uploaded it . The first method will be introduced in the following chapter.
- binarylibsopenGauss依赖的第三方开源软件你可以直接编译openGauss-third_party代码获取也可以从开源社区下载已经编译好的并上传的一个副本。
Before you compile openGaussplease check the OS and software dependency requirements.
在编译openGauss之前请检查操作系统和软件依赖要求。
You can compile openGauss by build.sh, a one-click shell tool, which we will introduce later, or compile by command. Also, an installation package is produced by build.sh.
openGauss可以通过一键式shell工具build.sh进行编译也可以通过命令进行编译。安装包由build.sh生成。
### OS and Software Dependency Requirements
### 操作系统和软件依赖要求
The following OSs are supported:
openGauss支持以下操作系统
- CentOS 7.6 (x86 architecture)
- CentOS 7.6x86架构
- openEuler-20.03-LTS (aarch64 architecture)
- openEuler-20.03-LTSaarch64架构
The following table lists the software requirements for compiling the openGauss.
以下表格列举了编译openGauss的软件要求。
You are advised to use the default installation packages of the following dependent software in the listed OS installation CD-ROMs or sources. If the following software does not exist, refer to the recommended versions of the software.
建议使用从列出的操作系统安装盘或安装源中获取的以下依赖软件的默认安装包进行安装。如果不存在以下软件,请参考推荐的软件版本。
Software dependency requirements are as follows:
软件依赖要求如下:
| Software | Recommended Version |
| ------------- | ------------------- |
| libaio-devel | 0.3.109-13 |
| flex | 2.5.31 or later |
| bison | 2.7-4 |
| ncurses-devel | 5.9-13.20130511 |
| glibc.devel | 2.17-111 |
| patch | 2.7.1-10 |
| lsb_release | 4.1 |
| 软件 | 推荐版本 |
| ------------- | --------------- |
| libaio-devel | 0.3.109-13 |
| flex | 2.5.31及以上版本 |
| bison | 2.7-4 |
| ncurses-devel | 5.9-13.20130511 |
| glibc.devel | 2.17-111 |
| patch | 2.7.1-10 |
| lsb_release | 4.1 |
### Downloading openGauss
### 下载openGauss
You can download openGauss-server and openGauss-third_party from open source community.
可以从开源社区下载openGauss-server和openGauss-third_party。
https://opengauss.org/zh/
From the following website, you can obtain the binarylibs we have compiled. Please unzip it and rename to **binarylibs** after you download.
可以通过以下网站获取编译好的binarylibs。下载后请解压缩并重命名为**binarylibs**。
https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.0.1/openGauss-third_party_binarylibs.tar.gz
https://opengauss.obs.cn-south-1.myhuaweicloud.com/1.1.0/openGauss-third_party_binarylibs.tar.gz
Now we have completed openGauss code, for example, we store it in following directories.
现在我们已经拥有完整的openGauss代码把它存储在以下目录中以sda为例
- /sda/openGauss-server
- /sda/binarylibs
- /sda/openGauss-third_party
### Compiling Third-Party Software
### 编译第三方软件
Before compiling the openGauss, compile and build the open-source and third-party software on which the openGauss depends. These open-source and third-party software is stored in the openGauss-third_party code repository and usually needs to be built only once. If the open-source software is updated, rebuild the software.
在编译openGauss之前需要先编译openGauss依赖的开源及第三方软件。这些开源及第三方软件存储在openGauss-third_party代码仓库中通常只需要构建一次。如果开源软件有更新需要重新构建软件。
You can also directly obtain the output file of the open-source software compilation and build from the **binarylibs** repository.
用户也可以直接从**binarylibs**库中获取开源软件编译和构建的输出文件。
If you want to compile third-party by yourself, please go to openGauss-third_party repository to see details.
如果你想自己编译第三方软件请到openGauss-third_party仓库查看详情。
After the preceding script is executed, the final compilation and build result is stored in the **binarylibs** directory at the same level as **openGauss-third_party**. These files will be used during the compilation of **openGauss-server**.
执行完上述脚本后,最终编译和构建的结果保存在与**openGauss-third_party**同级的**binarylibs**目录下。在编译**openGauss-server**时会用到这些文件。
### Compiling by build.sh
### 代码编译
build.sh in openGauss-server is an important script tool during compilation. It integrates software installation and compilation and product installation package compilation functions to quickly compile and package code.
##### 使用build.sh编译代码
The following table describes the parameters.
openGauss-server中的build.sh是编译过程中的重要脚本工具。该工具集成了软件安装编译和产品安装包编译功能可快速进行代码编译和打包。。
| Option | Default Value | Parameter | Description |
| :----- | :--------------------------- | :----------------------------- | :--------------------------------------- |
| -h | Do not use this option. | - | Help menu. |
| -m | release | [debug \| release \| memcheck] | Selects the target version. |
| -3rd | ${Code directory}/binarylibs | [binarylibs path] | Specifies the path of binarylibs. The path must be an absolute path. |
| -pkg | Do not use this option. | - | Compresses the code compilation result into an installation package. |
参数说明请见以下表格。
> **NOTICE:**
| 选项 | 缺省值 | 参数 | 说明 |
| :---- | :--------------------------- | :------------------------------------- | :------------------------------------------------ |
| -h | 请勿使用此选项。 | - | 帮助菜单。 |
| -m | release | [debug &#124; release &#124; memcheck] | 选择目标版本。 |
| -3rd | ${Code directory}/binarylibs | [binarylibs path] | 指定binarylibs路径。该路径必须是绝对路径。 |
| -pkg | 请勿使用此选项。 | - | 将代码编译结果压缩至安装包。 |
| -nopt | 请勿使用此选项。 | - | 如果使用此功能则对鲲鹏平台的相关CPU不进行优化。 |
> **注意**
>
> 1. **-m [debug | release | memcheck]** indicates that three target versions can be selected:
> - **release**: indicates that the binary program of the release version is generated. During compilation of this version, the GCC high-level optimization option is configured to remove the kernel debugging code. This option is usually used in the generation environment or performance test environment.
> - **debug**: indicates that a binary program of the debug version is generated. During compilation of this version, the kernel code debugging function is added, which is usually used in the development self-test environment.
> - **memcheck**: indicates that a binary program of the memcheck version is generated. During compilation of this version, the ASAN function is added based on the debug version to locate memory problems.
> 2. **-3rd [binarylibs path]** is the path of **binarylibs**. By default, **binarylibs** exists in the current code folder. If **binarylibs** is moved to **openGauss-server** or a soft link to **binarylibs** is created in **openGauss-server**, you do not need to specify the parameter. However, if you do so, please note that the file is easy to be deleted by the **git clean** command.
> 3. Each option in this script has a default value. The number of options is small and the dependency is simple. Therefore, this script is easy to use. If the required value is different from the default value, set this parameter based on the actual requirements.
> - **-m [debug | release | memcheck]**表示有三个目标版本可以选择:
> - **release**生成release版本的二进制程序。此版本编译时通过配置GCC高级优化选项去除内核调试代码。此选项通常在生成环境或性能测试环境中使用。
> - **debug**表示生成debug版本的二进制程序。此版本编译时增加了内核代码调试功能一般用于开发自测环境。
> - **memcheck**表示生成memcheck版本的二进制程序。此版本编译时在debug版本的基础上增加了ASAN功能用于定位内存问题。
> - **-3rd [binarylibs path]**为**binarylibs**的路径。默认设置为当前代码文件夹下存在**binarylibs**,因此如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定此参数。但请注意,这样做的话,该文件很容易被**git clean**命令删除。
> - 该脚本中的每个选项都有一个默认值。选项数量少,依赖简单。因此,该脚本易于使用。如果实际需要的参数值与默认值不同,请根据实际情况配置。
Now you know the usage of build.sh, so you can compile the openGauss-server by one command with build.sh.
现在你已经知晓build.sh的用法只需使用如下命令即可编译openGauss-server。
```
[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path]
```
For example:
举例:
```
[user@linux openGauss-server]$ sh build.sh # Compile openGauss of the release version. The binarylibs or its soft link must exist in the code directory. Otherwise, the operation fails.
[user@linux openGauss-server]$ sh build.sh -m debug -3rd /sda/binarylibs # Compilate openGauss of the debug version using binarylibs we put on /sda/
[user@linux openGauss-server]$ sh build.sh # 编译安装release版本的openGauss。需代码目录下有binarylibs或者其软链接否则将会失败。
[user@linux openGauss-server]$ sh build.sh -m debug -3rd /sda/binarylibs # 编译安装debug版本的openGauss
```
The software installation path after compilation is **/sda/openGauss-server/dest**.
编译后的软件安装路径为:**/sda/openGauss-server/dest**
The compiled binary files are stored in **/sda/openGauss-server/dest/bin**.
编译后的二进制文件路径为:**/sda/openGauss-server/dest/bin**
Compilation log: **make_compile.log**
编译日志: **make_compile.log**
### Compiling by Command
##### 使用命令编译代码
1. Run the following script to obtain the system version:
1.执行以下脚本获取系统版本号:
```
[user@linux openGauss-server]$ sh src/get_PlatForm_str.sh
```
> **NOTICE:**
> **注意**
>
> - The command output indicates the OSs supported by the openGauss. The OSs supported by the openGauss are centos7.6_x86_64 and openeuler_aarch64.
> - If **Failed** or another version is displayed, the openGauss does not support the current operating system.
> - 命令回显信息即为openGauss支持的操作系统。目前openGauss支持的操作系统为centos7.6_x86_64和openeuler_aarch64。
> - 如果显示**Failed**或其他版本表示openGauss不支持当前操作系统。
2. Configure environment variables, add **____** based on the code download location, and replace *** with the result obtained in the previous step.
2.配置环境变量,根据代码下载位置添加**____**,并将***替换为上一步的结果。
```
export CODE_BASE=________ # Path of the openGauss-server file
export BINARYLIBS=________ # Path of the binarylibs file
export GAUSSHOME=$CODE_BASE/dest/
export GCC_PATH=$BINARYLIBS/buildtools/***/gcc7.3/
export CC=$GCC_PATH/gcc/bin/gccexport CXX=$GCC_PATH/gcc/bin/g++
export CC=$GCC_PATH/gcc/bin/gcc
export CXX=$GCC_PATH/gcc/bin/g++
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH
```
3. Select a version and configure it.
例如在CENTOS X86-64平台上binarylibs目录被作为openGauss-server目录的兄弟目录。
在openGauss-server目录下执行以下命令。
**debug** version:
```
export CODE_BASE=`pwd`
export BINARYLIBS=`pwd`/../binarylibs
export GAUSSHOME=$CODE_BASE/dest/
export GCC_PATH=$BINARYLIBS/buildtools/centos7.6_x86_64/gcc7.3/
export CC=$GCC_PATH/gcc/bin/gcc
export CXX=$GCC_PATH/gcc/bin/g++
export LD_LIBRARY_PATH=$GAUSSHOME/lib:$GCC_PATH/gcc/lib64:$GCC_PATH/isl/lib:$GCC_PATH/mpc/lib/:$GCC_PATH/mpfr/lib/:$GCC_PATH/gmp/lib/:$LD_LIBRARY_PATH
export PATH=$GAUSSHOME/bin:$GCC_PATH/gcc/bin:$PATH
```
3.选择一个版本进行配置。
**debug**版本:
```
./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib
```
**release** version:
**release**版本:
```
./configure --gcc-version=7.3.0 CC=g++ CFLAGS="-O2 -g3" --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-thread-safety --without-readline --without-zlib
```
**memcheck** version:
**memcheck**版本:
```
./configure --gcc-version=7.3.0 CC=g++ CFLAGS='-O0' --prefix=$GAUSSHOME --3rd=$BINARYLIBS --enable-debug --enable-cassert --enable-thread-safety --without-readline --without-zlib --enable-memory-check
```
> **NOTICE:**
> **注意**
>
> 1. *[debug | release | memcheck]* indicates that three target versions are available.
> 2. On the ARM-based platform, **-D__USE_NUMA** needs to be added to **CFLAGS**.
> 3. On the **ARMv8.1** platform or a later version (for example, Kunpeng 920), **-D__ARM_LSE** needs to be added to **CFLAGS**.
> 4. If **binarylibs** is moved to **openGauss-server** or a soft link to **binarylibs** is created in **openGauss-server**, you do not need to specify the **--3rd** parameter. However, if you do so, please note that the file is easy to be deleted by the `git clean` command.
> 5. To build with mysql_fdw, add **--enable-mysql-fdw** when configure. Note that before build mysql_fdw, MariaDB's C client library is needed.
> 6. To build with oracle_fdw, add **--enable-oracle-fdw** when configure. Note that before build oracle_fdw, Oracle's C client library is needed.
> - *[debug | release | memcheck]*表示有三个目标版本可用。
> - 在ARM平台上需要把**-D__USE_NUMA**添加至**CFLAGS**中。
> - 在**ARMv8.1**及以上平台如鲲鹏920需要把**-D__ARM_LSE**添加至**CFLAGS**中。
> - 如果**binarylibs**被移至**openGauss-server**中,或者在**openGauss-server**中创建了到**binarylibs**的软链接,则不需要指定**--3rd**参数。但请注意,这样做的话,该文件很容易被`git clean`命令删除。
4. Run the following commands to compile openGauss:
4.执行以下命令编译openGauss
```
[user@linux openGauss-server]$ make -sj
[user@linux openGauss-server]$ make install -sj
```
5. If the following information is displayed, the compilation and installation are successful:
5.显示如下信息,表示编译和安装成功。
```
openGauss installation complete.
```
The software installation path after compilation is **$GAUSSHOME**.
- 编译后的软件安装路径为**$GAUSSHOME**。
The compiled binary files are stored in **$GAUSSHOME/bin**.
- 编译后的二进制文件存放路径为:**$GAUSSHOME/bin**。
### Compiling the Installation Package
Please read the chapter **Compiling by build.sh** first to understand the usage of build.sh and how to compile openGauss by using the script.
### 编译安装包
Now you can compile the installation package with just adding a option `-pkg`.
请先阅读[使用build.sh编译](#使用build.sh编译)章节了解build.sh的用法以及如何使用该脚本编译openGauss。
现在,只需添加一个-pkg选项就可以编译安装包。
```
[user@linux openGauss-server]$ sh build.sh -m [debug | release | memcheck] -3rd [binarylibs path] -pkg
```
For example:
举例:
```
[user@linux openGauss-server]$ sh build.sh -pkg # Compile openGauss installation package of the release version. The binarylibs or its soft link must exist in the code directory. Otherwise, the operation fails.
[user@linux openGauss-server]$ sh build.sh -m debug -3rd /sda/binarylibs -pkg # Compile openGauss installation package of the debug version using binarylibs we put on /sda/
sh build.sh -pkg # 生成release版本的openGauss安装包。需代码目录下有binarylibs或者其软链接否则将会失败。
sh build.sh -m debug -3rd /sdc/binarylibs -pkg # 生成debug版本的openGauss安装包
```
The generated installation package is stored in the **./package** directory.
- 生成的安装包存放目录:**./package**。
Compilation log: **make_compile.log**
- 编译日志: **make_compile.log**
Installation package packaging log: **./package/make_package.log**
- 安装包打包日志: **./package/make_package.log**
## Quick Start
See the [Quick Start](https://opengauss.org/zh/docs/1.0.1/docs/Quickstart/Quickstart.html) to implement the image classification.
## 快速入门
## Docs
参考[快速入门](https://opengauss.org/zh/docs/1.1.0/docs/Quickstart/Quickstart.html)。
For more details about the installation guide, tutorials, and APIs, please see the [User Documentation](https://gitee.com/opengauss/docs).
## 文档
## Community
更多安装指南、教程和API请参考[用户文档](https://gitee.com/opengauss/docs)。
### Governance
## 社区
Check out how openGauss implements open governance [works](https://gitee.com/opengauss/community/blob/master/governance.md).
### 治理
### Communication
查看openGauss是如何实现开放[治理](https://gitee.com/opengauss/community/blob/master/governance.md)。
- WeLink- Communication platform for developers.
- IRC channel at `#opengauss-meeting` (only for meeting minutes logging purpose)
- Mailing-list: <https://opengauss.org/zh/community/onlineCommunication.html>
### 交流
## Contribution
- WeLink开发者的交流平台。
- IRC频道`#opengauss-meeting`(仅用于会议纪要)。
- 邮件列表https://opengauss.org/zh/community/onlineCommunication.html
Welcome contributions. See our [Contributor](https://opengauss.org/zh/contribution.html) for more details.
## 贡献
## Release Notes
欢迎大家来参与贡献。详情请参阅我们的[社区贡献](https://opengauss.org/zh/contribution.html)。
For the release notes, see our [RELEASE](https://opengauss.org/zh/docs/1.0.1/docs/Releasenotes/Releasenotes.html).
## 发行说明
## License
请参见[发行说明](https://opengauss.org/zh/docs/1.1.0/docs/Releasenotes/Releasenotes.html)。
[Apache License 2.0](https://gitee.com/opengauss/community/blob/master/LICENSE)
## 许可证
[MulanPSL-2.0](http://license.coscl.org.cn/MulanPSL2/)

View File

@ -44,27 +44,17 @@ function doing()
#------------------------------
# gsql things
#------------------------------
function cofig_gsql_and_gs_ktool()
function cofig_gsql()
{
doing 'Configuring LD_LIBRARY_PATH, PATH and GS_KTOOL_FILE_PATH for gsql and gs_ktool...'
doing 'Configuring LD_LIBRARY_PATH and PATH for gsql...'
LIB_PATH="${LOCAL_PATH}/lib"
BIN_PATH="${LOCAL_PATH}/bin"
GS_KT_FILE_PATH="${LOCAL_PATH}/gs_ktool_file"
if [ ! -f "${LOCAL_PATH}/bin/gsql" ]; then
logerr "failed to locate ./bin/gsql, please source this file at the path where it is. "
return 1;
fi;
if [ ! -f "${LOCAL_PATH}/bin/gs_ktool" ]; then
logerr "failed to locate ./bin/gs_ktool, please source this file at the path where it is. "
return 1;
fi;
if [ ! -f "${LOCAL_PATH}/gs_ktool_file/gs_ktool_conf.ini" ]; then
logerr "failed to locate ./gs_ktool_file/gs_ktool_con.ini, please source this file at the path where it is. "
return 1;
fi;
export LD_LIBRARY_PATH=${LIB_PATH}:${LD_LIBRARY_PATH}
export PATH=${BIN_PATH}:${PATH}
export GS_KTOOL_FILE_PATH=${GS_KT_FILE_PATH}
echo 'done'
return 0
}
@ -73,7 +63,7 @@ if [ ! -z "$1" ]; then
echo "Usage:"
echo " source $0"
else
cofig_gsql_and_gs_ktool
cofig_gsql
if [ 0 -eq $? ]; then
echo 'All things done.'
fi

View File

@ -300,13 +300,6 @@ fi
if [ "$product_mode"x == "opengauss"x ]; then
release_file_list=$(echo ${release_file_list}_single | sed -e 's/mpp_release/opengauss_release/')
if [ ! -f $binarylib_dir/dependency/masstree/masstree-beta-0.9.0.tar.gz ]; then
echo "ERROR: there is no necessary files in $binarylib_dir/dependency/masstree/"
echo " You may copy them from 'third_party' repo by executing:"
echo " mkdir -p $binarylib_dir/dependency/masstree && cp -fr <third_party>/dependency/masstree/* $binarylib_dir/dependency/masstree"
echo " You should substitute <third_party> by real path to third_party repo"
# exit 1
fi
fi
read_mpp_version
@ -973,7 +966,6 @@ function make_package_gsql()
mkdir -p gsql
mkdir -p gsql/bin
mkdir -p gsql/lib
mkdir -p gsql/gs_ktool_file
# copy gsql and depend *.so
cp ${BUILD_DIR}/bin/gsql gsql/bin
@ -981,16 +973,6 @@ function make_package_gsql()
die "copy gsql failed."
fi
cp ${BUILD_DIR}/bin/gs_ktool gsql/bin
if [ $? -ne 0 ]; then
die "copy gsql failed."
fi
cp -r ${BUILD_DIR}/etc/gs_ktool_file/gs_ktool_conf.ini gsql/gs_ktool_file
if [ $? -ne 0 ]; then
die "copy gs_ktool_con.ini failed."
fi
cd gsql
tar -xvf ${package_path}/${libpq_package_name}
if [ $? -ne 0 ]; then

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -790,8 +789,6 @@
./lib/postgresql/pg_upgrade_support.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1214,7 +1211,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1255,8 +1251,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1290,8 +1284,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -780,8 +779,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/gsredistribute.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1203,7 +1200,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1244,8 +1240,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1279,8 +1273,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -775,8 +774,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1196,7 +1193,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1237,8 +1233,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1272,8 +1266,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -803,8 +802,6 @@
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libkmc.so
./lib/libgs_ktool.so
./lib/libcgroup.so
./lib/libcgroup.so.1
./lib/libcom_err_gauss.so
@ -1221,7 +1218,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1269,8 +1265,6 @@
./lib/libpq_ce.so
./lib/libpq_ce.so.5
./lib/libpq_ce.so.5.5
./lib/libkmc.so
./lib/libgs_ktool.so
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6
@ -1312,8 +1306,6 @@
./lib/libcmcommon.so.2
./lib/libcmpq.so
./lib/libcmpq.so.1
./lib/libkmc.so
./lib/libgs_ktool.so
./lib/libcrypto.so
./lib/libcrypto.so.1.1
./lib/libstdc++.so.6

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -797,8 +796,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/gsredistribute.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1226,7 +1223,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1267,8 +1263,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1302,8 +1296,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -792,8 +791,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1216,7 +1213,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1257,8 +1253,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1292,8 +1286,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -785,8 +784,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1209,7 +1206,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1250,8 +1246,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1285,8 +1279,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -2,7 +2,6 @@
[server]:./bin/gds
[server]:./bin/gs_log
[server]:./bin/gsql
[server]:./bin/gs_ktool
[server]:./bin/gaussdb
[server]:./bin/gaussdb.version.GaussDB200
[server]:./bin/gaussdb.version.GaussDB300
@ -776,8 +775,6 @@
[server]:./lib/postgresql/java/pljava.jar
[server]:./lib/postgresql/gsredistribute.so*
[server]:./lib/libpljava.so*
[server]:./lib/libgs_ktool.so*
[server]:./lib/libkmc.so
[server]:./lib/libpq.a
[server]:./lib/libpq.so*
[server]:./lib/libpq_ce.so*
@ -1121,7 +1118,6 @@
[server]:./jre/THIRD_PARTY_README
[client]
[client]:./bin/gsql
[client]:./bin/gs_ktool
[client]:./bin/gs_dump
[client]:./bin/gs_dumpall
[client]:./bin/gs_restore
@ -1162,8 +1158,6 @@
[client]:./lib/postgresql/utf8_and_big5.so*
[client]:./lib/postgresql/java/pljava.jar
[client]:./lib/libpljava.so*
[client]:./lib/libgs_ktool.so*
[client]:./lib/libkmc.so
[client]:./lib/libpq.a
[client]:./lib/libpq.so*
[client]:./lib/libpq_ce.so*
@ -1192,8 +1186,6 @@
[odbc]:./lib/libkrb5_gauss.so*
[odbc]:./lib/libcom_err_gauss.so*
[odbc]:./lib/libpgport_tool.so*
[odbc]:./lib/libgs_ktool.so*
[odbc]:./lib/libkmc.so
[odbc]:./lib/libpq.a
[odbc]:./lib/libpq.so*
[odbc]:./lib/libpq_ce.so*
@ -1206,8 +1198,6 @@
[libpq]:./lib/libpq.a
[libpq]:./lib/libpq.so*
[libpq]:./lib/libpq_ce.so*
[libpq]:./lib/libgs_ktool.so*
[libpq]:./lib/libkmc.so
[libpq]:./lib/libstdc++.so*
[libpq]:./lib/libconfig.so*
[libpq]:./lib/libcmclient.so*

View File

@ -2,7 +2,6 @@
./bin/gds
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -784,8 +783,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/gsredistribute.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1212,7 +1209,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1253,8 +1249,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1288,8 +1282,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -82,8 +82,6 @@
./share/postgresql/extension/gc_fdw.control
./share/postgresql/extension/log_fdw--1.0.sql
./share/postgresql/extension/log_fdw.control
./share/postgresql/extension/mot_fdw--1.0.sql
./share/postgresql/extension/mot_fdw.control
./share/postgresql/extension/dimsearch--1.0.sql
./share/postgresql/extension/dimsearch.control
./share/postgresql/extension/packages--1.0.sql
@ -779,9 +777,6 @@
./lib/libcom_err_gauss.so
./lib/libcom_err_gauss.so.3
./lib/libcom_err_gauss.so.3.0
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libgpr.so
./lib/libgpr.so.9
./lib/libgpr.so.9.0.0

View File

@ -1,7 +1,6 @@
[server]
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -771,8 +770,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -788,6 +785,7 @@
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libgpr.so
./lib/libgpr.so.9
./lib/libgpr.so.9.0.0
@ -1185,7 +1183,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1226,8 +1223,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1261,8 +1256,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -1,7 +1,6 @@
[server]
./bin/gs_log
./bin/gsql
./bin/gs_ktool
./bin/gaussdb
./bin/gaussdb.version.GaussDB200
./bin/gaussdb.version.GaussDB300
@ -775,8 +774,6 @@
./lib/postgresql/java/pljava.jar
./lib/postgresql/postgres_fdw.so
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -792,6 +789,7 @@
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libgpr.so
./lib/libgpr.so.9
./lib/libgpr.so.9.0.0
@ -1192,7 +1190,6 @@
./jre/THIRD_PARTY_README
[client]
./bin/gsql
./bin/gs_ktool
./bin/gs_dump
./bin/gs_dumpall
./bin/gs_restore
@ -1233,8 +1230,6 @@
./lib/postgresql/utf8_and_big5.so
./lib/postgresql/java/pljava.jar
./lib/libpljava.so
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5
@ -1268,8 +1263,6 @@
./odbc/lib/psqlodbcw.la
./odbc/lib/psqlodbcw.so
[libpq]
./lib/libgs_ktool.so
./lib/libkmc.so
./lib/libpq.a
./lib/libpq.so
./lib/libpq.so.5

View File

@ -786,6 +786,7 @@
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libgpr.so
./lib/libgpr.so.9
./lib/libgpr.so.9.0.0

View File

@ -782,6 +782,7 @@
./lib/libatomic.so
./lib/libatomic.so.1
./lib/libatomic.so.1.2.0
./lib/libmasstree.so
./lib/libgpr.so
./lib/libgpr.so.9
./lib/libgpr.so.9.0.0

View File

@ -110,11 +110,11 @@ else
fi
SCRIPT_DIR=$(cd $(dirname $SCRIPT_PATH) && pwd)
package_path=$SCRIPT_DIR
test -d ${SCRIPT_DIR}/../../output || mkdir -p ${SCRIPT_DIR}/../../output
output_path=$(cd ${SCRIPT_DIR}/../../output && pwd)
#######################################################################
##version 1.0.1
##version 1.1.0
#######################################################################
function read_srv_version()
{
@ -124,8 +124,16 @@ function read_srv_version()
#auto read the number from kernel globals.cpp, no need to change it here
}
read_srv_version
function deploy_pkgs()
{
for pkg in $@; do
if [ -f $pkg ]; then
mv $pkg $output_path/
fi
done
}
read_srv_version
#########################################################################
##read command line parameters
@ -281,6 +289,7 @@ function install_gaussdb()
./separate_debug_information.sh
cd $SCRIPT_DIR
mv symbols.tar.gz $kernel_symbol_package_name
deploy_pkgs $kernel_symbol_package_name
fi
#insert the commitid to version.cfg as the upgrade app path specification
@ -388,7 +397,8 @@ function make_package_srv()
mkdir -p temp
mkdir -p ${BUILD_DIR}/temp/etc
target_file_copy "$copydest" ${BUILD_DIR}/temp
mv ${sha256_name} ${kernel_package_name} ${package_path}
deploy_pkgs ${sha256_name} ${kernel_package_name}
echo "make server(all) package success!"
}
@ -420,7 +430,8 @@ function make_package_upgrade_sql()
chmod 600 ${UPGRADE_SQL_TAR}
chmod 600 ${UPGRADE_SQL_SHA256}
mv ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256} ${package_path}
deploy_pkgs ${UPGRADE_SQL_TAR} ${UPGRADE_SQL_SHA256}
echo "Successfully packaged upgrade_sql files."
}
@ -485,8 +496,9 @@ function make_package_libpq()
if [ $? -ne 0 ]; then
die "$package_command ${libpq_package_name} failed"
fi
mv ${libpq_package_name} ${package_path}
echo "install $pkgname tools is ${libpq_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
deploy_pkgs ${libpq_package_name}
echo "install $pkgname tools is ${libpq_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}
@ -541,8 +553,9 @@ function make_package_tools()
if [ $? -ne 0 ]; then
die "$package_command ${tools_package_name} failed"
fi
mv ${tools_package_name} ${package_path}
echo "install $pkgname tools is ${tools_package_name} of ${package_path} directory " >> "$LOG_FILE" 2>&1
deploy_pkgs ${tools_package_name}
echo "install $pkgname tools is ${tools_package_name} of ${output_path} directory " >> "$LOG_FILE" 2>&1
echo "success!"
}

configure vendored
View File

@ -2650,10 +2650,10 @@ $as_echo "$as_me: error: argument required for --with-gs-version option" >&2;}
else
product=$(cat build/script/gauss.spec | grep 'PRODUCT' | awk -F "=" '{print $2}')
version=$(cat build/script/gauss.spec | grep 'VERSION' | awk -F "=" '{print $2}')
gitversion=$(git log | grep commit | head -1 | awk '{print $2}' | cut -b 1-8)
commits=$(git log | grep "See in merge request" | wc -l)
gitversion=$(git log 2>/dev/null | grep commit | head -1 | awk '{print $2}' | cut -b 1-8)
commits=$(git log 2>/dev/null | grep "See in merge request" | wc -l)
debug_str=""
mrid=$(git log | grep "See merge request" | head -1 | awk -F! '{print $2}' | grep -o '[0-9]\+')
mrid=$(git log 2>/dev/null | grep "See merge request" | head -1 | awk -F! '{print $2}' | grep -o '[0-9]\+')
if test "$enable_debug" = yes; then
debug_str="debug"
fi

View File

@ -71,7 +71,9 @@ where column_constraint can be:
CHECK ( expression ) |
DEFAULT default_expr |
UNIQUE index_parameters |
PRIMARY KEY index_parameters }
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where compress_mode can be:
{ DELTA | PREFIX | DICTIONARY | NUMSTR | NOCOMPRESS }
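For illustration, a minimal example of the REFERENCES column constraint added to the syntax above; table and column names are hypothetical:
    CREATE TABLE order_item (
        item_id   integer PRIMARY KEY,
        order_id  integer REFERENCES orders (order_id) MATCH SIMPLE
                  ON DELETE CASCADE ON UPDATE NO ACTION,  -- referential actions per the syntax above
        quantity  integer CHECK (quantity > 0)
    );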

View File

@ -31,7 +31,9 @@ where column_constraint can be:
CHECK ( expression ) |
DEFAULT default_expr |
UNIQUE index_parameters |
PRIMARY KEY index_parameters }
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]

View File

@ -35,7 +35,9 @@ where column_constraint can be:
CHECK ( expression ) |
DEFAULT default_expr |
UNIQUE index_parameters |
PRIMARY KEY index_parameters }
PRIMARY KEY index_parameters |
REFERENCES reftable [ ( refcolumn ) ] [ MATCH FULL | MATCH PARTIAL | MATCH SIMPLE ]
[ ON DELETE action ] [ ON UPDATE action ] }
[ DEFERRABLE | NOT DEFERRABLE | INITIALLY DEFERRED | INITIALLY IMMEDIATE ]
where table_constraint can be:
[ CONSTRAINT constraint_name ]

View File

@ -319,7 +319,7 @@ ifeq ($(with_3rd), NONE)
LIBNANOMSG_HOME = $(top_builddir)/$(BINARYPATH)/nanomsg/comm
HLL_HOME = $(top_builddir)/$(BINARYPATH)/postgresql-hll/$(LIB_SUPPORT_LLT)
PLJAVA_HOME = $(top_builddir)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT)
MASSTREE_HOME = $(top_builddir)/dependency/masstree
MASSTREE_HOME = $(top_builddir)/$(BINARYPATH)/masstree/comm
MYFDW_HOME = $(top_builddir)/dependency/mysql_fdw
ORCFDW_HOME = $(top_builddir)/dependency/oracle_fdw
PLDBG_HOME = $(top_builddir)/dependency/pldebugger
@ -355,7 +355,7 @@ else
LIBORC_HOME = $(with_3rd)/$(BINARYPATH)/liborc/$(LIB_SUPPORT_LLT)
SNAPPY_HOME = $(with_3rd)/$(BINARYPATH)/snappy/$(LIB_SUPPORT_LLT)
LIBOPENSSL_HOME = $(with_3rd)/$(BINARYPATH)/openssl/$(LIB_NOT_SUPPORT_LLT)
LIBKMC_HOME = $(with_3rd)/$(PLATFORMPATH)/kmc/comm
SECURE_HOME = $(with_3rd)/$(PLATFORMPATH)/Huawei_Secure_C/$(LIB_NOT_SUPPORT_LLT)
SECUREDYNAMICLIB_HOME = $(with_3rd)/$(PLATFORMPATH)/Huawei_Secure_C/Dynamic_Lib
LICENSE_HOME = $(with_3rd)/$(PLATFORMPATH)/AdaptiveLM_C_V100R005C01SPC002/$(LIB_SUPPORT_LLT)
@ -371,7 +371,7 @@ else
LIBNANOMSG_HOME = $(with_3rd)/$(BINARYPATH)/nanomsg/comm
HLL_HOME = $(with_3rd)/$(BINARYPATH)/postgresql-hll/$(LIB_SUPPORT_LLT)
PLJAVA_HOME = $(with_3rd)/$(BINARYPATH)/pljava/$(LIB_SUPPORT_LLT)
MASSTREE_HOME = $(with_3rd)/dependency/masstree
MASSTREE_HOME = $(with_3rd)/$(BINARYPATH)/masstree/comm
MYFDW_HOME = $(with_3rd)/dependency/mysql_fdw
ORCFDW_HOME = $(with_3rd)/dependency/oracle_fdw
PLDBG_HOME = $(with_3rd)/dependency/pldebugger
@ -616,12 +616,6 @@ LIBOPENSSL_LIB_PATH = $(LIBOPENSSL_HOME)/lib
LIBOPENSSL_SSL_PATH = $(LIBOPENSSL_HOME)/ssl
LIBOPENSSL_INCLUDE_PATH = $(LIBOPENSSL_HOME)/include
#############################################################################
# kmc component
#############################################################################
LIBKMC_LIB_PATH = $(LIBKMC_HOME)/lib
LIBKMC_INCLUDE_PATH = $(LIBKMC_HOME)/include
#############################################################################
# security component
#############################################################################
@ -680,6 +674,12 @@ NUMA_LIB_PATH = $(NUMA_HOME)/lib
LIBCURL_INCLUDE_PATH = $(LIBCURL_HOME)/include
LIBCURL_LIB_PATH = $(LIBCURL_HOME)/lib
#############################################################################
# masstree component
#############################################################################
MASSTREE_INCLUDE_PATH = $(MASSTREE_HOME)/include
MASSTREE_LIB_PATH = $(MASSTREE_HOME)/lib
############################################################################
#
# Programs and flags
@ -733,6 +733,10 @@ ifeq ($(SUPPORT_HOTPATCH), yes)
override CPPFLAGS := $(CPPFLAGS) -I$(LIBHOTPATCH_INCLUDE_PATH)
endif
ifeq ($(enable_mot), yes)
override CPPFLAGS := $(CPPFLAGS) -I$(MASSTREE_INCLUDE_PATH)
endif
CC = @CC@
GCC = @GCC@
C = gcc
@ -866,6 +870,9 @@ endif
LDFLAGS += -L$(GSTRACE_LIB_PATH)
LDFLAGS += -L$(NUMA_LIB_PATH)
LDFLAGS += -L$(LIBCURL_LIB_PATH)
ifeq ($(enable_mot), yes)
LDFLAGS += -L$(MASSTREE_LIB_PATH)
endif
LDFLAGS += @LDFLAGS@
LDFLAGS_EX = @LDFLAGS_EX@
@ -1052,7 +1059,7 @@ endif
# This macro is for use by libraries linking to libpq. (Because libpgport
# isn't created with the same link flags as libpq, it can't be used.)
libpq = -L$(libpq_builddir) -lpq
libpq_ce = -L$(libpq_builddir) -lpq_ce -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -lkmc
libpq_ce = -L$(libpq_builddir) -lpq_ce
# If doing static linking, shared library dependency info isn't available,
# so add in the libraries that libpq depends on.

View File

@ -34,8 +34,7 @@ SUBDIRS = \
gsqlerr \
pg_upgrade \
pg_basebackup \
pg_probackup \
gs_ktool
pg_probackup
ifeq ($(PORTNAME), win32)
SUBDIRS += pgevent
@ -55,9 +54,7 @@ SUBDIRS = \
gs_guc \
gsqlerr \
pg_basebackup \
pg_probackup \
gs_ktool
pg_probackup

View File

@ -505,6 +505,7 @@ stream_multiple|real|0,1.79769e+308|NULL|NULL|
string_hash_compatible|bool|0,0|NULL|NULL|
enable_slow_query_log|bool|0,0|NULL|NULL|
support_batch_bind|bool|0,0|NULL|NULL|
enable_beta_opfusion|bool|0,0|NULL|NULL|
support_extended_features|bool|0,0|NULL|NULL|
lastval_supported|bool|0,0|NULL|NULL|
enable_beta_features|bool|0,0|NULL|NULL|
@ -678,6 +679,7 @@ catchup2normal_wait_time|int|-1,10000|ms|The maximal allowed duration for waitin
max_concurrent_autonomous_transactions|int|0,262143|NULL|NULL|
sync_config_strategy|enum|all_node,only_sync_node,none_node|NULL|Synchronization strategy for configuration files between host and standby.|
time_to_target_rpo|int|0,3600|NULL|NULL|
disable_memory_protect|bool|0,0|NULL|NULL|
[gtm]
nodename|string|0,0|NULL|Name of this GTM/GTM-Standby.|
port|int|1,65535|NULL|Listen Port of GTM or GTM standby server.|

View File

@ -36,6 +36,7 @@
#include "common/config/cm_config.h"
#include <limits.h>
#include <fcntl.h>
#include <math.h>
const int CLUSTER_CONFIG_SUCCESS = 0;
const int CLUSTER_CONFIG_ERROR = 1;
@ -178,6 +179,23 @@ const int MB_PER_GB = 1024;
#define MIN_PER_D (60 * 24)
#define H_PER_D 24
/* the number of the unit type */
const int UNIT_TYPE = 8;
/*
* transform unit matrix
* Each row lists the conversion factors from that unit (taken as the base unit) to every other unit, e.g. g_unit_transform[UNIT_KB][UNIT_GB] = KB_PER_GB.
*/
const int g_unit_transform[UNIT_TYPE][UNIT_TYPE] = {
{1, KB_PER_MB, KB_PER_GB, 0, 0, 0, 0, 0}, /* the transform value based on KB */
{0, 1, MB_PER_GB, 0, 0, 0, 0, 0}, /* the transform value based on MB */
{0, 0, 1, 0, 0, 0, 0, 0}, /* the transform value based on GB */
{0, 0, 0, 1, MS_PER_S, MS_PER_MIN, MS_PER_H, MS_PER_D}, /* the transform value based on ms */
{0, 0, 0, 0, 1, S_PER_MIN, S_PER_H, S_PER_D}, /* the transform value based on s */
{0, 0, 0, 0, 0, 1, MIN_PER_H, MIN_PER_D}, /* the transform value based on min */
{0, 0, 0, 0, 0, 0, 1, H_PER_D}, /* the transform value based on h */
{0, 0, 0, 0, 0, 0, 0, 1} /* the transform value based on d */
};
/* execute result */
#define SUCCESS 0
#define FAILURE 1
@ -270,7 +288,10 @@ void get_instance_configfile(const char* datadir);
char* get_ctl_command_type();
void* pg_malloc(size_t size);
void* pg_malloc_zero(size_t size);
#ifdef __cplusplus
template <typename T>
static int parse_value(
const char* paraname, const UnitType unitval, const UnitType new_unitval, const char* endptr, T* tmp_val);
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
@ -326,8 +347,12 @@ int check_parameter_value(
const char* paraname, GucParaType type, char* guc_list_value, const char* guc_list_unit, const char* value);
int check_parameter_name(char** guc_opt, int type);
bool check_parameter_is_valid(int type);
int parse_value(const char* paraname, const char* value, const char* guc_list_unit, int64* result_int,
double* result_double, bool isInt);
static int check_int_overflow(
const char* paraname, const UnitType unitval, const UnitType tmp_unitval, int64 tmp_int_val);
static int check_double_overflow(
const char* paraname, const double val, const bool inf_is_valid, const bool zero_is_valid);
int parse_int_value(const char* paraname, const char* value, const char* guc_list_unit, int64* result_int);
int parse_double_value(const char* paraname, const char* value, const char* guc_list_unit, double* result_double);
int get_guc_minmax_value(const char* guc_list_val, struct guc_minmax_value& value_list);
int check_int_real_type_value(
const char* paraname, const char* guc_list_value, const char* guc_list_unit, const char* value, bool isInt);
@ -3924,8 +3949,8 @@ int check_int_value(const char* paraname, const struct guc_minmax_value& value_l
bool is_in_list = false;
bool is_exists_alpha = false;
/* make sure the min/max value from guc config list file is correct */
if ((FAILURE == parse_value(paraname, value_list.min_val_str, NULL, &int_min_val, NULL, true)) ||
(FAILURE == parse_value(paraname, value_list.max_val_str, NULL, &int_max_val, NULL, true))) {
if ((FAILURE == parse_int_value(paraname, value_list.min_val_str, NULL, &int_min_val)) ||
(FAILURE == parse_int_value(paraname, value_list.max_val_str, NULL, &int_max_val))) {
(void)write_stderr("ERROR: The minmax value of parameter \"%s\" requires an integer value.\n", paraname);
return FAILURE;
}
@ -3982,8 +4007,8 @@ int check_real_value(const char* paraname, const struct guc_minmax_value& value_
double double_min_val, double double_max_val)
{
/* make sure the min/max value from guc config list file is correct */
if ((FAILURE == parse_value(paraname, value_list.min_val_str, NULL, NULL, &double_min_val, false)) ||
(FAILURE == parse_value(paraname, value_list.max_val_str, NULL, NULL, &double_max_val, false))) {
if ((FAILURE == parse_double_value(paraname, value_list.min_val_str, NULL, &double_min_val)) ||
(FAILURE == parse_double_value(paraname, value_list.max_val_str, NULL, &double_max_val))) {
(void)write_stderr("ERROR: The minmax value of parameter \"%s\" requires a numeric value.\n", paraname);
return FAILURE;
}
@ -4031,12 +4056,16 @@ int check_int_real_type_value(
securec_check_c(nRet, "\0", "\0");
/* parse int_newval/double_newval value*/
if (FAILURE == parse_value(paraname, value, guc_list_unit, &int_newval, &double_newval, isInt)) {
if (isInt)
if (isInt) {
if (FAILURE == parse_int_value(paraname, value, guc_list_unit, &int_newval)) {
(void)write_stderr("ERROR: The parameter \"%s\" requires an integer value.\n", paraname);
else
return FAILURE;
}
} else {
if (FAILURE == parse_double_value(paraname, value, guc_list_unit, &double_newval)) {
(void)write_stderr("ERROR: The parameter \"%s\" requires a numeric value.\n", paraname);
return FAILURE;
return FAILURE;
}
}
/* get min/max value from guc config file */
@ -4056,48 +4085,94 @@ int check_int_real_type_value(
/*
************************************************************************************
Function: parse_value
Desc : parese value from guc config file.
Function: check_int_overflow
Desc : check to see if an int value has underflowed or overflowed for the parameter.
paraname parameter name
value parameter value
guc_list_unit the unit of parameter from guc config file
result_int the parse result about int
result_double the parse result about double
isInt true is int, false is real
unitval the unit of parameter from guc config file
new_unitval the unit of parameter
new_int_val the value of parameter
Return : SUCCESS
FAILURE
************************************************************************************
*/
int parse_value(const char* paraname, const char* value, const char* guc_list_unit, int64* result_int,
double* result_double, bool isInt)
{
int64 int_val = INT_MIN;
double double_val;
long double tmp_double_val;
char* endptr = NULL;
UnitType unitval = UNIT_ERROR;
bool contain_space = false;
static int check_int_overflow(
const char* paraname, const UnitType unitval, const UnitType new_unitval, int64 new_int_val)
{
int per_unit_transfer = INT_MIN;
if (NULL != result_int)
*result_int = 0;
if (NULL != result_double)
*result_double = 0;
errno = 0;
if (isInt) {
/* transform value into long int */
int_val = strtoll(value, &endptr, 0);
if (endptr == value || errno == ERANGE)
return FAILURE;
tmp_double_val = (long double)int_val;
} else {
/* transform value into double */
double_val = strtod(value, &endptr);
if (endptr == value || errno == ERANGE)
return FAILURE;
tmp_double_val = (long double)double_val;
/* the transformation value */
per_unit_transfer = g_unit_transform[unitval][new_unitval];
if (new_int_val > (LLONG_MAX / per_unit_transfer) || new_int_val < (LLONG_MIN / per_unit_transfer)) {
(void)write_stderr("ERROR: An overflow occurs for this parameter \"%s\".\n", paraname);
return FAILURE;
}
return SUCCESS;
}
/*
************************************************************************************
Function: check_double_overflow
Desc : check to see if a double value has underflowed or overflowed for the parameter.
paraname parameter name
val the value of parameter after calculation
inf_is_valid infinity is valid
zero_is_valid zero is valid
Return : SUCCESS
FAILURE
************************************************************************************
*/
static int check_double_overflow(
const char* paraname, const double val, const bool inf_is_valid, const bool zero_is_valid)
{
if (isinf(val) && !(inf_is_valid)) {
(void)write_stderr("ERROR: An overflow occurs for this parameter \"%s\".\n", paraname);
return FAILURE;
}
if ((val) == 0.0 && !(zero_is_valid)) {
(void)write_stderr("ERROR: An overflow occurs for this parameter \"%s\".\n", paraname);
return FAILURE;
}
return SUCCESS;
}
/*
************************************************************************************
Function: parse_int_value
Desc : parse int value from guc config file.
paraname parameter name
value parameter value
guc_list_unit the unit of parameter from guc config file
result_int the parse result about int
Return : SUCCESS
FAILURE
************************************************************************************
*/
int parse_int_value(const char* paraname, const char* value, const char* guc_list_unit, int64* result_int)
{
int64 int_val = INT_MIN;
int64 tmp_int_val;
char* endptr = NULL;
bool contain_space = false;
UnitType int_unitval = UNIT_ERROR;
UnitType new_int_unitval = UNIT_ERROR;
if (NULL != result_int) {
*result_int = 0;
}
errno = 0;
/* transform value into long int */
int_val = strtoll(value, &endptr, 0);
if (endptr == value || errno == ERANGE) {
return FAILURE;
}
tmp_int_val = int_val;
/* skip the blanks */
while (isspace((unsigned char)*endptr)) {
endptr++;
@ -4106,152 +4181,115 @@ int parse_value(const char* paraname, const char* value, const char* guc_list_un
if ('\0' != *endptr) {
/* if unit is NULL, it means the value is incorrect */
if (NULL == guc_list_unit || '\0' == guc_list_unit[0])
if (NULL == guc_list_unit || '\0' == guc_list_unit[0]) {
return FAILURE;
}
if (contain_space) {
(void)write_stderr("ERROR: There should not hava space between value and unit.\n");
return FAILURE;
}
unitval = get_guc_unit(guc_list_unit);
if (UNIT_ERROR == unitval) {
(void)write_stderr("ERROR: Invalid units for this parameter \"%s\".\n", paraname);
/* the unit of parameter from guc config file */
int_unitval = get_guc_unit(guc_list_unit);
/* the unit of real parameter */
new_int_unitval = get_guc_unit(endptr);
/* get_guc_unit */
if (FAILURE == parse_value(paraname, int_unitval, new_int_unitval, endptr, &int_val)) {
return FAILURE;
} else if (UNIT_KB == unitval) {
if (strncmp(endptr, "kB", 2) == 0) {
endptr += 2;
} else if (strncmp(endptr, "MB", 2) == 0) {
endptr += 2;
tmp_double_val *= KB_PER_MB;
} else if (strncmp(endptr, "GB", 2) == 0) {
endptr += 2;
tmp_double_val *= KB_PER_GB;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"kB\", \"MB\" and \"GB\".\n", paraname);
return FAILURE;
}
} else if (UNIT_MB == unitval) {
if (strncmp(endptr, "MB", 2) == 0) {
endptr += 2;
} else if (strncmp(endptr, "GB", 2) == 0) {
endptr += 2;
tmp_double_val *= MB_PER_GB;
} else {
(void)write_stderr("ERROR: Valid units for this parameter \"%s\" are \"MB\" and \"GB\".\n", paraname);
return FAILURE;
}
} else if (UNIT_GB == unitval) {
if (strncmp(endptr, "GB", 2) == 0) {
endptr += 2;
} else {
(void)write_stderr("ERROR: Valid units for this parameter \"%s\" is \"GB\".\n", paraname);
return FAILURE;
}
} else if (UNIT_MS == unitval) {
if (strncmp(endptr, "ms", 2) == 0) {
endptr += 2;
} else if (strncmp(endptr, "s", 1) == 0) {
endptr += 1;
tmp_double_val *= MS_PER_S;
} else if (strncmp(endptr, "min", 3) == 0) {
endptr += 3;
tmp_double_val *= MS_PER_MIN;
} else if (strncmp(endptr, "h", 1) == 0) {
endptr += 1;
tmp_double_val *= MS_PER_H;
} else if (strncmp(endptr, "d", 1) == 0) {
endptr += 1;
tmp_double_val *= MS_PER_D;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"ms\", \"s\", \"min\", \"h\", and \"d\".\n",
paraname);
return FAILURE;
}
} else if (UNIT_S == unitval) {
if (strncmp(endptr, "s", 1) == 0) {
endptr += 1;
} else if (strncmp(endptr, "min", 3) == 0) {
endptr += 3;
tmp_double_val *= S_PER_MIN;
} else if (strncmp(endptr, "h", 1) == 0) {
endptr += 1;
tmp_double_val *= S_PER_H;
} else if (strncmp(endptr, "d", 1) == 0) {
endptr += 1;
tmp_double_val *= S_PER_D;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"s\", \"min\", \"h\", and \"d\".\n", paraname);
return FAILURE;
}
} else if (UNIT_MIN == unitval) {
if (strncmp(endptr, "min", 3) == 0) {
endptr += 3;
} else if (strncmp(endptr, "h", 1) == 0) {
endptr += 1;
tmp_double_val *= MIN_PER_H;
} else if (strncmp(endptr, "d", 1) == 0) {
endptr += 1;
tmp_double_val *= MIN_PER_D;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"min\", \"h\", and \"d\".\n", paraname);
return FAILURE;
}
} else if (UNIT_H == unitval) {
if (strncmp(endptr, "h", 1) == 0) {
endptr += 1;
} else if (strncmp(endptr, "d", 1) == 0) {
endptr += 1;
tmp_double_val *= H_PER_D;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"min\", \"h\", and \"d\".\n", paraname);
return FAILURE;
}
} else if (UNIT_D == unitval) {
if (strncmp(endptr, "d", 1) == 0) {
endptr += 1;
} else {
(void)write_stderr("ERROR: Valid units for this parameter \"%s\" is \"d\".\n", paraname);
return FAILURE;
}
} else {
}
/* overflow processing */
if (FAILURE == check_int_overflow(paraname, int_unitval, new_int_unitval, tmp_int_val)) {
return FAILURE;
}
}
while (isspace((unsigned char)*endptr))
endptr++;
if (*endptr != '\0')
return FAILURE;
if (isInt) {
if (tmp_double_val > LLONG_MAX || tmp_double_val < LLONG_MIN)
return FAILURE;
if (NULL != result_int)
*result_int = (int64)tmp_double_val;
} else {
if (NULL != result_double)
*result_double = (double)tmp_double_val;
}
if (NULL != result_int) {
*result_int = int_val;
}
return SUCCESS;
}
/*************************************************************************************
Function: check_enum_type_value
Desc : check the parameter value of enum type.
Input : paraname parameter name
guc_list_value the string from config file
value parameter value
/*
************************************************************************************
Function: parse_double_value
Desc : parse double value from guc config file.
paraname parameter name
value parameter value
guc_list_unit the unit of parameter from guc config file
result_double the parse result about double
Return : SUCCESS
FAILURE
*************************************************************************************/
************************************************************************************
*/
int parse_double_value(const char* paraname, const char* value, const char* guc_list_unit, double* result_double)
{
double double_val;
double tmp_double_val;
char* endptr = NULL;
bool contain_space = false;
UnitType double_unitval = UNIT_ERROR;
UnitType new_double_unitval = UNIT_ERROR;
int per_unit_transfer = INT_MIN;
if (NULL != result_double) {
*result_double = 0;
}
errno = 0;
/* transform value into double */
double_val = strtod(value, &endptr);
if (endptr == value || errno == ERANGE) {
return FAILURE;
}
tmp_double_val = double_val;
/* skip the blanks */
while (isspace((unsigned char)*endptr)) {
endptr++;
contain_space = true;
}
if ('\0' != *endptr) {
/* if unit is NULL, it means the value is incorrect */
if (NULL == guc_list_unit || '\0' == guc_list_unit[0]) {
return FAILURE;
}
if (contain_space) {
(void)write_stderr("ERROR: There should not hava space between value and unit.\n");
return FAILURE;
}
/* the unit of parameter from guc config file */
double_unitval = get_guc_unit(guc_list_unit);
/* the unit of real parameter */
new_double_unitval = get_guc_unit(endptr);
/* the transformation value */
per_unit_transfer = g_unit_transform[double_unitval][new_double_unitval];
/* get_guc_unit */
if (FAILURE == parse_value(paraname, double_unitval, new_double_unitval, endptr, &double_val)) {
return FAILURE;
}
/* overflow processing */
if (FAILURE == check_double_overflow(paraname, double_val, isinf(tmp_double_val) ||
isinf((double)per_unit_transfer), tmp_double_val == 0 || (double)per_unit_transfer == 0)) {
return FAILURE;
}
}
if (NULL != result_double) {
*result_double = double_val;
}
return SUCCESS;
}
int is_value_in_range(const char* guc_list_value, const char* value)
{
char* ptr = NULL;
@ -4275,6 +4313,17 @@ int is_value_in_range(const char* guc_list_value, const char* value)
return FAILURE;
}
/*
************************************************************************************
Function: check_enum_type_value
Desc : check the parameter value of enum type.
Input : paraname parameter name
guc_list_value the string from config file
value parameter value
Return : SUCCESS
FAILURE
************************************************************************************
*/
int check_enum_type_value(const char* paraname, char* guc_list_value, const char* value)
{
char guc_val[MAX_VALUE_LEN] = {0};
@ -4298,7 +4347,11 @@ int check_enum_type_value(const char* paraname, char* guc_list_value, const char
}
make_string_tolower(value, tmp_paraname, sizeof(tmp_paraname) / sizeof(char));
vptr = strtok_r(tmp_paraname, delims, &vouter_ptr);
if (tmp_paraname != NULL && strlen(tmp_paraname) > 0) {
vptr = strtok_r(tmp_paraname, delims, &vouter_ptr);
} else {
vptr = "";
}
while (NULL != vptr) {
p = vptr;
while (isspace((unsigned char)*p))
@ -4454,3 +4507,136 @@ static char* GetEnvStr(const char* env)
#ifdef __cplusplus
}
#endif /* __cplusplus */
/*
************************************************************************************
Function: parse_value
Desc : parse value from guc config file.
paraname parameter name
unitval the unit of parameter from guc config file
new_unitval the unit of parameter
endptr the address of parameter value
tmp_val the temporary value of parameter value
Return : SUCCESS
FAILURE
************************************************************************************
*/
template <typename T>
static int parse_value(
const char* paraname, const UnitType unitval, const UnitType new_unitval, const char* endptr, T* tmp_val)
{
switch (unitval) {
case UNIT_ERROR: {
(void)write_stderr("ERROR: Invalid units for this parameter \"%s\".\n", paraname);
return FAILURE;
}
case UNIT_KB: {
if (new_unitval == UNIT_KB || new_unitval == UNIT_MB || new_unitval == UNIT_GB) {
endptr += 2;
*tmp_val *= g_unit_transform[UNIT_KB][new_unitval];
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"kB\", \"MB\" and \"GB\".\n", paraname);
return FAILURE;
}
break;
}
case UNIT_MB: {
if (new_unitval == UNIT_MB || new_unitval == UNIT_GB) {
endptr += 2;
*tmp_val *= g_unit_transform[UNIT_MB][new_unitval];
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"MB\" and \"GB\".\n", paraname);
return FAILURE;
}
break;
}
case UNIT_GB: {
if (new_unitval == UNIT_GB) {
endptr += 2;
} else {
(void)write_stderr("ERROR: Valid units for this parameter \"%s\" is \"GB\".\n", paraname);
return FAILURE;
}
break;
}
case UNIT_MS: {
if (new_unitval == UNIT_MS) {
endptr += 2;
} else if (new_unitval == UNIT_S || new_unitval == UNIT_H || new_unitval == UNIT_D) {
endptr += 1;
*tmp_val *= g_unit_transform[UNIT_MS][new_unitval];
} else if (new_unitval == UNIT_MIN) {
endptr += 3;
*tmp_val *= g_unit_transform[UNIT_MS][new_unitval];
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"ms\", \"s\", \"min\", \"h\", and \"d\".\n",
paraname);
return FAILURE;
}
break;
}
case UNIT_S: {
if (new_unitval == UNIT_S || new_unitval == UNIT_H || new_unitval == UNIT_D) {
endptr += 1;
*tmp_val *= g_unit_transform[UNIT_S][new_unitval];
} else if (new_unitval == UNIT_MIN) {
endptr += 3;
*tmp_val *= g_unit_transform[UNIT_S][new_unitval];
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"s\", \"min\", \"h\", and \"d\".\n",
paraname);
return FAILURE;
}
break;
}
case UNIT_MIN: {
if (new_unitval == UNIT_H || new_unitval == UNIT_D) {
endptr += 1;
*tmp_val *= g_unit_transform[UNIT_MIN][new_unitval];
} else if (new_unitval == UNIT_MIN) {
endptr += 3;
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"min\", \"h\", and \"d\".\n", paraname);
return FAILURE;
}
break;
}
case UNIT_H:{
if (new_unitval == UNIT_H || new_unitval == UNIT_D) {
endptr += 1;
*tmp_val *= g_unit_transform[UNIT_H][new_unitval];
} else {
(void)write_stderr(
"ERROR: Valid units for this parameter \"%s\" are \"min\", \"h\", and \"d\".\n", paraname);
return FAILURE;
}
break;
}
case UNIT_D:{
if (new_unitval == UNIT_D) {
endptr += 1;
} else {
(void)write_stderr("ERROR: Valid units for this parameter \"%s\" is \"d\".\n", paraname);
return FAILURE;
}
break;
}
default: {
return FAILURE;
}
}
while (isspace((unsigned char)*endptr))
endptr++;
if (*endptr != '\0') {
return FAILURE;
}
return SUCCESS;
}

View File

@ -1023,6 +1023,74 @@ append_string_info(char **optLines, const char *newContext)
return optLinesResult;
}
/*******************************************************************************
Function : IsLastNotNullReplconninfo
Description : determine whether the replconninfoX being set is the last valid replconninfo
Input : optLines - postgresql.conf content before the change
replconninfoX - replconninfo param name which is being set, eg "replconninfo1"
Output : None
Return : bool
*******************************************************************************/
static bool IsLastNotNullReplconninfo(char** optLines, char* replconninfoX)
{
int notNullReplconninfoNums = 0;
bool isReplconninfoXNull = true;
bool matchReplconninfoX = false;
char* p = NULL;
for (int i = 0; optLines != NULL && optLines[i] != NULL; i++) {
p = optLines[i];
/* Skip all the blanks at the beginning of the optLine */
while (p != NULL && isspace((unsigned char)*p)) {
++p;
}
if (p == NULL) {
continue;
}
if (*p == '#') {
++p;
while (p != NULL && isspace((unsigned char)*p)) {
++p;
}
/* replconninfoX must be invalid if it is commented out */
if (p != NULL && strncmp(p, replconninfoX, strlen(replconninfoX)) == 0) {
return false;
}
continue;
}
if (p != NULL && strncmp(p, "replconninfo", strlen("replconninfo")) == 0) {
if (strncmp(p, replconninfoX, strlen(replconninfoX)) == 0) {
matchReplconninfoX = true;
}
p += strlen(replconninfoX);
/* Skip all the blanks between the param and '=' */
while (p != NULL && isspace((unsigned char)*p)) {
p++;
}
/* Skip '=' */
if (p != NULL && *p == '=') {
p++;
}
/* Skip all the blanks between the '=' and value */
while (p != NULL && isspace((unsigned char)*p)) {
p++;
}
if (p != NULL && strncmp(p, "''", strlen("''")) != 0 &&
strncmp(p, "\"\"", strlen("\"\"")) != 0) {
++notNullReplconninfoNums;
if (matchReplconninfoX) {
isReplconninfoXNull = false;
}
}
}
}
/* return true if the replconninfoX being set is the last valid replconninfo */
if (notNullReplconninfoNums == 1 && !isReplconninfoXNull) {
return true;
}
return false;
}
/*
* @@GaussDB@@
* Brief :
@ -1114,6 +1182,14 @@ do_gucset(const char *action_type, const char *data_dir)
}
}
/* Give a warning if the last valid replconninfo is currently being set to an invalid value */
if (strncmp(config_param[i], "replconninfo", strlen("replconninfo")) == 0 &&
config_value[i] != NULL && (strlen(config_value[i]) == 0 || strncmp(config_value[i], "''", strlen("''")) == 0) &&
IsLastNotNullReplconninfo(opt_lines, config_param[i])) {
write_stderr("\nWARNING: This is the last valid replConnInfo, once set to null, "
"the host role will be changed to Normal if the local_role is primary now.\n");
}
/* find the line where guc parameter in */
lines_index = find_gucoption(opt_lines, config_param[i], NULL, NULL, &optvalue_off, &optvalue_len);
/* get the type of gs_guc execution */
@ -1129,16 +1205,17 @@ do_gucset(const char *action_type, const char *data_dir)
to_generatenewline(optconf_line, newconf_line, config_param[i], config_value[i], optvalue_len);
} else {
/*
* if parameter as value is NULL; consider it as UNSET (i.e to default value)
* which means comment the configuration parameter
* if the parameter value is NULL, do not treat it as UNSET;
* instead keep the configuration parameter unchanged and
* prompt the user to assign a value.
*/
//line is commented
if (isOptLineCommented(optconf_line)) {
rc = strncpy_s(newconf_line, MAX_PARAM_LEN*2, optconf_line, (size_t)Min(line_len, MAX_PARAM_LEN*2 - 1));
securec_check_c(rc, "\0", "\0");
} else {
nRet = snprintf_s(newconf_line, MAX_PARAM_LEN*2, MAX_PARAM_LEN*2 - 1, "#%s", optconf_line);
securec_check_ss_c(nRet, "\0", "\0");
write_stderr(_("ERROR: %s parameters value is expected\n"), config_param[i]);
return FAILURE;
}
}
updateoradd = UPDATE_PARAMETER;

View File

@ -49,7 +49,7 @@ static bool do_drop_slot = false;
static char** options;
static size_t noptions = 0;
static bool g_change_plugin = false;
static const char* plugin = "mppdb_decoding";
char* plugin = "mppdb_decoding";
/* Global State */
static int outfd = -1;
@ -710,7 +710,7 @@ static int getOptions(const int argc, char* const* argv)
break;
case 'P':
check_env_value_c(optarg);
if (plugin) {
if (g_change_plugin && plugin) {
pfree_ext(plugin);
}
plugin = pg_strdup(optarg);

View File

@ -3463,7 +3463,7 @@ static void do_help(void)
printf(_(" %s restart [-w] [-t SECS] [-D DATADIR] [-s] [-m SHUTDOWN-MODE]\n"
" [-o \"OPTIONS\"]\n"),
progname);
printf(_(" %s build [-D DATADIR] [-r SECS] [-q]\n"), progname);
printf(_(" %s build [-D DATADIR] [-b MODE] [-r SECS] [-q] [-M SERVERMODE]\n"), progname);
#endif
printf(_(" %s stop [-W] [-t SECS] [-D DATADIR] [-s] [-m SHUTDOWN-MODE]\n"), progname);
@ -3489,7 +3489,8 @@ static void do_help(void)
(void)printf(_(" %s hotpatch [-D DATADIR] [-a ACTION] [-n NAME]\n"), progname);
#endif
printf(_("\nCommon options:\n"));
printf(_(" -b, --mode=MODE the mode of building the datanode.MODE can be \"full\", \"incremental\"\n"));
printf(_(" -b, --mode=MODE the mode of building the datanode.MODE can be \"full\", \"incremental\", "
"\"auto\"\n"));
printf(_(" -D, --pgdata=DATADIR location of the database storage area\n"));
printf(_(" -s, --silent only print errors, no informational messages\n"));
printf(_(" -t, --timeout=SECS seconds to wait when using -w option\n"));

View File

@ -56,7 +56,7 @@ kwlookup.cpp: % : $(top_srcdir)/src/common/backend/parser/%
rm -f $@ && $(LN_S) $< .
all: submake-aes gs_dump gs_restore gs_dumpall
libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport)) -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport))
gs_dump: pg_dump.o common.o pg_dump_sort.o $(OBJS) $(KEYWRDOBJS) $(COMMON_OBJS) | submake-libpq_ce submake-libpgport
$(CC) $(CXXFLAGS) $(MY_CFLAGS) pg_dump.o common.o pg_dump_sort.o $(KEYWRDOBJS) $(OBJS) $(EXTRA_OBJS) $(COMMON_OBJS) $(LIBS) $(libpq_pgport) $(LDFLAGS) $(LDFLAGS_EX) -o $@$(X)

View File

@ -32,7 +32,7 @@ top_builddir = ../../..
include $(top_builddir)/src/Makefile.global
EXTRA_CLEAN += logging.h
override CXXFLAGS := -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=0 -fsigned-char -DHAVE_LIBZ -DSTREAMPLAN -DPGXC -O0 -g -DENABLE_GSTRACE -fpermissive -Wl,-z,relro,-z,now
override CXXFLAGS := -std=c++11 -D_GLIBCXX_USE_CXX11_ABI=0 -fsigned-char -DHAVE_LIBZ -DSTREAMPLAN -DPGXC -O0 -g -DENABLE_GSTRACE -Wl,-z,relro,-z,now
LDFLAGS += -L$(LZ4_LIB_PATH)
LIBS += -lgssapi_krb5_gauss -lgssrpc_gauss -lkrb5_gauss -lkrb5support_gauss -lk5crypto_gauss -lcom_err_gauss -llz4
PG_CPPFLAGS = -I$(libpq_srcdir) ${PTHREAD_CFLAGS} -Isrc -I$(top_builddir)/$(subdir) -I$(LZ4_INCLUDE_PATH) -I$(ZLIB_INCLUDE_PATH)

View File

@ -1719,7 +1719,7 @@ void write_table_label_and_tablespace_map(pgBackup *backup, PGresult *res,
}
void pg_stop_backup_sent(PGconn *conn, char **stop_backup_query)
void pg_stop_backup_sent(PGconn *conn, const char **stop_backup_query)
{
if (!pg_stop_backup_is_sent) {
bool sent = false;

View File

@ -1184,8 +1184,8 @@ walk_files_collect_timelines(InstanceConfig *instance)
save_backupinfo_belong_timelines(instance, timelineinfos);
if (xlog_files_list) {
parray_walk(xlog_files_list, pgFileFree);
parray_free(xlog_files_list);
parray_walk(xlog_files_list, pfree);
parray_free(xlog_files_list);
}
return timelineinfos;

View File

@ -645,7 +645,8 @@ PGconn* pgut_connect(const char *host, const char *port,
if (password == NULL || password[0] == '\0')
elog(ERROR, "no password supplied");
keywords[i] = "password";
values[i] = password;
continue;
}
elog(ERROR, "could not connect to database %s: %s",

View File

@ -21,7 +21,7 @@ REFDOCDIR= $(top_srcdir)/doc/src/sgml/ref
MAKESGMLDIR = $(top_builddir)/src/common/pgxc/tools/makesgml
SGMLDIR= $(top_builddir)/doc/src/sgml
override CPPFLAGS := -I. -I$(srcdir) -I$(libpq_srcdir) -I$(top_srcdir)/src/bin/pg_dump -DHAVE_CE -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -L$(LIBKMC_LIB_PATH) -lkmc $(CPPFLAGS)
override CPPFLAGS := -I. -I$(srcdir) -I$(libpq_srcdir) -I$(top_srcdir)/src/bin/pg_dump -DHAVE_CE $(CPPFLAGS)
$(top_builddir)/src/common/interfaces/libpq/client_logic_processor/stmt_processor.o:
$(MAKE) -C $(top_builddir)/src/common/interfaces/libpq/client_logic_processor/ stmt_processor.o ENABLE_CE=1
@ -65,7 +65,7 @@ FLEXFLAGS = -Cfe -b -p -p
all: submake-aes gsql
libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport)) -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
libpq_pgport:=$(subst -lpq,-lpq_ce,$(libpq_pgport))
$(top_builddir)/src/lib/elog/elog.a:
$(MAKE) -C $(top_builddir)/src/lib/elog elog.a

View File

@ -1870,22 +1870,23 @@ static bool editFile(const char* fname, int lineno)
return false;
}
check_env_value(editor_lineno_arg);
}
if (strlen(editor_lineno_arg) >= MAXPGPATH) {
psql_error("The value of \"editor_lineno_arg\" is too long.\n");
free(editor_lineno_arg);
editor_lineno_arg = NULL;
free(editorName);
editorName = NULL;
return false;
if (strlen(editor_lineno_arg) >= MAXPGPATH) {
psql_error("The value of \"editor_lineno_arg\" is too long.\n");
free(editor_lineno_arg);
editor_lineno_arg = NULL;
free(editorName);
editorName = NULL;
return false;
}
syssz = strlen(editorName) + strlen(editor_lineno_arg) + 10 /* for integer */
+ 1 + strlen(fname) + 10 + 1;
} else {
syssz = strlen(editorName) + strlen(fname) + 10 + 1;
}
/* Allocate sufficient memory for command line. */
lineno > 0 ? (syssz = strlen(editorName) + strlen(editor_lineno_arg) + 10 /* for integer */
+ 1 + strlen(fname) + 10 + 1) :
(syssz = strlen(editorName) + strlen(fname) + 10 + 1);
sys = (char*)pg_malloc(syssz);
/*
@ -1902,11 +1903,12 @@ static bool editFile(const char* fname, int lineno)
check_sprintf_s(sprintf_s(sys, syssz, "exec %s '%s'", editorName, fname));
}
#else
if (lineno > 0)
if (lineno > 0) {
check_sprintf_s(sprintf_s(
sys, syssz, SYSTEMQUOTE "\"%s\" %s%d \"%s\"" SYSTEMQUOTE, editorName, editor_lineno_arg, lineno, fname));
else
} else {
check_sprintf_s(sprintf_s(sys, syssz, SYSTEMQUOTE "\"%s\" \"%s\"" SYSTEMQUOTE, editorName, fname));
}
#endif
result = system(sys);
if (result == -1) {

View File

@ -131,7 +131,9 @@ int main(int argc, char* argv[])
if (strcmp(libpqVersionString, DEF_GS_VERSION) != 0) {
fprintf(stderr,
"The \"libpq.so\" loaded mismatch the version of gsql, please check it.\nexpected: %s\nresult: %s\n",
"[Warning]: The \"libpq.so\" loaded mismatch the version of gsql, "
"please check it.\n"
"expected: %s\nresult: %s\n",
DEF_GS_VERSION,
libpqVersionString);
#ifdef ENABLE_MULTIPLE_NODES

View File

@ -2866,6 +2866,14 @@ void CheckSetNamespace(Oid oldNspOid, Oid nspOid, Oid classid, Oid objid)
if (nspOid == PG_CATALOG_NAMESPACE)
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot move objects into system schema")));
/* disallow set into dbe_perf schema */
if (nspOid == PG_DBEPERF_NAMESPACE)
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot move objects into dbe_perf schema")));
/* disallow set into snapshot schema */
if (nspOid == PG_SNAPSHOT_NAMESPACE)
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot move objects into snapshot schema")));
}
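To illustrate the new checks in CheckSetNamespace, statements along these lines (object name hypothetical) are now expected to be rejected with the errors added above:
    ALTER TABLE public.t1 SET SCHEMA dbe_perf;   -- ERROR: cannot move objects into dbe_perf schema
    ALTER TABLE public.t1 SET SCHEMA snapshot;   -- ERROR: cannot move objects into snapshot schema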
/*

View File

@ -34,7 +34,7 @@ static int sort_order_cmp(const void* p1, const void* p2);
#define checkEnumLableValue(val) \
do { \
if (NAMEDATALEN < strlen(val) || 0 == strlen(val)) { \
if (NAMEDATALEN - 1 < strlen(val) || 0 == strlen(val)) { \
ereport(ERROR, \
(errcode(ERRCODE_INVALID_NAME), \
errmsg("invalid enum label \"%s\"", val), \

View File

@ -1850,8 +1850,9 @@ CREATE VIEW pg_user_mappings AS
REVOKE ALL on pg_user_mapping FROM public;
CREATE OR REPLACE VIEW PG_CATALOG.DUAL AS (SELECT 'X'::TEXT AS DUMMY);
GRANT SELECT ON TABLE DUAL TO PUBLIC;
--some function will use the new column that use
CREATE OR REPLACE VIEW PG_CATALOG.SYS_DUMMY AS (SELECT 'X'::TEXT AS DUMMY);
GRANT SELECT ON TABLE SYS_DUMMY TO PUBLIC;
-- these functions are added for supporting default format transformation
CREATE OR REPLACE FUNCTION to_char(NUMERIC)

View File

@ -348,7 +348,7 @@ static int process_global_settings_args(CreateClientLogicGlobal *parsetree, Oid
break;
case ClientLogicGlobalProperty::CMK_KEY_STORE: {
CmkKeyStore key_store = get_key_store_from_string(global_param->value);
if (key_store != CmkKeyStore::GS_KTOOL) {
if (key_store != CmkKeyStore::LOCALKMS) {
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("Invalid key store")));
}
string_args.set("KEY_STORE", global_param->value);
@ -361,7 +361,7 @@ static int process_global_settings_args(CreateClientLogicGlobal *parsetree, Oid
}
case ClientLogicGlobalProperty::CMK_ALGORITHM: {
CmkAlgorithm cmk_algo = get_algorithm_from_string(global_param->value);
if (cmk_algo != CmkAlgorithm::AES_256_CBC) {
if (cmk_algo != CmkAlgorithm::RAS_2048) {
ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("Invalid algorithm")));
}
string_args.set("ALGORITHM", global_param->value);

View File

@ -2759,6 +2759,7 @@ bool raw_expression_tree_walker(Node* node, bool (*walker)(), void* context)
case T_ParamRef:
case T_A_Const:
case T_A_Star:
case T_Rownum:
/* primitive node types with no subnodes */
break;
case T_Alias:
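The Rownum node now accepted by the raw expression tree walker corresponds to the ROWNUM pseudocolumn; a hypothetical query whose raw parse tree contains such a node:
    SELECT id, name FROM employees WHERE ROWNUM <= 10;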

View File

@ -545,7 +545,7 @@ static void parameter_check_execute_direct(const char* query);
%type <ival> Iconst SignedIconst
%type <str> Sconst comment_text notify_payload
%type <str> RoleId opt_granted_by opt_boolean_or_string ColId_or_Sconst
%type <str> RoleId TypeOwner opt_granted_by opt_boolean_or_string ColId_or_Sconst
%type <list> var_list
%type <str> ColId ColLabel var_name type_function_name param_name
%type <node> var_value zone_value
@ -10319,9 +10319,9 @@ createfunc_opt_list:
| createfunc_opt_list createfunc_opt_item { $$ = lappend($1, $2); }
;
opt_createproc_opt_list:
createproc_opt_item
opt_createproc_opt_list createproc_opt_item
{
$$ = list_make1($1);
$$ = lappend($1, $2);
}
| /* EMPTY */
{
@ -11969,7 +11969,7 @@ AlterOwnerStmt: ALTER AGGREGATE func_name aggr_args OWNER TO RoleId
n->newowner = $6;
$$ = (Node *)n;
}
| ALTER TYPE_P any_name OWNER TO RoleId
| ALTER TYPE_P any_name OWNER TO TypeOwner
{
AlterOwnerStmt *n = makeNode(AlterOwnerStmt);
n->objectType = OBJECT_TYPE;
@ -12035,6 +12035,11 @@ AlterOwnerStmt: ALTER AGGREGATE func_name aggr_args OWNER TO RoleId
}
;
TypeOwner: RoleId { $$ = $1; }
| CURRENT_USER { $$ = pstrdup($1); }
| SESSION_USER { $$ = pstrdup($1); }
;
/*****************************************************************************
*
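With the new TypeOwner rule, the owner of a type can be set to the special role keywords directly; for example (type name hypothetical):
    ALTER TYPE public.compfoo OWNER TO CURRENT_USER;
    ALTER TYPE public.compfoo OWNER TO SESSION_USER;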

View File

@ -4299,7 +4299,8 @@ List* transformCreateSchemaStmt(CreateSchemaStmt* stmt)
cxt.triggers = lappend(cxt.triggers, element);
} break;
case T_GrantStmt:
case T_GrantStmt: /* GRANT XXX ON XXX TO XXX */
case T_AlterRoleStmt: /* GRANT ALL PRIVILEGES TO XXX */
cxt.grants = lappend(cxt.grants, element);
break;

View File

@ -2171,8 +2171,12 @@ static char* pg_get_tabledef_worker(Oid tableoid)
appendStringInfo(&buf, "SET search_path = %s;", quote_identifier(get_namespace_name(tableinfo.spcid)));
appendStringInfo(&buf, "\nCREATE %s %s %s",
(tableinfo.relpersistence == RELPERSISTENCE_UNLOGGED) ? "UNLOGGED" : "", reltypename, relname);
appendStringInfo(&buf,
"\nCREATE %s%s %s",
(tableinfo.relpersistence == RELPERSISTENCE_UNLOGGED) ? "UNLOGGED " :
((tableinfo.relpersistence == RELPERSISTENCE_GLOBAL_TEMP) ? "GLOBAL TEMPORARY " : ""),
reltypename,
relname);
// get attribute info
actual_atts = get_table_attribute(tableoid, &buf, formatter, ft_frmt_clmn, cnt_ft_frmt_clmns);

View File

@ -34,9 +34,6 @@
#include "utils/relmapper.h"
#include "utils/snapmgr.h"
/* built first time through in InitializeRelfilenodeMap */
ScanKeyData relfilenode_skey[2];
typedef struct {
Oid reltablespace;
Oid relfilenode;
@ -79,26 +76,27 @@ static void RelfilenodeMapInvalidateCallback(Datum arg, Oid relid)
* RelfilenodeMapInvalidateCallback
* Initialize cache, either on first use or after a reset.
*/
static void InitializeRelfilenodeMap(void)
static void InitializeRelfilenodeMap()
{
HASHCTL ctl;
int i;
/* build skey */
errno_t ret = memset_s(&relfilenode_skey, sizeof(relfilenode_skey), 0, sizeof(relfilenode_skey));
errno_t ret = memset_s(&u_sess->relmap_cxt.relfilenodeSkey, sizeof(u_sess->relmap_cxt.relfilenodeSkey), 0,
sizeof(u_sess->relmap_cxt.relfilenodeSkey));
securec_check(ret, "\0", "\0");
for (i = 0; i < 2; i++) {
fmgr_info_cxt(F_OIDEQ, &relfilenode_skey[i].sk_func, u_sess->cache_mem_cxt);
relfilenode_skey[i].sk_strategy = BTEqualStrategyNumber;
relfilenode_skey[i].sk_subtype = InvalidOid;
relfilenode_skey[i].sk_collation = InvalidOid;
fmgr_info_cxt(F_OIDEQ, &u_sess->relmap_cxt.relfilenodeSkey[i].sk_func, u_sess->cache_mem_cxt);
u_sess->relmap_cxt.relfilenodeSkey[i].sk_strategy = BTEqualStrategyNumber;
u_sess->relmap_cxt.relfilenodeSkey[i].sk_subtype = InvalidOid;
u_sess->relmap_cxt.relfilenodeSkey[i].sk_collation = InvalidOid;
}
relfilenode_skey[0].sk_attno = Anum_pg_class_reltablespace;
relfilenode_skey[1].sk_attno = Anum_pg_class_relfilenode;
u_sess->relmap_cxt.relfilenodeSkey[0].sk_attno = Anum_pg_class_reltablespace;
u_sess->relmap_cxt.relfilenodeSkey[1].sk_attno = Anum_pg_class_relfilenode;
/* Initialize the hash table. */
HASHCTL ctl;
ret = memset_s(&ctl, sizeof(ctl), 0, sizeof(ctl));
securec_check(ret, "\0", "\0");
ctl.keysize = sizeof(RelfilenodeMapKey);
@ -135,8 +133,9 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
ScanKeyData skey[2];
Oid relid;
int rc = 0;
if (u_sess->relmap_cxt.RelfilenodeMapHash == NULL)
if (u_sess->relmap_cxt.RelfilenodeMapHash == NULL) {
InitializeRelfilenodeMap();
}
/* pg_class will show 0 when the value is actually u_sess->proc_cxt.MyDatabaseTableSpace */
if (reltablespace == u_sess->proc_cxt.MyDatabaseTableSpace)
@ -185,7 +184,7 @@ Oid RelidByRelfilenode(Oid reltablespace, Oid relfilenode)
relation = heap_open(RelationRelationId, AccessShareLock);
/* copy scankey to local copy, it will be modified during the scan */
rc = memcpy_s(skey, sizeof(skey), relfilenode_skey, sizeof(skey));
rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.relfilenodeSkey, sizeof(skey));
securec_check(rc, "", "");
/* set scan arguments */
@ -264,7 +263,7 @@ Oid PartitionRelidByRelfilenode(Oid reltablespace, Oid relfilenode, Oid& partati
/* check plain relations by looking in pg_class */
relation = heap_open(PartitionRelationId, AccessShareLock);
rc = memcpy_s(skey, sizeof(skey), relfilenode_skey, sizeof(skey));
rc = memcpy_s(skey, sizeof(skey), u_sess->relmap_cxt.relfilenodeSkey, sizeof(skey));
securec_check(rc, "", "");
skey[0].sk_attno = Anum_pg_partition_reltablespace;
skey[1].sk_attno = Anum_pg_partition_relfilenode;

View File

@ -540,8 +540,8 @@ static bool check_inlist2joininfo(char** newval, void** extra, GucSource source)
static void assign_inlist2joininfo(const char* newval, void* extra);
static bool check_replication_type(int* newval, void** extra, GucSource source);
static bool isOptLineCommented(char* optLine);
static bool isMatchOptionName(
char* optLine, const char* paraName, int paraLength, int* paraOffset, int* valueLength, int* valueOffset);
static bool isMatchOptionName(char* optLine, const char* paraName,
int paraLength, int* paraOffset, int* valueLength, int* valueOffset, bool ignore_case);
static const char* logging_module_guc_show(void);
static bool logging_module_check(char** newval, void** extra, GucSource source);
@ -586,7 +586,8 @@ static void FinishAlterSystemSet(GucContext context);
static void ConfFileNameCat(char* ConfFileName, char* ConfTmpFileName,
char* ConfTmpBakFileName, char* ConfLockFileName);
static void WriteAlterSystemSetGucFile(char* ConfFileName, char** opt_lines, ConfFileLock* filelock);
static char** LockAndReadConfFile(char* ConfFileName, char* ConfLockFileName, ConfFileLock* filelock);
static char** LockAndReadConfFile(char* ConfFileName, char* ConfTmpFileName, char* ConfLockFileName,
ConfFileLock* filelock);
#endif
inline void scape_space(char **pp)
{
@ -609,8 +610,8 @@ inline void scape_space(char **pp)
* Returns:
* True, iff the option name is in the configure file; else false.
*/
static bool isMatchOptionName(
char* optLine, const char* paraName, int paraLength, int* paraOffset, int* valueLength, int* valueOffset)
static bool isMatchOptionName(char* optLine, const char* paraName,
int paraLength, int* paraOffset, int* valueLength, int* valueOffset, bool ignore_case)
{
char* p = NULL;
char* q = NULL;
@ -628,7 +629,9 @@ static bool isMatchOptionName(
/* Skip all the blanks after '#' and before the paraName */
scape_space(&p);
if (strncmp(p, paraName, paraLength) != 0) {
if (ignore_case && strncasecmp(p, paraName, paraLength) != 0) {
return false;
} else if (!ignore_case && strncmp(p, paraName, paraLength) != 0) {
return false;
}
@ -2879,7 +2882,7 @@ static void InitConfigureNamesBool()
"most_available_sync",
PGC_SIGHUP,
REPLICATION_MASTER,
gettext_noop("Enables master to continue as standalone on sync standbys failure."),
gettext_noop("Enables master to continue when sync standbys failure."),
NULL,
},
&u_sess->attr.attr_storage.guc_most_available_sync,
@ -4921,7 +4924,7 @@ static void InitConfigureNamesInt()
gettext_noop("Sets the maximum number of simultaneously running WAL sender processes."),
NULL},
&g_instance.attr.attr_storage.max_wal_senders,
4,
16,
0,
MAX_BACKENDS,
NULL,
@ -4982,7 +4985,11 @@ static void InitConfigureNamesInt()
{{"replication_type", PGC_POSTMASTER, WAL_SETTINGS, gettext_noop("Sets the dn's HA mode."), NULL},
&g_instance.attr.attr_storage.replication_type,
#ifdef ENABLE_MULTIPLE_NODES
RT_WITH_DUMMY_STANDBY,
#else
RT_WITH_MULTI_STANDBY,
#endif
RT_WITH_DUMMY_STANDBY,
RT_NUM,
check_replication_type,
@ -7845,7 +7852,7 @@ static void InitConfigureNamesString()
NULL,
GUC_SUPERUSER_ONLY},
&g_instance.attr.attr_common.Alarm_component,
"/opt/huawei/snas/bin/snas_cm_cmd",
"/opt/snas/bin/snas_cm_cmd",
NULL,
NULL,
NULL},
@ -8093,7 +8100,7 @@ static void InitConfigureNamesString()
NULL,
GUC_LIST_INPUT},
&u_sess->attr.attr_storage.SyncRepStandbyNames,
"",
"*",
check_synchronous_standby_names,
assign_synchronous_standby_names,
NULL},
@ -12877,9 +12884,9 @@ const char* GetConfigOption(const char* name, bool missing_ok, bool restrict_sup
ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("unrecognized configuration parameter \"%s\"", name)));
}
if (restrict_superuser && (record->flags & GUC_SUPERUSER_ONLY) && (GetUserId() != BOOTSTRAP_SUPERUSERID))
if (restrict_superuser && (record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be initial account to examine \"%s\"", name)));
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to examine \"%s\"", name)));
switch (record->vartype) {
case PGC_BOOL:
@ -12931,9 +12938,9 @@ const char* GetConfigOptionResetString(const char* name)
if (record == NULL)
ereport(
ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && (GetUserId() != BOOTSTRAP_SUPERUSERID))
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be initial account to examine \"%s\"", name)));
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to examine \"%s\"", name)));
switch (record->vartype) {
case PGC_BOOL:
@ -13122,7 +13129,7 @@ static void replace_config_value(char** optlines, char* name, char* value, confi
}
securec_check_ss(rc, "\0", "\0");
index = find_guc_option(optlines, name, NULL, NULL, &optvalue_off, &optvalue_len);
index = find_guc_option(optlines, name, NULL, NULL, &optvalue_off, &optvalue_len, true);
/* add or replace */
if (index == INVALID_LINES_IDX) {
@ -13137,13 +13144,45 @@ static void replace_config_value(char** optlines, char* name, char* value, confi
if (optlines[index] != NULL) {
pfree(optlines[index]);
optlines[index] = NULL;
}
}
}
optlines[index] = newline;
}
/*
* Only the administrator can modify GUCs.
* gs_guc can only modify GUCs locally, and ALTER SYSTEM SET is a supplement.
* But over a remote connection some parameters must not be changed,
* otherwise there would be security risks.
*/
static void CheckAlterSystemSetPrivilege(const char* name)
{
if (GetUserId() == BOOTSTRAP_SUPERUSERID) {
return;
} else if (superuser()) {
/* do nothing here, check black list later. */
} else {
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("Permission denied."))));
}
static char* blackList[] = {
"modify_initial_password",
"enable_access_server_directory",
"enable_copy_server_files",
NULL
};
for (int i = 0; blackList[i] != NULL; i++) {
if (pg_strcasecmp(name, blackList[i]) == 0) {
ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("GUC:%s could only be set by initial user.", name))));
}
}
}
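The privilege check above reduces to a NULL-terminated name list scanned case-insensitively. A minimal standalone sketch, using plain strcasecmp (POSIX) where the server uses pg_strcasecmp:

#include <stdio.h>
#include <strings.h>

/* GUCs that only the initial user may change via ALTER SYSTEM SET */
static const char *black_list[] = {
    "modify_initial_password",
    "enable_access_server_directory",
    "enable_copy_server_files",
    NULL
};

/* returns 1 if the named GUC is restricted to the initial user */
static int is_black_listed(const char *name)
{
    for (int i = 0; black_list[i] != NULL; i++) {
        if (strcasecmp(name, black_list[i]) == 0) {
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    printf("%d\n", is_black_listed("Enable_Copy_Server_Files")); /* 1 */
    printf("%d\n", is_black_listed("work_mem"));                 /* 0 */
    return 0;
}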
/*
* Persist the configuration parameter value.
*
@ -13168,11 +13207,7 @@ static void CheckAndGetAlterSystemSetParam(AlterSystemStmt* altersysstmt,
char* name = NULL;
struct config_generic *record = NULL;
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
(errmsg("must be superuser to execute ALTER SYSTEM SET command"))));
CheckAlterSystemSetPrivilege(altersysstmt->setstmt->name);
/*
* Validate the name and arguments [value1, value2 ... ].
*/
@ -13278,17 +13313,31 @@ static void WriteAlterSystemSetGucFile(char* ConfFileName, char** opt_lines, Con
}
}
static char** LockAndReadConfFile(char* ConfFileName, char* ConfLockFileName, ConfFileLock* filelock)
static char** LockAndReadConfFile(char* ConfFileName, char* ConfTmpFileName, char* ConfLockFileName,
ConfFileLock* filelock)
{
struct stat st;
char** opt_lines = NULL;
if (stat(ConfFileName, &st) == 0 && get_file_lock(ConfLockFileName, filelock) == CODE_OK) {
opt_lines = read_guc_file(ConfFileName);
char* file = NULL;
if (stat(ConfFileName, &st) == 0) {
file = ConfFileName;
} else if (stat(ConfTmpFileName, &st) == 0) {
file = ConfTmpFileName;
}
if (file != NULL && S_ISREG(st.st_mode) && get_file_lock(file, filelock) == CODE_OK) {
opt_lines = read_guc_file(file);
} else {
ereport(ERROR,
(errcode(ERRCODE_FILE_READ_FAILED),
errmsg("File does not exits or it is being used.Can not open file: %s.", ConfFileName)));
(errcode(ERRCODE_FILE_READ_FAILED), errmsg("Can not open configure file.")));
}
if (opt_lines == NULL) {
release_file_lock(filelock);
ereport(ERROR, (errcode(ERRCODE_FILE_READ_FAILED), errmsg("Read configure file failed.")));
}
return opt_lines;
}
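The new LockAndReadConfFile boils down to: prefer the main configuration file, fall back to the temporary/backup file if only that one exists, and fail otherwise. A simplified sketch of that selection using stat(), with a return value in place of ereport:

#include <stdio.h>
#include <sys/stat.h>

/* pick the config file to read: the main file if present, otherwise the
 * temp/backup file left behind by an interrupted ALTER SYSTEM SET */
static const char *choose_conf_file(const char *conf, const char *conf_tmp)
{
    struct stat st;
    if (stat(conf, &st) == 0 && S_ISREG(st.st_mode)) {
        return conf;
    }
    if (stat(conf_tmp, &st) == 0 && S_ISREG(st.st_mode)) {
        return conf_tmp;
    }
    return NULL;  /* caller reports "Can not open configure file." */
}

int main(void)
{
    const char *f = choose_conf_file("postgresql.conf", "postgresql.conf.bak");
    printf("%s\n", f ? f : "no readable configuration file");
    return 0;
}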
@ -13301,12 +13350,12 @@ static char** LockAndReadConfFile(char* ConfFileName, char* ConfLockFileName, Co
*
* The configuration parameters are written to a temporary
* file then renamed to the final name. The template for the
* temporary file is postgresql.auto.conf.temp.
* temporary file is postgresql.conf.bak
*
* An LWLock is used to serialize writing to the same file.
*
* In case of an error, we leave the original automatic
* configuration file (postgresql.auto.conf) intact.
* configuration file (postgresql.conf.bak) intact.
*/
void AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
{
@ -13332,7 +13381,7 @@ void AlterSystemSetConfigFile(AlterSystemStmt * altersysstmt)
* temporary file and then rename it to postgresql.auto.conf. In case
* there exists a temp file from previous crash, that can be reused.
*/
opt_lines = LockAndReadConfFile(ConfFileName, ConfLockFileName, &filelock);
opt_lines = LockAndReadConfFile(ConfFileName, ConfTmpFileName, ConfLockFileName, &filelock);
/*
* replace with new value if the configuration parameter already
@ -14180,9 +14229,9 @@ char* GetConfigOptionByName(const char* name, const char** varname)
ereport(
ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("unrecognized configuration parameter \"%s\"", name)));
if ((record->flags & GUC_SUPERUSER_ONLY) && (GetUserId() != BOOTSTRAP_SUPERUSERID))
if ((record->flags & GUC_SUPERUSER_ONLY) && !superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be initial account to examine \"%s\"", name)));
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), errmsg("must be superuser to examine \"%s\"", name)));
if (varname != NULL)
*varname = record->name;
@ -14207,7 +14256,7 @@ void GetConfigOptionByNum(int varnum, const char** values, bool* noshow)
if (noshow != NULL) {
if ((conf->flags & GUC_NO_SHOW_ALL) ||
((conf->flags & GUC_SUPERUSER_ONLY) && (GetUserId() != BOOTSTRAP_SUPERUSERID)))
((conf->flags & GUC_SUPERUSER_ONLY) && !superuser()))
*noshow = true;
else
*noshow = false;
@ -17597,6 +17646,11 @@ static bool check_replication_type(int* newval, void** extra, GucSource source)
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("replication_type is not allowed set 1 "
"in Current Version. Set to default (0).")));
#ifndef ENABLE_MULTIPLE_NODES
} else {
ereport(FATAL, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("replication_type is only allowed set to 1, newval=%d.", *newval)));
#endif
}
return true;
}
@ -17645,6 +17699,22 @@ static bool check_replconninfo(char** newval, void** extra, GucSource source)
return true;
}
/*
* @@GaussDB@@
* Brief : Determine if all eight replconninfos are empty.
* Description :
* Notes :
*/
static inline bool GetReplCurArrayIsNull()
{
for (int i = 1; i < MAX_REPLNODE_NUM; i++) {
if (t_thrd.postmaster_cxt.ReplConnArray[i] != NULL) {
return false;
}
}
return true;
}
/*
* @@GaussDB@@
* Brief : Parse replconninfo1.
@ -17666,6 +17736,12 @@ static void assign_replconninfo1(const char* newval, void* extra)
if (u_sess->attr.attr_storage.ReplConnInfoArr[1] != NULL && newval != NULL &&
strcmp(u_sess->attr.attr_storage.ReplConnInfoArr[1], newval) != 0) {
t_thrd.postmaster_cxt.ReplConnChanged[1] = true;
// perceive single --> primary_standby
if (t_thrd.postmaster_cxt.HaShmData != NULL &&
t_thrd.postmaster_cxt.HaShmData->current_mode == NORMAL_MODE &&
!GetReplCurArrayIsNull()) {
t_thrd.postmaster_cxt.HaShmData->current_mode = PRIMARY_MODE;
}
}
}
@ -18016,7 +18092,7 @@ ErrCode copy_guc_lines(char** copy_to_line, char** optlines, const char** opt_na
if (optlines != NULL) {
for (i = 0; i < RESERVE_SIZE; i++) {
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len);
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len, false);
if (INVALID_LINES_IDX != opt_name_index) {
errno_t errorno = EOK;
opt_name_len = strlen(optlines[opt_name_index]) + 1;
@ -18062,7 +18138,7 @@ void modify_guc_lines(char*** guc_optlines, const char** opt_name, char** copy_f
ereport(LOG, (errmsg("configuration file has not data")));
} else {
for (int i = 0; i < RESERVE_SIZE; i++) {
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len);
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len, false);
if (NULL != copy_from_line[i]) {
if (INVALID_LINES_IDX != opt_name_index) {
pfree(optlines[opt_name_index]);
@ -18116,7 +18192,7 @@ void comment_guc_lines(char** optlines, const char** opt_name)
ereport(LOG, (errmsg("configuration file has not data")));
} else {
for (int i = 0; i < RESERVE_SIZE; i++) {
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len);
opt_name_index = find_guc_option(optlines, opt_name[i], NULL, NULL, &optvalue_off, &optvalue_len, false);
if (opt_name_index != INVALID_LINES_IDX) {
/* Skip all the blanks at the begin of the optLine */
char *p = optlines[opt_name_index];
@ -18191,12 +18267,14 @@ int add_guc_optlines_to_buffer(char** optlines, char** buffer)
* Description : find the line info of the specified parameter in file
* Notes :
*/
int find_guc_option(
char** optlines, const char* opt_name, int* name_offset, int* name_len, int* value_offset, int* value_len)
int find_guc_option(char** optlines, const char* opt_name,
int* name_offset, int* name_len, int* value_offset, int* value_len, bool ignore_case)
{
bool isMatched = false;
int i = 0;
size_t paramlen = 0;
int targetline = 0;
int matchtimes = 0;
if (NULL == optlines || NULL == opt_name) {
return INVALID_LINES_IDX;
@ -18208,24 +18286,41 @@ int find_guc_option(
/* The first loop is to deal with the lines not commented by '#' */
for (i = 0; optlines[i] != NULL; i++) {
if (!isOptLineCommented(optlines[i])) {
isMatched = isMatchOptionName(optlines[i], opt_name, paramlen, name_offset, value_len, value_offset);
isMatched = isMatchOptionName(optlines[i], opt_name, paramlen, name_offset,
value_len, value_offset, ignore_case);
if (isMatched) {
return i;
matchtimes++;
targetline = i;
}
}
}
/* When there are multiple uncommented entries with the same name in postgresql.conf, the last one is recorded */
if (matchtimes > 1) {
ereport(NOTICE, (errmsg("There are %d \"%s\" not commented in \"postgresql.conf\", and only the "
"last one in %dth line will be set and used.",
matchtimes,
opt_name,
(targetline + 1))));
}
if (matchtimes > 0) {
return targetline;
}
/* The second loop is to deal with the lines commented by '#' */
matchtimes = 0;
for (i = 0; optlines[i] != NULL; i++) {
if (isOptLineCommented(optlines[i])) {
isMatched = isMatchOptionName(optlines[i], opt_name, paramlen, name_offset, value_len, value_offset);
isMatched = isMatchOptionName(optlines[i], opt_name, paramlen, name_offset,
value_len, value_offset, ignore_case);
if (isMatched) {
return i;
matchtimes++;
targetline = i;
}
}
}
return INVALID_LINES_IDX;
/* Return the line of the last match; otherwise return the invalid line index */
return (matchtimes > 0) ? targetline : INVALID_LINES_IDX;
}
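The new lookup semantics: among uncommented lines the last occurrence of a parameter wins, and commented lines are only consulted when no uncommented match exists. A simplified stand-in for find_guc_option over an array of option lines (offset bookkeeping omitted):

#include <stdio.h>
#include <string.h>

#define INVALID_IDX (-1)

static int is_commented(const char *line)
{
    while (*line == ' ' || *line == '\t') {
        line++;
    }
    return *line == '#';
}

static int name_matches(const char *line, const char *name)
{
    while (*line == ' ' || *line == '\t' || *line == '#') {
        line++;
    }
    return strncmp(line, name, strlen(name)) == 0;
}

/* last uncommented match wins; otherwise last commented match; otherwise -1 */
static int find_option(const char **lines, int n, const char *name)
{
    int target = INVALID_IDX;
    for (int i = 0; i < n; i++) {
        if (!is_commented(lines[i]) && name_matches(lines[i], name)) {
            target = i;
        }
    }
    if (target != INVALID_IDX) {
        return target;
    }
    for (int i = 0; i < n; i++) {
        if (is_commented(lines[i]) && name_matches(lines[i], name)) {
            target = i;
        }
    }
    return target;
}

int main(void)
{
    const char *lines[] = {
        "#work_mem = 4MB",
        "work_mem = 64MB",
        "work_mem = 128MB",   /* this one is used */
    };
    printf("line %d\n", find_option(lines, 3, "work_mem"));
    return 0;
}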
/*
* @@GaussDB@@

View File

@ -502,7 +502,7 @@ log_line_prefix = '%m %u %d %h %p %S ' # special values:
enable_alarm = on
connection_alarm_rate = 0.9
alarm_report_interval = 10
alarm_component = '/opt/huawei/snas/bin/snas_cm_cmd'
alarm_component = '/opt/snas/bin/snas_cm_cmd'
#------------------------------------------------------------------------------
# RUNTIME STATISTICS

View File

@ -27,7 +27,7 @@ override CPPFLAGS += -I$(top_builddir)/$(subdir)/client_logic_hooks
override CPPFLAGS += -I$(top_builddir)/$(subdir)/client_logic_hooks/encryption_hooks
ifeq "$(ENABLE_CE)" "1"
override CPPFLAGS += -DHAVE_CE -DWORDS_BIGENDIAN -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
override CPPFLAGS += -DHAVE_CE -DWORDS_BIGENDIAN
endif
ifneq ($(PORTNAME), win32)
@ -162,9 +162,7 @@ all-lib-depends: libpq_ce subsystem
$(MAKE) all-lib
endif
all: gs_ktool all-lib-depends
gs_ktool:
$(MAKE) -C $(top_builddir)/src/bin/gs_ktool
all: all-lib-depends
utlibpq: all-lib
cp libpq.so $(top_builddir)/src/distribute/test/ut/lib/libutlibpq.so

View File

@ -19,7 +19,6 @@ encryption_hooks_dir = "$(top_builddir)/src/common/interfaces/libpq/client_logic
override CPPFLAGS := -DFRONTEND -DFRONTEND_PARSER -DPGXC -Wno-write-strings -fstack-protector-all -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/ -I$(top_builddir)/src/include
override CPPFLAGS += -I$(top_builddir)/src/common/interfaces/libpq/ -I$(top_builddir)/src/include/libpq/ -I$(top_builddir)/src/include/libpq/client_logic_cache
override CPPFLAGS += -I$(top_builddir)/src/common/interfaces/libpq/client_logic_hooks -I$(encryption_hooks_dir)
override CPPFLAGS += -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -shared
override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -shared

View File

@ -15,8 +15,8 @@ top_builddir = ../../../../../../
# shared library parameters
NAME=client_logic_encryption_hooks
override CPPFLAGS := -DFRONTEND -DPGXC -Wno-write-strings -fstack-protector-all -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/ -I$(top_builddir)/src/include -I$(top_builddir)/src/include/libpq -I$(top_builddir)/src/common/interfaces/libpq -I$(top_builddir)/src/common/interfaces/libpq/client_logic_hooks/ -I.
override CPPFLAGS += -L$(top_builddir)/src/bin/gs_ktool/ -lgs_ktool -lsecurec -L$(LIBKMC_LIB_PATH) -lkmc
override CPPFLAGS := -DFRONTEND -DPGXC -Wno-write-strings -fstack-protector-all -I$(srcdir) $(CPPFLAGS) -I$(top_builddir)/src/ -I$(top_builddir)/src/include -I$(top_builddir)/src/include/libpq -I$(top_builddir)/src/common/interfaces/libpq -I$(top_builddir)/src/common/interfaces/libpq/client_logic_hooks/ -I.
override CPPFLAGS += -lsecurec -lssl -lcrypto
override CPPFLAGS := $(filter-out -fPIE, $(CPPFLAGS)) -fPIC -shared
override CFLAGS := $(filter-out -fPIE, $(CFLAGS)) -fPIC -shared

View File

@ -243,7 +243,7 @@ bool EncryptionColumnHookExecutor::deprocess_column_encryption_key(bool is_durin
* case 2 : do not report error and try again
*/
CmkKeyStore keyStore = get_key_store_from_string(key_store_str);
if (keyStore == CmkKeyStore::GS_KTOOL) {
if (keyStore == CmkKeyStore::LOCALKMS) {
if (!kt_atoi(key_path_str, &cmk_id)) {
return false;
}
@ -366,7 +366,7 @@ bool EncryptionColumnHookExecutor::pre_create(PGClientLogic &column_encryption,
unsigned char cmk_plain[DEFAULT_CMK_LEN + 1] = {0};
unsigned int cmk_id = 0;
if (keyStore == CmkKeyStore::GS_KTOOL) {
if (keyStore == CmkKeyStore::LOCALKMS) {
if (!kt_atoi(key_path_str, &cmk_id)) {
libpq_free(common_expected_value);
return false;

View File

@ -49,7 +49,7 @@ bool EncryptionGlobalHookExecutor::pre_create(const StringArgs &args,
/* check algorithm */
CmkAlgorithm cmk_algo = get_algorithm_from_string(algorithm_type_str);
if (cmk_algo != CmkAlgorithm::AES_256_CBC) {
if (cmk_algo != CmkAlgorithm::RAS_2048) {
printfPQExpBuffer(&conn->errorMessage,
libpq_gettext("ERROR(CLIENT): unsupported client master key algorithm\n"));
return false;
@ -57,7 +57,7 @@ bool EncryptionGlobalHookExecutor::pre_create(const StringArgs &args,
/* check key store */
CmkKeyStore key_store = get_key_store_from_string(key_store_str);
if (key_store != CmkKeyStore::GS_KTOOL) {
if (key_store != CmkKeyStore::LOCALKMS) {
printfPQExpBuffer(&conn->errorMessage, libpq_gettext("ERROR(CLIENT): key store are mandatory\n"));
return false;
}
@ -87,7 +87,7 @@ bool EncryptionGlobalHookExecutor::pre_create(const StringArgs &args,
}
}
/* generate cmk */
if (key_store == CmkKeyStore::GS_KTOOL) {
if (key_store == CmkKeyStore::LOCALKMS) {
unsigned int cmk_id = 0;
if (!kt_atoi(key_path_str, &cmk_id)) {

View File

@ -29,17 +29,16 @@
#include <openssl/rand.h>
#include "encrypt_decrypt.h"
#include "aead_aes_hamc_enc_key.h"
#include "gs_ktool/kt_interface.h"
#include "cmk_cache_lru.h"
static CmkCacheList *cmk_cache_list = NULL;
extern bool kt_check_algorithm_type(CmkAlgorithm algo_type)
{
if (algo_type == CmkAlgorithm::AES_256_CBC) {
if (algo_type == CmkAlgorithm::RAS_2048) {
return true;
} else {
printf("ERROR(CLIENT): Invalid algorithm, keys generated by gs_ktool are only used for AES_256_CBC.\n");
printf("ERROR(CLIENT): Invalid algorithm, keys generated by gs_ktool are only used for RAS_2048.\n");
}
return false;
@ -47,6 +46,7 @@ extern bool kt_check_algorithm_type(CmkAlgorithm algo_type)
bool kt_atoi(const char *cmk_id_str, unsigned int *cmk_id)
{
return true;
const char *key_path_tag = "gs_ktool/";
char tmp_str[MAX_KEYPATH_LEN] = {0};
int tmp_pos = 0;
@ -87,24 +87,18 @@ bool create_cmk(unsigned int cmk_id)
{
unsigned int cmk_len = 0;
if (!get_cmk_len(cmk_id, &cmk_len)) {
return false;
}
if (cmk_len != DEFAULT_CMK_LEN) {
printf(
"ERROR(GS_KTOOL): Default cmk len is %u, but the len of cmk read from gs_ktool is %u.\n",
DEFAULT_CMK_LEN, cmk_len);
return false;
}
return true;
}
bool read_cmk_plain(const unsigned int cmk_id, unsigned char *cmk_plain, bool is_report_err)
{
unsigned int cmk_len = 0;
if (cmk_plain == NULL) {
return false;
}
@ -120,21 +114,7 @@ bool read_cmk_plain(const unsigned int cmk_id, unsigned char *cmk_plain, bool is
/* case a : try to get cmk plain from cache */
if (!get_cmk_from_cache(cmk_cache_list, cmk_id, cmk_plain)) {
/* case b : failed to get cmk plian from cache, try to get it from gs_ktool */
if (!get_cmk_plain(cmk_id, cmk_plain, &cmk_len, is_report_err)) {
return false;
}
/* check the length of cmk plain read from gs_ktool */
if (cmk_len != DEFAULT_CMK_LEN) {
if (is_report_err) {
printf(
"ERROR(GS_KTOOL): Default cmk len is %u, but the len of cmk read from gs_ktool is %u.\n",
DEFAULT_CMK_LEN, cmk_len);
}
return false;
}
push_cmk_to_cache(cmk_cache_list, cmk_id, cmk_plain);
return true;
}
return true;

View File

@ -25,7 +25,6 @@
#ifndef KTOOL_KT_INTERFACES_H
#define KTOOL_KT_INTERFACES_H
#include "gs_ktool/kt_interface.h"
#include "client_logic/client_logic_enums.h"
#define DEFAULT_CMK_LEN 32

View File

@ -155,8 +155,8 @@ JARPLJAVA = pljava.jar
# MOT component
##############################################################################
ifeq ($(enable_mot), yes)
LIBS += -latomic -lmot_engine
LDFLAGS += -L$(top_builddir)/src/gausskernel/storage/mot/core/bin
LIBS += -latomic -lmot_engine -lmasstree
LDFLAGS += -L$(top_builddir)/src/gausskernel/storage/mot/core/bin -L$(MASSTREE_LIB_PATH)
CXXFLAGS += -I$(JEMALLOC_INCLUDE_PATH)
endif
@ -644,6 +644,9 @@ endif
ifneq (, $(findstring __USE_NUMA, $(CFLAGS)))
cp $(NUMA_LIB_PATH)/* '$(DESTDIR)$(libdir)/'
endif
ifeq ($(enable_mot), yes)
cp -d $(MASSTREE_LIB_PATH)/libmasstree* '$(DESTDIR)$(libdir)/'
endif
ifeq ($(with_3rd), NONE)
cp $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libstdc++.so.6 '$(DESTDIR)$(libdir)/'
cp $(top_builddir)/$(BUILD_TOOLS_PATH)/gcc$(subst .0,,$(CC_VERSION))/gcc/lib64/libgcc_s.so.1 '$(DESTDIR)$(libdir)/'
@ -669,7 +672,7 @@ ifeq ($(enable_thread_check), yes)
endif
cp $(with_3rd)/buildtools/server_key/* '$(DESTDIR)$(bindir)/'
endif
cp -r $(with_3rd)/platform/$(PLAT_FORM_STR)/openjdk8/jdk1.8.222/jre/* '$(DESTDIR)$(bindir)/../jre/'
cp -r $(with_3rd)/platform/$(PLAT_FORM_STR)/openjdk8/jdk1.8.0_222/jre/* '$(DESTDIR)$(bindir)/../jre/'
cp $(PLJAVA_LIB_PATH)/* '$(DESTDIR)$(libdir)/'
cp $(PLJAVA_JAR_PATH)/$(JARPLJAVA) '$(DESTDIR)$(pkglibdir)/java/'

View File

@ -80,28 +80,41 @@ void statement_init_metric_context()
statement_commit_metirc_context();
}
HOLD_INTERRUPTS();
(void)syscalllockAcquire(&u_sess->statement_cxt.list_protect);
PG_TRY();
{
/* 1, check free list: free detail stat; reuse entry in free list */
if (u_sess->statement_cxt.free_count > 0) {
reusedHandle = (StatementStatContext*)u_sess->statement_cxt.toFreeStatementList;
u_sess->statement_cxt.curStatementMetrics = reusedHandle;
u_sess->statement_cxt.toFreeStatementList = reusedHandle->next;
u_sess->statement_cxt.free_count--;
} else {
/* 2, no free slot int free list, allocate new one */
if (u_sess->statement_cxt.allocatedCxtCnt < u_sess->attr.attr_common.track_stmt_session_slot) {
MemoryContext oldcontext = MemoryContextSwitchTo(u_sess->statement_cxt.stmt_stat_cxt);
/* 1, check free list: free detail stat; reuse entry in free list */
if (u_sess->statement_cxt.free_count > 0) {
reusedHandle = (StatementStatContext*)u_sess->statement_cxt.toFreeStatementList;
u_sess->statement_cxt.curStatementMetrics = reusedHandle;
u_sess->statement_cxt.toFreeStatementList = reusedHandle->next;
u_sess->statement_cxt.free_count--;
} else {
/* 2, no free slot int free list, allocate new one */
if (u_sess->statement_cxt.allocatedCxtCnt < u_sess->attr.attr_common.track_stmt_session_slot) {
MemoryContext oldcontext = MemoryContextSwitchTo(u_sess->statement_cxt.stmt_stat_cxt);
u_sess->statement_cxt.curStatementMetrics = palloc0_noexcept(sizeof(StatementStatContext));
if (u_sess->statement_cxt.curStatementMetrics != NULL) {
u_sess->statement_cxt.allocatedCxtCnt++;
}
(void)MemoryContextSwitchTo(oldcontext);
}
}
u_sess->statement_cxt.curStatementMetrics = palloc0_noexcept(sizeof(StatementStatContext));
if (u_sess->statement_cxt.curStatementMetrics != NULL) {
u_sess->statement_cxt.allocatedCxtCnt++;
}
(void)MemoryContextSwitchTo(oldcontext);
}
}
}
PG_CATCH();
{
(void)syscalllockRelease(&u_sess->statement_cxt.list_protect);
RESUME_INTERRUPTS();
PG_RE_THROW();
}
PG_END_TRY();
(void)syscalllockRelease(&u_sess->statement_cxt.list_protect);
RESUME_INTERRUPTS();
ereport(DEBUG1, (errmodule(MOD_INSTR), errmsg("[Statement] init - free list length: %d, suspend list length: %d",
u_sess->statement_cxt.free_count, u_sess->statement_cxt.suspend_count)));
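The shape of the fix is: hold interrupts, take the lock, and guarantee release plus resume on both the normal and the error path. A rough standalone analogue of the HOLD_INTERRUPTS/PG_TRY structure, using setjmp/longjmp in place of the server's error machinery:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf err_env;        /* stand-in for PG_TRY's sigsetjmp state */
static int interrupts_held = 0;
static int lock_held = 0;

static void hold_interrupts(void)   { interrupts_held++; }
static void resume_interrupts(void) { interrupts_held--; }
static void lock_acquire(void)      { lock_held = 1; }
static void lock_release(void)      { lock_held = 0; }

/* stand-in for an allocation failing and ereport(ERROR) longjmp-ing out */
static void allocate_slot(int fail)
{
    if (fail) {
        longjmp(err_env, 1);
    }
}

static void init_metric_context(int fail)
{
    hold_interrupts();
    lock_acquire();
    if (setjmp(err_env) == 0) {          /* PG_TRY */
        allocate_slot(fail);
    } else {                             /* PG_CATCH analogue: clean up (the server then PG_RE_THROWs) */
        lock_release();
        resume_interrupts();
        printf("error path: lock=%d interrupts=%d\n", lock_held, interrupts_held);
        return;
    }
    lock_release();                      /* normal path */
    resume_interrupts();
    printf("normal path: lock=%d interrupts=%d\n", lock_held, interrupts_held);
}

int main(void)
{
    init_metric_context(0);
    init_metric_context(1);
    return 0;
}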

View File

@ -528,7 +528,6 @@ static void CleanStatementTable()
static void FlushStatementToTable(StatementStatContext* suspendList, const knl_u_statement_context* statementCxt)
{
Assert (suspendList != NULL);
RangeVar* relrv = InitStatementRel();
HeapTuple tuple = NULL;
StatementStatContext *flushItem = suspendList;
@ -537,6 +536,8 @@ static void FlushStatementToTable(StatementStatContext* suspendList, const knl_u
PG_TRY();
{
StartTransactionCommand();
PushActiveSnapshot(GetTransactionSnapshot());
RangeVar* relrv = InitStatementRel();
Relation rel = heap_openrv(relrv, RowExclusiveLock);
while (flushItem != NULL) {
tuple = GetStatementTuple(rel, flushItem, statementCxt);
@ -546,6 +547,7 @@ static void FlushStatementToTable(StatementStatContext* suspendList, const knl_u
flushItem = (StatementStatContext *)flushItem->next;
}
heap_close(rel, RowExclusiveLock);
PopActiveSnapshot();
CommitTransactionCommand();
}
PG_CATCH();
@ -559,6 +561,7 @@ static void FlushStatementToTable(StatementStatContext* suspendList, const knl_u
ereport(WARNING, (errmodule(MOD_INSTR),
errmsg("[Statement] flush suspend list to statement_history failed, reason: '%s'", edata->message)));
FreeErrorData(edata);
PopActiveSnapshot();
AbortCurrentTransaction();
}
PG_END_TRY();
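The flush path now brackets heap access with an active snapshot and pops it on both the commit and the abort path. A call-ordering sketch with stub functions standing in for the transaction and snapshot APIs (the printed names are the real calls, the stubs are not):

#include <stdio.h>

static void start_xact(void)    { printf("StartTransactionCommand\n"); }
static void push_snapshot(void) { printf("PushActiveSnapshot(GetTransactionSnapshot())\n"); }
static void pop_snapshot(void)  { printf("PopActiveSnapshot\n"); }
static void commit_xact(void)   { printf("CommitTransactionCommand\n"); }
static void abort_xact(void)    { printf("AbortCurrentTransaction\n"); }
static int  flush_rows(int fail){ printf("heap_openrv + insert tuples ...\n"); return !fail; }

static void flush_statements(int fail)
{
    start_xact();
    push_snapshot();            /* snapshot must be active before catalog/heap access */
    if (flush_rows(fail)) {
        pop_snapshot();         /* normal path: pop before commit */
        commit_xact();
    } else {
        pop_snapshot();         /* error path: pop before aborting the transaction */
        abort_xact();
    }
}

int main(void)
{
    flush_statements(0);
    flush_statements(1);
    return 0;
}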

View File

@ -3463,19 +3463,22 @@ void GenReport::get_summary_load_profile(report_params* params)
GenReport::add_data(dash, &params->Contents);
}
static void get_summary_instance_efficiency_bufferHit(report_params* params, dashboard* dash)
/* summary ratios for instance efficiency: buffer hit ratio, CPU efficiency ratio, ratio of redo
 * without wait, soft parse ratio and execution-without-parse ratio */
static void get_summary_instance_efficiency_percentages(report_params* params, dashboard* dash)
{
List* query_result = NIL;
StringInfoData query;
initStringInfo(&query);
appendStringInfo(&query,
"select 'Buffer Hit %%: ' as \"Metric Name\", "
" case when s.all_reads = 0 then 1 else round(s.blks_hit * 100 / s.all_reads) end as \"Metric Value\" "
"select "
" unnest(array['Buffer Hit %%', 'Effective CPU %%', 'Redo NoWait %%', 'Soft Parse %%', 'Non-Parse CPU %%']) as \"Metric Name\", "
" unnest(array[case when s1.all_reads = 0 then 1 else round(s1.blks_hit * 100 / s1.all_reads) end, s2.cpu_to_elapsd, s3.redo_nowait, s4.soft_parse, s5.non_parse]) as \"Metric Value\" "
"from "
" (select (snap_2.all_reads - coalesce(snap_1.all_reads, 0)) as all_reads, "
" (snap_2.blks_hit - coalesce(snap_1.blks_hit, 0)) as blks_hit "
" from"
" from "
" (select sum(coalesce(snap_blks_read, 0) + coalesce(snap_blks_hit, 0)) as all_reads, "
" coalesce(sum(snap_blks_hit), 0) as blks_hit "
" from snapshot.snap_summary_stat_database "
@ -3483,8 +3486,64 @@ static void get_summary_instance_efficiency_bufferHit(report_params* params, das
" (select sum(coalesce(snap_blks_read, 0) + coalesce(snap_blks_hit, 0)) as all_reads, "
" coalesce(sum(snap_blks_hit), 0) as blks_hit "
" from snapshot.snap_summary_stat_database "
" where snapshot_id = %ld) snap_2"
" ) as s",
" where snapshot_id = %ld) snap_2 "
" ) s1, "
" (select round(cpu_time.snap_value * 100 / greatest(db_time.snap_value, 1)) as cpu_to_elapsd "
" from "
" (select coalesce(snap_2.snap_value, 0) - coalesce(snap_1.snap_value, 0) as snap_value "
" from "
" (select snap_stat_name, snap_value from snapshot.snap_global_instance_time "
" where snapshot_id = %ld and snap_stat_name = 'CPU_TIME') snap_1, "
" (select snap_stat_name, snap_value from snapshot.snap_global_instance_time "
" where snapshot_id = %ld and snap_stat_name = 'CPU_TIME') snap_2) cpu_time, "
" (select coalesce(snap_2.snap_value, 0) - coalesce(snap_1.snap_value, 0) as snap_value "
" from "
" (select snap_stat_name, snap_value from snapshot.snap_global_instance_time "
" where snapshot_id = %ld and snap_stat_name = 'DB_TIME') snap_1, "
" (select snap_stat_name, snap_value from snapshot.snap_global_instance_time "
" where snapshot_id = %ld and snap_stat_name = 'DB_TIME') snap_2) db_time "
" ) s2, "
" (select (bufferAccess.snap_wait - bufferFull.snap_wait) * 100 / greatest(bufferAccess.snap_wait, 1) as redo_nowait "
" from "
" (select coalesce(snap_2.snap_wait) - coalesce(snap_1.snap_wait, 0) as snap_wait "
" from "
" (select snap_wait from snapshot.snap_global_wait_events "
" where snapshot_id = %ld and snap_event = 'WALBufferFull') snap_1, "
" (select snap_wait from snapshot.snap_global_wait_events "
" where snapshot_id = %ld and snap_event = 'WALBufferFull') snap_2) bufferFull, "
" (select coalesce(snap_2.snap_wait) - coalesce(snap_1.snap_wait, 0) as snap_wait "
" from "
" (select snap_wait from snapshot.snap_global_wait_events "
" where snapshot_id = %ld and snap_event = 'WALBufferAccess') snap_1, "
" (select snap_wait from snapshot.snap_global_wait_events "
" where snapshot_id = %ld and snap_event = 'WALBufferAccess') snap_2) bufferAccess "
" ) s3, "
" (select round((snap_2.soft_parse - snap_1.soft_parse) * 100 / greatest((snap_2.hard_parse + snap_2.soft_parse)-(snap_1.hard_parse + snap_1.soft_parse), 1)) as soft_parse "
" from "
" (select sum(snap_n_soft_parse) as soft_parse, sum(snap_n_hard_parse) as hard_parse from snapshot.snap_summary_statement "
" where snapshot_id = %ld ) snap_1, "
" (select sum(snap_n_soft_parse) as soft_parse, sum(snap_n_hard_parse) as hard_parse from snapshot.snap_summary_statement "
" where snapshot_id = %ld ) snap_2 "
" ) s4, "
" (select round((snap_2.elapse_time - snap_1.elapse_time) * 100 /greatest((snap_2.elapse_time + snap_2.parse_time)-(snap_1.elapse_time + snap_1.parse_time), 1)) as non_parse "
" from "
" (select sum(snap_total_elapse_time) as elapse_time, sum(snap_parse_time) as parse_time from snapshot.snap_summary_statement "
" where snapshot_id = %ld ) snap_1, "
" (select sum(snap_total_elapse_time) as elapse_time, sum(snap_parse_time) as parse_time from snapshot.snap_summary_statement "
" where snapshot_id = %ld ) snap_2 "
" ) s5; ",
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id,
params->begin_snap_id,
params->end_snap_id);
@ -3510,8 +3569,8 @@ static void get_summary_instance_efficiency(report_params* params)
dash->tableTitle = "Instance Efficiency Percentages (Target 100%)";
dash->desc = lappend(dash->desc, (void*)desc);
/* instance efficiency, Buffer Hit %: */
get_summary_instance_efficiency_bufferHit(params, dash);
/* instance efficiency, Buffer Hit %, Effective CPU % extra */
get_summary_instance_efficiency_percentages(params, dash);
GenReport::add_data(dash, &params->Contents);
}
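The query above computes five delta ratios between two snapshots. An illustrative computation from already-fetched deltas (the numbers are made up; the real values come from the snapshot.* tables, and the zero-denominator handling is simplified):

#include <stdio.h>

static double pct(double part, double whole)
{
    return whole <= 0 ? 100.0 : part * 100.0 / whole;
}

int main(void)
{
    /* example snapshot deltas (made-up numbers) */
    double blks_hit = 9800, blks_read = 200;
    double cpu_time = 750, db_time = 1000;                 /* instance time deltas */
    double wal_buffer_access = 5000, wal_buffer_full = 50; /* wait event deltas */
    double soft_parse = 900, hard_parse = 100;
    double elapse_time = 9500, parse_time = 500;

    printf("Buffer Hit %%:    %.1f\n", pct(blks_hit, blks_hit + blks_read));
    printf("Effective CPU %%: %.1f\n", pct(cpu_time, db_time));
    printf("Redo NoWait %%:   %.1f\n", pct(wal_buffer_access - wal_buffer_full, wal_buffer_access));
    printf("Soft Parse %%:    %.1f\n", pct(soft_parse, soft_parse + hard_parse));
    printf("Non-Parse CPU %%: %.1f\n", pct(elapse_time, elapse_time + parse_time));
    return 0;
}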

View File

@ -116,9 +116,9 @@ static char *find_field_name(List *);
static char *find_table_name(List *);
static TableCell *find_or_create_tblcell(char *, char *);
static void add_index_from_field(char *, IndexCell *);
static char *parse_group_clause(List *);
static char *parse_order_clause(List *);
static void add_index_from_group_order(TableCell *, List *, bool);
static char *parse_group_clause(List *, List *);
static char *parse_order_clause(List *, List *);
static void add_index_from_group_order(TableCell *, List *, bool, List *);
static Oid find_table_oid(List *, const char *);
static void generate_final_index(TableCell *, Oid);
static void parse_from_clause(List *);
@ -131,6 +131,7 @@ static void determine_driver_table();
static uint4 get_join_table_result_set(const char *, const char *);
static void add_index_from_join(TableCell *, char *);
static void add_index_for_drived_tables();
static inline Node *get_target_by_Const(List *, Node *);
Datum gs_index_advise(PG_FUNCTION_ARGS)
{
@ -252,10 +253,10 @@ SuggestedIndex *suggest_index(const char *query_string, _out_ int *len)
if (g_table_list) {
parse_where_clause(stmt->whereClause);
determine_driver_table();
if (parse_group_clause(stmt->groupClause)) {
add_index_from_group_order(g_driver_table, stmt->groupClause, true);
} else if (parse_order_clause(stmt->sortClause)) {
add_index_from_group_order(g_driver_table, stmt->sortClause, false);
if (parse_group_clause(stmt->groupClause, stmt->targetList)) {
add_index_from_group_order(g_driver_table, stmt->groupClause, true, stmt->targetList);
} else if (parse_order_clause(stmt->sortClause, stmt->targetList)) {
add_index_from_group_order(g_driver_table, stmt->sortClause, false, stmt->targetList);
}
if (g_table_list->length > 1 && g_driver_table) {
add_index_for_drived_tables();
@ -1355,7 +1356,17 @@ void add_index_for_drived_tables()
list_free(to_be_joined_tables);
}
char *parse_group_clause(List *group_clause)
static inline Node *get_target_by_Const(List* targetList, Node* constNode)
{
Value* val = &((A_Const*)constNode)->val;
Assert(IsA(val, Integer));
long target_pos = intVal(val);
Assert(target_pos <= list_length(targetList));
ResTarget* rt = (ResTarget*)list_nth(targetList, target_pos - 1);
return rt->val;
}
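get_target_by_Const maps a positional GROUP BY/ORDER BY constant (e.g. GROUP BY 2) to the matching entry of the SELECT target list. A minimal stand-in over an array of target column names:

#include <stdio.h>

/* resolve a 1-based positional reference (GROUP BY 2 / ORDER BY 1)
 * to the corresponding entry of the select target list */
static const char *target_by_position(const char **targets, int ntargets, long pos)
{
    if (pos < 1 || pos > ntargets) {
        return NULL;   /* the server asserts instead of returning NULL */
    }
    return targets[pos - 1];
}

int main(void)
{
    const char *targets[] = { "id", "name", "age" };
    /* SELECT id, name, age FROM t GROUP BY 2  ->  group by "name" */
    printf("%s\n", target_by_position(targets, 3, 2));
    return 0;
}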
char *parse_group_clause(List *group_clause, List* targetList)
{
if (group_clause == NULL)
return NULL;
@ -1365,7 +1376,15 @@ char *parse_group_clause(List *group_clause)
char *pre_table = NULL;
foreach (group_item, group_clause) {
List *fields = ((ColumnRef *)(lfirst(group_item)))->fields;
Node* node = (Node*)lfirst(group_item);
if (nodeTag(node) == T_A_Const) {
node = get_target_by_Const(targetList, node);
}
if (nodeTag(node) != T_ColumnRef)
continue;
List *fields = ((ColumnRef *)(node))->fields;
char *table_group = find_table_name(fields);
if (!table_group) {
return NULL;
@ -1386,7 +1405,7 @@ char *parse_group_clause(List *group_clause)
return NULL;
}
char *parse_order_clause(List *order_clause)
char *parse_order_clause(List *order_clause, List* targetList)
{
if (order_clause == NULL)
return NULL;
@ -1397,8 +1416,16 @@ char *parse_order_clause(List *order_clause)
SortByDir pre_dir;
foreach (order_item, order_clause) {
List *fields = ((ColumnRef *)(((SortBy *)(lfirst(order_item)))->node))->fields;
SortByDir dir = ((SortBy *)(lfirst(order_item)))->sortby_dir;
SortBy* sortby = (SortBy *)lfirst(order_item);
Node* node = sortby->node;
if (nodeTag(node) == T_A_Const) {
node = get_target_by_Const(targetList, node);
}
if (nodeTag(node) != T_ColumnRef)
break;
List *fields = ((ColumnRef *)node)->fields;
SortByDir dir = sortby->sortby_dir;
char *table_order = find_table_name(fields);
if (!table_order) {
return NULL;
@ -1426,18 +1453,27 @@ char *parse_order_clause(List *order_clause)
*
* The index from goup or order clause is added after the index with operator '='.
*/
void add_index_from_group_order(TableCell *table, List *clause, bool flag_group_order)
void add_index_from_group_order(TableCell *table, List *clause, bool flag_group_order, List* targetList)
{
ListCell *item = NULL;
foreach (item, clause) {
List *fields = NULL;
Node *node = NULL;
char *index_name = NULL;
if (flag_group_order) {
fields = ((ColumnRef *)(lfirst(item)))->fields;
node = (Node*)lfirst(item);
} else {
fields = ((ColumnRef *)((SortBy *)lfirst(item))->node)->fields;
node = ((SortBy *)lfirst(item))->node;
}
if (nodeTag(node) == T_A_Const) {
node = get_target_by_Const(targetList, node);
}
if (nodeTag(node) != T_ColumnRef)
break;
fields = ((ColumnRef *)node)->fields;
index_name = find_field_name(fields);
IndexCell *index = (IndexCell *)palloc(sizeof(*index));
index->index_name = index_name;

View File

@ -482,7 +482,18 @@ Oid AlterObjectNamespace(Relation rel, int oidCacheId, int nameCacheId, Oid obji
*/
void ExecAlterOwnerStmt(AlterOwnerStmt* stmt)
{
Oid newowner = get_role_oid(stmt->newowner, false);
const char* newOwnerName = stmt->newowner;
Oid newowner;
if (strcmp(newOwnerName, "current_user") == 0) {
/* CURRENT_USER */
newowner = GetUserId();
} else if (strcmp(newOwnerName, "session_user") == 0) {
/* SESSION_USER */
newowner = GetSessionUserId();
} else {
/* Normal User */
newowner = get_role_oid(newOwnerName, false);
}
switch (stmt->objectType) {
case OBJECT_AGGREGATE:

View File

@ -938,11 +938,7 @@ void ExplainOnePlan(
* and than calling ExecutorStart for ExecInitNode in CN.
*/
/* only stream plan can use u_sess->instr_cxt.global_instr to collect executor info */
#ifdef ENABLE_MULTIPLE_NODES
if (IS_PGXC_COORDINATOR && queryDesc->plannedstmt->is_stream_plan == true &&
#else
if (queryDesc->plannedstmt->is_stream_plan == true &&
#endif
check_stream_support() && instrument_option != 0 && u_sess->instr_cxt.global_instr == NULL &&
queryDesc->plannedstmt->num_nodes != 0) {
int dop = queryDesc->plannedstmt->query_dop;
@ -1129,7 +1125,7 @@ void ExplainOnePlan(
/* Check plan was influenced by row level security or not, here need to skip remote dummy node */
if (range_table_walker(
plannedstmt->rtable, (bool (*)())ContainRlsQualInRteWalker, NULL, QTW_EXAMINE_RTES | QTW_IGNORE_DUMMY)) {
plannedstmt->rtable, (bool (*)())ContainRlsQualInRteWalker, NULL, QTW_EXAMINE_RTES | QTW_IGNORE_DUMMY)) {
if (t_thrd.explain_cxt.explain_perf_mode != EXPLAIN_NORMAL && es->planinfo != NULL
&& es->planinfo->m_detailInfo != NULL) {
appendStringInfo(es->planinfo->m_detailInfo->info_str,
@ -5436,10 +5432,7 @@ static void show_buffers(ExplainState* es, StringInfo infostr, const Instrumenta
appendStringInfoSpaces(es->str, es->indent * 2);
show_buffers_info(infostr, has_shared, has_local, has_temp, usage);
} else if (is_datanode) {
if (get_execute_mode(es, nodeIdx))
appendStringInfo(infostr, "(Buffers: 0)\n");
else
appendStringInfo(infostr, "(Buffers: unknown)\n");
appendStringInfo(infostr, get_execute_mode(es, nodeIdx) ? "(Buffers: 0)\n" : "(Buffers: unknown)\n");
}
/* As above, show only positive counter values. */

View File

@ -141,7 +141,10 @@ void CreateSchemaCommand(CreateSchemaStmt* stmt, const char* queryString)
//@Temp Table. We allow datanode to create pg_temp namespace to enable create namespace stmt by CN to execute on
// DN
if (!g_instance.attr.attr_common.allowSystemTableMods && !u_sess->attr.attr_common.IsInplaceUpgrade &&
IsReservedName(schemaName) && !IS_PGXC_DATANODE)
#ifdef ENABLE_MULTIPLE_NODES
!IS_PGXC_DATANODE &&
#endif
IsReservedName(schemaName))
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("unacceptable schema name \"%s\"", schemaName),

View File

@ -5108,6 +5108,11 @@ void TryLockAccount(Oid roleID, int extrafails, bool superlock)
bool lockflag = 0;
char* rolename = NULL;
/* We could not insert new xlog if recovery in process */
if (RecoveryInProgress()) {
return;
}
if (!LockAccountParaValid(roleID, extrafails, superlock)) {
return;
}

View File

@ -2473,6 +2473,11 @@ void mark_parent_child_pushdown_flag(Query *parent, Query *child)
if (IS_STREAM_PLAN && ((parent->can_push && !child->can_push) ||
(!parent->can_push && child->can_push))) {
if (check_base_rel_in_fromlist(parent, (Node *)parent->jointree)) {
#ifndef ENABLE_MULTIPLE_NODES
if (u_sess->opt_cxt.is_stream_support) {
mark_stream_unsupport();
}
#endif
set_stream_off();
} else {
parent->can_push = false;

View File

@ -20,6 +20,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/plannodes.h"
#include "optimizer/clauses.h"
#include "optimizer/stream_check.h"
#include "parser/parse_coerce.h"
#include "parser/parse_relation.h"
#include "parser/parsetree.h"
@ -1082,6 +1083,11 @@ Node* replace_rte_variables_mutator(Node* node, replace_rte_variables_context* c
expression_tree_mutator(node, (Node* (*)(Node*, void*)) replace_rte_variables_mutator, (void*)context);
if (contain_subplans(newnode)) {
#ifndef ENABLE_MULTIPLE_NODES
if (u_sess->opt_cxt.is_stream_support) {
mark_stream_unsupport();
}
#endif
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg(

View File

@ -272,6 +272,14 @@ static GS_UCHAR* DecodeClientKey(StringInfo cahome)
return plainpwd;
}
static inline void CleanCertInfo(StringInfo str)
{
errno_t rc = memset_s(str->data, str->len, 0, str->len);
securec_check(rc, "\0", "\0");
pfree_ext(str->data);
pfree_ext(str);
}
static void GetCurlClientCerts(AiConn* connHandle)
{
char* gausshome = getGaussHome();
@ -300,12 +308,11 @@ static void GetCurlClientCerts(AiConn* connHandle)
errmsg("Read certificate files failed.")));
}
pfree_ext(caPath->data);
pfree_ext(certPath->data);
pfree_ext(keyPath->data);
pfree_ext(caPath);
pfree_ext(certPath);
pfree_ext(keyPath);
CleanCertInfo(caPath);
CleanCertInfo(certPath);
CleanCertInfo(keyPath);
errno_t rc = memset_s(plainpwd, CIPHER_LEN + 1, 0, CIPHER_LEN + 1);
securec_check(rc, "\0", "\0");
pfree_ext(plainpwd);
}

View File

@ -100,7 +100,6 @@ const int MILLISECOND_TO_MICROSECOND = 1000;
static void candidate_buf_push(int buf_id, int thread_id);
static int64 get_thread_candidate_nums(int thread_id);
static uint32 get_curr_candidate_nums(void);
static uint32 get_candidate_buf(bool *contain_hashbucket);
static uint32 get_buf_form_dirty_queue(bool *contain_hashbucket);
@ -732,10 +731,9 @@ static void incre_ckpt_bgwriter_kill(int code, Datum arg)
static int64 get_bgwriter_sleep_time()
{
pg_time_t now;
uint64 now;
int64 time_diff;
int thread_id = t_thrd.bgwriter_cxt.thread_id;
BgWriterProc *bgwriter = &g_instance.bgwriter_cxt.bgwriter_procs[thread_id];
/* If primary instance do full checkpoint and not the first bgwriter thread, can scan the dirty
* page queue, help the pagewriter thread finish the dirty page flush.
@ -744,17 +742,11 @@ static int64 get_bgwriter_sleep_time()
return 0;
}
now = (pg_time_t) time(NULL);
now = get_time_ms();
if (t_thrd.bgwriter_cxt.next_flush_time > now) {
time_diff = t_thrd.bgwriter_cxt.next_flush_time - now;
} else {
time_diff = 0;
if (now - t_thrd.bgwriter_cxt.next_flush_time > u_sess->attr.attr_storage.BgWriterDelay * 3) {
ereport(WARNING, (errmodule(MOD_INCRE_BG),
errmsg("bgwriter took %ld ms to flush %d pages, please check the max_io_capacity and bgwriter_delay",
u_sess->attr.attr_storage.BgWriterDelay + now - t_thrd.bgwriter_cxt.next_flush_time,
bgwriter->thread_last_flush)));
}
}
return time_diff;
@ -771,6 +763,7 @@ void incre_ckpt_background_writer_main(void)
WritebackContext wb_context;
int thread_id = t_thrd.bgwriter_cxt.thread_id;
BgWriterProc *bgwriter = &g_instance.bgwriter_cxt.bgwriter_procs[thread_id];
uint64 now;
t_thrd.role = BGWRITER;
@ -821,9 +814,11 @@ void incre_ckpt_background_writer_main(void)
t_thrd.xlog_cxt.ThisTimeLineID = GetRecoveryTargetTLI();
}
pg_time_t now = (pg_time_t) time(NULL);
now = get_time_ms();
t_thrd.bgwriter_cxt.next_flush_time = now + u_sess->attr.attr_storage.BgWriterDelay;
pgstat_report_appname("IncrBgWriter");
pgstat_report_activity(STATE_IDLE, NULL);
/* Loop forever */
for (;;) {
int rc;
@ -857,7 +852,7 @@ void incre_ckpt_background_writer_main(void)
proc_exit(0);
}
}
pgstat_report_activity(STATE_IDLE, NULL);
sleep_time = get_bgwriter_sleep_time();
rc = WaitLatch(&t_thrd.proc->procLatch, WL_LATCH_SET | WL_TIMEOUT | WL_POSTMASTER_DEATH, sleep_time);
if (rc & WL_POSTMASTER_DEATH) {
@ -866,10 +861,15 @@ void incre_ckpt_background_writer_main(void)
/* Clear any already-pending wakeups */
ResetLatch(&t_thrd.proc->procLatch);
pgstat_report_activity(STATE_RUNNING, NULL);
now = (pg_time_t) time(NULL);
now = get_time_ms();
t_thrd.bgwriter_cxt.next_flush_time = now + u_sess->attr.attr_storage.BgWriterDelay;
if (get_curr_candidate_nums() == (uint32)g_instance.attr.attr_storage.NBuffers) {
continue;
}
/*
* When the primary instance do full checkpoint, the first thread remain scan the
* buffer pool to maintain the candidate buffer list, other threads scan the dirty
@ -888,7 +888,7 @@ void incre_ckpt_background_writer_main(void)
}
incre_ckpt_bgwriter_flush_page_batch(wb_context, need_flush_num, contain_hashbucket);
}
}
}
int get_bgwriter_thread_id(void)
@ -939,6 +939,11 @@ static uint32 get_bgwriter_flush_num()
high_water_mark = buffer_num * (percent_target + GAP_PERCENT);
cur_candidate_num = get_curr_candidate_nums();
/* If the slots are sufficient, the standby DN does not need to flush too many pages. */
if (RecoveryInProgress() && cur_candidate_num >= total_target / 2) {
max_io = max_io / 2;
}
/* max_io need greater than one batch flush num, and need less than the dirty list size */
max_io = MAX(max_io / g_instance.bgwriter_cxt.bgwriter_num, DW_DIRTY_PAGE_MAX_FOR_NOHBK);
max_io = MIN(max_io, dirty_list_size);
@ -981,9 +986,6 @@ static uint32 get_candidate_buf(bool *contain_hashbucket)
int start = MAX(bgwriter->buf_id_start, bgwriter->next_scan_loc);
int end = bgwriter->buf_id_start + bgwriter->cand_list_size;
if (RecoveryInProgress()) {
end = bgwriter->buf_id_start + bgwriter->cand_list_size * u_sess->attr.attr_storage.shared_buffers_fraction;
}
end = MIN(start + batch_scan_num, end);
for (int buf_id = start; buf_id < end; buf_id++) {
@ -1171,7 +1173,7 @@ static int64 get_thread_candidate_nums(int thread_id)
/**
* @Description: Return a rough estimate of the current number of buffers in the candidate list.
*/
static uint32 get_curr_candidate_nums(void)
uint32 get_curr_candidate_nums(void)
{
uint32 currCandidates = 0;
for (int i = 0; i < g_instance.bgwriter_cxt.bgwriter_num; i++) {

View File

@ -581,6 +581,9 @@ void CheckpointerMain(void)
*/
now = (pg_time_t)time(NULL);
elapsed_secs = now - t_thrd.checkpoint_cxt.last_checkpoint_time;
if (elapsed_secs < 0) {
elapsed_secs = 0;
}
if (elapsed_secs >= u_sess->attr.attr_storage.CheckPointTimeout)
continue; /* no sleep for us ... */

View File

@ -214,6 +214,16 @@ const incre_ckpt_view_col g_ckpt_view_col[INCRE_CKPT_VIEW_COL_NUM] = {{"node_nam
{"ckpt_predicate_flush_num", INT8OID, ckpt_view_get_predicate_flush_num},
{"ckpt_twophase_flush_num", INT8OID, ckpt_view_get_twophase_flush_num}};
uint64 get_time_ms()
{
struct timeval tv;
uint64 time_ms;
(void)gettimeofday(&tv, NULL);
time_ms = (int64)tv.tv_sec * 1000 + (int64)tv.tv_usec / 1000;
return time_ms;
}
bool IsPagewriterProcess(void)
{
return (t_thrd.role == PAGEWRITER_THREAD);
@ -416,7 +426,7 @@ static uint32 ckpt_get_expected_flush_num()
* @out Offset to the new head
* @return Actual number of dirty pages need to flush
*/
static uint32 ckpt_qsort_dirty_page_for_flush(uint32 expected_flush_num, bool *contain_hashbucket)
static uint32 ckpt_qsort_dirty_page_for_flush(bool *contain_hashbucket)
{
uint32 num_to_flush = 0;
bool retry = false;
@ -498,12 +508,6 @@ try_get_buf:
goto try_get_buf;
}
qsort(g_instance.ckpt_cxt_ctl->CkptBufferIds, num_to_flush, sizeof(CkptSortItem), ckpt_buforder_comparator);
if (u_sess->attr.attr_storage.log_pagewriter) {
ereport(LOG,
(errmodule(MOD_INCRE_CKPT),
errmsg("expected_flush_num is %u, requested_flush_num is %u",
expected_flush_num, num_to_flush)));
}
return num_to_flush;
}
@ -602,7 +606,7 @@ static void ckpt_move_queue_head_after_flush()
/* We flushed some buffers, so update the statistics */
if (actual_flushed > 0) {
g_instance.ckpt_cxt_ctl->page_writer_actual_flush += actual_flushed;
g_instance.ckpt_cxt_ctl->page_writer_last_flush = actual_flushed;
g_instance.ckpt_cxt_ctl->page_writer_last_flush += actual_flushed;
}
if (u_sess->attr.attr_storage.log_pagewriter) {
@ -622,7 +626,7 @@ static void ckpt_pagewriter_main_thread_flush_dirty_page()
WritebackContext wb_context;
uint32 requested_flush_num;
int thread_id = t_thrd.pagewriter_cxt.pagewriter_id;
uint32 expected_flush_num;
int32 expected_flush_num;
bool contain_hashbucket = false;
WritebackContextInit(&wb_context, &t_thrd.pagewriter_cxt.page_writer_after);
@ -633,8 +637,10 @@ static void ckpt_pagewriter_main_thread_flush_dirty_page()
return;
}
g_instance.ckpt_cxt_ctl->page_writer_last_flush = 0;
while (expected_flush_num > 0) {
requested_flush_num = ckpt_qsort_dirty_page_for_flush(expected_flush_num, &contain_hashbucket);
requested_flush_num = ckpt_qsort_dirty_page_for_flush(&contain_hashbucket);
if (SECUREC_UNLIKELY(requested_flush_num == 0)) {
break;
@ -659,39 +665,78 @@ static void ckpt_pagewriter_main_thread_flush_dirty_page()
* If request flush num less than the batch max, break this loop,
* It indicates that there are not many dirty pages.
*/
if (requested_flush_num < GET_DW_DIRTY_PAGE_MAX(contain_hashbucket) && !FULL_CKPT) {
if (expected_flush_num < (int32)GET_DW_DIRTY_PAGE_MAX(contain_hashbucket) && !FULL_CKPT) {
break;
}
}
return;
}
static int64 get_pagewriter_sleep_time()
{
pg_time_t now;
uint64 now;
int64 time_diff;
if (FULL_CKPT) {
return 0;
}
now = (pg_time_t) time(NULL);
now = get_time_ms();
if (t_thrd.pagewriter_cxt.next_flush_time > now) {
time_diff = MAX(t_thrd.pagewriter_cxt.next_flush_time - now, 1);
} else {
time_diff = 0;
if (now - t_thrd.pagewriter_cxt.next_flush_time > u_sess->attr.attr_storage.pageWriterSleep * 3) {
ereport(WARNING, (errmodule(MOD_INCRE_BG),
errmsg("pagewriter took %ld ms to flush %u pages, "
"please check the max_io_capacity and pagewriter_sleep",
u_sess->attr.attr_storage.pageWriterSleep + now - t_thrd.pagewriter_cxt.next_flush_time,
g_instance.ckpt_cxt_ctl->page_writer_last_flush)));
}
}
return time_diff;
}
static uint32 get_page_num_for_lsn(XLogRecPtr target_lsn)
uint64 get_loc_for_lsn(XLogRecPtr target_lsn)
{
uint64 last_loc = 0;
XLogRecPtr page_rec_lsn = InvalidXLogRecPtr;
uint64 queue_loc = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);
if (get_dirty_page_num() == 0) {
return get_dirty_page_queue_tail();
}
while (queue_loc < get_dirty_page_queue_tail()) {
Buffer buffer;
BufferDesc *buf_desc = NULL;
uint64 temp_loc = queue_loc % g_instance.ckpt_cxt_ctl->dirty_page_queue_size;
volatile DirtyPageQueueSlot *slot = &g_instance.ckpt_cxt_ctl->dirty_page_queue[temp_loc];
/* slot location is pre-occupied, but the buffer is not yet set; wait and retry. */
if (!(pg_atomic_read_u32(&slot->slot_state) & SLOT_VALID)) {
pg_usleep(1);
queue_loc = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);
continue;
}
queue_loc++;
pg_memory_barrier();
buffer = slot->buffer;
/* slot state is valid but the buffer is invalid; the slot buffer is set to 0 by BufferAlloc or InvalidateBuffer */
if (BufferIsInvalid(buffer)) {
continue;
}
buf_desc = GetBufferDescriptor(buffer - 1);
page_rec_lsn = pg_atomic_read_u64(&buf_desc->rec_lsn);
if (!BufferIsInvalid(slot->buffer) && XLByteLE(target_lsn, page_rec_lsn)) {
last_loc = queue_loc - 1;
break;
}
}
if (last_loc == 0) {
return get_dirty_page_queue_tail();
}
return last_loc;
}
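get_loc_for_lsn walks the dirty-page queue from head to tail and returns the first valid slot whose recovery LSN is at or beyond the target, falling back to the tail when nothing qualifies. A simplified array-based sketch without slot states or retries:

#include <stdio.h>
#include <stdint.h>

/* find the first queue position whose rec_lsn >= target_lsn;
 * return the tail if every buffered page is older than the target */
static uint64_t loc_for_lsn(const uint64_t *rec_lsn, uint64_t head, uint64_t tail,
                            uint64_t target_lsn)
{
    for (uint64_t loc = head; loc < tail; loc++) {
        if (rec_lsn[loc] != 0 && rec_lsn[loc] >= target_lsn) {  /* 0 = invalid slot */
            return loc;
        }
    }
    return tail;
}

int main(void)
{
    uint64_t q[] = { 100, 0, 130, 150, 200 };   /* rec_lsn per slot, 0 = skipped */
    printf("%llu\n", (unsigned long long)loc_for_lsn(q, 0, 5, 140));  /* -> 3 */
    return 0;
}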
static uint32 get_page_num_for_lsn(XLogRecPtr target_lsn, uint32 max_num)
{
uint32 i;
uint32 num_for_lsn = 0;
@ -717,10 +762,13 @@ static uint32 get_page_num_for_lsn(XLogRecPtr target_lsn)
}
buf_desc = GetBufferDescriptor(buffer - 1);
page_rec_lsn = pg_atomic_read_u64(&buf_desc->rec_lsn);
if (XLByteLT(target_lsn, page_rec_lsn)) {
if (!BufferIsInvalid(slot->buffer) && XLByteLE(target_lsn, page_rec_lsn)) {
break;
}
num_for_lsn++;
if (num_for_lsn >= max_num) {
break;
}
}
return num_for_lsn;
}
@ -746,7 +794,7 @@ uint32 calculate_thread_max_flush_num(bool is_pagewriter)
}
const int AVG_CALCULATE_NUM = 30;
const int LSN_SCAN_FASTOR = 3;
const float HIGH_WATER = 0.75;
static uint32 calculate_pagewriter_flush_num()
{
static XLogRecPtr prev_lsn = InvalidXLogRecPtr;
@ -754,18 +802,22 @@ static uint32 calculate_pagewriter_flush_num()
static pg_time_t prev_time = 0;
static int64 total_flush_num = 0;
static uint32 avg_flush_num = 0;
static uint32 prev_lsn_num = 0;
static int counter = 0;
XLogRecPtr target_lsn;
XLogRecPtr cur_lsn;
XLogRecPtr min_lsn;
uint32 flush_num = 0;
pg_time_t now;
double time_diff;
uint64 now;
int64 time_diff;
float dirty_page_pct;
float dirty_slot_pct;
uint32 num_for_dirty;
uint32 num_for_lsn;
uint32 min_io = DW_DIRTY_PAGE_MAX_FOR_NOHBK;
uint32 max_io = calculate_thread_max_flush_num(true);
uint32 num_for_lsn_max;
float dirty_percent;
/* primary get the xlog insert loc, standby get the replay loc */
if (RecoveryInProgress()) {
@ -776,16 +828,13 @@ static uint32 calculate_pagewriter_flush_num()
if (XLogRecPtrIsInvalid(prev_lsn)) {
prev_lsn = cur_lsn;
prev_time = (pg_time_t) time(NULL);
goto DEFAULT;
}
if (XLByteEQ(prev_lsn, cur_lsn)) {
prev_time = get_time_ms();
avg_flush_num = min_io;
goto DEFAULT;
}
total_flush_num += g_instance.ckpt_cxt_ctl->page_writer_last_flush;
now = (pg_time_t) time(NULL);
now = get_time_ms();
time_diff = now - prev_time;
/*
@ -796,8 +845,10 @@ static uint32 calculate_pagewriter_flush_num()
time_diff > AVG_CALCULATE_NUM * u_sess->attr.attr_storage.pageWriterSleep) {
time_diff = MAX(1, time_diff);
avg_flush_num = (uint32)((((double)total_flush_num) / time_diff + avg_flush_num) / 2);
avg_lsn_rate = ((double)(cur_lsn - prev_lsn) / time_diff + avg_lsn_rate) / 2;
avg_flush_num = (uint32)((((double)total_flush_num) / time_diff * u_sess->attr.attr_storage.pageWriterSleep
+ avg_flush_num) / 2);
avg_lsn_rate = ((double)(cur_lsn - prev_lsn) / time_diff * u_sess->attr.attr_storage.pageWriterSleep
+ avg_lsn_rate) / 2;
/* reset our variables */
prev_lsn = cur_lsn;
@ -808,11 +859,33 @@ static uint32 calculate_pagewriter_flush_num()
dirty_page_pct = g_instance.ckpt_cxt_ctl->actual_dirty_page_num / (float)(g_instance.attr.attr_storage.NBuffers);
dirty_slot_pct = get_dirty_page_num() / (float)(g_instance.ckpt_cxt_ctl->dirty_page_queue_size);
num_for_dirty = MAX(dirty_page_pct, dirty_slot_pct) /
u_sess->attr.attr_storage.dirty_page_percent_max * min_io * 2;
target_lsn = prev_lsn + avg_lsn_rate * LSN_SCAN_FASTOR;
num_for_lsn = get_page_num_for_lsn(target_lsn);
num_for_lsn = MIN(max_io * 2, num_for_lsn / LSN_SCAN_FASTOR);
dirty_percent = MAX(dirty_page_pct, dirty_slot_pct) / u_sess->attr.attr_storage.dirty_page_percent_max;
if (RecoveryInProgress()) {
max_io = max_io * 0.9;
}
if (dirty_percent < HIGH_WATER) {
num_for_dirty = min_io;
num_for_lsn_max = max_io;
} else if (dirty_percent <= 1) {
num_for_dirty = min_io + (float)(dirty_percent - HIGH_WATER) / (float)(1 - HIGH_WATER) * (max_io - min_io);
num_for_lsn_max = max_io + (float)(dirty_percent - HIGH_WATER) / (float)(1 - HIGH_WATER) * (max_io);
} else {
num_for_dirty = max_io;
num_for_lsn_max = max_io * 2;
}
min_lsn = ckpt_get_min_rec_lsn();
if (XLogRecPtrIsInvalid(min_lsn)) {
min_lsn = get_dirty_page_queue_rec_lsn();
}
target_lsn = min_lsn + avg_lsn_rate;
num_for_lsn = get_page_num_for_lsn(target_lsn, num_for_lsn_max);
num_for_lsn = (num_for_lsn + prev_lsn_num) / 2;
prev_lsn_num = num_for_lsn;
flush_num = (avg_flush_num + num_for_dirty + num_for_lsn) / 3;
DEFAULT:
@ -829,7 +902,7 @@ DEFAULT:
static void ckpt_pagewriter_main_thread_loop(void)
{
uint32 rc = 0;
pg_time_t now;
uint64 now;
int64 sleep_time;
if (t_thrd.pagewriter_cxt.got_SIGHUP) {
@ -876,7 +949,7 @@ static void ckpt_pagewriter_main_thread_loop(void)
ckpt_try_skip_invalid_elem_in_queue_head();
ckpt_try_prune_dirty_page_queue();
/* Full checkpoint, don't sleep; the num of dirty page greater than max_dirty_page_num, don't sleep */
/* Full checkpoint, don't sleep */
sleep_time = get_pagewriter_sleep_time();
while (sleep_time > 0 && !t_thrd.pagewriter_cxt.shutdown_requested && !FULL_CKPT) {
/* sleep 1ms check whether a full checkpoint is triggered */
@ -885,7 +958,7 @@ static void ckpt_pagewriter_main_thread_loop(void)
}
/* Calculate next flush time before flush this batch dirty page */
now = (pg_time_t) time(NULL);
now = get_time_ms();
t_thrd.pagewriter_cxt.next_flush_time = now + u_sess->attr.attr_storage.pageWriterSleep;
/* pagewriter thread flush dirty page */
@ -1202,7 +1275,7 @@ static bool ckpt_found_valid_and_invalid_buffer_loc(
dirty_page_num = get_dirty_page_num();
if (dirty_page_num < g_instance.ckpt_cxt_ctl->dirty_page_queue_size * NEED_PRUNE_DIRTY_QUEUE_SLOT) {
if (dirty_page_num < g_instance.ckpt_cxt_ctl->dirty_page_queue_size * NEED_PRUNE_DIRTY_QUEUE_SLOT || FULL_CKPT) {
return false;
}
@ -1269,6 +1342,7 @@ static void ckpt_try_prune_dirty_page_queue()
* pages are moved to a new position after slot 100 due to this prune queue. than
* the redo point will be wrong, because some page not flush to disk.
*/
(void)LWLockAcquire(g_instance.ckpt_cxt_ctl->prune_queue_lock, LW_EXCLUSIVE);
if (last_invalid_slot > pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc)) {
pg_atomic_write_u64(&g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc, (last_invalid_slot + 1));
}
@ -1308,6 +1382,7 @@ static void ckpt_try_prune_dirty_page_queue()
last_invalid_slot--;
}
LWLockRelease(g_instance.ckpt_cxt_ctl->prune_queue_lock);
if (u_sess->attr.attr_storage.log_pagewriter) {
print_dirty_page_queue_info(true);
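The calculate_pagewriter_flush_num hunks above smooth the flush rate with a running average and, once the dirty-page percentage crosses a high-water mark, interpolate the per-cycle quota between min_io and max_io. The standalone sketch below illustrates that tiering; HIGH_WATER and the demo values are assumptions, not the engine's real settings.

// Standalone sketch of the tiered flush quota computed above; all constants and
// demo values here are assumptions, not the engine's real GUCs.
#include <algorithm>
#include <cstdint>
#include <cstdio>

static uint32_t tiered_flush_quota(double dirty_percent, uint32_t min_io, uint32_t max_io,
                                   uint32_t avg_flush_num, uint32_t num_for_lsn)
{
    const double HIGH_WATER = 0.75;  /* assumed high-water mark */
    uint32_t num_for_dirty;
    uint32_t num_for_lsn_max;

    if (dirty_percent < HIGH_WATER) {
        /* plenty of headroom: flush at the floor rate */
        num_for_dirty = min_io;
        num_for_lsn_max = max_io;
    } else if (dirty_percent <= 1.0) {
        /* interpolate between min_io and max_io as the dirty percentage approaches the limit */
        double k = (dirty_percent - HIGH_WATER) / (1.0 - HIGH_WATER);
        num_for_dirty = min_io + (uint32_t)(k * (max_io - min_io));
        num_for_lsn_max = max_io + (uint32_t)(k * max_io);
    } else {
        /* over the limit: flush as hard as allowed */
        num_for_dirty = max_io;
        num_for_lsn_max = max_io * 2;
    }
    num_for_lsn = std::min(num_for_lsn, num_for_lsn_max);
    /* final quota averages the smoothed history, the dirty-percent tier, and the LSN estimate */
    return (avg_flush_num + num_for_dirty + num_for_lsn) / 3;
}

int main()
{
    printf("quota at 60%% dirty: %u\n", tiered_flush_quota(0.60, 100, 1000, 300, 800));
    printf("quota at 90%% dirty: %u\n", tiered_flush_quota(0.90, 100, 1000, 300, 800));
    return 0;
}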


@ -430,7 +430,10 @@ static void pgarch_MainLoop(void)
last_time = TIME_GET_MILLISEC(last_copy_time);
}
gettimeofday(&curtime, NULL);
const long time_diff = (long)TIME_GET_MILLISEC(curtime) - t_thrd.arch.last_arch_time;
long time_diff = (long)TIME_GET_MILLISEC(curtime) - last_time;
if (time_diff < 0) {
time_diff = 0;
}
long timeout = wait_interval - time_diff;
if (timeout < 0 && IsServerModeStandby()) {
/* sleep 100ms for check next task */
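The archiver hunk above recomputes time_diff from the last copy time and clamps negative values before deriving the poll timeout. A minimal sketch of that clamp-then-derive pattern; the function and variable names are illustrative.

// Minimal sketch of the clamp-then-derive-timeout pattern from the hunk above.
#include <algorithm>
#include <cstdio>

static long poll_timeout_ms(long now_ms, long last_event_ms, long wait_interval_ms)
{
    long elapsed = now_ms - last_event_ms;
    if (elapsed < 0) {
        /* the stored timestamp is in the future (clock moved); treat it as "just happened" */
        elapsed = 0;
    }
    return std::max(0L, wait_interval_ms - elapsed);
}

int main()
{
    printf("%ld\n", poll_timeout_ms(10000, 9400, 1000));  /* 400 ms left to wait */
    printf("%ld\n", poll_timeout_ms(10000, 10500, 1000)); /* clamped: full 1000 ms */
    return 0;
}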


@ -9496,6 +9496,13 @@ static void check_and_reset_ha_listen_port(void)
signal_child(g_instance.pid_cxt.RemoteServicePID, SIGTERM);
ListenSocketRegulation();
if (t_thrd.postmaster_cxt.HaShmData != NULL &&
t_thrd.postmaster_cxt.HaShmData->repl_list_num == 0 &&
t_thrd.postmaster_cxt.HaShmData->current_mode == PRIMARY_MODE) {
t_thrd.postmaster_cxt.HaShmData->current_mode = NORMAL_MODE;
SetServerMode(NORMAL_MODE);
}
}
return;


@ -701,8 +701,7 @@ static void syslogger_erewrite(FILE* file, const char* buffer)
* and we cannot report it in the log, because there is no space left to write.
*/
if (errno == ENOSPC) {
pg_usleep(1000000);
continue;
break;
}
}
break;
@ -966,7 +965,7 @@ void write_syslogger_file(char* buffer, int count, int destination)
logfile = (destination == LOG_DESTINATION_CSVLOG) ? t_thrd.logger.csvlogFile : t_thrd.logger.syslogFile;
errno = 0;
retry1:
rc = fwrite(buffer, 1, count, logfile);
/* can't use ereport here because of possible recursion */
@ -976,8 +975,7 @@ retry1:
* and we cannot report it in the log, because there is no space left to write.
*/
if (errno == ENOSPC) {
pg_usleep(1000000);
goto retry1;
return;
}
char errorbuf[ERROR_BUF_SIZE] = {'\0'};
rc = sprintf_s(errorbuf, ERROR_BUF_SIZE, "ERROR: could not write to log file: %s\n", gs_strerror(errno));
@ -1653,8 +1651,7 @@ static void LogCtlFlushBuf(LogControlData* logctl)
* and we cannot report it in the log, because there is no space left to write.
*/
if (errno == ENOSPC) {
pg_usleep(1000000);
continue;
break;
}
/* disk IO error, print message and discard this logs */
@ -1785,7 +1782,9 @@ static void LogCtlCreateLogParentDirectory(void)
{
char* logdir = NULL;
/* create directory for profile log */
/* create directory for profile log.
* if errno == EEXIST, the directory may already exist; we don't care about that case.
*/
logdir = LogCtlGetLogDirectory(PROFILE_LOG_TAG, false);
if (0 == mkdir(logdir, S_IRWXU) || (EEXIST == errno)) {
/*
@ -1795,6 +1794,10 @@ static void LogCtlCreateLogParentDirectory(void)
* ignore its return value in this case.
*/
(void)chmod(logdir, S_IRWXU);
} else if (EEXIST != errno) {
ereport(FATAL,
(errmsg(
"could not create log directory \"%s\": %s\n", logdir, gs_strerror(errno))));
}
pfree(logdir);
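The hunk above treats EEXIST as success when creating the profile-log directory and turns any other mkdir failure into a fatal report. A self-contained sketch of the same pattern using plain POSIX calls, with the fatal report reduced to stderr plus exit for the demo:

// Sketch of the EEXIST-tolerant directory creation shown above (POSIX).
#include <cerrno>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sys/stat.h>

static void create_log_dir_or_die(const char* logdir)
{
    if (mkdir(logdir, S_IRWXU) == 0 || errno == EEXIST) {
        /* the directory exists or was just created; make sure the mode is what we want */
        (void)chmod(logdir, S_IRWXU);
    } else {
        fprintf(stderr, "could not create log directory \"%s\": %s\n", logdir, strerror(errno));
        exit(EXIT_FAILURE);
    }
}

int main()
{
    create_log_dir_or_die("/tmp/profile_log_demo");
    return 0;
}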


@ -8989,7 +8989,16 @@ int PostgresMain(int argc, char* argv[], const char* dbname, const char* usernam
#endif
exec_init_poolhandles();
/*
* Enable PBE optimization in batch mode, because when enable_pbe_optimization is false
* the batch may generate too many custom plans (cplan), which can consume a lot of memory
* and lead to 'memory alloc failed'. To avoid this, enable PBE optimization so the batch
* uses a generic plan (gplan).
*/
bool original = u_sess->attr.attr_sql.enable_pbe_optimization;
u_sess->attr.attr_sql.enable_pbe_optimization = true;
exec_batch_bind_execute(&input_message);
u_sess->attr.attr_sql.enable_pbe_optimization = original;
if (is_unique_sql_enabled() && is_local_unique_sql()) {
UpdateUniqueSQLStat(NULL, NULL, GetCurrentStatementLocalStartTimestamp());
}
@ -10290,7 +10299,6 @@ static void exec_batch_bind_execute(StringInfo input_message)
}
Assert(NULL != psrc);
SetUniqueSQLIdFromCachedPlanSource(psrc);
/* Check command type: only support IUD */
initStringInfo(&process_result);
@ -10339,6 +10347,7 @@ static void exec_batch_bind_execute(StringInfo input_message)
* we are already in one.
*/
start_xact_command();
SetUniqueSQLIdFromCachedPlanSource(psrc);
if (ENABLE_WORKLOAD_CONTROL && SqlIsValid(t_thrd.postgres_cxt.debug_query_string) &&
(IS_PGXC_COORDINATOR || IS_SINGLE_NODE) &&
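The PostgresMain hunk above saves the caller's enable_pbe_optimization value, forces it on for exec_batch_bind_execute, and restores it afterwards. As a general C++ idiom (not what the patch itself does), this save-and-restore can be wrapped in a small RAII guard so the old value is restored on every exit path; a hypothetical sketch:

// Hypothetical RAII guard for the save/override/restore pattern; the global flag
// below merely stands in for the session setting touched in the hunk above.
#include <cstdio>

template <typename T>
class ScopedOverride {
public:
    ScopedOverride(T& target, T newValue) : m_target(target), m_saved(target)
    {
        m_target = newValue;
    }
    ~ScopedOverride()
    {
        m_target = m_saved;  /* restored on every exit path of the enclosing scope */
    }
private:
    T& m_target;
    T m_saved;
};

bool enable_pbe_optimization = false;  /* stand-in for the session setting */

static void run_batch()
{
    printf("inside batch, pbe optimization = %d\n", enable_pbe_optimization);
}

int main()
{
    {
        ScopedOverride<bool> guard(enable_pbe_optimization, true);
        run_batch();  /* sees the forced value */
    }
    printf("after batch, pbe optimization = %d\n", enable_pbe_optimization);
    return 0;
}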


@ -1042,7 +1042,11 @@ static void knl_u_syscache_init(knl_u_syscache_context* syscache_cxt)
static void knl_u_pgxc_init(knl_u_pgxc_context* pgxc_cxt)
{
#ifdef ENABLE_MULTIPLE_NODES
pgxc_cxt->NumDataNodes = 0;
#else
pgxc_cxt->NumDataNodes = 1;
#endif /* ENABLE_MULTIPLE_NODES */
pgxc_cxt->NumCoords = 0;
pgxc_cxt->NumStandbyDataNodes = 0;
pgxc_cxt->datanode_count = 0;


@ -1297,6 +1297,7 @@ static void knl_t_walsender_init(knl_t_walsender_context* walsender_cxt)
walsender_cxt->reply_message = (StringInfoData*)palloc0(sizeof(StringInfoData));
walsender_cxt->tmpbuf = (StringInfoData*)palloc0(sizeof(StringInfoData));
walsender_cxt->remotePort = 0;
walsender_cxt->walSndCaughtUp = false;
}
static void knl_t_tsearch_init(knl_t_tsearch_context* tsearch_cxt)


@ -536,8 +536,8 @@ PlanState* ExecInitNode(Plan* node, EState* estate, int e_flags)
}
#else
if (u_sess->instr_cxt.global_instr != NULL && u_sess->instr_cxt.thread_instr && node->plan_node_id > 0 &&
u_sess->instr_cxt.global_instr->get_planIdOffsetArray()[node->plan_node_id - 1] ==
u_sess->instr_cxt.thread_instr->getSegmentId() - 1) {
(!StreamTopConsumerAmI() ||
u_sess->instr_cxt.global_instr->get_planIdOffsetArray()[node->plan_node_id - 1] == 0)) {
result->instrument = u_sess->instr_cxt.thread_instr->allocInstrSlot(
node->plan_node_id, node->parent_node_id, result->plan, estate);
} else {


@ -2338,7 +2338,8 @@ static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot sn
ClearCreateStmtUUIDS((CreateStmt *)stmt);
}
if (IsA(stmt, CreateRoleStmt) || IsA(stmt, AlterRoleStmt)) {
if (IsA(stmt, CreateRoleStmt) || IsA(stmt, AlterRoleStmt) ||
(IsA(stmt, VariableSetStmt) && ((VariableSetStmt *)stmt)->kind == VAR_SET_ROLEPWD)) {
stmt = (Node *)copyObject(stmt);
}
@ -2374,7 +2375,8 @@ static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, Snapshot sn
res = SPI_OK_UTILITY;
}
if (IsA(stmt, CreateRoleStmt) || IsA(stmt, AlterRoleStmt)) {
if (IsA(stmt, CreateRoleStmt) || IsA(stmt, AlterRoleStmt) ||
(IsA(stmt, VariableSetStmt) && ((VariableSetStmt *)stmt)->kind == VAR_SET_ROLEPWD)) {
pfree_ext(stmt);
}
}


@ -1075,7 +1075,7 @@ static XLogRecPtr dw_copy_page(ThrdDwCxt* thrd_dw_cxt, int buf_desc_id, bool* is
uint16 page_num;
uint32 buf_state;
errno_t rc;
*is_skipped = false;
*is_skipped = true;
buf_desc = GetBufferDescriptor(buf_desc_id);
buf_state = LockBufHdr(buf_desc);
@ -1100,9 +1100,9 @@ static XLogRecPtr dw_copy_page(ThrdDwCxt* thrd_dw_cxt, int buf_desc_id, bool* is
*/
if (!LWLockConditionalAcquire(buf_desc->content_lock, LW_SHARED)) {
UnpinBuffer(buf_desc, true);
*is_skipped = true;
return page_lsn;
}
*is_skipped = false;
thrd_dw_cxt->write_pos++;
if (thrd_dw_cxt->write_pos <= GET_DW_BATCH_DATA_PAGE_MAX(thrd_dw_cxt->contain_hashbucket)) {
batch = (dw_batch_t*)thrd_dw_cxt->dw_buf;
@ -1205,13 +1205,13 @@ static void dw_batch_flush(knl_g_dw_context* dw_cxt, XLogRecPtr latest_lsn, Thrd
dw_file_head_t* file_head = NULL;
errno_t rc;
(void)LWLockAcquire(dw_cxt->flush_lock, LW_EXCLUSIVE);
if (!XLogRecPtrIsInvalid(latest_lsn)) {
XLogFlush(latest_lsn);
g_instance.ckpt_cxt_ctl->page_writer_xlog_flush_loc = latest_lsn;
}
(void)LWLockAcquire(dw_cxt->flush_lock, LW_EXCLUSIVE);
if (thrd_dw_cxt->contain_hashbucket) {
dw_cxt->contain_hashbucket = true;
}
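The dw_batch_flush hunk above moves XLogFlush ahead of the flush_lock acquisition, so the slow WAL flush no longer runs inside the critical section. A generic sketch of that reordering, with std::mutex standing in for the LWLock and stub functions for the real work:

// Generic sketch of moving slow I/O out of the critical section, as in the hunk above.
#include <cstdio>
#include <mutex>

std::mutex flush_lock;

static void slow_wal_flush()          /* stand-in for XLogFlush() */
{
    printf("flushing WAL (slow, no lock held)\n");
}

static void write_batch_under_lock()  /* stand-in for writing the double-write batch */
{
    printf("writing double-write batch (lock held)\n");
}

static void dw_batch_flush_sketch(bool have_latest_lsn)
{
    if (have_latest_lsn) {
        slow_wal_flush();             /* done BEFORE taking the lock */
    }
    std::lock_guard<std::mutex> guard(flush_lock);
    write_batch_under_lock();         /* only the fast part is serialized */
}

int main()
{
    dw_batch_flush_sketch(true);
    return 0;
}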


@ -1823,13 +1823,6 @@ static TransactionId RecordTransactionCommit(void)
XLogFlush(t_thrd.xlog_cxt.XactLastRecEnd);
/*
* Wake up all walsenders to send WAL up to the COMMIT record
* immediately if replication is enabled
*/
if (g_instance.attr.attr_storage.max_wal_senders > 0)
WalSndWakeup();
/* Now we may update the CLOG, if we wrote a COMMIT record above */
if (markXidCommitted) {
t_thrd.pgxact->needToSyncXid |= SNAPSHOT_UPDATE_NEED_SYNC;


@ -142,6 +142,8 @@
#define STANDBY_SIGNAL_FILE "standby"
#define XLOG_SWITCH_HISTORY_FILE "switch.history"
#define MAX_PATH_LEN 1024
#define MAX(A, B) ((B) > (A) ? (B) : (A))
#define ENABLE_INCRE_CKPT g_instance.attr.attr_storage.enableIncrementalCheckpoint
#define RecoveryFromDummyStandby() (t_thrd.postmaster_cxt.ReplConnArray[2] != NULL && IS_DN_DUMMY_STANDYS_MODE())
@ -6706,7 +6708,7 @@ void BootStrapXLOG(void)
* segment with logid=0 logseg=1. The very first WAL segment, 0/0, is not
* used, so that we can use 0/0 to mean "before any valid WAL segment".
*/
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
u_sess->attr.attr_storage.fullPageWrites = false;
}
checkPoint.redo = XLogSegSize + SizeOfXLogLongPHD;
@ -8497,7 +8499,7 @@ void StartupXLOG(void)
t_thrd.xlog_cxt.RedoRecPtr = t_thrd.shemem_ptr_cxt.XLogCtl->RedoRecPtr =
t_thrd.shemem_ptr_cxt.XLogCtl->Insert.RedoRecPtr = checkPoint.redo;
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
t_thrd.xlog_cxt.doPageWrites = false;
} else {
t_thrd.xlog_cxt.doPageWrites = t_thrd.xlog_cxt.lastFullPageWrites;
@ -8771,7 +8773,7 @@ void StartupXLOG(void)
PublishStartupProcessInformation();
SetForwardFsyncRequests();
SendPostmasterSignal(PMSIGNAL_RECOVERY_STARTED);
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
t_thrd.xlog_cxt.pagewriter_launched = true;
} else {
t_thrd.xlog_cxt.bgwriterLaunched = true;
@ -9199,7 +9201,7 @@ void StartupXLOG(void)
* record before resource manager writes cleanup WAL records or checkpoint
* record is written.
*/
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
Insert->fullPageWrites = false;
} else {
Insert->fullPageWrites = t_thrd.xlog_cxt.lastFullPageWrites;
@ -9376,6 +9378,12 @@ void StartupXLOG(void)
xlogctl->SharedRecoveryInProgress = false;
xlogctl->IsRecoveryDone = true;
SpinLockRelease(&xlogctl->info_lck);
if (ENABLE_INCRE_CKPT) {
RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state;
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
state->start = state->end;
(void)LWLockRelease(state->recovery_queue_lock);
}
}
NextXidAfterReovery = t_thrd.xact_cxt.ShmemVariableCache->nextXid;
@ -9967,7 +9975,7 @@ void InitXLOGAccess(void)
(void)GetRedoRecPtr();
/* Also update our copy of doPageWrites. */
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
t_thrd.xlog_cxt.doPageWrites = false;
} else {
t_thrd.xlog_cxt.doPageWrites = (Insert->fullPageWrites || Insert->forcePageWrites);
@ -10014,11 +10022,10 @@ XLogRecPtr GetRedoRecPtr(void)
*/
void GetFullPageWriteInfo(XLogFPWInfo *fpwInfo_p)
{
bool incremental = g_instance.attr.attr_storage.enableIncrementalCheckpoint;
fpwInfo_p->redoRecPtr = t_thrd.xlog_cxt.RedoRecPtr;
fpwInfo_p->doPageWrites = t_thrd.xlog_cxt.doPageWrites && !incremental;
fpwInfo_p->doPageWrites = t_thrd.xlog_cxt.doPageWrites && !ENABLE_INCRE_CKPT;
fpwInfo_p->forcePageWrites = t_thrd.shemem_ptr_cxt.XLogCtl->FpwBeforeFirstCkpt && !IsInitdb && !incremental;
fpwInfo_p->forcePageWrites = t_thrd.shemem_ptr_cxt.XLogCtl->FpwBeforeFirstCkpt && !IsInitdb && !ENABLE_INCRE_CKPT;
}
/*
@ -10135,6 +10142,8 @@ void ShutdownXLOG(int code, Datum arg)
ckpt_shutdown_pagewriter();
free(g_instance.ckpt_cxt_ctl->dirty_page_queue);
g_instance.ckpt_cxt_ctl->dirty_page_queue = NULL;
g_instance.ckpt_cxt_ctl->prune_queue_lock = NULL;
g_instance.ckpt_cxt_ctl->ckpt_redo_state.recovery_queue_lock = NULL;
ShutdownCLOG();
ShutdownCSNLOG();
@ -10284,7 +10293,7 @@ void CreateCheckPoint(int flags)
int nvxids = 0;
errno_t errorno = EOK;
XLogRecPtr curMinRecLSN = InvalidXLogRecPtr;
bool doFullCheckpoint = !g_instance.attr.attr_storage.enableIncrementalCheckpoint;
bool doFullCheckpoint = !ENABLE_INCRE_CKPT;
TransactionId oldest_active_xid = InvalidTransactionId;
TransactionId globalXmin = InvalidTransactionId;
@ -10376,8 +10385,7 @@ void CreateCheckPoint(int flags)
curInsert = XLogBytePosToRecPtr(Insert->CurrBytePos);
if ((g_instance.attr.attr_storage.enableIncrementalCheckpoint && (flags & CHECKPOINT_CAUSE_TIME)) ||
doFullCheckpoint) {
if ((ENABLE_INCRE_CKPT && (flags & CHECKPOINT_CAUSE_TIME)) || doFullCheckpoint) {
update_dirty_page_queue_rec_lsn(curInsert, true);
}
@ -10412,7 +10420,7 @@ void CreateCheckPoint(int flags)
gstrace_exit(GS_TRC_ID_CreateCheckPoint);
return;
}
} else if (g_instance.attr.attr_storage.enableIncrementalCheckpoint && doFullCheckpoint) {
} else if (ENABLE_INCRE_CKPT && doFullCheckpoint) {
/*
* The enableIncrementalCheckpoint GUC is on, but under some conditions we should do a
* full checkpoint.
@ -10775,9 +10783,9 @@ void CreateCheckPoint(int flags)
}
}
if (doFullCheckpoint && g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (doFullCheckpoint && ENABLE_INCRE_CKPT) {
XLogRecPtr MinRecLSN = ckpt_get_min_rec_lsn();
if (!XLogRecPtrIsInvalid(curMinRecLSN) && XLByteLT(MinRecLSN, t_thrd.xlog_cxt.RedoRecPtr)) {
if (!XLogRecPtrIsInvalid(MinRecLSN) && XLByteLT(MinRecLSN, t_thrd.xlog_cxt.RedoRecPtr)) {
ereport(PANIC, (errmsg("current dirty page list head recLSN %08X/%08X smaller than redo lsn %08X/%08X",
(uint32)(MinRecLSN >> XLOG_LSN_SWAP), (uint32)MinRecLSN,
(uint32)(t_thrd.xlog_cxt.RedoRecPtr >> XLOG_LSN_SWAP),
@ -10907,6 +10915,26 @@ static void CheckPointGuts(XLogRecPtr checkPointRedo, int flags, bool doFullChec
gstrace_exit(GS_TRC_ID_CheckPointGuts);
}
void PushRestartPointToQueue(XLogRecPtr recordReadRecPtr, const CheckPoint checkPoint)
{
RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state;
uint loc = 0;
if (!ENABLE_INCRE_CKPT) {
return;
}
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
if (state->end - state->start + 1 >= RESTART_POINT_QUEUE_LEN) {
state->start++;
}
loc = state->end % RESTART_POINT_QUEUE_LEN;
state->ckpt_rec_queue[loc].CkptLSN = recordReadRecPtr;
state->ckpt_rec_queue[loc].checkpoint = checkPoint;
state->end++;
LWLockRelease(state->recovery_queue_lock);
}
/*
* Save a checkpoint for recovery restart if appropriate
*
@ -10929,21 +10957,24 @@ static void RecoveryRestartPoint(const CheckPoint checkPoint, XLogRecPtr recordR
if (IsExtremeRedo()) {
XLogRecPtr safeCheckPoint = extreme_rto::GetSafeMinCheckPoint();
if (XLByteEQ(safeCheckPoint, MAX_XLOG_REC_PTR) || XLByteLT(safeCheckPoint, recordReadRecPtr)) {
ereport(WARNING, (errmsg("RecoveryRestartPoint is false at %X/%X,last safe point is %X/%X",
ereport(WARNING, (errmsg("RecoveryRestartPoint is false at %X/%X,last safe point is %X/%X",
(uint32)(recordReadRecPtr >> 32), (uint32)(recordReadRecPtr), (uint32)(safeCheckPoint >> 32),
(uint32)(safeCheckPoint))));
return;
}
} else if (!parallel_recovery::IsRecoveryRestartPointSafeForWorkers(recordReadRecPtr)) {
ereport(WARNING, (errmsg("RecoveryRestartPointSafe is false at %X/%X",
ereport(WARNING, (errmsg("RecoveryRestartPointSafe is false at %X/%X",
static_cast<uint32>(recordReadRecPtr >> shitRightLength), static_cast<uint32>(recordReadRecPtr))));
return;
}
update_dirty_page_queue_rec_lsn(recordReadRecPtr, true);
pg_write_barrier();
PushRestartPointToQueue(recordReadRecPtr, checkPoint);
/*
* Copy the checkpoint record to shared memory, so that checkpointer can
* work out the next time it wants to perform a restartpoint.
*/
SpinLockAcquire(&xlogctl->info_lck);
xlogctl->lastCheckPointRecPtr = recordReadRecPtr;
const_cast<CheckPoint &>(xlogctl->lastCheckPoint) = const_cast<CheckPoint &>(checkPoint);
@ -10993,8 +11024,7 @@ bool IsRestartPointSafe(const XLogRecPtr checkPoint)
void wait_all_dirty_page_flush(int flags, XLogRecPtr redo)
{
/* need wait all dirty page finish flush */
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
update_dirty_page_queue_rec_lsn(redo, true);
if (ENABLE_INCRE_CKPT) {
g_instance.ckpt_cxt_ctl->full_ckpt_redo_ptr = redo;
g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc = get_dirty_page_queue_tail();
pg_write_barrier();
@ -11006,6 +11036,119 @@ void wait_all_dirty_page_flush(int flags, XLogRecPtr redo)
}
return;
}
bool RecoveryQueueIsEmpty()
{
RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state;
int num;
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
num = state->end - state->start;
(void)LWLockRelease(state->recovery_queue_lock);
if (num == 0) {
return true;
} else {
return false;
}
}
/*
* In the recovery phase, don't push the redo point to the latest position, to avoid an I/O peak.
* Calculate the redo point based on the page flushing speed and max_redo_log_size.
*/
const int BYTE_PER_KB = 1024;
XLogRecPtr GetRestartPointInRecovery(CheckPoint *restartCheckPoint)
{
volatile XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl;
XLogRecPtr restartRecPtr = InvalidXLogRecPtr;
XLogRecPtr curMinRecLSN = ckpt_get_min_rec_lsn();
RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state;
if (XLogRecPtrIsInvalid(curMinRecLSN)) {
/* The dirty page queue is empty, so the redo point can be updated to the latest position. */
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
state->start = state->end > 0 ? state->end - 1 : state->end;
(void)LWLockRelease(state->recovery_queue_lock);
SpinLockAcquire(&xlogctl->info_lck);
restartRecPtr = xlogctl->lastCheckPointRecPtr;
*restartCheckPoint = const_cast<CheckPoint &>(xlogctl->lastCheckPoint);
SpinLockRelease(&xlogctl->info_lck);
Assert(XLByteLE(restartRecPtr, get_dirty_page_queue_rec_lsn()));
} else {
int num = 0;
int loc = 0;
int i = 0;
XLogRecPtr replayLastLSN;
XLogRecPtr targetLSN;
if (RecoveryQueueIsEmpty()) {
return restartRecPtr;
}
SpinLockAcquire(&xlogctl->info_lck);
replayLastLSN = xlogctl->lastCheckPointRecPtr;
SpinLockRelease(&xlogctl->info_lck);
targetLSN = replayLastLSN - u_sess->attr.attr_storage.max_redo_log_size * BYTE_PER_KB;
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
num = state->end - state->start;
/*
* If the pagewriter flushes dirty pages to disk quickly, push the redo point to
* the position closest to curMinRecLSN.
*/
if (XLByteLE(targetLSN, curMinRecLSN)) {
for (i = 0; i < num; i++) {
loc = (state->start + i) % RESTART_POINT_QUEUE_LEN;
if (state->ckpt_rec_queue[loc].checkpoint.redo > curMinRecLSN) {
if (i > 0) {
loc = (state->start + i - 1) % RESTART_POINT_QUEUE_LEN;
state->start = state->start + i - 1;
}
restartRecPtr = state->ckpt_rec_queue[loc].CkptLSN;
*restartCheckPoint = state->ckpt_rec_queue[loc].checkpoint;
break;
}
}
} else {
/* In other cases, push the checkpoint loc to the position closest to targetLSN. */
for (i = 0; i < num; i++) {
loc = (state->start + i) % RESTART_POINT_QUEUE_LEN;
if (state->ckpt_rec_queue[loc].CkptLSN > targetLSN) {
if (i > 0) {
uint64 gap = state->ckpt_rec_queue[loc].CkptLSN - targetLSN;
int prevLoc = (state->start + i - 1) % RESTART_POINT_QUEUE_LEN;
if (targetLSN - state->ckpt_rec_queue[prevLoc].CkptLSN < gap) {
restartRecPtr = state->ckpt_rec_queue[prevLoc].CkptLSN;
*restartCheckPoint = state->ckpt_rec_queue[prevLoc].checkpoint;
state->start = state->start + i - 1;
} else {
restartRecPtr = state->ckpt_rec_queue[loc].CkptLSN;
*restartCheckPoint = state->ckpt_rec_queue[loc].checkpoint;
state->start = state->start + i;
}
} else {
restartRecPtr = state->ckpt_rec_queue[loc].CkptLSN;
*restartCheckPoint = state->ckpt_rec_queue[loc].checkpoint;
}
break;
}
}
}
if (XLogRecPtrIsInvalid(restartRecPtr) && num > 0) {
loc = (state->end - 1) % RESTART_POINT_QUEUE_LEN;
restartRecPtr = state->ckpt_rec_queue[loc].CkptLSN;
*restartCheckPoint = state->ckpt_rec_queue[loc].checkpoint;
state->start = state->end - 1;
}
(void)LWLockRelease(state->recovery_queue_lock);
}
return restartRecPtr;
}
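PushRestartPointToQueue and GetRestartPointInRecovery above keep a bounded ring of replayed checkpoint records and pick a restart point that stays within roughly max_redo_log_size of the replay position. The sketch below models only the ring and a simplified "newest entry at or below a target LSN" selection; the queue length and names are illustrative assumptions.

// Standalone model of the restart-point ring: checkpoints are pushed as they are
// replayed, and a restart point is picked no further ahead than a target LSN.
#include <cstdint>
#include <cstdio>

const int QUEUE_LEN = 8;  /* illustrative; the real queue length differs */

struct CkptItem {
    uint64_t ckptLsn;     /* LSN at which the checkpoint record was read */
};

struct CkptRing {
    CkptItem items[QUEUE_LEN];
    uint64_t start = 0;
    uint64_t end = 0;

    void push(uint64_t lsn)
    {
        if (end - start + 1 >= QUEUE_LEN) {
            start++;  /* ring is full: drop the oldest entry */
        }
        items[end % QUEUE_LEN] = CkptItem{lsn};
        end++;
    }

    /* newest entry with ckptLsn <= target, or 0 if none qualifies */
    uint64_t pick(uint64_t target) const
    {
        uint64_t best = 0;
        for (uint64_t i = start; i < end; i++) {
            uint64_t lsn = items[i % QUEUE_LEN].ckptLsn;
            if (lsn <= target && lsn > best) {
                best = lsn;
            }
        }
        return best;
    }
};

int main()
{
    CkptRing ring;
    for (uint64_t lsn = 100; lsn <= 1000; lsn += 100) {
        ring.push(lsn);
    }
    /* pretend replay is at LSN 1000 and only 250 bytes of un-checkpointed redo are allowed */
    printf("restart point: %llu\n", (unsigned long long)ring.pick(1000 - 250));
    return 0;
}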
/*
* Establish a restartpoint if possible.
*
@ -11025,6 +11168,7 @@ bool CreateRestartPoint(int flags)
TimestampTz xtime;
errno_t errorno = EOK;
bool recoveryInProgress = true;
bool doFullCkpt = !ENABLE_INCRE_CKPT;
/* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = t_thrd.shemem_ptr_cxt.XLogCtl;
@ -11036,12 +11180,6 @@ bool CreateRestartPoint(int flags)
gstrace_entry(GS_TRC_ID_CreateRestartPoint);
LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);
/* Get a local copy of the last safe checkpoint record. */
SpinLockAcquire(&xlogctl->info_lck);
lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
lastCheckPoint = const_cast<CheckPoint &>(xlogctl->lastCheckPoint);
SpinLockRelease(&xlogctl->info_lck);
recoveryInProgress = RecoveryInProgress();
/*
* Check that we're still in recovery mode. It's ok if we exit recovery
@ -11055,6 +11193,23 @@ bool CreateRestartPoint(int flags)
return false;
}
if (doFullCkpt ||
((unsigned int)flags & (CHECKPOINT_IS_SHUTDOWN | CHECKPOINT_END_OF_RECOVERY | CHECKPOINT_FORCE))) {
doFullCkpt = true;
if (ENABLE_INCRE_CKPT) {
RecoveryQueueState *state = &g_instance.ckpt_cxt_ctl->ckpt_redo_state;
(void)LWLockAcquire(state->recovery_queue_lock, LW_EXCLUSIVE);
state->start = state->end > 0 ? state->end - 1 : state->end;
(void)LWLockRelease(state->recovery_queue_lock);
}
SpinLockAcquire(&xlogctl->info_lck);
lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
lastCheckPoint = const_cast<CheckPoint &>(xlogctl->lastCheckPoint);
SpinLockRelease(&xlogctl->info_lck);
} else {
lastCheckPointRecPtr = GetRestartPointInRecovery(&lastCheckPoint);
}
/*
* If the last checkpoint record we've replayed is already our last
* restartpoint, we can't perform a new restart point. We still update
@ -11138,8 +11293,7 @@ bool CreateRestartPoint(int flags)
LogCheckpointStart((unsigned int)flags, true);
}
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
update_dirty_page_queue_rec_lsn(lastCheckPoint.redo, true);
if (ENABLE_INCRE_CKPT && doFullCkpt) {
g_instance.ckpt_cxt_ctl->full_ckpt_redo_ptr = lastCheckPoint.redo;
g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc = get_dirty_page_queue_tail();
pg_write_barrier();
@ -11147,7 +11301,23 @@ bool CreateRestartPoint(int flags)
g_instance.ckpt_cxt_ctl->flush_all_dirty_page = true;
}
ereport(LOG, (errmsg("CreateRestartPoint, need flush %ld pages.", get_dirty_page_num())));
} else if (ENABLE_INCRE_CKPT) {
g_instance.ckpt_cxt_ctl->full_ckpt_redo_ptr = lastCheckPoint.redo;
(void)LWLockAcquire(g_instance.ckpt_cxt_ctl->prune_queue_lock, LW_EXCLUSIVE);
g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc = get_loc_for_lsn(lastCheckPoint.redo);
LWLockRelease(g_instance.ckpt_cxt_ctl->prune_queue_lock);
pg_write_barrier();
uint64 head = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);
int64 need_flush_num = g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc > head ?
g_instance.ckpt_cxt_ctl->full_ckpt_expected_flush_loc - head : 0;
if (need_flush_num > 0) {
g_instance.ckpt_cxt_ctl->flush_all_dirty_page = true;
}
ereport(LOG, (errmsg("CreateRestartPoint, need flush %ld pages", need_flush_num)));
}
CheckPointGuts(lastCheckPoint.redo, flags, true);
#ifdef ENABLE_MOT
@ -11159,6 +11329,15 @@ bool CreateRestartPoint(int flags)
* prior checkpoint's earliest info.
*/
XLByteToSeg(t_thrd.shemem_ptr_cxt.ControlFile->checkPointCopy.redo, _logSegNo);
if (ENABLE_INCRE_CKPT) {
XLogRecPtr MinRecLSN = ckpt_get_min_rec_lsn();
if (!XLogRecPtrIsInvalid(MinRecLSN) && XLByteLT(MinRecLSN, lastCheckPoint.redo)) {
ereport(PANIC, (errmsg("current dirty page list head recLSN %08X/%08X smaller than redo lsn %08X/%08X",
(uint32)(MinRecLSN >> XLOG_LSN_SWAP), (uint32)MinRecLSN,
(uint32)(lastCheckPoint.redo >> XLOG_LSN_SWAP),
(uint32)lastCheckPoint.redo)));
}
}
/*
* Update pg_control, using current time. Check that it still shows
@ -11250,7 +11429,7 @@ bool CreateRestartPoint(int flags)
* Reduce the frequency of truncating the CSN log to lower the probability of lock contention.
* Incremental checkpoint does not require frequent truncation of the csnlog.
*/
if (!g_instance.attr.attr_storage.enableIncrementalCheckpoint ||
if (!ENABLE_INCRE_CKPT ||
elapsed_secs >= u_sess->attr.attr_storage.fullCheckPointTimeout) {
TransactionId globalXmin = InvalidTransactionId;
(void)GetOldestActiveTransactionId(&globalXmin);
@ -11544,7 +11723,7 @@ void UpdateFullPageWrites(void)
* because we assume that there is no concurrently running process which
* can update it.
*/
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
u_sess->attr.attr_storage.fullPageWrites = false;
}
if (u_sess->attr.attr_storage.fullPageWrites == Insert->fullPageWrites) {
@ -12385,7 +12564,7 @@ char** tblspcmapfile, List** tablespaces, bool infotbssize, bool needtblspcmapfi
} else {
t_thrd.shemem_ptr_cxt.XLogCtl->Insert.nonExclusiveBackups++;
}
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (ENABLE_INCRE_CKPT) {
t_thrd.shemem_ptr_cxt.XLogCtl->Insert.forcePageWrites = false;
} else {
t_thrd.shemem_ptr_cxt.XLogCtl->Insert.forcePageWrites = true;
@ -14079,6 +14258,7 @@ void SetXLogReplayRecPtr(XLogRecPtr readRecPtr, XLogRecPtr endRecPtr)
if (isUpdated && !IsExtremeRedo()) {
RedoSpeedDiag(readRecPtr, endRecPtr);
}
update_dirty_page_queue_rec_lsn(readRecPtr);
}
void DumpXlogCtl()
@ -16595,7 +16775,7 @@ bool IsRoachRestore(void)
strncmp(t_thrd.xlog_cxt.recoveryTargetBarrierId, ROACH_BACKUP_PREFIX, strlen(ROACH_BACKUP_PREFIX)) == 0);
}
const int UPDATE_REC_XLOG_NUM = 10;
const uint UPDATE_REC_XLOG_NUM = 4;
#if defined(__x86_64__) || defined(__aarch64__)
bool atomic_update_dirty_page_queue_rec_lsn(XLogRecPtr current_insert_lsn, bool need_immediately_update)
{
@ -16640,7 +16820,7 @@ void update_dirty_page_queue_rec_lsn(XLogRecPtr current_insert_lsn, bool need_im
bool is_update = false;
uint32 freespace;
if (!g_instance.attr.attr_storage.enableIncrementalCheckpoint) {
if (!ENABLE_INCRE_CKPT) {
return;
}
@ -16698,21 +16878,20 @@ uint64 get_dirty_page_queue_rec_lsn()
XLogRecPtr ckpt_get_min_rec_lsn(void)
{
uint64 queue_loc;
XLogRecPtr dirty_queue_min_lsn = InvalidXLogRecPtr;
uint64 dirty_page_queue_tail;
XLogRecPtr min_rec_lsn = InvalidXLogRecPtr;
/*
* If the head recLSN is invalid, advance the head and take the next buffer's recLSN;
* if the head reaches the tail, return InvalidXLogRecPtr.
*/
queue_loc = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);
dirty_page_queue_tail = get_dirty_page_queue_tail();
if (dirty_page_queue_tail - queue_loc == 0) {
if (get_dirty_page_num() == 0) {
return InvalidXLogRecPtr;
}
while (XLogRecPtrIsInvalid(dirty_queue_min_lsn) && (queue_loc < get_dirty_page_queue_tail())) {
queue_loc = pg_atomic_read_u64(&g_instance.ckpt_cxt_ctl->dirty_page_queue_head);
while (XLogRecPtrIsInvalid(min_rec_lsn) && (queue_loc < get_dirty_page_queue_tail())) {
Buffer buffer;
BufferDesc *buf_desc = NULL;
XLogRecPtr page_rec_lsn = InvalidXLogRecPtr;
uint64 temp_loc = queue_loc % g_instance.ckpt_cxt_ctl->dirty_page_queue_size;
volatile DirtyPageQueueSlot *slot = &g_instance.ckpt_cxt_ctl->dirty_page_queue[temp_loc];
@ -16731,9 +16910,12 @@ XLogRecPtr ckpt_get_min_rec_lsn(void)
continue;
}
buf_desc = GetBufferDescriptor(buffer - 1);
dirty_queue_min_lsn = pg_atomic_read_u64(&buf_desc->rec_lsn);
page_rec_lsn = pg_atomic_read_u64(&buf_desc->rec_lsn);
if (!BufferIsInvalid(slot->buffer)) {
min_rec_lsn = page_rec_lsn;
}
}
return dirty_queue_min_lsn;
return min_rec_lsn;
}
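The rewritten ckpt_get_min_rec_lsn above only trusts a buffer's rec_lsn if the queue slot still holds a valid buffer after the read. A small sketch of that read-then-revalidate pattern with std::atomic; the slot layout is a simplified assumption, not the engine's real structure:

// Sketch of the read-then-revalidate pattern used when scanning queue slots.
#include <atomic>
#include <cstdint>
#include <cstdio>

struct Slot {
    std::atomic<int> buffer{0};        /* 0 means the slot is invalid / already removed */
    std::atomic<uint64_t> rec_lsn{0};
};

static uint64_t first_valid_rec_lsn(Slot* slots, int n)
{
    for (int i = 0; i < n; i++) {
        if (slots[i].buffer.load() == 0) {
            continue;                  /* slot already emptied, skip it */
        }
        uint64_t lsn = slots[i].rec_lsn.load();
        /* the slot may have been invalidated while we read the LSN; only trust the
           value if the slot still holds a buffer afterwards */
        if (slots[i].buffer.load() != 0) {
            return lsn;
        }
    }
    return 0;                          /* queue effectively empty */
}

int main()
{
    Slot slots[3];
    slots[1].buffer = 42;
    slots[1].rec_lsn = 12345;
    printf("min rec lsn: %llu\n", (unsigned long long)first_valid_rec_lsn(slots, 3));
    return 0;
}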
void WaitCheckpointSync(void)


@ -198,5 +198,11 @@ Size BufferShmemSize(void)
/* size of checkpoint sort array in bufmgr.c */
size = add_size(size, mul_size(g_instance.attr.attr_storage.NBuffers, sizeof(CkptSortItem)));
/* size of candidate buffers */
size = add_size(size, mul_size(g_instance.attr.attr_storage.NBuffers, sizeof(Buffer)));
/* size of candidate free map */
size = add_size(size, mul_size(g_instance.attr.attr_storage.NBuffers, sizeof(bool)));
return size;
}
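BufferShmemSize above grows the shared-memory request with add_size and mul_size, the overflow-checked size helpers inherited from PostgreSQL. A minimal sketch of what such helpers guard against; the buffer count is a demo value and the error handling is reduced to abort:

// Minimal sketch of overflow-checked size accumulation in the spirit of add_size/mul_size.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <limits>

static size_t add_size_checked(size_t a, size_t b)
{
    if (a > std::numeric_limits<size_t>::max() - b) {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        abort();
    }
    return a + b;
}

static size_t mul_size_checked(size_t a, size_t b)
{
    if (b != 0 && a > std::numeric_limits<size_t>::max() / b) {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        abort();
    }
    return a * b;
}

int main()
{
    size_t nbuffers = 16384;  /* illustrative buffer count */
    size_t size = 0;
    size = add_size_checked(size, mul_size_checked(nbuffers, sizeof(int)));   /* candidate buffers */
    size = add_size_checked(size, mul_size_checked(nbuffers, sizeof(bool)));  /* candidate free map */
    printf("extra shmem: %zu bytes\n", size);
    return 0;
}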


@ -84,6 +84,8 @@ const int ONE_MILLISECOND = 1;
const int TEN_MICROSECOND = 10;
const int MILLISECOND_TO_MICROSECOND = 1000;
const float PAGE_QUEUE_SLOT_USED_MAX_PERCENTAGE = 0.8;
const long CHKPT_LOG_TIME_INTERVAL = 1000000 * 60; /* 60000000us -> 1min */
const double CHKPT_LOG_PERCENT_INTERVAL = 0.1;
/*
* Status of buffers to checkpoint for a particular tablespace, used
@ -3019,6 +3021,7 @@ static void BufferSync(int flags)
int num_spaces;
int num_processed;
int num_written;
double bufferFlushPercent = CHKPT_LOG_PERCENT_INTERVAL;
CkptTsStatus *per_ts_stat = NULL;
Oid last_tsid;
binaryheap *ts_heap = NULL;
@ -3229,10 +3232,17 @@ static void BufferSync(int flags)
binaryheap_replace_first(ts_heap, PointerGetDatum(ts_stat));
}
double progress = (double)num_processed / num_to_scan;
/*
* Sleep to throttle our I/O rate.
*/
CheckpointWriteDelay(flags, (double)num_processed / num_to_scan);
CheckpointWriteDelay(flags, progress);
if (((uint32)flags & CHECKPOINT_IS_SHUTDOWN) && progress >= bufferFlushPercent && !IsInitdb) {
/* print a warning and advance the threshold when the flushed percentage exceeds it */
ereport(WARNING, (errmsg("full checkpoint mode, shutting down, wait for dirty page flush, remain num:%d",
num_to_scan - num_processed)));
bufferFlushPercent += CHKPT_LOG_PERCENT_INTERVAL;
}
}
/* issue all pending flushes */
@ -3875,6 +3885,7 @@ void CheckPointBuffers(int flags, bool doFullCheckpoint)
pg_atomic_read_u32(&g_instance.ckpt_cxt_ctl->current_page_writer_count) < 1) {
BufferSync(flags);
} else if (g_instance.attr.attr_storage.enableIncrementalCheckpoint && doFullCheckpoint) {
long waitCount = 0;
/*
* If enable_incremental_checkpoint is on but doFullCheckpoint is true (full checkpoint),
* the checkpoint thread does not need to flush dirty pages itself, but must wait for the pagewriter threads to flush the given
@ -3887,11 +3898,26 @@ void CheckPointBuffers(int flags, bool doFullCheckpoint)
break;
} else {
/* sleep 1 ms wait the dirty page flush */
pg_usleep(ONE_MILLISECOND * MILLISECOND_TO_MICROSECOND);
long sleepTime = ONE_MILLISECOND * MILLISECOND_TO_MICROSECOND;
pg_usleep(sleepTime);
/* do smgrsync in case dw file recycle of pagewriter is being blocked */
if (dw_enabled()) {
smgrsync_with_absorption();
}
if (((uint32)flags & CHECKPOINT_IS_SHUTDOWN) && !IsInitdb) {
/*
* Since we use the sleep time as a counter, there will be some error in calculating
* the interval, but it doesn't matter because we don't need a precise counter.
*/
waitCount += sleepTime;
if (waitCount >= CHKPT_LOG_TIME_INTERVAL) {
/* print a warning and reset the counter when the waiting time exceeds the threshold */
ereport(WARNING, (errmsg("incremental checkpoint mode, shutting down, "
"wait for dirty page flush, remain num:%u",
g_instance.ckpt_cxt_ctl->actual_dirty_page_num)));
waitCount = 0;
}
}
}
}
}
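The two hunks above add throttled shutdown-progress warnings: one fires each time the flushed percentage crosses another step, the other roughly once per minute of accumulated sleep. A compact sketch of the percentage-step throttle; the step constant and page counts are demo values:

// Sketch of the percentage-step progress logging added above.
#include <cstdio>

const double LOG_PERCENT_STEP = 0.1;  /* log once per additional 10% of progress */

int main()
{
    int num_to_scan = 1000;
    double next_log_at = LOG_PERCENT_STEP;

    for (int num_processed = 1; num_processed <= num_to_scan; num_processed++) {
        double progress = (double)num_processed / num_to_scan;
        if (progress >= next_log_at) {
            printf("shutdown checkpoint: %d dirty pages remaining\n", num_to_scan - num_processed);
            next_log_at += LOG_PERCENT_STEP;  /* throttle: at most one message per step */
        }
    }
    return 0;
}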


@ -22,6 +22,7 @@
#include "storage/buf/bufmgr.h"
#include "storage/proc.h"
#include "postmaster/aiocompleter.h" /* this is for the function AioCompltrIsReady() */
#include "postmaster/bgwriter.h"
#include "postmaster/pagewriter.h"
#include "postmaster/postmaster.h"
#include "access/double_write.h"
@ -467,7 +468,8 @@ BufferAccessStrategy GetAccessStrategy(BufferAccessStrategyType btype)
ring_size = (u_sess->attr.attr_storage.bulk_write_ring_size / BLCKSZ) * 1024;
break;
case BAS_VACUUM:
ring_size = 256 * 1024 / BLCKSZ;
ring_size = g_instance.attr.attr_storage.NBuffers / 32 /
Max(g_instance.attr.attr_storage.autovacuum_max_workers, 1);
break;
default:
@ -508,6 +510,8 @@ void FreeAccessStrategy(BufferAccessStrategy strategy)
}
}
const int MAX_RETRY_RING_TIMES = 100;
const float MAX_RETRY_RING_PCT = 0.1;
/*
* GetBufferFromRing -- returns a buffer from the ring, or NULL if the
* ring is empty.
@ -519,10 +523,13 @@ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_
BufferDesc *buf = NULL;
Buffer buf_num;
uint32 local_buf_state; /* to avoid repeated (de-)referencing */
uint16 retry_times = 0;
RETRY:
/* Advance to next ring slot */
if (++strategy->current >= strategy->ring_size)
strategy->current = 0;
retry_times++;
ADIO_RUN()
{
@ -575,6 +582,15 @@ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_
* shouldn't re-use it.
*/
buf = GetBufferDescriptor(buf_num - 1);
if ((pg_atomic_read_u32(&buf->state) & BM_DIRTY) &&
retry_times < Min(MAX_RETRY_RING_TIMES, strategy->ring_size * MAX_RETRY_RING_PCT)) {
goto RETRY;
} else if (get_curr_candidate_nums() >= (uint32)g_instance.attr.attr_storage.NBuffers *
u_sess->attr.attr_storage.candidate_buf_percent_target) {
strategy->current_was_in_ring = false;
return NULL;
}
local_buf_state = LockBufHdr(buf);
if (BUF_STATE_GET_REFCOUNT(local_buf_state) == 0 && BUF_STATE_GET_USAGECOUNT(local_buf_state) <= 1 &&
(backend_can_flush_dirty_page() || !(local_buf_state & BM_DIRTY))) {
@ -582,6 +598,7 @@ static BufferDesc *GetBufferFromRing(BufferAccessStrategy strategy, uint32 *buf_
*buf_state = local_buf_state;
return buf;
}
UnlockBufHdr(buf, local_buf_state);
/*
* Tell caller to allocate a new buffer with the normal allocation

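The GetBufferFromRing hunks above let a strategy ring skip dirty buffers for a bounded number of retries (at most a tenth of the ring, capped at MAX_RETRY_RING_TIMES) before falling back to normal allocation. A standalone model of that bounded retry; the ring contents and constants are illustrative:

// Standalone model of the bounded retry over a buffer ring shown above.
#include <algorithm>
#include <cstdio>
#include <vector>

struct RingBuf {
    bool dirty;
};

/* returns the index of a clean buffer, or -1 to tell the caller to fall back to
   the normal allocation path */
static int get_buffer_from_ring(std::vector<RingBuf>& ring, int& current)
{
    const int MAX_RETRY_TIMES = 100;
    const double MAX_RETRY_PCT = 0.1;
    int max_retries = std::min(MAX_RETRY_TIMES, (int)(ring.size() * MAX_RETRY_PCT));
    int retries = 0;

    while (true) {
        current = (current + 1) % (int)ring.size();  /* advance to the next ring slot */
        retries++;
        if (!ring[(size_t)current].dirty) {
            return current;                          /* clean buffer: reuse it */
        }
        if (retries >= max_retries) {
            return -1;                               /* too many dirty slots: give up */
        }
    }
}

int main()
{
    std::vector<RingBuf> ring(64, RingBuf{true});
    ring[3].dirty = false;
    int current = 0;
    printf("picked slot %d\n", get_buffer_from_ring(ring, current));
    return 0;
}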

@ -363,6 +363,10 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
LsnXlogFlushChkShmInit();
if (g_instance.ckpt_cxt_ctl->prune_queue_lock == NULL) {
g_instance.ckpt_cxt_ctl->prune_queue_lock = LWLockAssign(LWTRANCHE_PRUNE_DIRTY_QUEUE);
}
if (g_instance.pid_cxt.PageWriterPID == NULL) {
MemoryContext oldcontext = MemoryContextSwitchTo(g_instance.increCheckPoint_context);
g_instance.pid_cxt.PageWriterPID =
@ -383,6 +387,21 @@ void CreateSharedMemoryAndSemaphores(bool makePrivate, int port)
incre_ckpt_bgwriter_cxt_init();
}
if (g_instance.attr.attr_storage.enableIncrementalCheckpoint &&
g_instance.ckpt_cxt_ctl->ckpt_redo_state.ckpt_rec_queue == NULL) {
MemoryContext oldcontext = MemoryContextSwitchTo(g_instance.increCheckPoint_context);
g_instance.ckpt_cxt_ctl->ckpt_redo_state.recovery_queue_lock = LWLockAssign(LWTRANCHE_REDO_POINT_QUEUE);
g_instance.ckpt_cxt_ctl->ckpt_redo_state.ckpt_rec_queue =
(CheckPointItem*)palloc0(sizeof(CheckPointItem) * RESTART_POINT_QUEUE_LEN);
g_instance.ckpt_cxt_ctl->ckpt_redo_state.start = 0;
g_instance.ckpt_cxt_ctl->ckpt_redo_state.end = 0;
(void)MemoryContextSwitchTo(oldcontext);
}
if (g_instance.ckpt_cxt_ctl->ckpt_redo_state.recovery_queue_lock == NULL) {
g_instance.ckpt_cxt_ctl->ckpt_redo_state.recovery_queue_lock = LWLockAssign(LWTRANCHE_REDO_POINT_QUEUE);
}
/*
* Now give loadable modules a chance to set up their shmem allocations
*/


@ -157,6 +157,8 @@ static const char *BuiltinTrancheNames[] = {
"DoubleWriteLock",
"DWSingleFlushPosLock",
"DWSingleFlushWriteLock",
"RestartPointQueueLock",
"PruneDirtyQueueLock",
"LWTRANCHE_ACCOUNT_TABLE",
"GeneralExtendedLock",
"MPFLLOCK",
@ -374,6 +376,12 @@ int NumLWLocks(void)
/* for materialized view */
numLocks += 1;
/* for recovery state queue */
numLocks += 1;
/* for prune dirty queue */
numLocks += 1;
/*
* Add any requested by loadable modules; for backwards-compatibility
* reasons, allocate at least NUM_USER_DEFINED_LWLOCKS of them even if


@ -90,6 +90,7 @@ static void CheckSessionTimeout(void);
static bool CheckStandbyTimeout(void);
extern void ResetGtmHandleXmin(GTM_TransactionKey txnKey);
static void FiniNuma(int code, Datum arg);
static inline void ReleaseChildSlot(void);
/*
* Report shared-memory space needed by InitProcGlobal.
@ -437,6 +438,18 @@ PGPROC* GetFreeCMAgentProc()
return current;
}
/* Release the child slot in some cases; other roles release their slot in CleanupBackend */
static inline void ReleaseChildSlot(void)
{
if (IsUnderPostmaster &&
((t_thrd.role == WLM_WORKER || t_thrd.role == WLM_MONITOR || t_thrd.role == WLM_ARBITER ||
t_thrd.role == WLM_CPMONITOR) || IsJobAspProcess() || t_thrd.role == STREAMING_BACKEND ||
IsStatementFlushProcess() || IsJobSnapshotProcess() || t_thrd.postmaster_cxt.IsRPCWorkerThread ||
IsJobPercentileProcess() || t_thrd.role == ARCH)) {
(void)ReleasePostmasterChildSlot(t_thrd.proc_cxt.MyPMChildSlot);
}
}
/*
* InitProcess -- initialize a per-process data structure for this backend
*/
@ -522,12 +535,7 @@ void InitProcess(void)
* old wlm worker process which has exited; if no new slot can be used while the
* postmaster is starting the thread, a panic error will be thrown.
*/
if (IsUnderPostmaster &&
(t_thrd.role == WLM_WORKER || t_thrd.role == WLM_MONITOR || t_thrd.role == WLM_ARBITER ||
IsStatementFlushProcess() || t_thrd.role == WLM_CPMONITOR || IsJobAspProcess() ||
IsJobPercentileProcess() || IsJobSnapshotProcess() || t_thrd.role == STREAMING_BACKEND)) {
(void)ReleasePostmasterChildSlot(t_thrd.proc_cxt.MyPMChildSlot);
}
ReleaseChildSlot();
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
@ -1156,12 +1164,7 @@ static void ProcKill(int code, Datum arg)
* old wlm worker process which has exited; if no new slot can be used while the
* postmaster is starting the thread, a panic error will be thrown.
*/
if (IsUnderPostmaster &&
((t_thrd.role == WLM_WORKER || t_thrd.role == WLM_MONITOR || t_thrd.role == WLM_ARBITER ||
t_thrd.role == WLM_CPMONITOR) || IsJobAspProcess() || t_thrd.role == STREAMING_BACKEND ||
IsStatementFlushProcess() ||
IsJobSnapshotProcess() || t_thrd.postmaster_cxt.IsRPCWorkerThread || IsJobPercentileProcess()))
(void)ReleasePostmasterChildSlot(t_thrd.proc_cxt.MyPMChildSlot);
ReleaseChildSlot();
/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
if (t_thrd.autovacuum_cxt.AutovacuumLauncherPid != 0)


@ -29,66 +29,9 @@ top_builddir ?= ../../../../../../
include $(top_builddir)/src/Makefile.global
MASSTREE_DIR=$(MASSTREE_HOME)
MASSTREE_PACKAGE=masstree-beta-0.9.0
MOT_MASSTREE_PATCH=masstree-beta-0.9.0_patch
MASSTREE_MEGRED_SOURCES_DIR=$(MASSTREE_DIR)/code
mot_target:
@$(call create_masstree_sources)
@make -f Makefile.local
clean:
@rm -rf $(MASSTREE_MEGRED_SOURCES_DIR)
@make -f Makefile.local clean
MASSTREE_RELEVANT_SOURCES = \
btree_leaflink.hh \
circular_int.hh \
compiler.cc \
compiler.hh \
hashcode.hh \
kpermuter.hh \
ksearch.hh \
masstree_get.hh \
masstree.hh \
masstree_insert.hh \
masstree_key.hh \
masstree_remove.hh \
masstree_scan.hh \
masstree_split.hh \
masstree_struct.hh \
masstree_tcursor.hh \
memdebug.cc \
memdebug.hh \
mtcounters.hh \
kvthread.cc \
kvthread.hh \
nodeversion.hh \
small_vector.hh \
straccum.cc \
straccum.hh \
str.hh \
stringbag.hh \
string_base.hh \
string.cc \
string.hh \
string_slice.hh \
timestamp.hh
define create_masstree_sources
rm -rf $(MASSTREE_DIR)/tmp; \
rm -rf $(MASSTREE_MEGRED_SOURCES_DIR); \
mkdir $(MASSTREE_DIR)/tmp; \
mkdir $(MASSTREE_MEGRED_SOURCES_DIR); \
tar xfzv $(MASSTREE_DIR)/$(MASSTREE_PACKAGE).tar.gz -C $(MASSTREE_DIR)/tmp &> /dev/null; \
for src_file in $(MASSTREE_RELEVANT_SOURCES); \
do \
cp $(MASSTREE_DIR)/tmp/$(MASSTREE_PACKAGE)/$$src_file $(MASSTREE_MEGRED_SOURCES_DIR); \
done
rename ".cc" ".cpp" $(MASSTREE_MEGRED_SOURCES_DIR)/*.cc; \
rm -rf $(MASSTREE_DIR)/tmp; \
patch -d $(MASSTREE_MEGRED_SOURCES_DIR) < $(MASSTREE_DIR)/$(MOT_MASSTREE_PATCH).patch &> /dev/null;
endef


@ -32,7 +32,6 @@ CFLAGS ?=
subdir=src/gausskernel/storage/mot/core/src
top_builddir ?= ../../../../../../
MASSTREE_MEGRED_SOURCES_DIR=$(MASSTREE_HOME)/code
ifeq ($(UNDERPG),yes)
include $(top_builddir)/src/Makefile.global
@ -48,7 +47,7 @@ SRC_TARGET_DIRS = \
memory \
storage \
system \
$(MASSTREE_MEGRED_SOURCES_DIR) \
$(MASSTREE_HOME)/include \
utils
SRC_DIRS = $(shell find $(SRC_TARGET_DIRS) -type d 2> /dev/null) # 2> /dev/null To remove error printout when calling clean
@ -108,6 +107,7 @@ CFLAGS += -faligned-new
# turn on some warnings
CFLAGS += -Wwrite-strings -Wcast-align -Wreturn-type
CFLAGS += -Wpointer-arith -Wlogical-op -Waddress -Wsizeof-pointer-memaccess -Winit-self
CFLAGS += -L$(MASSTREE_HOME)/lib -lmasstree
# Enable this warning for gcc version 6.0 or higher
#CFLAGS += -Wduplicated-cond


@ -110,13 +110,7 @@ void RowHeader::WriteChangesToRow(const Access* access, uint64_t csn)
#ifdef MOT_DEBUG
if (access->m_params.IsPrimarySentinel()) {
uint64_t v = m_csnWord;
if (MOTEngine::GetInstance()->IsRecovering()) {
if (!(csn == GetCSN() && (v & LOCK_BIT))) {
MOT_LOG_ERROR(
"csn=%ld, v & LOCK_BIT=%ld, v & (~LOCK_BIT)=%ld\n", csn, (v & LOCK_BIT), (v & (~LOCK_BIT)));
MOT_ASSERT(false);
}
} else {
if (!MOTEngine::GetInstance()->IsRecovering()) {
if (!(csn > GetCSN() && (v & LOCK_BIT))) {
MOT_LOG_ERROR(
"csn=%ld, v & LOCK_BIT=%ld, v & (~LOCK_BIT)=%ld\n", csn, (v & LOCK_BIT), (v & (~LOCK_BIT)));


@ -247,7 +247,7 @@ void GcManager::CleanIndexItems(uint32_t indexId, bool dropIndex)
}
m_managerLock.unlock();
if (counter) {
MOT_LOG_INFO("Entity:%s threadId = %d cleaned from index id = %d items = %d\n",
MOT_LOG_INFO("Entity:%s threadId = %d cleaned from index id = %u items = %u\n",
enGcTypes[m_purpose],
m_tid,
indexId,


@ -1,62 +0,0 @@
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* config.h
* Masstree index configurations template.
*
* IDENTIFICATION
* src/gausskernel/storage/mot/core/src/storage/index/masstree/config.h
*
* -------------------------------------------------------------------------
*/
#include "mot_masstree_config.hpp"
#ifndef MASSTREE_CONFIG_H
#define MASSTREE_CONFIG_H
#define HAVE_CXX_TEMPLATE_ALIAS MOT_HAVE_CXX_TEMPLATE_ALIAS
#define HAVE_INT64_T_IS_LONG MOT_HAVE_INT64_T_IS_LONG
#define HAVE_SIZE_T_IS_UNSIGNED_LONG MOT_HAVE_SIZE_T_IS_UNSIGNED_LONG
#define HAVE_STD_HASH MOT_HAVE_STD_HASH
#define HAVE_STD_IS_TRIVIALLY_COPYABLE MOT_HAVE_STD_IS_TRIVIALLY_COPYABLE
#define HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE MOT_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE
#define HAVE_SUPERPAGE MOT_HAVE_SUPERPAGE
#define HAVE_TYPE_TRAITS MOT_HAVE_TYPE_TRAITS
#define HAVE_UNALIGNED_ACCESS MOT_HAVE_UNALIGNED_ACCESS
#define HAVE___BUILTIN_CLZ MOT_HAVE___BUILTIN_CLZ
#define HAVE___BUILTIN_CLZL MOT_HAVE___BUILTIN_CLZL
#define HAVE___BUILTIN_CLZLL MOT_HAVE___BUILTIN_CLZLL
#define HAVE___BUILTIN_CTZ MOT_HAVE___BUILTIN_CTZ
#define HAVE___BUILTIN_CTZL MOT_HAVE___BUILTIN_CTZL
#define HAVE___BUILTIN_CTZLL MOT_HAVE___BUILTIN_CTZLL
#define HAVE___HAS_TRIVIAL_COPY MOT_HAVE___HAS_TRIVIAL_COPY
#define HAVE___HAS_TRIVIAL_DESTRUCTOR MOT_HAVE___HAS_TRIVIAL_DESTRUCTOR
#define HAVE___SYNC_BOOL_COMPARE_AND_SWAP MOT_HAVE___SYNC_BOOL_COMPARE_AND_SWAP
#define HAVE___SYNC_BOOL_COMPARE_AND_SWAP_8 MOT_HAVE___SYNC_BOOL_COMPARE_AND_SWAP_8
#define HAVE___SYNC_FETCH_AND_ADD MOT_HAVE___SYNC_FETCH_AND_ADD
#define HAVE___SYNC_FETCH_AND_ADD_8 MOT_HAVE___SYNC_FETCH_AND_ADD_8
#define HAVE___SYNC_FETCH_AND_OR MOT_HAVE___SYNC_FETCH_AND_OR
#define HAVE___SYNC_FETCH_AND_OR_8 MOT_HAVE___SYNC_FETCH_AND_OR_8
#define HAVE___SYNC_VAL_COMPARE_AND_SWAP MOT_HAVE___SYNC_VAL_COMPARE_AND_SWAP
#define HAVE___SYNC_VAL_COMPARE_AND_SWAP_8 MOT_HAVE___SYNC_VAL_COMPARE_AND_SWAP_8
#define MASSTREE_MAXKEYLEN MOT_MASSTREE_MAXKEYLEN
#define SIZEOF_INT MOT_SIZEOF_INT
#define SIZEOF_LONG MOT_SIZEOF_LONG
#define SIZEOF_LONG_LONG MOT_SIZEOF_LONG_LONG
#define SIZEOF_SHORT MOT_SIZEOF_SHORT
#define WORDS_BIGENDIAN_SET MOT_WORDS_BIGENDIAN_SET
#endif


@ -1,77 +0,0 @@
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* mot_masstree_config.hpp
* MOT configurations for Masstree index.
*
* IDENTIFICATION
* src/gausskernel/storage/mot/core/src/storage/index/masstree/mot_masstree_config.hpp
*
* -------------------------------------------------------------------------
*/
#ifndef MOT_MASSTREE_CONFIG_HPP
#define MOT_MASSTREE_CONFIG_HPP
#define MOT_HAVE_CXX_TEMPLATE_ALIAS 1
#define MOT_HAVE_INT64_T_IS_LONG 1
#define MOT_HAVE_SIZE_T_IS_UNSIGNED_LONG 1
#define MOT_HAVE_STD_HASH 1
#define MOT_HAVE_STD_IS_TRIVIALLY_COPYABLE 1
#define MOT_HAVE_STD_IS_TRIVIALLY_DESTRUCTIBLE 1
#define MOT_HAVE_SUPERPAGE 1
#define MOT_HAVE_TYPE_TRAITS 1
#define MOT_HAVE_UNALIGNED_ACCESS 0
#define MOT_HAVE___BUILTIN_CLZ 1
#define MOT_HAVE___BUILTIN_CLZL 1
#define MOT_HAVE___BUILTIN_CLZLL 1
#define MOT_HAVE___BUILTIN_CTZ 1
#define MOT_HAVE___BUILTIN_CTZL 1
#define MOT_HAVE___BUILTIN_CTZLL 1
#define MOT_HAVE___HAS_TRIVIAL_COPY 1
#define MOT_HAVE___HAS_TRIVIAL_DESTRUCTOR 1
#define MOT_HAVE___SYNC_BOOL_COMPARE_AND_SWAP 1
#define MOT_HAVE___SYNC_BOOL_COMPARE_AND_SWAP_8 1
#define MOT_HAVE___SYNC_FETCH_AND_ADD 1
#define MOT_HAVE___SYNC_FETCH_AND_ADD_8 1
#define MOT_HAVE___SYNC_FETCH_AND_OR 1
#define MOT_HAVE___SYNC_FETCH_AND_OR_8 1
#define MOT_HAVE___SYNC_VAL_COMPARE_AND_SWAP 1
#define MOT_HAVE___SYNC_VAL_COMPARE_AND_SWAP_8 1
/* Maximum key length */
#define MOT_MASSTREE_MAXKEYLEN MAX_KEY_SIZE
#define MOT_SIZEOF_INT 4
#define MOT_SIZEOF_LONG 8
#define MOT_SIZEOF_LONG_LONG 8
#define MOT_SIZEOF_SHORT 2
#define MOT_WORDS_BIGENDIAN_SET 1
#define masstree_invariant(x, ...) \
do { \
} while (0)
#define masstree_precondition(x, ...) \
do { \
} while (0)
#ifndef invariant
#define invariant masstree_invariant
#endif
#ifndef precondition
#define precondition masstree_precondition
#endif
#endif // MOT_MASSTREE_CONFIG_HPP


@ -0,0 +1,123 @@
/*
* Copyright (c) 2020 Huawei Technologies Co.,Ltd.
*
* openGauss is licensed under Mulan PSL v2.
* You can use this software according to the terms and conditions of the Mulan PSL v2.
* You may obtain a copy of Mulan PSL v2 at:
*
* http://license.coscl.org.cn/MulanPSL2
*
* THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
* EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
* MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
* See the Mulan PSL v2 for more details.
* -------------------------------------------------------------------------
*
* mot_masstree_kvthread.hpp
* Replace Masstree's thread info implementations with MOT functionality.
*
* IDENTIFICATION
* src/gausskernel/storage/mot/core/src/storage/index/masstree/mot_masstree_kvthread.cpp
*
* -------------------------------------------------------------------------
*/
#include "masstree_index.h"
#include "kvthread.hh"
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <new>
#include <sys/mman.h>
#if HAVE_SUPERPAGE && !NOSUPERPAGE
#include <sys/types.h>
#include <dirent.h>
#endif
#include "mm_api.h"
#include "mm_gc_manager.h"
// This is the thread info which serves the current masstree operation. It is set before the operation starts.
__thread threadinfo* mtSessionThreadInfo = nullptr;
volatile mrcu_epoch_type globalepoch;
inline threadinfo::threadinfo(int purpose, int index, int rcu_max_free_count)
{
errno_t erc = memset_s(this, sizeof(*this), 0, sizeof(*this));
securec_check(erc, "\0", "\0");
purpose_ = purpose;
index_ = index;
rcu_free_count = rcu_max_free_count;
ts_ = 2;
}
threadinfo* threadinfo::make(void* obj_mem, int purpose, int index, int rcu_max_free_count)
{
threadinfo* ti = new(obj_mem) threadinfo(purpose, index, rcu_max_free_count);
if (use_pool()) {
void *limbo_space = ti->allocate(MAX_MEMTAG_MASSTREE_LIMBO_GROUP_ALLOCATION_SIZE, memtag_limbo);
if (!limbo_space) {
return nullptr;
}
ti->mark(tc_limbo_slots, mt_limbo_group::capacity);
ti->limbo_head_ = ti->limbo_tail_ = new(limbo_space) mt_limbo_group;
}
return ti;
}
void* threadinfo::allocate(size_t sz, memtag tag, size_t* actual_size)
{
int size = sz;
void* p = nullptr;
if (likely(!use_pool())) {
p = cur_working_index->AllocateMem(size, tag);
} else {
p = malloc(sz + memdebug_size);
}
p = memdebug::make(p, sz, tag);
if (p) {
if (actual_size) {
*actual_size = size;
}
mark(threadcounter(tc_alloc + (tag > memtag_value)), sz);
}
return p;
}
void threadinfo::deallocate(void* p, size_t sz, memtag tag)
{
MOT_ASSERT(p);
p = memdebug::check_free(p, sz, tag);
if (likely(!use_pool())) {
cur_working_index->DeallocateMem(p, sz, tag);
} else {
free(p);
}
mark(threadcounter(tc_alloc + (tag > memtag_value)), -sz);
}
void threadinfo::ng_record_rcu(void* p, int sz, memtag tag)
{
MOT_ASSERT(p);
memdebug::check_rcu(p, sz, tag);
cur_working_index->RecordMemRcu(p, sz, tag);
mark(threadcounter(tc_alloc + (tag > memtag_value)), -sz);
}
void threadinfo::set_gc_session(MOT::GcManager* gc_session)
{
gc_session_ = gc_session;
}
inline MOT::GcManager* threadinfo::get_gc_session()
{
return gc_session_;
}


@ -28,7 +28,7 @@
#include "index.h"
#include "index_base.h"
#include "utilities.h"
#include "masstree/config.h"
#include "masstree_config.h"
#include "masstree/mot_masstree.hpp"
#include "masstree/mot_masstree_insert.hpp"
#include "masstree/mot_masstree_remove.hpp"


@ -552,7 +552,7 @@ uint32_t CheckpointRecovery::HaveTasks()
bool CheckpointRecovery::IsMemoryLimitReached(uint32_t numThreads, uint32_t neededBytes)
{
uint64_t memoryRequiredBytes = numThreads * neededBytes;
uint64_t memoryRequiredBytes = (uint64_t)numThreads * neededBytes;
if (MOTEngine::GetInstance()->GetCurrentMemoryConsumptionBytes() + memoryRequiredBytes >=
MOTEngine::GetInstance()->GetHardMemoryLimitBytes()) {
MOT_LOG_WARN("CheckpointRecovery::IsMemoryLimitReached: memory limit reached "
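The one-line change above widens numThreads to uint64_t before multiplying; without the cast, a uint32_t * uint32_t product is computed in 32 bits and can silently wrap before it is stored in the 64-bit variable. A tiny demonstration with made-up values:

// Demonstrates why the (uint64_t) cast above matters: the product of two uint32_t
// values is computed in 32 bits and can wrap before it is widened to 64 bits.
#include <cstdint>
#include <cstdio>

int main()
{
    uint32_t numThreads = 8;
    uint32_t neededBytes = 600U * 1024U * 1024U;         /* 600 MB per thread */

    uint64_t wrong = numThreads * neededBytes;           /* 32-bit multiply, wraps */
    uint64_t right = (uint64_t)numThreads * neededBytes; /* widened first, exact */

    printf("without cast: %llu\n", (unsigned long long)wrong);
    printf("with cast   : %llu\n", (unsigned long long)right);
    return 0;
}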


@ -175,24 +175,16 @@ void TxnAccess::ClearSet()
{
m_lastAcc = nullptr;
MOT_ASSERT(m_rowCnt == m_rowsSet->size());
m_rowsSet->clear();
if (unlikely(m_accessSetSize > DEFAULT_ACCESS_SIZE)) {
ShrinkAccessSet();
}
m_rowsSet->clear();
m_allocatedAc = 0;
unsigned int i;
for (i = 0; i < m_accessSetSize; i++) {
Access* ac = GetAccessPtr(i);
if (ac != nullptr) {
DestroyAccess(ac);
} else {
break;
} else {
for (unsigned int i = 0; i < m_rowCnt; i++) {
DestroyAccess(m_accessesSetBuff[i]);
}
m_rowCnt = 0;
}
m_allocatedAc = i;
m_insertManager->ClearSet();
m_rowCnt = 0;
}
void TxnAccess::DestroyAccess(Access* access)
@ -241,15 +233,9 @@ void TxnAccess::ShrinkAccessSet()
errno_t erc;
uint64_t new_array_size = DEFAULT_ACCESS_SIZE;
// Clear access set
for (unsigned int i = DEFAULT_ACCESS_SIZE; i < m_accessSetSize; i++) {
Access* ac = GetAccessPtr(i);
if (ac != nullptr) {
DestroyAccess(ac);
delete ac;
ResetAccessPtr(i);
} else {
break;
}
for (unsigned int i = 0; i < m_allocatedAc; i++) {
DestroyAccess(m_accessesSetBuff[i]);
delete m_accessesSetBuff[i];
}
if (new_array_size < m_accessSetSize) {
@ -269,11 +255,11 @@ void TxnAccess::ShrinkAccessSet()
erc = memset_s(ptr, alloc_size, 0, sizeof(Access*) * new_array_size);
securec_check(erc, "\0", "\0");
erc = memcpy_s(ptr, alloc_size, m_accessesSetBuff, sizeof(Access*) * DEFAULT_ACCESS_SIZE);
securec_check(erc, "\0", "\0");
MemSessionFree(m_accessesSetBuff);
SetAccessesSet(reinterpret_cast<Access**>(ptr));
m_accessSetSize = new_array_size;
m_allocatedAc = 0;
m_rowCnt = 0;
}
Access* TxnAccess::GetNewRowAccess(const Row* row, AccessType type, RC& rc)


@ -369,8 +369,9 @@ RC RedoLog::SerializeTransactionDDLs(IdxDDLAccessMap& idxDDLMap)
RC RedoLog::SerializeTransactionDMLs()
{
RC status = RC_OK;
for (uint32_t index = 0; index < m_txn->m_accessMgr->m_rowCnt; index++) {
Access* access = m_txn->m_accessMgr->GetAccessPtr(index);
TxnOrderedSet_t& orderedSet = m_txn->m_accessMgr->GetOrderedRowSet();
for (const auto& raPair : orderedSet) {
Access* access = raPair.second;
if (access != nullptr) {
switch (access->m_type) {
case INS:


@ -574,8 +574,9 @@ static void MOTGetForeignPaths(PlannerInfo* root, RelOptInfo* baserel, Oid forei
foreach (lc, baserel->baserestrictinfo) {
RestrictInfo* ri = (RestrictInfo*)lfirst(lc);
if (!IsMOTExpr(baserel, planstate, &marr, ri->clause, NULL, true))
if (!IsMOTExpr(baserel, planstate, &marr, ri->clause, nullptr, true)) {
planstate->m_localConds = lappend(planstate->m_localConds, ri->clause);
}
}
// get best index
@ -675,8 +676,8 @@ static void MOTGetForeignPaths(PlannerInfo* root, RelOptInfo* baserel, Oid forei
foreach (lc, bestClause) {
RestrictInfo* ri = (RestrictInfo*)lfirst(lc);
IsMOTExpr(baserel, planstate, &marr, ri->clause, nullptr, false);
// in case we use index params DO NOT add it to envelope filter
// In case we use index params DO NOT add it to envelope filter.
(void)IsMOTExpr(baserel, planstate, &marr, ri->clause, nullptr, false);
}
best = MOTAdaptor::GetBestMatchIndex(planstate, &marr, list_length(bestClause), false);
@ -1383,7 +1384,7 @@ static TupleTableSlot* MOTExecForeignInsert(
MOTFdwStateSt* fdwState = (MOTFdwStateSt*)resultRelInfo->ri_FdwState;
MOT::RC rc = MOT::RC_OK;
if (MOTAdaptor::m_engine->IsSoftMemoryLimitReached()) {
if (MOTAdaptor::m_engine->IsSoftMemoryLimitReached() && fdwState != nullptr) {
CleanQueryStatesOnError(fdwState->m_currTxn);
}

Some files were not shown because too many files have changed in this diff Show More