1 ustore bugfix

2  Disable the use of compressed tables with segment-page storage and USTORE
3  Disable page repair for compressed tables
4  Fix the remote read feature
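The restrictions from item 2 above are exercised by the regression tests added in this commit. A minimal sketch of the rejected option combinations (schema and object names here are illustrative, not the ones used in the tests):

CREATE TABLE demo_plain(id int, c1 text) WITH (compresstype = 2);                          -- still allowed
CREATE TABLE demo_ustore(id int, c1 text) WITH (compresstype = 2, storage_type = ustore);  -- rejected
CREATE TABLE demo_seg(id int, c1 text) WITH (compresstype = 2, segment = on);              -- rejected
CREATE INDEX demo_idx ON demo_plain(c1) WITH (compresstype = 2, storage_type = ustore);    -- rejected
CREATE INDEX demo_idx2 ON demo_plain(c1) WITH (compresstype = 2, segment = on);            -- rejected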
wuyuechuan 2022-03-18 09:54:50 +08:00
parent f3e27c51bc
commit c66d37c274
29 changed files with 161 additions and 58 deletions

View File

@ -491,6 +491,7 @@ RelFileNodeForkNum relpath_to_filenode(char* path)
securec_check(rc, "\0", "\0");
filenode.rnode.node.spcNode = DEFAULTTABLESPACE_OID;
filenode.rnode.node.bucketNode = InvalidBktId;
filenode.rnode.node.opt = 0;
filenode.rnode.backend = InvalidBackendId;
if (0 == strncmp(token, "global", 7)) {

View File

@ -967,6 +967,7 @@ void push_del_rel_to_hashtbl(bool isCommit)
entry->rnode.dbNode = pending->relnode.dbNode;
entry->rnode.relNode = pending->relnode.relNode;
entry->rnode.bucketNode = pending->relnode.bucketNode;
entry->rnode.opt = pending->relnode.opt;
entry->maxSegNo = -1;
}
BatchClearBadBlock(pending->relnode, pending->forknum, 0);

View File

@ -561,6 +561,7 @@ static void gtt_storage_removeall(int code, Datum arg)
rnode.spcNode = gtt_rnode->spcnode;
rnode.dbNode = u_sess->proc_cxt.MyDatabaseId;
rnode.relNode = gtt_rnode->relfilenode;
rnode.opt = 0;
rnode.bucketNode = InvalidBktId;
srel = smgropen(rnode, BackendIdForTempRelations);

View File

@ -284,9 +284,10 @@ extern int RemoteGetPage(char* remoteAddress, RepairBlockKey *key, uint32 blocks
pblk->block, timeout);
} else {
tnRet = snprintf_s(sqlCommands, MAX_PATH_LEN, MAX_PATH_LEN - 1,
"SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, '%lu', %u, '%lu', false, %d);",
"SELECT gs_read_block_from_remote(%u, %u, %u, %d, %d, %d, '%lu', %u, '%lu', false, %d);",
key->relfilenode.spcNode, key->relfilenode.dbNode, key->relfilenode.relNode,
key->relfilenode.bucketNode, key->relfilenode.opt, key->forknum, key->blocknum, blocksize, lsn, timeout);
key->relfilenode.bucketNode, (int2)key->relfilenode.opt, key->forknum, key->blocknum,
blocksize, lsn, timeout);
}
securec_check_ss(tnRet, "", "");

View File

@ -514,6 +514,15 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
lockmode = concurrent ? ShareUpdateExclusiveLock : ShareLock;
rel = heap_open(relationId, lockmode);
TableCreateSupport indexCreateSupport{COMPRESS_TYPE_NONE, false, false, false, false, false};
ListCell *cell = NULL;
foreach (cell, stmt->options) {
DefElem *defElem = (DefElem *)lfirst(cell);
SetOneOfCompressOption(defElem, &indexCreateSupport);
}
CheckCompressOption(&indexCreateSupport);
/* Forbidden to create gin index on ustore table. */
if (rel->rd_tam_type == TAM_USTORE) {
if (strcmp(stmt->accessMethod, "btree") == 0) {
@ -1054,13 +1063,21 @@ Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_al
}
}
TableCreateSupport indexCreateSupport{COMPRESS_TYPE_NONE, false, false, false, false, false};
ListCell* cell = NULL;
foreach (cell, stmt->options) {
DefElem* defElem = (DefElem*)lfirst(cell);
SetOneOfCompressOption(defElem, &indexCreateSupport);
if (indexCreateSupport.compressType || HasCompressOption(&indexCreateSupport)) {
foreach (cell, stmt->options) {
DefElem *defElem = (DefElem *)lfirst(cell);
if (pg_strcasecmp(defElem->defname, "storage_type") == 0 &&
pg_strcasecmp(defGetString(defElem), "ustore") == 0) {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Can not use compress option in ustore index.")));
}
if (pg_strcasecmp(defElem->defname, "segment") == 0 && defGetBoolean(defElem)) {
ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Can not use compress option in segment storage.")));
}
}
}
CheckCompressOption(&indexCreateSupport);
/*
* Parse AM-specific options, convert to text array form, validate.

View File

@ -1102,7 +1102,7 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt
bool createWithOrientationRow = false; /* To mark whether table have been create with(orientation = row) */
bool isUstore = false;
bool assignedStorageType = false;
bool segment = false;
TableCreateSupport tableCreateSupport{COMPRESS_TYPE_NONE, false, false, false, false, false};
(void)isOrientationSet(options, NULL, false);
foreach (cell, options) {
@ -1133,6 +1133,8 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt
ereport(ERROR,
(errcode(ERRCODE_INVALID_OPTION),
errmsg("It is not allowed to assign version option for non-dfs table.")));
} else if (pg_strcasecmp(def->defname, "segment") == 0){
segment = true;
} else {
SetOneOfCompressOption(def, &tableCreateSupport);
}
@ -1160,7 +1162,7 @@ static List* AddDefaultOptionsIfNeed(List* options, const char relkind, CreateSt
res = lappend(options, def);
}
bool noSupportTable = isCStore || isTsStore || relkind != RELKIND_RELATION ||
bool noSupportTable = segment || isUstore || isCStore || isTsStore || relkind != RELKIND_RELATION ||
stmt->relation->relpersistence == RELPERSISTENCE_UNLOGGED ||
stmt->relation->relpersistence == RELPERSISTENCE_TEMP ||
stmt->relation->relpersistence == RELPERSISTENCE_GLOBAL_TEMP;

View File

@ -71,6 +71,23 @@ void initRepairBadBlockStat()
}
}
static void UnsupportedPageRepair(const char *path)
{
char pcaPath[MAXPGPATH];
int rc = 0;
rc = snprintf_s(pcaPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s_pca", t_thrd.proc_cxt.DataDir, path);
securec_check_ss(rc, "\0", "\0");
char pcdPath[MAXPGPATH];
rc = snprintf_s(pcdPath, MAXPGPATH, MAXPGPATH - 1, "%s/%s_pcd", t_thrd.proc_cxt.DataDir, path);
securec_check_ss(rc, "\0", "\0");
struct stat pcaStat;
struct stat pcdStat;
bool pcaState = stat(pcaPath, &pcaStat) == 0;
bool pcdState = stat(pcdPath, &pcdStat) == 0;
if (pcaState || pcdState) {
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), (errmsg("Compressed Table is not allowed here."))));
}
}
/* BatchClearBadBlock
 * clear the global_repair_bad_block_stat hashtable entry when a relation is dropped or truncated.
@ -167,6 +184,7 @@ void UpdateRepairTime(const RelFileNode &rnode, ForkNumber forknum, BlockNumber
key.relfilenode.dbNode = rnode.dbNode;
key.relfilenode.relNode = rnode.relNode;
key.relfilenode.bucketNode = rnode.bucketNode;
key.relfilenode.opt = 0;
key.forknum = forknum;
key.blocknum = blocknum;
@ -208,6 +226,7 @@ void addGlobalRepairBadBlockStat(const RelFileNodeBackend &rnode, ForkNumber for
key.relfilenode.dbNode = rnode.node.dbNode;
key.relfilenode.relNode = rnode.node.relNode;
key.relfilenode.bucketNode = rnode.node.bucketNode;
key.relfilenode.opt = 0;
key.forknum = forknum;
key.blocknum = blocknum;
@ -360,6 +379,7 @@ bool tryRepairPage(int blocknum, bool is_segment, RelFileNode *relnode, int time
logicalRelNode.spcNode = relnode->spcNode;
logicalRelNode.dbNode = relnode->dbNode;
logicalRelNode.bucketNode = SegmentBktId;
logicalRelNode.opt = 0;
logicalBlocknum = loc.blocknum;
}
@ -576,6 +596,8 @@ Datum gs_repair_page(PG_FUNCTION_ARGS)
bool is_segment = PG_GETARG_BOOL(2);
int32 timeout = PG_GETARG_INT32(3);
UnsupportedPageRepair(path);
bool result = repairPage(path, blockNum, is_segment, timeout);
PG_RETURN_BOOL(result);
}
@ -602,6 +624,8 @@ Datum gs_repair_file(PG_FUNCTION_ARGS)
char* path = text_to_cstring(PG_GETARG_TEXT_P(1));
int32 timeout = PG_GETARG_INT32(2);
UnsupportedPageRepair(path);
if (!CheckRelDataFilePath(path)) {
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
(errmsg("The input path(%s) is an incorrect relation file path input. \n", path))));
@ -780,6 +804,8 @@ Datum gs_verify_and_tryrepair_page(PG_FUNCTION_ARGS)
int j = 1;
XLogPhyBlock pblk = {0, 0, 0};
UnsupportedPageRepair(path);
/* build tupdesc for result tuples */
tupdesc = CreateTemplateTupleDesc(REPAIR_BLOCK_STAT_NATTS, false);
TupleDescInitEntry(tupdesc, (AttrNumber)j++, "node_name", TEXTOID, -1, 0);
@ -854,14 +880,13 @@ Datum gs_read_segment_block_from_remote(PG_FUNCTION_ARGS)
uint32 dbNode = PG_GETARG_UINT32(1);
uint32 relNode = PG_GETARG_UINT32(2);
int16 bucketNode = PG_GETARG_INT16(3);
uint16 opt = PG_GETARG_INT16(4);
int32 forkNum = PG_GETARG_INT32(5);
uint64 blockNum = (uint64)PG_GETARG_TRANSACTIONID(6);
uint32 blockSize = PG_GETARG_UINT32(7);
uint64 lsn = (uint64)PG_GETARG_TRANSACTIONID(8);
uint32 seg_relNode = PG_GETARG_UINT32(9);
uint32 seg_block = PG_GETARG_UINT32(10);
int32 timeout = PG_GETARG_INT32(11);
int32 forkNum = PG_GETARG_INT32(4);
uint64 blockNum = (uint64)PG_GETARG_TRANSACTIONID(5);
uint32 blockSize = PG_GETARG_UINT32(6);
uint64 lsn = (uint64)PG_GETARG_TRANSACTIONID(7);
uint32 seg_relNode = PG_GETARG_UINT32(8);
uint32 seg_block = PG_GETARG_UINT32(9);
int32 timeout = PG_GETARG_INT32(10);
XLogPhyBlock pblk = {
.relNode = seg_relNode,
@ -874,7 +899,7 @@ Datum gs_read_segment_block_from_remote(PG_FUNCTION_ARGS)
key.relfilenode.dbNode = dbNode;
key.relfilenode.relNode = relNode;
key.relfilenode.bucketNode = bucketNode;
key.relfilenode.opt = opt;
key.relfilenode.opt = 0;
key.forknum = forkNum;
key.blocknum = blockNum;
@ -1114,7 +1139,8 @@ List* getSegmentBadFiles(List* spcList, List* badFileItems)
.spcNode = lfirst_oid(currentCell),
.dbNode = u_sess->proc_cxt.MyDatabaseId,
.relNode = 1,
.bucketNode = SegmentBktId
.bucketNode = SegmentBktId,
.opt = 0
};
char* segmentDir = relSegmentDir(relFileNode, MAIN_FORKNUM);
List* segmentFiles = getSegmentMainFilesPath(segmentDir, '/', 5);
@ -1576,7 +1602,8 @@ bool gsRepairCsnOrCLog(char* path, int timeout)
.spcNode = (uint32)transType,
.dbNode = 0,
.relNode = logName,
.bucketNode = InvalidBktId
.bucketNode = InvalidBktId,
.opt = 0
};
RemoteReadFileKey repairFileKey = {

View File

@ -71,13 +71,13 @@ static void SeqRemoteReadFile();
static void checkOtherFile(RepairFileKey key, uint32 max_segno, uint64 size);
static void PushBadFileToRemoteHashTbl(RepairFileKey key);
#define COMPARE_REPAIR_PAGE_KEY(key1, key2) \
((key1).relfilenode.relNode == (key2).relfilenode.relNode && \
(key1).relfilenode.dbNode == (key2).relfilenode.dbNode && \
(key1).relfilenode.spcNode == (key2).relfilenode.spcNode && \
(key1).relfilenode.bucketNode == (key2).relfilenode.bucketNode && \
(key1).forknum == (key2).forknum && \
(key1).blocknum == (key2).blocknum)
#define COMPARE_REPAIR_PAGE_KEY(key1, key2) \
((key1).relfilenode.relNode == (key2).relfilenode.relNode && \
(key1).relfilenode.dbNode == (key2).relfilenode.dbNode && \
(key1).relfilenode.spcNode == (key2).relfilenode.spcNode && \
(key1).relfilenode.bucketNode == (key2).relfilenode.bucketNode && \
(key1).relfilenode.opt == (key2).relfilenode.opt && (key1).forknum == (key2).forknum && \
(key1).blocknum == (key2).blocknum)
#define NOT_SUPPORT_PAGE_REPAIR \
(g_instance.attr.attr_common.cluster_run_mode == RUN_MODE_STANDBY || \
@ -935,6 +935,7 @@ static void PushBadFileToRemoteHashTbl(RepairFileKey key)
entry->key.relfilenode.dbNode = key.relfilenode.dbNode;
entry->key.relfilenode.spcNode = key.relfilenode.spcNode;
entry->key.relfilenode.bucketNode = key.relfilenode.bucketNode;
entry->key.relfilenode.opt = key.relfilenode.opt;
entry->key.forknum = key.forknum;
entry->key.segno = key.segno;
entry->min_recovery_point = min_recovery_point;
@ -1438,6 +1439,7 @@ bool CheckAllSegmentFileRepair(RepairFileKey key, uint32 max_segno)
temp_key.relfilenode.dbNode = key.relfilenode.dbNode;
temp_key.relfilenode.spcNode = key.relfilenode.spcNode;
temp_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
temp_key.relfilenode.opt = key.relfilenode.opt;
temp_key.forknum = key.forknum;
temp_key.segno = i;
@ -1460,6 +1462,7 @@ bool CheckAllSegmentFileRepair(RepairFileKey key, uint32 max_segno)
rename_key.relfilenode.dbNode = key.relfilenode.dbNode;
rename_key.relfilenode.spcNode = key.relfilenode.spcNode;
rename_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
rename_key.relfilenode.opt = key.relfilenode.opt;
rename_key.forknum = key.forknum;
rename_key.segno = i;
@ -1484,6 +1487,7 @@ bool CheckAllSegmentFileRepair(RepairFileKey key, uint32 max_segno)
change_key.relfilenode.dbNode = key.relfilenode.dbNode;
change_key.relfilenode.spcNode = key.relfilenode.spcNode;
change_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
change_key.relfilenode.opt = key.relfilenode.opt;
change_key.forknum = key.forknum;
change_key.segno = i;
@ -1600,6 +1604,7 @@ static void checkOtherFile(RepairFileKey key, uint32 max_segno, uint64 size)
temp_key.relfilenode.dbNode = key.relfilenode.dbNode;
temp_key.relfilenode.spcNode = key.relfilenode.spcNode;
temp_key.relfilenode.bucketNode = key.relfilenode.bucketNode;
temp_key.relfilenode.opt = key.relfilenode.opt;
temp_key.forknum = key.forknum;
temp_key.segno = i;

View File

@ -9649,6 +9649,7 @@ void addBadBlockStat(const RelFileNode* relfilenode, ForkNumber forknum)
hash_key.relfilenode.dbNode = relfilenode->dbNode;
hash_key.relfilenode.relNode = relfilenode->relNode;
hash_key.relfilenode.bucketNode = relfilenode->bucketNode;
hash_key.relfilenode.opt = relfilenode->opt;
hash_key.forknum = forknum;
bool found = false;

View File

@ -65,6 +65,7 @@ XLogRecParseState *DatabaseXlogCommonParseToBlock(XLogReaderState *record, uint3
}
rnode.bucketNode = InvalidBktId;
rnode.opt = 0;
(*blocknum)++;
XLogParseBufferAllocListFunc(record, &recordstatehead, NULL);

View File

@ -67,6 +67,7 @@ static XLogRecParseState *segpage_redo_parse_space_drop(XLogReaderState *record,
rnode.spcNode = *(Oid *)data;
rnode.dbNode = *(Oid *)(data + sizeof(Oid));
rnode.relNode = InvalidOid;
rnode.opt = 0;
rnode.bucketNode = InvalidBktId;
(*blocknum)++;
@ -425,6 +426,7 @@ void SegPageRedoSpaceShrink(XLogBlockHead *blockhead)
rnode.dbNode = blockhead->dbNode;
rnode.relNode = blockhead->relNode;
rnode.bucketNode = blockhead->bucketNode;
rnode.opt = 0;
char *path = relpathperm(rnode, blockhead->forknum);
ereport(LOG, (errmsg("call space shrink files, filename: %s, xlog lsn: %lX", path, blockhead->end_ptr)));
pfree(path);

View File

@ -98,6 +98,7 @@ XLogRecParseState *tblspc_xlog_common_parse_to_block(XLogReaderState *record, ui
relnode.dbNode = InvalidOid;
relnode.relNode = InvalidOid;
relnode.bucketNode = InvalidBktId;
relnode.opt = 0;
(*blocknum)++;
XLogParseBufferAllocListFunc(record, &recordstatehead, NULL);
if (recordstatehead == NULL) {

View File

@ -294,6 +294,7 @@ XLogRecParseState *xact_xlog_commit_parse_to_block(XLogReaderState *record, XLog
relnode.spcNode = InvalidOid;
relnode.relNode = InvalidOid;
relnode.bucketNode = InvalidBktId;
relnode.opt = 0;
(*blocknum)++;
XLogParseBufferAllocListStateFunc(record, &blockstate, &recordstatehead);

View File

@ -2912,6 +2912,7 @@ void RecordBadBlockAndPushToRemote(XLogBlockDataParse *datadecode, PageErrorType
key.relfilenode.dbNode = state->blockparse.blockhead.dbNode;
key.relfilenode.relNode = state->blockparse.blockhead.relNode;
key.relfilenode.bucketNode = state->blockparse.blockhead.bucketNode;
key.relfilenode.opt = state->blockparse.blockhead.opt;
key.forknum = state->blockparse.blockhead.forknum;
key.blocknum = state->blockparse.blockhead.blkno;

View File

@ -7174,6 +7174,7 @@ void push_unlink_rel_to_hashtbl(ColFileNodeRel *xnodes, int nrels)
entry->rnode.dbNode = colFileNode.filenode.dbNode;
entry->rnode.relNode = colFileNode.filenode.relNode;
entry->rnode.bucketNode = colFileNode.filenode.bucketNode;
entry->rnode.opt = colFileNode.filenode.opt;
entry->maxSegNo = -1;
del_rel_num++;
}

View File

@ -1360,6 +1360,7 @@ void PageListBackWrite(uint32 *buf_list, int32 nbufs, uint32 flags = 0, SMgrRela
Assert(smgrReln->smgr_rnode.node.dbNode == bufHdr->tag.rnode.dbNode);
Assert(smgrReln->smgr_rnode.node.relNode == bufHdr->tag.rnode.relNode);
Assert(smgrReln->smgr_rnode.node.bucketNode == bufHdr->tag.rnode.bucketNode);
Assert(smgrReln->smgr_rnode.node.opt == bufHdr->tag.rnode.opt);
/* PageListBackWrite: jeh XLogFlush blocking? */
/*
@ -1985,7 +1986,8 @@ static bool ReadBuffer_common_ReadBlock(SMgrRelation smgr, char relpersistence,
.spcNode = spc->spcNode,
.dbNode = spc->dbNode,
.relNode = pblk->relNode,
.bucketNode = SegmentBktId
.bucketNode = SegmentBktId,
.opt = 0
};
seg_physical_read(spc, fakenode, forkNum, pblk->block, (char *)bufBlock);
if (PageIsVerified((Page)bufBlock, pblk->block)) {
@ -4446,7 +4448,8 @@ void FlushBuffer(void *buf, SMgrRelation reln, ReadBufferMethod flushmethod, boo
.spcNode = spc->spcNode,
.dbNode = spc->dbNode,
.relNode = bufdesc->seg_fileno,
.bucketNode = SegmentBktId
.bucketNode = SegmentBktId,
.opt = 0
};
seg_physical_write(spc, fakenode, bufferinfo.blockinfo.forknum, bufdesc->seg_blockno, bufToWrite, false);
} else {
@ -6532,6 +6535,7 @@ int ckpt_buforder_comparator(const void *pa, const void *pb)
} else { /* should not be the same block ... */
return 1;
}
/* do not need to compare opt */
}
/*

View File

@ -282,8 +282,8 @@ int StandbyReadPageforPrimary(RepairBlockKey key, uint32 blocksize, uint64 lsn,
}
}
RelFileNode relfilenode {key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode,
key.relfilenode.bucketNode};
RelFileNode relfilenode{key.relfilenode.spcNode, key.relfilenode.dbNode, key.relfilenode.relNode,
key.relfilenode.bucketNode, key.relfilenode.opt};
if (NULL != pblk) {
SegPageLocation loc = seg_get_physical_location(relfilenode, key.forknum, key.blocknum);
@ -383,6 +383,7 @@ Datum gs_read_file_from_remote(PG_FUNCTION_ARGS)
rnode.dbNode = PG_GETARG_UINT32(parano++);
rnode.relNode = PG_GETARG_UINT32(parano++);
rnode.bucketNode = PG_GETARG_INT32(parano++);
rnode.opt = 0;
forknum = PG_GETARG_INT32(parano++);
blockstart = PG_GETARG_INT32(parano++);
lsn = (uint64)PG_GETARG_TRANSACTIONID(parano++);
@ -443,6 +444,7 @@ Datum gs_read_file_size_from_remote(PG_FUNCTION_ARGS)
rnode.dbNode = PG_GETARG_UINT32(parano++);
rnode.relNode = PG_GETARG_UINT32(parano++);
rnode.bucketNode = PG_GETARG_INT32(parano++);
rnode.opt = 0;
forknum = PG_GETARG_INT32(parano++);
lsn = (uint64)PG_GETARG_TRANSACTIONID(parano++);
timeout = PG_GETARG_INT32(parano++);
@ -555,7 +557,8 @@ int ReadFileByReadDisk(SegSpace* spc, RemoteReadFileKey *key, char* bufBlock, Bl
.spcNode = key->relfilenode.spcNode,
.dbNode = key->relfilenode.dbNode,
.relNode = key->relfilenode.relNode,
.bucketNode = SegmentBktId
.bucketNode = SegmentBktId,
.opt = 0
};
SEG_RETRY:
seg_physical_read(spc, fakenode, key->forknum, blocknum, (char *)bufBlock);

View File

@ -1686,6 +1686,7 @@ UndoFileType CheckUndoPath(const char* fname, int* segNo)
rnode.dbNode = InvalidOid;
rnode.relNode = InvalidOid;
rnode.bucketNode = InvalidBktId;
rnode.opt = 0;
/* undo file and undo transaction slot file Checking */
if (sscanf_s(fname, "undo/permanent/%05X.%07zX", &rnode.relNode, segNo) == MATCH_TWO) {
return UNDO_RECORD;

View File

@ -144,6 +144,7 @@ void createBCMFile(Relation rel, int col)
hd->node.relNode = rel->rd_node.relNode;
hd->node.spcNode = rel->rd_node.spcNode;
hd->node.bucketNode = rel->rd_node.bucketNode;
hd->node.opt = 0;
hd->blockSize = col > 0 ? CUAlignUtils::GetCuAlignSizeColumnId(col) : BLCKSZ; /* default size for ROW_STORE */
if (col > 0)

View File

@ -1815,7 +1815,12 @@ SMGR_READ_STATUS mdread(SMgrRelation reln, ForkNumber forknum, BlockNumber block
static THR_LOCAL Oid lstSpc = InvalidOid;
if (IS_COMPRESSED_MAINFORK(reln, forknum)) {
return mdread_pc(reln, forknum, blocknum, buffer) ? SMGR_RD_OK : SMGR_RD_CRC_ERROR;
bool success = mdread_pc(reln, forknum, blocknum, buffer);
if (success && PageIsVerified((Page)buffer, blocknum)) {
return SMGR_RD_OK;
} else {
return SMGR_RD_CRC_ERROR;
}
}
(void)INSTR_TIME_SET_CURRENT(startTime);

View File

@ -764,7 +764,7 @@ int seg_unlink_filetag(const FileTag *ftag, char *path)
void segForgetDatabaseFsyncRequests(Oid dbid)
{
FileTag tag;
RelFileNode rnode = {.spcNode = 0, .dbNode = dbid, .relNode = 0, .bucketNode = InvalidBktId};
RelFileNode rnode = {.spcNode = 0, .dbNode = dbid, .relNode = 0, .bucketNode = InvalidBktId, .opt = 0};
tag.rnode = rnode;
tag.handler = SYNC_HANDLER_SEGMENT;

View File

@ -174,7 +174,8 @@ void eg_ctrl_init(SegSpace *spc, SegExtentGroup *seg, int extent_size, ForkNumbe
seg->rnode = {.spcNode = spc->spcNode,
.dbNode = spc->dbNode,
.relNode = EXTENT_SIZE_TO_TYPE(extent_size),
.bucketNode = SegmentBktId};
.bucketNode = SegmentBktId,
.opt = 0};
seg->forknum = forknum;
seg->map_head_entry = DF_MAP_HEAD_PAGE;

View File

@ -187,7 +187,8 @@ RelFileNode EXTENT_GROUP_RNODE(SegSpace *spc, ExtentSize extentSize)
return {.spcNode = spc->spcNode,
.dbNode = spc->dbNode,
.relNode = EXTENT_SIZE_TO_TYPE(extentSize),
.bucketNode = SegmentBktId};
.bucketNode = SegmentBktId,
.opt = 0};
}
void seg_head_update_xlog(Buffer head_buffer, SegmentHead *seg_head, int level0_slot,

View File

@ -962,6 +962,7 @@ static void push_unlink_rel_one_fork_to_hashtbl(RelFileNode node, ForkNumber for
entry->forkrnode.rnode.dbNode = key.rnode.dbNode;
entry->forkrnode.rnode.relNode = key.rnode.relNode;
entry->forkrnode.rnode.bucketNode = key.rnode.bucketNode;
entry->forkrnode.rnode.opt = key.rnode.opt;
entry->forkrnode.forkNum = key.forkNum;
entry->maxSegNo = -1;
del_rel_num++;

View File

@ -396,6 +396,7 @@ void TrDoPurgeObjectTruncate(TrObjDesc *desc)
rnode.dbNode = (rnode.spcNode == GLOBALTABLESPACE_OID) ? InvalidOid :
u_sess->proc_cxt.MyDatabaseId;
rnode.relNode = rbForm->rcyrelfilenode;
rnode.opt = 0;
rnode.bucketNode = InvalidBktId;
/*

View File

@ -173,12 +173,13 @@ typedef enum {
/* Populate a RelFileNode from an UndoRecPtr. */
#define UNDO_PTR_ASSIGN_REL_FILE_NODE(rfn, urp, dbId) \
do { \
(rfn).spcNode = DEFAULTTABLESPACE_OID; \
(rfn).dbNode = dbId; \
(rfn).relNode = UNDO_PTR_GET_REL_NODE(urp); \
(rfn).bucketNode = InvalidBktId; \
#define UNDO_PTR_ASSIGN_REL_FILE_NODE(rfn, urp, dbId) \
do { \
(rfn).spcNode = DEFAULTTABLESPACE_OID; \
(rfn).dbNode = dbId; \
(rfn).relNode = UNDO_PTR_GET_REL_NODE(urp); \
(rfn).bucketNode = InvalidBktId; \
(rfn).opt = 0; \
} while (false);
#define DECLARE_NODE_COUNT() \

View File

@ -135,12 +135,13 @@ typedef struct RelFileNodeOld
} while(0)
#define RelFileNodeCopy(relFileNode, relFileNodeRel, bucketid) \
do { \
(relFileNode).spcNode = (relFileNodeRel).spcNode; \
(relFileNode).dbNode = (relFileNodeRel).dbNode; \
(relFileNode).relNode = (relFileNodeRel).relNode; \
(relFileNode).bucketNode = (bucketid); \
} while(0)
do { \
(relFileNode).spcNode = (relFileNodeRel).spcNode; \
(relFileNode).dbNode = (relFileNodeRel).dbNode; \
(relFileNode).relNode = (relFileNodeRel).relNode; \
(relFileNode).bucketNode = (bucketid); \
(relFileNode).opt = 0; \
} while (0)
#define RelFileNodeV2Copy(relFileNodeV2, relFileNode) \
do { \
@ -291,14 +292,15 @@ static inline StorageType forknum_get_storage_type(const ForkNumber& forknum)
return (StorageType)(((uint)forknum & 0xC000) >> 14);
}
#define ColFileNodeCopy(colFileNode, colFileNodeRel) \
do { \
(colFileNode)->filenode.spcNode = (colFileNodeRel)->filenode.spcNode; \
(colFileNode)->filenode.dbNode = (colFileNodeRel)->filenode.dbNode; \
(colFileNode)->filenode.relNode = (colFileNodeRel)->filenode.relNode; \
#define ColFileNodeCopy(colFileNode, colFileNodeRel) \
do { \
(colFileNode)->filenode.spcNode = (colFileNodeRel)->filenode.spcNode; \
(colFileNode)->filenode.dbNode = (colFileNodeRel)->filenode.dbNode; \
(colFileNode)->filenode.relNode = (colFileNodeRel)->filenode.relNode; \
(colFileNode)->filenode.opt = 0; \
(colFileNode)->filenode.bucketNode = forknum_get_bucketid((colFileNodeRel)->forknum); \
(colFileNode)->forknum = forknum_get_forknum((colFileNodeRel)->forknum); \
(colFileNode)->ownerid= (colFileNodeRel)->ownerid; \
} while(0)
(colFileNode)->forknum = forknum_get_forknum((colFileNodeRel)->forknum); \
(colFileNode)->ownerid = (colFileNodeRel)->ownerid; \
} while (0)
#endif /* RELFILENODE_H */

View File

@ -80,3 +80,14 @@ ERROR: compress_level should be used with ZSTD algorithm.
create table unspported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2); -- failed
ERROR: compress_level should be used with ZSTD algorithm.
create table unspported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=2,compress_level=2); -- success
CREATE TABLE unspported_feature.index_test(id int, c1 text);
-- ustore
CREATE TABLE unspported_feature.ustore_table(id int, c1 text) WITH(compresstype=2, storage_type=ustore); --failed
ERROR: only row orientation table support compresstype.
CREATE INDEX tbl_pc_idx1 on unspported_feature.index_test(c1) WITH(compresstype=2, storage_type=ustore); --failed
ERROR: Can not use compress option in ustore index.
-- segment
CREATE TABLE unspported_feature.segment_table(id int, c1 text) WITH(compresstype=2, segment=on); --failed
ERROR: only row orientation table support compresstype.
CREATE INDEX on unspported_feature.index_test(c1) WITH(compresstype=2, segment=on); --failed
ERROR: Can not use compress option in segment storage.

View File

@ -49,4 +49,12 @@ alter table unspported_feature.t_rowcompress_0007 set (compress_diff_convert=tru
alter table unspported_feature.t_rowcompress_0007 set (compress_byte_convert=true, compress_diff_convert=true); --success
alter table unspported_feature.t_rowcompress_0007 set (compress_level=31); --failed
create table unspported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=1,compress_level=2); -- failed
create table unspported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=2,compress_level=2); -- success
create table unspported_feature.t_rowcompress_pglz_compresslevel(id int) with (compresstype=2,compress_level=2); -- success
CREATE TABLE unspported_feature.index_test(id int, c1 text);
-- ustore
CREATE TABLE unspported_feature.ustore_table(id int, c1 text) WITH(compresstype=2, storage_type=ustore); --failed
CREATE INDEX tbl_pc_idx1 on unspported_feature.index_test(c1) WITH(compresstype=2, storage_type=ustore); --failed
-- segment
CREATE TABLE unspported_feature.segment_table(id int, c1 text) WITH(compresstype=2, segment=on); --failed
CREATE INDEX on unspported_feature.index_test(c1) WITH(compresstype=2, segment=on); --failed