Compare commits

...

7 Commits
master ... dev

Author SHA1 Message Date
opengauss-bot bec7edffac !1397 openGauss supports online reindex
Merge pull request !1397 from 花花怪/master
2021-12-09 11:25:17 +00:00
jiajunchang 207ba4a272 fix_codestruct 2021-12-09 22:28:15 +08:00
jiajunchang 60688c6b51 fix_index.cpp4create_copy 2021-12-01 08:37:08 +08:00
jiajunchang 20ea1a2235 change_regress4reindexconcurrently 2021-11-25 19:25:49 +08:00
jiajunchang 831c15f67c change_parsenodes_common.h4reindexconcurrently 2021-11-25 19:15:19 +08:00
jiajunchang 1c3940ee3a change_h4reindexconcurrently 2021-11-25 19:03:30 +08:00
jiajunchang e654bc313b change_cpp4reindexconcurrently 2021-11-25 18:54:56 +08:00
25 changed files with 2551 additions and 236 deletions

View File

@ -2014,6 +2014,20 @@ static bool command_no_begin(const char* query)
return true;
if (wordlen == 10 && pg_strncasecmp(query, "tablespace", 10) == 0)
return true;
if (wordlen == 5 && (pg_strncasecmp(query, "index", 5) == 0 || pg_strncasecmp(query, "table", 5) == 0)) {
query += wordlen;
query = skip_white_space(query);
wordlen = 0;
while (isalpha((unsigned char) query[wordlen]))
wordlen += PQmblen(&query[wordlen], pset.encoding);
/*
* REINDEX [ TABLE | INDEX ] CONCURRENTLY are not allowed
* in xacts.
*/
if (wordlen == 12 && pg_strncasecmp(query, "concurrently", 12) == 0)
return true;
}
return false;
}
@ -2639,4 +2653,4 @@ char* GetEnvStr(const char* env)
}
}
return NULL;
}
}
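
Because command_no_begin() now recognizes REINDEX TABLE/INDEX ... CONCURRENTLY, the interactive frontend will no longer issue an implicit BEGIN for these statements when autocommit is off, since they cannot run inside a transaction block. A minimal sketch of the expected behavior (concur_idx is a hypothetical index name, not from the patch):

-- expected to fail: REINDEX ... CONCURRENTLY cannot run inside a transaction block
BEGIN;
REINDEX INDEX CONCURRENTLY concur_idx;
ROLLBACK;
-- expected to succeed as a standalone, non-transactional statement
REINDEX INDEX CONCURRENTLY concur_idx;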

View File

@ -2638,10 +2638,18 @@ static char** PsqlCompletion(const char *text, int start, int end)
COMPLETE_WITH_LIST(listReindex);
} else if (pg_strcasecmp(PREV2_WD, "REINDEX") == 0) {
if (pg_strcasecmp(PREV_WD, "TABLE") == 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, " UNION SELECT 'CONCURRENTLY'");
else if (pg_strcasecmp(PREV_WD, "INDEX") == 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, " UNION SELECT 'CONCURRENTLY'");
else if (pg_strcasecmp(PREV_WD, "SYSTEM") == 0 || pg_strcasecmp(PREV_WD, "DATABASE") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_databases " UNION SELECT 'CONCURRENTLY'");
} else if (pg_strcasecmp(PREV3_WD, "REINDEX") == 0) {
if (pg_strcasecmp(PREV2_WD, "TABLE") == 0 && pg_strcasecmp(PREV_WD, "CONCURRENTLY") == 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tm, NULL);
else if (pg_strcasecmp(PREV2_WD, "INDEX") == 0 && pg_strcasecmp(PREV_WD, "CONCURRENTLY") == 0)
COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_indexes, NULL);
else if ((pg_strcasecmp(PREV2_WD, "SYSTEM") == 0 || pg_strcasecmp(PREV2_WD, "DATABASE") == 0) &&
pg_strcasecmp(PREV_WD, "CONCURRENTLY") == 0)
COMPLETE_WITH_QUERY(Query_for_list_of_databases);
}

View File

@ -13,11 +13,11 @@
#include "dumputils.h"
static void reindex_one_database(const char* name, const char* dbname_tem, const char* type, const char* host,
const char* port, const char* username, enum trivalue prompt_password, const char* progname_tem, bool echo);
const char* port, const char* username, enum trivalue prompt_password, const char* progname_tem, bool echo, bool concurrently);
static void reindex_all_databases(const char* maintenance_db, const char* host, const char* port, const char* username,
enum trivalue prompt_password, const char* progname_tem, bool echo, bool quiet);
enum trivalue prompt_password, const char* progname_tem, bool echo, bool quiet, bool concurrently);
static void reindex_system_catalogs(const char* dbname_tem, const char* host, const char* port, const char* username,
enum trivalue prompt_password, const char* progname_tem, bool echo);
enum trivalue prompt_password, const char* progname_tem, bool echo, bool concurrently);
static void help(const char* progname_tem);
int main(int argc, char* argv[])
@ -34,6 +34,7 @@ int main(int argc, char* argv[])
{"system", no_argument, NULL, 's'},
{"table", required_argument, NULL, 't'},
{"index", required_argument, NULL, 'i'},
{"concurrently", no_argument, NULL, 1}
{"maintenance-db", required_argument, NULL, 2},
{NULL, 0, NULL, 0}};
@ -51,6 +52,7 @@ int main(int argc, char* argv[])
bool alldb = false;
bool echo = false;
bool quiet = false;
bool concurrently = false;
const char* table = NULL;
const char* index = NULL;
@ -98,6 +100,9 @@ int main(int argc, char* argv[])
case 'i':
index = optarg;
break;
case 1:
concurrently = true;
break;
case 2:
maintenance_db = optarg;
break;
@ -142,7 +147,7 @@ int main(int argc, char* argv[])
exit(1);
}
reindex_all_databases(maintenance_db, host, port, username, prompt_password, progname_tem, echo, quiet);
reindex_all_databases(maintenance_db, host, port, username, prompt_password, progname_tem, echo, quiet, concurrently);
} else if (syscatalog) {
if (table != NULL) {
fprintf(stderr, _("%s: cannot reindex a specific table and system catalogs at the same time\n"), progname_tem);
@ -162,7 +167,7 @@ int main(int argc, char* argv[])
dbname_tem = get_user_name(progname_tem);
}
reindex_system_catalogs(dbname_tem, host, port, username, prompt_password, progname_tem, echo);
reindex_system_catalogs(dbname_tem, host, port, username, prompt_password, progname_tem, echo, concurrently);
} else {
if (dbname_tem == NULL) {
if (getenv("PGDATABASE") != NULL)
@ -174,19 +179,19 @@ int main(int argc, char* argv[])
}
if (index != NULL)
reindex_one_database(index, dbname_tem, "INDEX", host, port, username, prompt_password, progname_tem, echo);
reindex_one_database(index, dbname_tem, "INDEX", host, port, username, prompt_password, progname_tem, echo, concurrently);
if (table != NULL)
reindex_one_database(table, dbname_tem, "TABLE", host, port, username, prompt_password, progname_tem, echo);
reindex_one_database(table, dbname_tem, "TABLE", host, port, username, prompt_password, progname_tem, echo, concurrently);
/* reindex database only if index or table is not specified */
if (index == NULL && table == NULL)
reindex_one_database(dbname_tem, dbname_tem, "DATABASE", host, port, username, prompt_password, progname_tem, echo);
reindex_one_database(dbname_tem, dbname_tem, "DATABASE", host, port, username, prompt_password, progname_tem, echo, concurrently);
}
exit(0);
}
static void reindex_one_database(const char* name, const char* dbname_tem, const char* type, const char* host,
const char* port, const char* username, enum trivalue prompt_password, const char* progname_tem, bool echo)
const char* port, const char* username, enum trivalue prompt_password, const char* progname_tem, bool echo, bool concurrently)
{
PQExpBufferData sql;
@ -194,13 +199,15 @@ static void reindex_one_database(const char* name, const char* dbname_tem, const
initPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "REINDEX");
if (strcmp(type, "TABLE") == 0)
appendPQExpBuffer(&sql, " TABLE %s", name);
else if (strcmp(type, "INDEX") == 0)
appendPQExpBuffer(&sql, " INDEX %s", name);
appendPQExpBuffer(&sql, "REINDEX ");
appendPQExpBuffer(&sql, type);
appendPQExpBuffer(&sql," ");
if(concurrently)
appendPQExpBuffer(&sql,"CONCURRENTLY ");
if (strcmp(type, "TABLE") == 0 || strcmp(type, "INDEX") == 0)
appendPQExpBuffer(&sql, name);
else if (strcmp(type, "DATABASE") == 0)
appendPQExpBuffer(&sql, " DATABASE %s", fmtId(name));
appendPQExpBuffer(&sql, fmtId(name));
appendPQExpBuffer(&sql, ";\n");
conn = connectDatabase(dbname_tem, host, port, username, prompt_password, progname_tem, false);
@ -230,7 +237,7 @@ static void reindex_one_database(const char* name, const char* dbname_tem, const
}
static void reindex_all_databases(const char* maintenance_db, const char* host, const char* port, const char* username,
enum trivalue prompt_password, const char* progname_tem, bool echo, bool quiet)
enum trivalue prompt_password, const char* progname_tem, bool echo, bool quiet, bool concurrently)
{
PGconn* conn = NULL;
PGresult* result = NULL;
@ -248,14 +255,14 @@ static void reindex_all_databases(const char* maintenance_db, const char* host,
fflush(stdout);
}
reindex_one_database(dbname_tem, dbname_tem, "DATABASE", host, port, username, prompt_password, progname_tem, echo);
reindex_one_database(dbname_tem, dbname_tem, "DATABASE", host, port, username, prompt_password, progname_tem, echo, concurrently);
}
PQclear(result);
}
static void reindex_system_catalogs(const char* dbname_tem, const char* host, const char* port, const char* username,
enum trivalue prompt_password, const char* progname_tem, bool echo)
enum trivalue prompt_password, const char* progname_tem, bool echo, bool concurrently)
{
PQExpBufferData sql;
@ -263,7 +270,11 @@ static void reindex_system_catalogs(const char* dbname_tem, const char* host, co
initPQExpBuffer(&sql);
appendPQExpBuffer(&sql, "REINDEX SYSTEM %s;\n", dbname_tem);
appendPQExpBuffer(&sql, "REINDEX SYSTEM ");
if (concurrently)
appendPQExpBuffer(&sql, "CONCURRENTLY ");
appendPQExpBuffer(&sql, "%s;\n", dbname_tem);
conn = connectDatabase(dbname_tem, host, port, username, prompt_password, progname_tem, false);
if (!executeMaintenanceCommand(conn, sql.data, echo)) {
@ -282,6 +293,7 @@ static void help(const char* progname_tem)
printf(_(" %s [OPTION]... [DBNAME]\n"), progname_tem);
printf(_("\nOptions:\n"));
printf(_(" -a, --all reindex all databases\n"));
printf(_(" --concurrently reindex concurrently\n"));
printf(_(" -d, --dbname=DBNAME database to reindex\n"));
printf(_(" -e, --echo show the commands being sent to the server\n"));
printf(_(" -i, --index=INDEX recreate specific index only\n"));

View File

@ -194,6 +194,9 @@ extern char* pg_get_functiondef_worker(Oid funcid, int* headerlines);
* a column default is dropped as an intermediate step while adding a new one,
* that's an internal operation. On the other hand, when the we drop something
* because the user issued a DROP statement against it, that's not internal.
*
* PERFORM_DELETION_CONCURRENTLY_LOCK: perform the drop normally but with a lock
* as if it were concurrent. This is used by REINDEX CONCURRENTLY.
*/
void performDeletion(const ObjectAddress* object, DropBehavior behavior, int flags)
{
@ -1167,9 +1170,10 @@ static void doDeletion(const ObjectAddress* object, int flags)
if (relKind == RELKIND_INDEX || relKind == RELKIND_GLOBAL_INDEX) {
bool concurrent = (((uint32)flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY);
bool concurrent_lock_mode = (((uint32)flags & PERFORM_DELETION_CONCURRENTLY_LOCK) == PERFORM_DELETION_CONCURRENTLY_LOCK);
Assert(object->objectSubId == 0);
index_drop(object->objectId, concurrent);
index_drop(object->objectId, concurrent, concurrent_lock_mode); /* changed for REINDEX CONCURRENTLY */
} else {
/*
* relation_open() must be before the heap_drop_with_catalog(). If you reload

View File

@ -48,6 +48,7 @@
#include "catalog/pg_tablespace.h"
#include "catalog/pg_trigger.h"
#include "catalog/pg_type.h"
#include "catalog/pg_description.h"
#include "catalog/storage.h"
#include "catalog/storage_gtt.h"
#include "commands/tablecmds.h"
@ -799,9 +800,9 @@ Oid index_create(Relation heapRelation, const char *indexRelationName, Oid index
/*
* concurrent index build on a system catalog is unsafe because we tend to
* release locks before committing in catalogs
* release locks before committing in catalogs.
*/
if (concurrent && IsSystemRelation(heapRelation))
if (concurrent && IsCatalogRelation(heapRelation))
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("concurrent index creation on system catalog tables is not supported")));
@ -1306,6 +1307,672 @@ Oid partition_index_create(const char* partIndexName, /* the name of partition i
return indexid;
}
/*
* index_concurrently_create_copy
*
* Create concurrently an index based on the definition of the one provided
* by caller. The index is inserted into catalogs and needs to be built later
* on. This is called during concurrent reindex processing.
*/
Oid index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char* newName)
{
Relation indexRelation;
IndexInfo* indexInfo;
Oid newIndexId = InvalidOid;
HeapTuple indexTuple, classTuple;
Form_pg_index indexForm;
Datum indclassDatum, colOptionDatum, optionDatum;
oidvector* indclass;
int2vector* indcoloptions;
bool isnull;
List* indexColNames = NIL;
bool isprimary;
indexRelation = index_open(oldIndexId, RowExclusiveLock);
/* New index uses the same index information as old index */
indexInfo = BuildIndexInfo(indexRelation);
/* Get the array of class and column options IDs from index info */
indexTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(oldIndexId));
if(!HeapTupleIsValid(indexTuple))
elog(ERROR, "cache lookup failed for index %u", oldIndexId);
indclassDatum = SysCacheGetAttr(INDEXRELID, indexTuple, Anum_pg_index_indclass, &isnull);
Assert(!isnull);
indclass = (oidvector*) DatumGetPointer(indclassDatum);
colOptionDatum = SysCacheGetAttr(INDEXRELID, indexTuple, Anum_pg_index_indoption, &isnull);
Assert(!isnull);
indcoloptions = (int2vector*) DatumGetPointer(colOptionDatum);
/* Get index info about primary */
indexForm = (Form_pg_index) GETSTRUCT(indexTuple);
isprimary = indexForm->indisprimary;
/* Fetch options of index if any */
classTuple = SearchSysCache1(RELOID, oldIndexId);
if(!HeapTupleIsValid(classTuple))
elog(ERROR, "cache lookup failed for relation %u", oldIndexId);
optionDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_reloptions, &isnull);
/*
* Extract the list of column names to be used for the index
* creation
*/
for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
TupleDesc indexTupDesc = RelationGetDescr(indexRelation);
Form_pg_attribute att = TupleDescAttr(indexTupDesc, i);
indexColNames = lappend(indexColNames, NameStr(att->attname));
}
/* Make the indexCreateExtraArgs */
IndexCreateExtraArgs extra;
SetIndexCreateExtraArgs(&extra, indexRelation->rd_rel->relcudescrelid,
(indexRelation->rd_rel->parttype == PARTTYPE_PARTITIONED_RELATION) ||
(indexRelation->rd_rel->relkind == RELKIND_GLOBAL_INDEX),
indexRelation->rd_rel->relkind == RELKIND_GLOBAL_INDEX);
/* Now create the new index */
newIndexId = index_create(heapRelation,
newName,
InvalidOid,
InvalidOid,
indexInfo,
indexColNames,
indexRelation->rd_rel->relam,
indexRelation->rd_rel->reltablespace,
indexRelation->rd_indcollation,
indclass->values,
indcoloptions->values,
optionDatum,
isprimary,
false,
false,
false,
true,
true,
true,
&extra
);
/* Close the relations used and clean up */
index_close(indexRelation, NoLock);
ReleaseSysCache(indexTuple);
ReleaseSysCache(classTuple);
return newIndexId;
}
/*
* index_concurrently_build
*
* Build index for a concurrent operation. Low-level locks are taken when
* this operation is performed to prevent only schema change, but they need
* to be kept until the end of the transaction performing this operation.
* 'indexOid' refers to an index relation OID already created as part of
* previous processing, and 'heapOid' refers to its parent heap relation.
*/
void index_concurrently_build(Oid heapRelationId, Oid indexRelationId, bool isPrimary, AdaptMem* memInfo, bool dbWide)
{
Relation heapRel;
Relation indexRelation;
IndexInfo* indexInfo;
/* This had better make sure that a snapshot is active */
Assert(ActiveSnapshotSet());
/* Open and lock the parent heap relation */
heapRel = heap_open(heapRelationId, ShareUpdateExclusiveLock);
/* And the target index relation */
indexRelation = index_open(indexRelationId, RowExclusiveLock);
/* We have to re-build the IndexInfo struct, since it was lost in commit */
indexInfo = BuildIndexInfo(indexRelation);
Assert(!indexInfo->ii_ReadyForInserts);
indexInfo->ii_Concurrent = true;
indexInfo->ii_BrokenHotChain = false;
/* workload client manager */
if (IS_PGXC_COORDINATOR && ENABLE_WORKLOAD_CONTROL) {
/* if operatorMem is already set, the mem check is already done */
if (memInfo != NULL && memInfo->work_mem == 0) {
EstIdxMemInfo(heapRel, NULL, &indexInfo->ii_desc, indexInfo, indexRelation->rd_am->amname.data);
if (dbWide) {
indexInfo->ii_desc.cost = g_instance.cost_cxt.disable_cost;
indexInfo->ii_desc.query_mem[0] = Max(STATEMENT_MIN_MEM * 1024, indexInfo->ii_desc.query_mem[0]);
}
WLMInitQueryPlan((QueryDesc*)&indexInfo->ii_desc, false);
dywlm_client_manager((QueryDesc*)&indexInfo->ii_desc, false);
AdjustIdxMemInfo(memInfo, &indexInfo->ii_desc);
}
} else if (IS_PGXC_DATANODE && memInfo != NULL && memInfo->work_mem > 0) {
indexInfo->ii_desc.query_mem[0] = memInfo->work_mem;
indexInfo->ii_desc.query_mem[1] = memInfo->max_mem;
}
/* Now build the index */
index_build(heapRel, NULL, indexRelation, NULL, indexInfo, isPrimary, false, INDEX_CREATE_NONE_PARTITION);
/* Close both the relations, but keep the locks */
heap_close(heapRel, NoLock);
index_close(indexRelation, NoLock);
/*
* Update the pg_index row to mark the index as ready for inserts. Once we
* commit this transaction, any new transactions that open the table must
* insert new entries into the index for insertions and non-HOT updates.
*/
index_set_state_flags(indexRelationId, INDEX_CREATE_SET_READY);
}
/*
* index_concurrently_swap
* Swap name, dependencies, and constraints of the old index over to the new
* index, while marking the old index as invalid and the new as valid.
*/
void index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char* oldName)
{
Relation pg_class, pg_index, pg_constraint, pg_trigger;
Relation oldClassRel, newClassRel;
HeapTuple oldClassTuple, newClassTuple;
Form_pg_class oldClassForm, newClassForm;
HeapTuple oldIndexTuple, newIndexTuple;
Form_pg_index oldIndexForm, newIndexForm;
Oid indexConstraintOid;
List* constraintOids = NIL;
ListCell* lc;
/*
* Take a necessary lock on the old and new index before swapping them.
*/
oldClassRel = relation_open(oldIndexId, ShareUpdateExclusiveLock);
newClassRel = relation_open(newIndexId, ShareUpdateExclusiveLock);
/* Now swap names and dependencies of those indexes */
pg_class = heap_open(RelationRelationId, RowExclusiveLock);
oldClassTuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(oldIndexId));
if(!HeapTupleIsValid(oldClassTuple))
elog(ERROR, "could not find tuple for relation %u", oldIndexId);
newClassTuple = SearchSysCacheCopy1(RELOID, ObjectIdGetDatum(newIndexId));
if(!HeapTupleIsValid(newClassTuple))
elog(ERROR, "could not find tuple for relation %u", newIndexId);
oldClassForm = (Form_pg_class) GETSTRUCT(oldClassTuple);
newClassForm = (Form_pg_class) GETSTRUCT(newClassTuple);
/* Swap the name */
namestrcpy(&newClassForm->relname, NameStr(oldClassForm->relname));
namestrcpy(&oldClassForm->relname, oldName);
simple_heap_update(pg_class, &oldClassTuple->t_self, oldClassTuple);
CatalogUpdateIndexes(pg_class, oldClassTuple);
simple_heap_update(pg_class, &newClassTuple->t_self, newClassTuple);
CatalogUpdateIndexes(pg_class, newClassTuple);
heap_freetuple(oldClassTuple);
heap_freetuple(newClassTuple);
/* Now swap index info */
pg_index = heap_open(IndexRelationId, RowExclusiveLock);
oldIndexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(oldIndexId));
if (!HeapTupleIsValid(oldIndexTuple))
elog(ERROR, "could not find tuple for relation %u", oldIndexId);
newIndexTuple = SearchSysCacheCopy1(INDEXRELID, ObjectIdGetDatum(newIndexId));
if (!HeapTupleIsValid(newIndexTuple))
elog(ERROR, "could not find tuple for relation %u", newIndexId);
oldIndexForm = (Form_pg_index) GETSTRUCT(oldIndexTuple);
newIndexForm = (Form_pg_index) GETSTRUCT(newIndexTuple);
/*
* Copy constraint flags from the old index. This is safe because the old
* index guaranteed uniqueness.
*/
newIndexForm->indisprimary = oldIndexForm->indisprimary;
oldIndexForm->indisprimary = false;
newIndexForm->indisexclusion = oldIndexForm->indisexclusion;
oldIndexForm->indisexclusion = false;
newIndexForm->indimmediate = oldIndexForm->indimmediate;
oldIndexForm->indimmediate = true;
/* Mark the new index as valid and the old one as invalid, as index_set_state_flags() would */
newIndexForm->indisvalid = true;
oldIndexForm->indisvalid = false;
oldIndexForm->indisclustered = false;
simple_heap_update(pg_index, &oldIndexTuple->t_self, oldIndexTuple);
CatalogUpdateIndexes(pg_index, oldIndexTuple);
simple_heap_update(pg_index, &newIndexTuple->t_self, newIndexTuple);
CatalogUpdateIndexes(pg_index, newIndexTuple);
heap_freetuple(oldIndexTuple);
heap_freetuple(newIndexTuple);
/*
* Move constraints and triggers over to the new index
*/
constraintOids = get_index_ref_constraints(oldIndexId);
indexConstraintOid = get_index_constraint(oldIndexId);
if(OidIsValid(indexConstraintOid))
constraintOids = lappend_oid(constraintOids, indexConstraintOid);
pg_constraint = heap_open(ConstraintRelationId, RowExclusiveLock);
pg_trigger = heap_open(TriggerRelationId, RowExclusiveLock);
foreach(lc, constraintOids) {
HeapTuple constraintTuple, triggerTuple;
Form_pg_constraint conForm;
ScanKeyData key[1];
SysScanDesc scan;
Oid constraintOid = lfirst_oid(lc);
/* Move the constraint from the old to the new index */
constraintTuple = SearchSysCacheCopy1(CONSTROID, ObjectIdGetDatum(constraintOid));
if (!HeapTupleIsValid(constraintTuple))
elog(ERROR, "could not find tuple for constraint %u", constraintOid);
conForm = (Form_pg_constraint) GETSTRUCT(constraintTuple);
if (conForm->conindid == oldIndexId)
{
conForm->conindid = newIndexId;
simple_heap_update(pg_constraint, &constraintTuple->t_self, constraintTuple);
CatalogUpdateIndexes(pg_constraint, constraintTuple);
}
heap_freetuple(constraintTuple);
/* Search for trigger records */
ScanKeyInit(&key[0], Anum_pg_trigger_tgconstraint, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(constraintOid));
scan = systable_beginscan(pg_trigger, TriggerConstraintIndexId, true, NULL, 1, key);
while (HeapTupleIsValid(triggerTuple = systable_getnext(scan)))
{
Form_pg_trigger tgForm = (Form_pg_trigger) GETSTRUCT(triggerTuple);
if (tgForm->tgconstrindid != oldIndexId)
continue;
/* Make a modifiable copy */
triggerTuple = heap_copytuple(triggerTuple);
tgForm = (Form_pg_trigger) GETSTRUCT(triggerTuple);
tgForm->tgconstrindid = newIndexId;
simple_heap_update(pg_trigger, &triggerTuple->t_self, triggerTuple);
CatalogUpdateIndexes(pg_trigger, triggerTuple);
heap_freetuple(triggerTuple);
}
systable_endscan(scan);
}
/*
* Move comment if any
*/
{
Relation description;
ScanKeyData skey[3];
SysScanDesc sd;
HeapTuple tuple;
Datum values[Natts_pg_description] = {0};
bool nulls[Natts_pg_description] = {0};
bool replaces[Natts_pg_description] = {0};
values[Anum_pg_description_objoid - 1] = ObjectIdGetDatum(newIndexId);
replaces[Anum_pg_description_objoid - 1] = true;
ScanKeyInit(&skey[0], Anum_pg_description_objoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oldIndexId));
ScanKeyInit(&skey[1], Anum_pg_description_classoid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&skey[2], Anum_pg_description_objsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(0));
description = heap_open(DescriptionRelationId, RowExclusiveLock);
sd = systable_beginscan(description, DescriptionObjIndexId, true, NULL, 3, skey);
while ((tuple = systable_getnext(sd)) != NULL)
{
tuple = heap_modify_tuple(tuple, RelationGetDescr(description), values, nulls, replaces);
simple_heap_update(description, &tuple->t_self, tuple);
CatalogUpdateIndexes(description, tuple);
break; /* Assume there can be only one match */
}
systable_endscan(sd);
heap_close(description, NoLock);
}
/*
* Move all dependencies on the old index to the new one
*/
if (OidIsValid(indexConstraintOid))
{
ObjectAddress myself, referenced;
/* Change to having the new index depend on the constraint */
deleteDependencyRecordsForClass(RelationRelationId, oldIndexId, ConstraintRelationId, DEPENDENCY_INTERNAL);
myself.classId = RelationRelationId;
myself.objectId = newIndexId;
myself.objectSubId = 0;
referenced.classId = ConstraintRelationId;
referenced.objectId = indexConstraintOid;
referenced.objectSubId = 0;
recordDependencyOn(&myself, &referenced, DEPENDENCY_INTERNAL);
}
changeDependenciesOn(RelationRelationId, oldIndexId, newIndexId);
/*
* Copy over statistics from old to new index
*/
{
PgStat_StatTabKey tabkey;
PgStat_StatTabEntry *tabentry;
tabkey.tableid = oldIndexId;
tabentry = pgstat_fetch_stat_tabentry(&tabkey);
if (tabentry)
{
if (newClassRel->pgstat_info)
{
newClassRel->pgstat_info->t_counts.t_numscans = tabentry->numscans;
newClassRel->pgstat_info->t_counts.t_tuples_returned = tabentry->tuples_returned;
newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
/* The data will be sent by the next pgstat_report_stat() call. */
}
}
}
/* Close relations */
heap_close(pg_class, RowExclusiveLock);
heap_close(pg_index, RowExclusiveLock);
heap_close(pg_constraint, RowExclusiveLock);
heap_close(pg_trigger, RowExclusiveLock);
/* The lock taken previously is not released until the end of transaction */
relation_close(oldClassRel, NoLock);
relation_close(newClassRel, NoLock);
}
/*
* index_concurrently_set_dead
* Perform the last invalidation stage of DROP INDEX CONCURRENTLY or REINDEX
* CONCURRENTLY before actually dropping the index. After calling this
* function, the index is seen by all the backends as dead. Low-level locks
* taken here are kept until the end of the transaction calling this function.
*/
void index_concurrently_set_dead(Oid heapId, Oid indexId)
{
Relation userHeapRelation;
Relation userIndexRelation;
/*
* No more predicate locks will be acquired on this index, and we're
* about to stop doing inserts into the index which could show
* conflicts with existing predicate locks, so now is the time to move
* them to the heap relation.
*/
userHeapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
TransferPredicateLocksToHeapRelation(userIndexRelation);
/*
* Now we are sure that nobody uses the index for queries; they just
* might have it open for updating it. So now we can unset indisready
* and set indisvalid, then wait till nobody could be using it at all
* anymore.
*/
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
/*
* Invalidate the relcache for the table, so that after this commit
* all sessions will refresh the table's index list. Forgetting just
* the index's relcache entry is not enough.
*/
CacheInvalidateRelcache(userHeapRelation);
/*
* Close the relations again, though still holding session lock
*/
heap_close(userHeapRelation, NoLock);
index_close(userIndexRelation, NoLock);
}
/*
* index_concurrently_part_create_copy
*
* Create concurrently a part index based on the definition of the one provided
* by caller. The index is inserted into catalogs and needs to be built later
* on. This is called during concurrent reindex partition processing.
*/
Oid index_concurrently_part_create_copy(Oid oldIndexPartId, const char* newName) {
Oid newIndexPartId = InvalidOid;
Oid heapPartId = InvalidOid;
Oid indexId = InvalidOid;
Oid heapId = InvalidOid;
Oid partitiontspid = InvalidOid;
Relation indexRelation = NULL;
Relation heapRelation = NULL;
Partition indexPart = NULL;
Partition heapPart = NULL;
HeapTuple indexPartTuple, classTuple;
Form_pg_partition indexPartForm;
Relation pg_partition_rel = NULL;
IndexInfo* indexInfo;
List* indexColNames = NIL;
bool isnull;
Datum optionDatum;
indexPartTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(oldIndexPartId));
if(!HeapTupleIsValid(indexPartTuple)) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("cache lookup failed for partition index %u", oldIndexPartId)));
}
indexPartForm = (Form_pg_partition)GETSTRUCT(indexPartTuple);
heapPartId = indexPartForm->indextblid;
indexId = indexPartForm->parentid;
indexRelation = index_open(indexId, AccessShareLock);
heapId = indexRelation->rd_index->indrelid;
heapRelation = heap_open(heapId, AccessShareLock);
partitiontspid = indexPartForm->reltablespace;
heapPart = partitionOpen(heapRelation, heapPartId, ShareUpdateExclusiveLock);
indexPart = partitionOpen(indexRelation, oldIndexPartId, RowExclusiveLock);
indexInfo = BuildIndexInfo(indexRelation);
/* Fetch the options of index if any */
classTuple = SearchSysCache1(RELOID, indexId);
if(!HeapTupleIsValid(classTuple)) {
ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), errmsg("cache lookup failed for relation %u", indexId)));
}
optionDatum = SysCacheGetAttr(RELOID, classTuple, Anum_pg_class_reloptions, &isnull);
/*
* Extract the list of column names to be used for the index
* creation.
*/
for(int i = 0; i < indexInfo->ii_NumIndexAttrs; i++) {
TupleDesc indexTupDesc = RelationGetDescr(indexRelation);
Form_pg_attribute att = TupleDescAttr(indexTupDesc, i);
indexColNames = lappend(indexColNames, NameStr(att->attname));
}
pg_partition_rel = heap_open(PartitionRelationId, RowExclusiveLock);
PartIndexCreateExtraArgs partExtra;
partExtra.existingPSortOid = indexPart->pd_part->relcudescrelid;
newIndexPartId = partition_index_create(newName, InvalidOid, heapPart, partitiontspid, indexRelation, heapRelation,
pg_partition_rel, indexInfo, indexColNames, optionDatum, true, &partExtra);
partitionClose(indexRelation, indexPart, NoLock);
partitionClose(heapRelation, heapPart, NoLock);
heap_close(pg_partition_rel, RowExclusiveLock);
heap_close(indexRelation, NoLock);
heap_close(heapRelation, NoLock);
ReleaseSysCache(indexPartTuple);
ReleaseSysCache(classTuple);
return newIndexPartId;
}
/*
* index_concurrently_part_build
*
* Build partition index for a concurrent operation. Low-level locks are taken when
* this operation is performed to prevent only schema change, but they need
* to be kept until the end of the transaction performing this operation.
*/
void index_concurrently_part_build(Oid heapRelationId, Oid heapPartitionId, Oid indexRelationId, Oid IndexPartitionId, AdaptMem* memInfo, bool dbWide) {
Relation heapRelation;
Relation indexRelation;
Partition heapPartition;
Partition indexPartition;
IndexInfo* indexInfo;
/* Open and lock the parent heap relation and index relation */
heapRelation = heap_open(heapRelationId, AccessShareLock);
indexRelation = index_open(indexRelationId, AccessShareLock);
/* Open and lock the parent heap partition */
heapPartition = partitionOpen(heapRelation, heapPartitionId, ShareUpdateExclusiveLock);
/* Open and lock target index partition */
indexPartition = partitionOpen(indexRelation, IndexPartitionId, RowExclusiveLock);
/* We have to re-build the IndexInfo struct, since it was lost in commit */
indexInfo = BuildIndexInfo(indexRelation);
//Assert(!indexInfo->ii_ReadyForInserts); --part index indexInfo->ii_ReadyForInserts is false
indexInfo->ii_Concurrent = true;
indexInfo->ii_BrokenHotChain = false;
/* workload client manager */
if (IS_PGXC_COORDINATOR && ENABLE_WORKLOAD_CONTROL) {
/* if operatorMem is already set, the mem check is already done */
if (memInfo != NULL && memInfo->work_mem == 0) {
EstIdxMemInfo(heapRelation, NULL, &indexInfo->ii_desc, indexInfo, indexRelation->rd_am->amname.data);
if (dbWide) {
indexInfo->ii_desc.cost = g_instance.cost_cxt.disable_cost;
indexInfo->ii_desc.query_mem[0] = Max(STATEMENT_MIN_MEM * 1024, indexInfo->ii_desc.query_mem[0]);
}
WLMInitQueryPlan((QueryDesc*)&indexInfo->ii_desc, false);
dywlm_client_manager((QueryDesc*)&indexInfo->ii_desc, false);
AdjustIdxMemInfo(memInfo, &indexInfo->ii_desc);
}
} else if (IS_PGXC_DATANODE && memInfo != NULL && memInfo->work_mem > 0) {
indexInfo->ii_desc.query_mem[0] = memInfo->work_mem;
indexInfo->ii_desc.query_mem[1] = memInfo->max_mem;
}
/* Now build the index */
index_build(heapRelation, heapPartition, indexRelation, indexPartition, indexInfo, false, true, INDEX_CREATE_LOCAL_PARTITION);
/* Close the partitions and relations, but keep the locks */
partitionClose(heapRelation, heapPartition, NoLock);
partitionClose(indexRelation, indexPartition, NoLock);
heap_close(heapRelation, NoLock);
index_close(indexRelation, NoLock);
}
/*
* index_concurrently_part_swap
*
* Swap the name of the old index partition over to the new one, while marking
* the old partition as unusable and the new one as usable.
*/
void index_concurrently_part_swap(Relation indexRelation, Oid newIndexPartId, Oid oldIndexPartId, const char *oldName) {
Relation pg_partition;
Partition oldIndexPartition, newIndexPartition;
HeapTuple oldIndexPartTuple, newIndexPartTuple;
Form_pg_partition oldIndexPartForm, newIndexPartForm;
/*
* Take a necessary lock on the old and new index before swapping them.
*/
oldIndexPartition = partitionOpen(indexRelation, oldIndexPartId, ShareUpdateExclusiveLock);
newIndexPartition = partitionOpen(indexRelation, newIndexPartId, ShareUpdateExclusiveLock);
/* Now swap the names of those index partitions */
pg_partition = heap_open(PartitionRelationId, RowExclusiveLock);
oldIndexPartTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(oldIndexPartId));
if(!HeapTupleIsValid(oldIndexPartTuple))
elog(ERROR, "could not find tuple for relation %u", oldIndexPartId);
newIndexPartTuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(newIndexPartId));
if(!HeapTupleIsValid(newIndexPartTuple))
elog(ERROR, "could not find tuple for relation %u", newIndexPartId);
oldIndexPartForm = (Form_pg_partition) GETSTRUCT(oldIndexPartTuple);
newIndexPartForm = (Form_pg_partition) GETSTRUCT(newIndexPartTuple);
/* Swap the name */
namestrcpy(&newIndexPartForm->relname, NameStr(oldIndexPartForm->relname));
namestrcpy(&oldIndexPartForm->relname, oldName);
/* Mark old index as invalid and new as valid */
newIndexPartForm->indisusable = true;
oldIndexPartForm->indisusable = false;
simple_heap_update(pg_partition, &oldIndexPartTuple->t_self, oldIndexPartTuple);
CatalogUpdateIndexes(pg_partition, oldIndexPartTuple);
simple_heap_update(pg_partition, &newIndexPartTuple->t_self, newIndexPartTuple);
CatalogUpdateIndexes(pg_partition, newIndexPartTuple);
ReleaseSysCache(oldIndexPartTuple);
ReleaseSysCache(newIndexPartTuple);
/*
* Copy over statistics from old to new index
*/
{
PgStat_StatTabKey tabkey;
PgStat_StatTabEntry* tabentry;
tabkey.tableid = oldIndexPartId;
tabentry = pgstat_fetch_stat_tabentry(&tabkey);
if (tabentry)
{
if (newIndexPartition->pd_pgstat_info)
{
newIndexPartition->pd_pgstat_info->t_counts.t_numscans = tabentry->numscans;
newIndexPartition->pd_pgstat_info->t_counts.t_tuples_returned = tabentry->tuples_returned;
newIndexPartition->pd_pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
newIndexPartition->pd_pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
newIndexPartition->pd_pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
/* The data will be sent by the next pgstat_report_stat() call. */
}
}
}
/* Close relation */
heap_close(pg_partition, RowExclusiveLock);
/* The lock taken previously is not released until the end of transaction */
partitionClose(indexRelation, oldIndexPartition, NoLock);
partitionClose(indexRelation, newIndexPartition, NoLock);
}
/*
* index_constraint_create
*
@ -1519,7 +2186,7 @@ static void MotFdwDropForeignIndex(Relation userHeapRelation, Relation userIndex
* NOTE: this routine should now only be called through performDeletion(),
* else associated dependencies won't be cleaned up.
*/
void index_drop(Oid indexId, bool concurrent)
void index_drop(Oid indexId, bool concurrent, bool concurrent_lock_mode)
{
Oid heapId;
Relation userHeapRelation;
@ -1563,7 +2230,7 @@ void index_drop(Oid indexId, bool concurrent)
* using it.)
*/
heapId = IndexGetRelation(indexId, false);
lockmode = concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock;
lockmode = (concurrent || concurrent_lock_mode) ? ShareUpdateExclusiveLock : AccessExclusiveLock;
userHeapRelation = heap_open(heapId, lockmode);
userIndexRelation = index_open(indexId, lockmode);
@ -1690,36 +2357,8 @@ void index_drop(Oid indexId, bool concurrent)
old_lockholders++;
}
/*
* No more predicate locks will be acquired on this index, and we're
* about to stop doing inserts into the index which could show
* conflicts with existing predicate locks, so now is the time to move
* them to the heap relation.
*/
userHeapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
TransferPredicateLocksToHeapRelation(userIndexRelation);
/*
* Now we are sure that nobody uses the index for queries; they just
* might have it open for updating it. So now we can unset indisready
* and set indisvalid, then wait till nobody could be using it at all
* anymore.
*/
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
/*
* Invalidate the relcache for the table, so that after this commit
* all sessions will refresh the table's index list. Forgetting just
* the index's relcache entry is not enough.
*/
CacheInvalidateRelcache(userHeapRelation);
/*
* Close the relations again, though still holding session lock.
*/
heap_close(userHeapRelation, NoLock);
index_close(userIndexRelation, NoLock);
/* Finish invalidation of index and mark it as dead */
index_concurrently_set_dead(heapId, indexId);
/*
* Again, commit the transaction to make the pg_index update visible
@ -3793,7 +4432,7 @@ static void IndexCheckExclusion(Relation heapRelation, Relation indexRelation, I
* making the table append-only by setting use_fsm). However that would
* add yet more locking issues.
*/
void validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
void validate_index(Oid heapId, Oid indexId, Snapshot snapshot, bool isPart)
{
Relation heapRelation, indexRelation;
IndexInfo* indexInfo = NULL;
@ -3803,10 +4442,27 @@ void validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
int save_sec_context;
int save_nestlevel;
/* Open and lock the parent heap relation */
heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
/* And the target index relation */
indexRelation = index_open(indexId, RowExclusiveLock);
/* these variables are used for partition indexes */
Oid heapParentId, indexParentId;
Relation heapParentRel, indexParentRel;
Partition heapPartition, indexPartition;
if (isPart){
heapParentId = PartIdGetParentId(heapId, false);
heapParentRel = heap_open(heapParentId, AccessShareLock);
heapPartition = partitionOpen(heapParentRel, heapId, ShareUpdateExclusiveLock);
heapRelation = partitionGetRelation(heapParentRel, heapPartition);
indexParentId = PartIdGetParentId(indexId, false);
indexParentRel = index_open(indexParentId, AccessShareLock);
indexPartition = partitionOpen(indexParentRel, indexId, RowExclusiveLock);
indexRelation = partitionGetRelation(indexParentRel, indexPartition);
}
else {
/* Open and lock the parent heap relation */
heapRelation = heap_open(heapId, ShareUpdateExclusiveLock);
/* And the target index relation */
indexRelation = index_open(indexId, RowExclusiveLock);
}
/*
* Fetch info needed for index_insert. (You might think this should be
@ -3867,8 +4523,18 @@ void validate_index(Oid heapId, Oid indexId, Snapshot snapshot)
SetUserIdAndSecContext(save_userid, save_sec_context);
/* Close rels, but keep locks */
index_close(indexRelation, NoLock);
heap_close(heapRelation, NoLock);
if (isPart) {
partitionClose(indexParentRel, indexPartition, NoLock);
partitionClose(heapParentRel, heapPartition, NoLock);
index_close(indexParentRel, NoLock);
heap_close(heapParentRel, NoLock);
releaseDummyRelation(&indexRelation);
releaseDummyRelation(&heapRelation);
}
else {
index_close(indexRelation, NoLock);
heap_close(heapRelation, NoLock);
}
}
/*
@ -4190,6 +4856,51 @@ Oid IndexGetRelation(Oid indexId, bool missing_ok)
return result;
}
/*
* PartIndexGetPartition: given a partition index's OID, get the OID of the
* partition it is an index on. Uses the system cache.
*/
Oid PartIndexGetPartition(Oid partIndexId, bool missing_ok)
{
HeapTuple tuple;
Form_pg_partition indexForm;
Oid result;
tuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partIndexId));
if(!HeapTupleIsValid(tuple)) {
if (missing_ok)
return InvalidOid;
ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for partition index %u", partIndexId)));
}
indexForm = (Form_pg_partition)GETSTRUCT(tuple);
result = indexForm->indextblid;
ReleaseSysCache(tuple);
return result;
}
/*
* PartIdGetParentId: given a partition OID, get the OID of the
* parent. Uses the system cache.
*/
Oid PartIdGetParentId(Oid partIndexId, bool missing_ok)
{
HeapTuple tuple;
Form_pg_partition indexForm;
Oid result;
tuple = SearchSysCache1(PARTRELID, ObjectIdGetDatum(partIndexId));
if(!HeapTupleIsValid(tuple)) {
if (missing_ok)
return InvalidOid;
ereport(ERROR, (errcode(ERRCODE_CACHE_LOOKUP_FAILED), errmsg("cache lookup failed for partition %u", partIndexId)));
}
indexForm = (Form_pg_partition)GETSTRUCT(tuple);
result = indexForm->parentid;
ReleaseSysCache(tuple);
return result;
}
/*
* @@GaussDB@@
* Target : data partition
@ -5028,6 +5739,49 @@ bool reindexPartition(Oid relid, Oid partOid, int flags, int reindexType)
return result;
}
/*
* Use the partition OID and index OID to look up the index partition OID
*/
Oid heapPartitionIdGetindexPartitionId(Oid indexId, Oid partOid) {
Relation partRel = NULL;
SysScanDesc partScan;
HeapTuple partTuple;
Form_pg_partition partForm;
ScanKeyData partKey;
Oid indexPartOid = InvalidOid;
/*
* Find the tuple in pg_partition whose 'indextblid' is partOid
* and 'parentid' is indexId with systable scan.
*/
partRel = heap_open(PartitionRelationId, AccessShareLock);
ScanKeyInit(&partKey, Anum_pg_partition_indextblid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partOid));
partScan = systable_beginscan(partRel, PartitionIndexTableIdIndexId, true, NULL, 1, &partKey);
while ((partTuple = systable_getnext(partScan)) != NULL) {
partForm = (Form_pg_partition)GETSTRUCT(partTuple);
if (partForm->parentid == indexId) {
indexPartOid = HeapTupleGetOid(partTuple);
break;
}
}
/* End scan and close pg_partition */
systable_endscan(partScan);
heap_close(partRel, AccessShareLock);
if (!OidIsValid(indexPartOid)) {
ereport(ERROR,
(errcode(ERRCODE_CACHE_LOOKUP_FAILED),
errmsg("cache lookup failed for partitioned index %u", indexId)));
}
return indexPartOid;
}
/*
* reindexPartIndex - This routine is used to recreate a single index partition
*/
@ -5079,11 +5833,6 @@ static void reindexPartIndex(Oid indexId, Oid partOid, bool skip_constraint_chec
PG_TRY();
{
Relation partRel = NULL;
SysScanDesc partScan;
HeapTuple partTuple;
Form_pg_partition partForm;
ScanKeyData partKey;
Oid indexPartOid = InvalidOid;
/* Suppress use of the target index while rebuilding it */
@ -5103,34 +5852,8 @@ static void reindexPartIndex(Oid indexId, Oid partOid, bool skip_constraint_chec
}
// step 1: rebuild index partition
/*
* Find the tuple in pg_partition whose 'indextblid' is partOid
* and 'parentid' is indexId with systable scan.
*/
partRel = heap_open(PartitionRelationId, AccessShareLock);
ScanKeyInit(&partKey, Anum_pg_partition_indextblid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(partOid));
partScan = systable_beginscan(partRel, PartitionIndexTableIdIndexId, true, NULL, 1, &partKey);
while ((partTuple = systable_getnext(partScan)) != NULL) {
partForm = (Form_pg_partition)GETSTRUCT(partTuple);
if (partForm->parentid == indexId) {
indexPartOid = HeapTupleGetOid(partTuple);
break;
}
}
/* End scan and close pg_partition */
systable_endscan(partScan);
heap_close(partRel, AccessShareLock);
if (!OidIsValid(indexPartOid)) {
ereport(ERROR,
(errcode(ERRCODE_CACHE_LOOKUP_FAILED),
errmsg("cache lookup failed for partitioned index %u", indexId)));
}
/* Use partOid and indexId to look up indexPartOid */
indexPartOid = heapPartitionIdGetindexPartitionId(indexId, partOid);
/* Now, we have get the index partition oid and open it. */
heapPart = partitionOpen(heapRelation, partOid, ShareLock);

View File

@ -396,6 +396,84 @@ long changeDependencyFor(Oid classId, Oid objectId, Oid refClassId, Oid oldRefOb
return count;
}
/*
* Adjust all dependency records to point to a different object of the same type
*
* refClassId/oldRefObjectId specify the old referenced object.
* newRefObjectId is the new referenced object (must be of class refClassId).
*
* Returns the number of records updated.
*/
long changeDependenciesOn(Oid refClassId, Oid oldRefObjectId, Oid newRefObjectId)
{
long count = 0;
Relation depRel = NULL;
ScanKeyData key[2];
SysScanDesc scan = NULL;
HeapTuple tup = NULL;
ObjectAddress objAddr;
bool newIsPinned = false;
depRel = heap_open(DependRelationId, RowExclusiveLock);
/*
* If oldRefObjectId is pinned, there won't be any dependency entries on
* it --- we can't cope in that case. (This isn't really worth expending
* code to fix, in current usage; it just means you can't rename stuff out
* of pg_catalog, which would likely be a bad move anyway.)
*/
objAddr.classId = refClassId;
objAddr.objectId = oldRefObjectId;
objAddr.objectSubId = 0;
if (isObjectPinned(&objAddr, depRel))
ereport (ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot remove dependency on %s because it is a system object",
getObjectDescription(&objAddr))));
/*
* We can handle adding a dependency on something pinned, though, since
* that just means deleting the dependency entry.
*/
objAddr.objectId = newRefObjectId;
newIsPinned = isObjectPinned(&objAddr, depRel);
/* Now search for dependency records */
ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(refClassId));
ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(oldRefObjectId));
scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 2, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend depform = (Form_pg_depend) GETSTRUCT(tup);
if(newIsPinned)
simple_heap_delete(depRel, &tup->t_self);
else {
/* make a modifiable copy */
tup = heap_copytuple(tup);
depform = (Form_pg_depend) GETSTRUCT(tup);
depform->refobjid = newRefObjectId;
simple_heap_update(depRel, &tup->t_self, tup);
CatalogUpdateIndexes(depRel, tup);
heap_freetuple_ext(tup);
}
count++;
}
systable_endscan(scan);
heap_close(depRel, RowExclusiveLock);
return count;
}
/*
* isObjectPinned()
*
@ -681,3 +759,46 @@ Oid get_index_constraint(Oid indexId)
return constraintId;
}
/*
* get_index_ref_constraints
* Given the OID of an index, return the OID of all foreign key
* constraints which reference the index.
*/
List* get_index_ref_constraints(Oid indexId)
{
List* result = NULL;
Relation depRel = NULL;
ScanKeyData key[3];
SysScanDesc scan = NULL;
HeapTuple tup = NULL;
/* search the dependency table for the index */
depRel = heap_open(DependRelationId,AccessShareLock);
ScanKeyInit(&key[0], Anum_pg_depend_refclassid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(RelationRelationId));
ScanKeyInit(&key[1], Anum_pg_depend_refobjid, BTEqualStrategyNumber, F_OIDEQ, ObjectIdGetDatum(indexId));
ScanKeyInit(&key[2], Anum_pg_depend_refobjsubid, BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(0));
scan = systable_beginscan(depRel, DependReferenceIndexId, true, NULL, 3, key);
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
Form_pg_depend deprec = (Form_pg_depend) GETSTRUCT(tup);
/*
* We assume any normal dependency from a constraint must be what we
* are looking for.
*/
if (deprec->classid == ConstraintRelationId && deprec->objsubid == 0 &&
deprec->deptype == DEPENDENCY_NORMAL)
{
result = lappend_oid(result, deprec->objid);
}
}
systable_endscan(scan);
heap_close(depRel, AccessShareLock);
return result;
}

View File

@ -5816,6 +5816,8 @@ static ReindexStmt* _copyReindexStmt(const ReindexStmt* from)
COPY_SCALAR_FIELD(do_user);
COPY_SCALAR_FIELD(memUsage.work_mem);
COPY_SCALAR_FIELD(memUsage.max_mem);
/* use for reindex concurrently */
COPY_SCALAR_FIELD(concurrent);
return newnode;
}

View File

@ -2115,6 +2115,8 @@ static bool _equalReindexStmt(const ReindexStmt* a, const ReindexStmt* b)
COMPARE_STRING_FIELD(name);
COMPARE_SCALAR_FIELD(do_system);
COMPARE_SCALAR_FIELD(do_user);
/* use for reindex concurrently */
COMPARE_SCALAR_FIELD(concurrent);
return true;
}

View File

@ -12174,22 +12174,23 @@ opt_if_exists: IF_P EXISTS { $$ = TRUE; }
*
* QUERY:
*
* REINDEX type <name> [FORCE]
* REINDEX type [CONCURRENTLY] <name> [FORCE]
*
* FORCE no longer does anything, but we accept it for backwards compatibility
*****************************************************************************/
ReindexStmt:
REINDEX reindex_type qualified_name opt_force
REINDEX reindex_type opt_concurrently qualified_name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = $2;
n->relation = $3;
n->concurrent = $3;
n->relation = $4;
n->name = NULL;
$$ = (Node *)n;
}
|
REINDEX reindex_type qualified_name PARTITION ColId opt_force
REINDEX reindex_type opt_concurrently qualified_name PARTITION ColId opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
if ($2 == OBJECT_INDEX)
@ -12198,25 +12199,28 @@ ReindexStmt:
n->kind = OBJECT_TABLE_PARTITION;
else
n->kind = OBJECT_INTERNAL_PARTITION;
n->relation = $3;
n->name = $5;
n->concurrent = $3;
n->relation = $4;
n->name = $6;
$$ = (Node *)n;
}
| REINDEX SYSTEM_P name opt_force
| REINDEX SYSTEM_P opt_concurrently name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = OBJECT_DATABASE;
n->name = $3;
n->concurrent = $3;
n->name = $4;
n->relation = NULL;
n->do_system = true;
n->do_user = false;
$$ = (Node *)n;
}
| REINDEX DATABASE name opt_force
| REINDEX DATABASE opt_concurrently name opt_force
{
ReindexStmt *n = makeNode(ReindexStmt);
n->kind = OBJECT_DATABASE;
n->name = $3;
n->concurrent = $3;
n->name = $4;
n->relation = NULL;
n->do_system = true;
n->do_user = true;
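
Taken together, the grammar additions above are expected to accept statement forms like the following (a hedged sketch; the object names are illustrative, not from the patch):

REINDEX INDEX CONCURRENTLY my_index;
REINDEX TABLE CONCURRENTLY my_table;
REINDEX TABLE CONCURRENTLY my_table PARTITION my_part;
REINDEX DATABASE CONCURRENTLY my_db;
REINDEX SYSTEM CONCURRENTLY my_db;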

1144
src/gausskernel/optimizer/commands/indexcmds.cpp Normal file → Executable file

File diff suppressed because it is too large

View File

@ -3578,10 +3578,12 @@ static void RangeVarCallbackForDropRelation(
char relkind;
Form_pg_class classform;
LOCKMODE heap_lockmode;
bool invalid_system_index; /* use for reindex concurrently */
state = (struct DropRelationCallbackState*)arg;
relkind = state->relkind;
heap_lockmode = state->concurrent ? ShareUpdateExclusiveLock : AccessExclusiveLock;
invalid_system_index = false;
if (target_is_partition)
heap_lockmode = AccessShareLock;
@ -3616,10 +3618,37 @@ static void RangeVarCallbackForDropRelation(
DropErrorMsgWrongType(rel->relname, classform->relkind, relkind);
}
/*
* Check the case of a system index that might have been invalidated by a
* failed concurrent process and allow its drop. For the time being, this
* only concerns indexes of toast relations that became invalid during a
* REINDEX CONCURRENTLY process.
*/
if (IsSystemClass(classform) && relkind == RELKIND_INDEX) {
HeapTuple locTuple;
Form_pg_index indexform;
bool indisvalid;
locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(locTuple)) {
ReleaseSysCache(tuple);
return;
}
indexform = (Form_pg_index) GETSTRUCT(locTuple);
indisvalid = indexform->indisvalid;
ReleaseSysCache(locTuple);
/* Mark object as being an invalid index of system catalogs */
if (!indisvalid)
invalid_system_index = true;
}
/* Permission Check */
DropRelationPermissionCheck(relkind, relOid, classform->relnamespace, rel->relname);
if (!CheckClassFormPermission(classform)) {
if (!invalid_system_index && !CheckClassFormPermission(classform)) {
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied: \"%s\" is a system catalog", rel->relname)));

View File

@ -2201,16 +2201,16 @@ void ReindexCommand(ReindexStmt* stmt, bool is_top_level)
switch (stmt->kind) {
case OBJECT_INDEX:
case OBJECT_INDEX_PARTITION:
ReindexIndex(stmt->relation, (const char*)stmt->name, &stmt->memUsage);
ReindexIndex(stmt->relation, (const char*)stmt->name, &stmt->memUsage, stmt->concurrent);
break;
case OBJECT_TABLE:
case OBJECT_MATVIEW:
case OBJECT_TABLE_PARTITION:
ReindexTable(stmt->relation, (const char*)stmt->name, &stmt->memUsage);
ReindexTable(stmt->relation, (const char*)stmt->name, &stmt->memUsage, stmt->concurrent);
break;
case OBJECT_INTERNAL:
case OBJECT_INTERNAL_PARTITION:
ReindexInternal(stmt->relation, (const char*)stmt->name);
ReindexInternal(stmt->relation, (const char*)stmt->name, stmt->concurrent);
break;
case OBJECT_DATABASE:
@ -2221,7 +2221,7 @@ void ReindexCommand(ReindexStmt* stmt, bool is_top_level)
* intended effect!
*/
PreventTransactionChain(is_top_level, "REINDEX DATABASE");
ReindexDatabase(stmt->name, stmt->do_system, stmt->do_user, &stmt->memUsage);
ReindexDatabase(stmt->name, stmt->do_system, stmt->do_user, &stmt->memUsage, stmt->concurrent);
break;
default: {
ereport(ERROR,
@ -6581,6 +6581,11 @@ void standard_ProcessUtility(Node* parse_tree, const char* query_string, ParamLi
ReindexStmt* stmt = (ReindexStmt*)parse_tree;
RemoteQueryExecType exec_type;
bool is_temp = false;
/* REINDEX CONCURRENTLY cannot run inside a transaction block */
if (stmt->concurrent)
PreventTransactionChain(is_top_level, "REINDEX CONCURRENTLY");
pgstat_set_io_state(IOSTATE_WRITE);
#ifdef PGXC
if (IS_PGXC_COORDINATOR) {

View File

@ -213,6 +213,7 @@ typedef enum ObjectClass {
#define PERFORM_DELETION_INVALID 0x0000
#define PERFORM_DELETION_INTERNAL 0x0001
#define PERFORM_DELETION_CONCURRENTLY 0x0002
#define PERFORM_DELETION_CONCURRENTLY_LOCK 0x0020 /* normal drop with concurrent lock mode */
/* ObjectAddressExtra flag bits */
#define DEPFLAG_ORIGINAL 0x0001 /* an original deletion target */
@ -294,6 +295,9 @@ extern long changeDependencyFor(Oid classId,
Oid oldRefObjectId,
Oid newRefObjectId);
/* use for reindex concurrently */
extern long changeDependenciesOn(Oid refClassId, Oid oldRefObjectId, Oid newRefObjectId);
extern Oid getExtensionOfObject(Oid classId, Oid objectId);
extern bool sequenceIsOwned(Oid seqId, Oid *tableId, int32 *colId);
@ -306,6 +310,9 @@ extern Oid get_constraint_index(Oid constraintId);
extern Oid get_index_constraint(Oid indexId);
/* use for reindex concurrently */
extern List *get_index_ref_constraints(Oid indexId);
/* in pg_shdepend.c */
extern void recordSharedDependencyOn(ObjectAddress *depender,
ObjectAddress *referenced,

View File

@ -125,12 +125,36 @@ extern Oid index_create(Relation heapRelation, const char *indexRelationName, Oi
IndexCreateExtraArgs *extra, bool useLowLockLevel = false,
int8 relindexsplit = 0);
/* use for reindex concurrently */
extern Oid index_concurrently_create_copy(Relation heapRelation, Oid oldIndexId, const char *newName);
/* use for reindex concurrently*/
extern void index_concurrently_build(Oid heapRelationId, Oid indexRelationId, bool isPrimary, AdaptMem* memInfo = NULL, bool dbWide = false);
/* use for reindex concurrently*/
extern void index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName);
/* use for reindex concurrently*/
extern void index_concurrently_set_dead(Oid heapId, Oid indexId);
/* use for reindex concurrently partition */
extern Oid index_concurrently_part_create_copy(Oid oldIndexPartId, const char *newName);
/* use for reindex concurrently partition */
extern void index_concurrently_part_build(Oid heapRelationId, Oid heapPartitionId, Oid indexRelationId, Oid IndexPartitionId, AdaptMem* memInfo = NULL, bool dbWide = false);
/* use for reindex concurrently partition */
extern void index_concurrently_part_swap(Relation indexRelation, Oid newIndexPartId, Oid oldIndexPartId, const char *oldName);
extern void index_constraint_create(Relation heapRelation, Oid indexRelationId, IndexInfo *indexInfo,
const char *constraintName, char constraintType, bool deferrable,
bool initdeferred, bool mark_as_primary, bool update_pgindex,
bool remove_old_dependencies, bool allow_system_table_mods);
extern void index_drop(Oid indexId, bool concurrent);
/* change input: add concurrent_lock_mode */
extern void index_drop(Oid indexId, bool concurrent, bool concurrent_lock_mode = false);
extern IndexInfo *BuildIndexInfo(Relation index);
extern IndexInfo *BuildDummyIndexInfo(Relation index);
@ -173,7 +197,7 @@ extern double IndexBuildVectorBatchScan(Relation heapRelation, Relation indexRel
void *transferFuncs);
extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot);
extern void validate_index(Oid heapId, Oid indexId, Snapshot snapshot, bool isPart = false);
extern void validate_index_heapscan(
Relation heapRelation, Relation indexRelation, IndexInfo* indexInfo, Snapshot snapshot, v_i_state* state);
@ -201,6 +225,9 @@ extern bool ReindexIsProcessingHeap(Oid heapOid);
extern bool ReindexIsProcessingIndex(Oid indexOid);
extern Oid IndexGetRelation(Oid indexId, bool missing_ok);
extern Oid PartIndexGetPartition(Oid partIndexId, bool missing_ok);
extern Oid PartIdGetParentId(Oid partIndexId, bool missing_ok);
typedef struct
{
Oid existingPSortOid;
@ -232,6 +259,7 @@ extern void PartitionNameCallbackForIndexPartition(Oid partitionedRelationOid,
LOCKMODE callbackobj_lockMode);
extern void reindex_partIndex(Relation heapRel, Partition heapPart, Relation indexRel , Partition indexPart);
extern bool reindexPartition(Oid relid, Oid partOid, int flags, int reindexType);
extern Oid heapPartitionIdGetindexPartitionId(Oid indexId, Oid partOid);
extern void AddGPIForPartition(Oid partTableOid, Oid partOid);
void AddCBIForPartition(Relation partTableRel, Relation tempTableRel, const List* indexRelList,
const List* indexDestOidList);

View File

@ -23,11 +23,14 @@ extern void RemoveObjects(DropStmt* stmt, bool missing_ok, bool is_securityadmin
/* commands/indexcmds.c */
extern Oid DefineIndex(Oid relationId, IndexStmt* stmt, Oid indexRelationId, bool is_alter_table, bool check_rights,
bool skip_build, bool quiet);
extern void ReindexIndex(RangeVar* indexRelation, const char* partition_name, AdaptMem* mem_info);
extern void ReindexTable(RangeVar* relation, const char* partition_name, AdaptMem* mem_info);
extern void ReindexInternal(RangeVar* relation, const char* partition_name);
extern void ReindexDatabase(const char* databaseName, bool do_system, bool do_user, AdaptMem* mem_info);
/* concurrent flag added for REINDEX CONCURRENTLY */
extern void ReindexIndex(RangeVar* indexRelation, const char* partition_name, AdaptMem* mem_info, bool concurrent);
extern void ReindexTable(RangeVar* relation, const char* partition_name, AdaptMem* mem_info, bool concurrent);
extern void ReindexInternal(RangeVar* relation, const char* partition_name, bool concurrent);
/* concurrent flag added for REINDEX CONCURRENTLY */
extern void ReindexDatabase(const char* databaseName, bool do_system, bool do_user, AdaptMem* mem_info, bool concurrent);
extern char* makeObjectName(const char* name1, const char* name2, const char* label, bool reverseTruncate = false);
extern char* ChooseRelationName(
const char* name1, const char* name2, const char* label, size_t labelLength, Oid namespaceid,

View File

@ -765,6 +765,7 @@ typedef struct ReindexStmt {
bool do_system; /* include system tables in database case */
bool do_user; /* include user tables in database case */
AdaptMem memUsage; /* adaptive memory assigned for the stmt */
bool concurrent; /* true for REINDEX CONCURRENTLY */
} ReindexStmt;
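The new concurrent flag is set by the grammar when CONCURRENTLY is specified. The statements below restate the forms exercised by the new regression tests (the object names are the test tables and indexes defined there):

REINDEX INDEX CONCURRENTLY concur_reindex_ind1;
REINDEX TABLE CONCURRENTLY concur_reindex_tab;
REINDEX INDEX CONCURRENTLY idx2_t1 PARTITION t1_1_index;
REINDEX TABLE CONCURRENTLY t1 PARTITION t1_1;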
typedef struct Position {

View File

@ -0,0 +1,87 @@
--
-- REINDEX CONCURRENTLY
--
CREATE TABLE concur_reindex_tab (c1 int);
-- REINDEX
REINDEX TABLE concur_reindex_tab; -- notice
NOTICE: table "concur_reindex_tab" has no indexes
REINDEX TABLE CONCURRENTLY concur_reindex_tab; -- notice
NOTICE: table "concur_reindex_tab" has no indexes
ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index
-- Normal index with integer column
CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1);
-- Normal index with text column
CREATE INDEX concur_reindex_ind2 ON concur_reindex_tab(c2);
-- UNIQUE index with expression
CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1));
-- Duplicate column names error
CREATE INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2);
ERROR: duplicate column name
-- Create table for check on foreign key dependence switch with indexes swapped
ALTER TABLE concur_reindex_tab ADD PRIMARY KEY USING INDEX concur_reindex_ind1;
CREATE TABLE concur_reindex_tab2 (c1 int REFERENCES concur_reindex_tab);
INSERT INTO concur_reindex_tab VALUES (1, 'a');
INSERT INTO concur_reindex_tab VALUES (2, 'a');
-- Check materialized views
CREATE MATERIALIZED VIEW concur_reindex_matview AS SELECT * FROM concur_reindex_tab;
REINDEX INDEX CONCURRENTLY concur_reindex_ind1;
REINDEX TABLE CONCURRENTLY concur_reindex_tab;
REINDEX TABLE CONCURRENTLY concur_reindex_matview;
-- Check views
CREATE VIEW concur_reindex_view AS SELECT * FROM concur_reindex_tab;
REINDEX TABLE CONCURRENTLY concur_reindex_view; -- Error
ERROR: "concur_reindex_view" is not a table or materialized view
-- Check that comments are preserved
CREATE TABLE testcomment (i int);
CREATE INDEX testcomment_idx1 ON testcomment(i);
COMMENT ON INDEX testcomment_idx1 IS 'test comment';
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
obj_description
-----------------
test comment
(1 row)
REINDEX TABLE testcomment;
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
obj_description
-----------------
test comment
(1 row)
REINDEX TABLE CONCURRENTLY testcomment;
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
obj_description
-----------------
test comment
(1 row)
DROP TABLE testcomment;
-- Check error
-- Cannot run inside a transaction block
BEGIN;
REINDEX TABLE CONCURRENTLY concur_reindex_tab;
ERROR: REINDEX CONCURRENTLY cannot run inside a transaction block
COMMIT;
REINDEX TABLE CONCURRENTLY pg_database; -- no shared relation
ERROR: concurrent index creation on system catalog tables is not supported
REINDEX TABLE CONCURRENTLY pg_class; -- no catalog relations
ERROR: concurrent index creation on system catalog tables is not supported
REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM
ERROR: can only reindex the currently open database
-- Check the relation status; there should be no invalid indexes
\d concur_reindex_tab
Table "public.concur_reindex_tab"
Column | Type | Modifiers
--------+---------+-----------
c1 | integer | not null
c2 | text |
Indexes:
"concur_reindex_ind1" PRIMARY KEY, btree (c1) TABLESPACE pg_default
"concur_reindex_ind3" UNIQUE, btree (abs(c1)) TABLESPACE pg_default
"concur_reindex_ind2" btree (c2) TABLESPACE pg_default
Referenced by:
TABLE "concur_reindex_tab2" CONSTRAINT "concur_reindex_tab2_c1_fkey" FOREIGN KEY (c1) REFERENCES concur_reindex_tab(c1)
DROP VIEW concur_reindex_view;
DROP MATERIALIZED VIEW concur_reindex_matview;
DROP TABLE concur_reindex_tab, concur_reindex_tab2;
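The \d output above is how the test confirms that no invalid index is left behind. A direct catalog check is also possible; the query below is a hedged sketch (run before the cleanup drops the table) that assumes the PostgreSQL-style pg_index.indisvalid column and should return zero rows after a successful REINDEX CONCURRENTLY:

SELECT c.relname AS index_name, i.indisvalid
FROM pg_index i
JOIN pg_class c ON c.oid = i.indexrelid
WHERE i.indrelid = 'concur_reindex_tab'::regclass
  AND NOT i.indisvalid;   -- expect zero rows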

View File

@ -0,0 +1,59 @@
--
-- REINDEX CONCURRENTLY PARALLEL
--
CREATE TABLE reind_con_tab(id serial primary key, data text);
NOTICE: CREATE TABLE will create implicit sequence "reind_con_tab_id_seq" for serial column "reind_con_tab.id"
NOTICE: CREATE TABLE / PRIMARY KEY will create implicit index "reind_con_tab_pkey" for table "reind_con_tab"
INSERT INTO reind_con_tab(data) VALUES ('aa');
INSERT INTO reind_con_tab(data) VALUES ('aaa');
INSERT INTO reind_con_tab(data) VALUES ('aaaa');
INSERT INTO reind_con_tab(data) VALUES ('aaaaa');
\d reind_con_tab;
Table "public.reind_con_tab"
Column | Type | Modifiers
--------+---------+------------------------------------------------------------
id | integer | not null default nextval('reind_con_tab_id_seq'::regclass)
data | text |
Indexes:
"reind_con_tab_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
SELECT data FROM reind_con_tab WHERE id =3;
\parallel off
data
------
aaaa
(1 row)
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
UPDATE reind_con_tab SET data = 'bbbb' WHERE id = 3;
\parallel off
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
INSERT INTO reind_con_tab(data) VALUES('cccc');
\parallel off
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
DELETE FROM reind_con_tab WHERE data = 'aaa';
\parallel off
SELECT * FROM reind_con_tab;
id | data
----+-------
1 | aa
4 | aaaaa
3 | bbbb
5 | cccc
(4 rows)
\d reind_con_tab;
Table "public.reind_con_tab"
Column | Type | Modifiers
--------+---------+------------------------------------------------------------
id | integer | not null default nextval('reind_con_tab_id_seq'::regclass)
data | text |
Indexes:
"reind_con_tab_pkey" PRIMARY KEY, btree (id) TABLESPACE pg_default
DROP TABLE reind_con_tab;

View File

@ -0,0 +1,55 @@
--
-- REINDEX CONCURRENTLY PARTITION
--
drop table if exists t1;
NOTICE: table "t1" does not exist, skipping
create table t1(
c_id varchar,
c_w_id integer,
c_date date
)
partition by range (c_date,c_w_id)
(
PARTITION t1_1 values less than ('20170331',5),
PARTITION t1_2 values less than ('20170731',450),
PARTITION t1_3 values less than ('20170930',1062),
PARTITION t1_4 values less than ('20171231',1765),
PARTITION t1_5 values less than ('20180331',2024),
PARTITION t1_6 values less than ('20180731',2384),
PARTITION t1_7 values less than ('20180930',2786),
PARTITION t1_8 values less than (maxvalue,maxvalue)
);
insert into t1 values('gauss1',4,'20170301');
insert into t1 values('gauss2',400,'20170625');
insert into t1 values('gauss3',480,'20170920');
insert into t1 values('gauss4',1065,'20170920');
insert into t1 values('gauss5',1800,'20170920');
insert into t1 values('gauss6',2030,'20170920');
insert into t1 values('gauss7',2385,'20170920');
insert into t1 values('gauss8',2789,'20191020');
insert into t1 values('gauss9',2789,'20171020');
create index idx_t1 on t1 using btree(c_id) LOCAL;
create index idx2_t1 on t1 using btree(c_id) LOCAL (
PARTITION t1_1_index,
PARTITION t1_2_index,
PARTITION t1_3_index,
PARTITION t1_4_index,
PARTITION t1_5_index,
PARTITION t1_6_index,
PARTITION t1_7_index,
PARTITION t1_8_index
);
reindex index CONCURRENTLY idx_t1;
reindex index CONCURRENTLY idx2_t1 partition t1_1_index;
reindex table CONCURRENTLY t1;
reindex table CONCURRENTLY t1 partition t1_1;
drop index idx_t1;
drop index idx2_t1;
create index idx_t1 on t1 using btree(c_id);
reindex index idx_t1;
reindex index CONCURRENTLY idx_t1; --ERROR, cannot concurrently reindex a global partition index
ERROR: cannot reindex concurrently global partition index " public.idx_t1"
reindex table t1;
reindex table CONCURRENTLY t1; --WARNING, cannot concurrently reindex a global partition index
WARNING: cannot reindex concurrently global partition index " public.idx_t1", skipping
drop table t1;

View File

@ -0,0 +1,51 @@
--
-- REINDEX CONCURRENTLY PARTITION PARALLEL
--
drop table if exists t2;
NOTICE: table "t2" does not exist, skipping
CREATE TABLE t2 (id int, data text) partition by range(id)(partition p1 values less than(100), partition p2 values less than(200), partition p3 values less than(MAXVALUE));
insert into t2 select generate_series(1,500),generate_series(1,500);
create index ind_id on t2(id) LOCAL;
select * from t2 where id = 4;
id | data
----+------
4 | 4
(1 row)
\parallel on
REINDEX index CONCURRENTLY ind_id;
select * from t2 where id = 3;
\parallel off
id | data
----+------
3 | 3
(1 row)
\parallel on
REINDEX index CONCURRENTLY ind_id;
delete from t2 where id = 4;
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
insert into t2 values (4,3);
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
update t2 set data = 4 where id = 4;
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
select * from t2 where id = 4;
\parallel off
id | data
----+------
4 | 4
(1 row)
select * from t2 where id = 4;
id | data
----+------
4 | 4
(1 row)
drop table t2;

View File

@ -831,8 +831,15 @@ test: comment_proc
test: hw_package
test: procedure_privilege_test
test: cast_privileges_test
test: reindex_concurrently
test: reindex_concurrently_parallel
test: reindex_concurrently_partition
test: reindex_concurrently_partition_parallel
#test: hw_dbms_sql1
test: hw_cipher_sm4
test: hw_cipher_aes128
test: sequence_cache_test
test: pg_buffercache_pages

View File

@ -0,0 +1,54 @@
--
-- REINDEX CONCURRENTLY
--
CREATE TABLE concur_reindex_tab (c1 int);
-- REINDEX
REINDEX TABLE concur_reindex_tab; -- notice
REINDEX TABLE CONCURRENTLY concur_reindex_tab; -- notice
ALTER TABLE concur_reindex_tab ADD COLUMN c2 text; -- add toast index
-- Normal index with integer column
CREATE UNIQUE INDEX concur_reindex_ind1 ON concur_reindex_tab(c1);
-- Normal index with text column
CREATE INDEX concur_reindex_ind2 ON concur_reindex_tab(c2);
-- UNIQUE index with expression
CREATE UNIQUE INDEX concur_reindex_ind3 ON concur_reindex_tab(abs(c1));
-- Duplicate column names error
CREATE INDEX concur_reindex_ind4 ON concur_reindex_tab(c1, c1, c2);
-- Create table for check on foreign key dependence switch with indexes swapped
ALTER TABLE concur_reindex_tab ADD PRIMARY KEY USING INDEX concur_reindex_ind1;
CREATE TABLE concur_reindex_tab2 (c1 int REFERENCES concur_reindex_tab);
INSERT INTO concur_reindex_tab VALUES (1, 'a');
INSERT INTO concur_reindex_tab VALUES (2, 'a');
-- Check materialized views
CREATE MATERIALIZED VIEW concur_reindex_matview AS SELECT * FROM concur_reindex_tab;
REINDEX INDEX CONCURRENTLY concur_reindex_ind1;
REINDEX TABLE CONCURRENTLY concur_reindex_tab;
REINDEX TABLE CONCURRENTLY concur_reindex_matview;
-- Check views
CREATE VIEW concur_reindex_view AS SELECT * FROM concur_reindex_tab;
REINDEX TABLE CONCURRENTLY concur_reindex_view; -- Error
-- Check that comments are preserved
CREATE TABLE testcomment (i int);
CREATE INDEX testcomment_idx1 ON testcomment(i);
COMMENT ON INDEX testcomment_idx1 IS 'test comment';
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
REINDEX TABLE testcomment;
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
REINDEX TABLE CONCURRENTLY testcomment;
SELECT obj_description('testcomment_idx1'::regclass, 'pg_class');
DROP TABLE testcomment;
-- Check error
-- Cannot run inside a transaction block
BEGIN;
REINDEX TABLE CONCURRENTLY concur_reindex_tab;
COMMIT;
REINDEX TABLE CONCURRENTLY pg_database; -- no shared relation
REINDEX TABLE CONCURRENTLY pg_class; -- no catalog relations
REINDEX SYSTEM CONCURRENTLY postgres; -- not allowed for SYSTEM
-- Check the relation status; there should be no invalid indexes
\d concur_reindex_tab
DROP VIEW concur_reindex_view;
DROP MATERIALIZED VIEW concur_reindex_matview;
DROP TABLE concur_reindex_tab, concur_reindex_tab2;

View File

@ -0,0 +1,33 @@
--
-- REINDEX CONCURRENTLY PARALLEL
--
CREATE TABLE reind_con_tab(id serial primary key, data text);
INSERT INTO reind_con_tab(data) VALUES ('aa');
INSERT INTO reind_con_tab(data) VALUES ('aaa');
INSERT INTO reind_con_tab(data) VALUES ('aaaa');
INSERT INTO reind_con_tab(data) VALUES ('aaaaa');
\d reind_con_tab;
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
SELECT data FROM reind_con_tab WHERE id =3;
\parallel off
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
UPDATE reind_con_tab SET data = 'bbbb' WHERE id = 3;
\parallel off
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
INSERT INTO reind_con_tab(data) VALUES('cccc');
\parallel off
\parallel on
REINDEX TABLE CONCURRENTLY reind_con_tab;
DELETE FROM reind_con_tab WHERE data = 'aaa';
\parallel off
SELECT * FROM reind_con_tab;
\d reind_con_tab;
DROP TABLE reind_con_tab;

View File

@ -0,0 +1,58 @@
--
-- REINDEX CONCURRENTLY PARTITION
--
drop table if exists t1;
create table t1(
c_id varchar,
c_w_id integer,
c_date date
)
partition by range (c_date,c_w_id)
(
PARTITION t1_1 values less than ('20170331',5),
PARTITION t1_2 values less than ('20170731',450),
PARTITION t1_3 values less than ('20170930',1062),
PARTITION t1_4 values less than ('20171231',1765),
PARTITION t1_5 values less than ('20180331',2024),
PARTITION t1_6 values less than ('20180731',2384),
PARTITION t1_7 values less than ('20180930',2786),
PARTITION t1_8 values less than (maxvalue,maxvalue)
);
insert into t1 values('gauss1',4,'20170301');
insert into t1 values('gauss2',400,'20170625');
insert into t1 values('gauss3',480,'20170920');
insert into t1 values('gauss4',1065,'20170920');
insert into t1 values('gauss5',1800,'20170920');
insert into t1 values('gauss6',2030,'20170920');
insert into t1 values('gauss7',2385,'20170920');
insert into t1 values('gauss8',2789,'20191020');
insert into t1 values('gauss9',2789,'20171020');
create index idx_t1 on t1 using btree(c_id) LOCAL;
create index idx2_t1 on t1 using btree(c_id) LOCAL (
PARTITION t1_1_index,
PARTITION t1_2_index,
PARTITION t1_3_index,
PARTITION t1_4_index,
PARTITION t1_5_index,
PARTITION t1_6_index,
PARTITION t1_7_index,
PARTITION t1_8_index
);
reindex index CONCURRENTLY idx_t1;
reindex index CONCURRENTLY idx2_t1 partition t1_1_index;
reindex table CONCURRENTLY t1;
reindex table CONCURRENTLY t1 partition t1_1;
drop index idx_t1;
drop index idx2_t1;
create index idx_t1 on t1 using btree(c_id);
reindex index idx_t1;
reindex index CONCURRENTLY idx_t1; --ERROR, cannot concurrently reindex a global partition index
reindex table t1;
reindex table CONCURRENTLY t1; --WARNING, cannot concurrently reindex a global partition index
drop table t1;

View File

@ -0,0 +1,38 @@
--
-- REINDEX CONCURRENTLY PARTITION PARALLEL
--
drop table if exists t2;
CREATE TABLE t2 (id int, data text) partition by range(id)(partition p1 values less than(100), partition p2 values less than(200), partition p3 values less than(MAXVALUE));
insert into t2 select generate_series(1,500),generate_series(1,500);
create index ind_id on t2(id) LOCAL;
select * from t2 where id = 4;
\parallel on
REINDEX index CONCURRENTLY ind_id;
select * from t2 where id = 3;
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
delete from t2 where id = 4;
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
insert into t2 values (4,3);
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
update t2 set data = 4 where id = 4;
\parallel off
\parallel on
REINDEX index CONCURRENTLY ind_id;
select * from t2 where id = 4;
\parallel off
select * from t2 where id = 4;
drop table t2;