Remove duplicate table scan in logical apply worker and code refactoring

Started by Zhijie Hou (Fujitsu) over 1 year ago · 9 messages
#1 Zhijie Hou (Fujitsu)
houzj.fnst@fujitsu.com
3 attachment(s)

Hi,

When reviewing the code in logical/worker.c, I noticed that when applying a
cross-partition update, the apply worker scans the old partition twice.
I am attaching patch 0001 to remove this duplicate table scan.
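
To make the redundancy concrete, the pre-patch call flow for a
cross-partition update looks roughly like this (a paraphrased sketch of
the flow, not verbatim code from worker.c):

```c
/*
 * Paraphrased pre-patch call flow for a cross-partition UPDATE;
 * the function names are real, everything else is simplified:
 *
 * apply_handle_tuple_routing(CMD_UPDATE)
 *   -> FindReplTupleInLocalRel()        scan #1: locate the old tuple
 *   -> ExecPartitionCheck() fails       tuple must change partition
 *   -> apply_handle_delete_internal()
 *        -> FindReplTupleInLocalRel()   scan #2: same tuple, same partition
 *        -> ExecSimpleRelationDelete()
 *   -> apply_handle_insert_internal()   insert into the new partition
 */
```

Patch 0001 keeps the tuple located by the first scan and deletes it
directly, so the second scan disappears.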

The test shows that it brings noticeable improvement:

Steps
-----
Pub:
create table tab (a int not null, b int);
alter table tab replica identity full;
insert into tab select 1,generate_series(1, 1000000, 1);

Sub:
create table tab (a int not null, b int) partition by range (b);
create table tab_1 partition of tab for values from (minvalue) to (5000000);
create table tab_2 partition of tab for values from (5000000) to (maxvalue);
alter table tab replica identity full;

Test query:
update tab set b = 6000000 where b > 999900; -- UPDATE 100

Results (time spent by the apply worker to apply all the UPDATEs):
Before: 14s
After: 7s
-----

Apart from the above, I found quite a lot of duplicated code related to
partition handling (e.g., apply_handle_tuple_routing), so I tried to extract
some common logic to simplify the code. Please see 0002 for this refactoring.

Best Regards,
Hou Zhijie

Attachments:

v1-0002-refactor-the-partition-related-logic-in-worker.c.patch (application/octet-stream)
From affd291ec23378ca59334aa73c20b47adfe354bd Mon Sep 17 00:00:00 2001
From: Hou Zhijie <houzj.fnst@cn.fujitsu.com>
Date: Thu, 25 Jul 2024 16:16:10 +0800
Subject: [PATCH v1 2/2] refactor the partition related logic in worker.c

The data modification part is extracted from apply_handle_tuple_routing() and
placed into the appropriate apply handling function. As a result,
apply_handle_tuple_routing() is now solely responsible for partition
determination and tuple conversion, and the code duplication is eliminated.
---
 src/backend/replication/logical/worker.c | 498 +++++++++--------------
 src/test/subscription/t/013_partition.pl |   4 +-
 2 files changed, 204 insertions(+), 298 deletions(-)

diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index 9a61679130..5ac9eb47fd 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -383,10 +383,11 @@ static void apply_handle_insert_internal(ApplyExecutionData *edata,
 										 ResultRelInfo *relinfo,
 										 TupleTableSlot *remoteslot);
 static void apply_handle_update_internal(ApplyExecutionData *edata,
-										 ResultRelInfo *relinfo,
+										 ResultRelInfo *targetRelinfo,
+										 TupleTableSlot *remoteslot_root,
 										 TupleTableSlot *remoteslot,
 										 LogicalRepTupleData *newtup,
-										 Oid localindexoid);
+										 LogicalRepRelMapEntry *relentry);
 static void apply_handle_delete_internal(ApplyExecutionData *edata,
 										 ResultRelInfo *relinfo,
 										 TupleTableSlot *remoteslot,
@@ -396,10 +397,10 @@ static bool FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel
 									Oid localidxoid,
 									TupleTableSlot *remoteslot,
 									TupleTableSlot **localslot);
-static void apply_handle_tuple_routing(ApplyExecutionData *edata,
-									   TupleTableSlot *remoteslot,
-									   LogicalRepTupleData *newtup,
-									   CmdType operation);
+static TupleTableSlot *apply_handle_tuple_routing(ApplyExecutionData *edata,
+												  CmdType operation,
+												  TupleTableSlot *remoteslot,
+												  ResultRelInfo **targetrelinfo);
 
 /* Functions for skipping changes */
 static void maybe_start_skipping_changes(XLogRecPtr finish_lsn);
@@ -2376,6 +2377,7 @@ apply_handle_insert(StringInfo s)
 	TupleTableSlot *remoteslot;
 	MemoryContext oldctx;
 	bool		run_as_owner;
+	ResultRelInfo *targetRelInfo;
 
 	/*
 	 * Quick return if we are skipping data modification changes or handling
@@ -2424,13 +2426,15 @@ apply_handle_insert(StringInfo s)
 	slot_fill_defaults(rel, estate, remoteslot);
 	MemoryContextSwitchTo(oldctx);
 
+	targetRelInfo = edata->targetRelInfo;
+
 	/* For a partitioned table, insert the tuple into a partition. */
 	if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
-		apply_handle_tuple_routing(edata,
-								   remoteslot, NULL, CMD_INSERT);
-	else
-		apply_handle_insert_internal(edata, edata->targetRelInfo,
-									 remoteslot);
+		remoteslot = apply_handle_tuple_routing(edata, CMD_INSERT, remoteslot,
+												&targetRelInfo);
+
+	/* For a partitioned table, insert the tuple into a partition. */
+	apply_handle_insert_internal(edata, targetRelInfo, remoteslot);
 
 	finish_edata(edata);
 
@@ -2508,6 +2512,22 @@ check_relation_updatable(LogicalRepRelMapEntry *rel)
 					rel->remoterel.nspname, rel->remoterel.relname)));
 }
 
+/*
+ * If the tuple to be modified could not be found, a log message is emitted.
+ */
+static void
+report_tuple_not_found(CmdType cmd, Relation targetrel, bool is_partition)
+{
+	Assert(cmd == CMD_UPDATE || cmd == CMD_DELETE);
+
+	/* XXX should this be promoted to ereport(LOG) perhaps? */
+	elog(DEBUG1,
+		 "logical replication did not find row to be %s in replication target relation%s \"%s\"",
+		 cmd == CMD_UPDATE ? "updated" : "deleted",
+		 is_partition ? "'s partition" : "",
+		 RelationGetRelationName(targetrel));
+}
+
 /*
  * Handle UPDATE message.
  *
@@ -2517,6 +2537,7 @@ static void
 apply_handle_update(StringInfo s)
 {
 	LogicalRepRelMapEntry *rel;
+	LogicalRepRelMapEntry *targetrel;
 	LogicalRepRelId relid;
 	UserContext ucxt;
 	ApplyExecutionData *edata;
@@ -2525,9 +2546,11 @@ apply_handle_update(StringInfo s)
 	LogicalRepTupleData newtup;
 	bool		has_oldtup;
 	TupleTableSlot *remoteslot;
+	TupleTableSlot *remoteslot_root = NULL;
 	RTEPermissionInfo *target_perminfo;
 	MemoryContext oldctx;
 	bool		run_as_owner;
+	ResultRelInfo *targetRelInfo;
 
 	/*
 	 * Quick return if we are skipping data modification changes or handling
@@ -2556,9 +2579,6 @@ apply_handle_update(StringInfo s)
 	/* Set relation for error callback */
 	apply_error_callback_arg.rel = rel;
 
-	/* Check if we can do the update. */
-	check_relation_updatable(rel);
-
 	/*
 	 * Make sure that any user-supplied code runs as the table owner, unless
 	 * the user has opted out of that behavior.
@@ -2604,13 +2624,27 @@ apply_handle_update(StringInfo s)
 					has_oldtup ? &oldtup : &newtup);
 	MemoryContextSwitchTo(oldctx);
 
+	targetRelInfo = edata->targetRelInfo;
+	targetrel = rel;
+	remoteslot_root = remoteslot;
+
 	/* For a partitioned table, apply update to correct partition. */
 	if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
-		apply_handle_tuple_routing(edata,
-								   remoteslot, &newtup, CMD_UPDATE);
-	else
-		apply_handle_update_internal(edata, edata->targetRelInfo,
-									 remoteslot, &newtup, rel->localindexoid);
+	{
+		TupleConversionMap *map;
+
+		remoteslot = apply_handle_tuple_routing(edata, CMD_UPDATE, remoteslot,
+												&targetRelInfo);
+		map = ExecGetRootToChildMap(targetRelInfo, estate);
+		targetrel = logicalrep_partition_open(rel, targetRelInfo->ri_RelationDesc,
+											  map ? map->attrMap : NULL);
+	}
+
+	/* Check if we can do the update. */
+	check_relation_updatable(targetrel);
+
+	apply_handle_update_internal(edata, targetRelInfo, remoteslot_root,
+								 remoteslot, &newtup, targetrel);
 
 	finish_edata(edata);
 
@@ -2625,73 +2659,6 @@ apply_handle_update(StringInfo s)
 	end_replication_step();
 }
 
-/*
- * Workhorse for apply_handle_update()
- * relinfo is for the relation we're actually updating in
- * (could be a child partition of edata->targetRelInfo)
- */
-static void
-apply_handle_update_internal(ApplyExecutionData *edata,
-							 ResultRelInfo *relinfo,
-							 TupleTableSlot *remoteslot,
-							 LogicalRepTupleData *newtup,
-							 Oid localindexoid)
-{
-	EState	   *estate = edata->estate;
-	LogicalRepRelMapEntry *relmapentry = edata->targetRel;
-	Relation	localrel = relinfo->ri_RelationDesc;
-	EPQState	epqstate;
-	TupleTableSlot *localslot;
-	bool		found;
-	MemoryContext oldctx;
-
-	EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
-	ExecOpenIndices(relinfo, false);
-
-	found = FindReplTupleInLocalRel(edata, localrel,
-									&relmapentry->remoterel,
-									localindexoid,
-									remoteslot, &localslot);
-	ExecClearTuple(remoteslot);
-
-	/*
-	 * Tuple found.
-	 *
-	 * Note this will fail if there are other conflicting unique indexes.
-	 */
-	if (found)
-	{
-		/* Process and store remote tuple in the slot */
-		oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
-		slot_modify_data(remoteslot, localslot, relmapentry, newtup);
-		MemoryContextSwitchTo(oldctx);
-
-		EvalPlanQualSetSlot(&epqstate, remoteslot);
-
-		/* Do the actual update. */
-		TargetPrivilegesCheck(relinfo->ri_RelationDesc, ACL_UPDATE);
-		ExecSimpleRelationUpdate(relinfo, estate, &epqstate, localslot,
-								 remoteslot);
-	}
-	else
-	{
-		/*
-		 * The tuple to be updated could not be found.  Do nothing except for
-		 * emitting a log message.
-		 *
-		 * XXX should this be promoted to ereport(LOG) perhaps?
-		 */
-		elog(DEBUG1,
-			 "logical replication did not find row to be updated "
-			 "in replication target relation \"%s\"",
-			 RelationGetRelationName(localrel));
-	}
-
-	/* Cleanup. */
-	ExecCloseIndices(relinfo);
-	EvalPlanQualEnd(&epqstate);
-}
-
 /*
  * Handle DELETE message.
  *
@@ -2701,6 +2668,7 @@ static void
 apply_handle_delete(StringInfo s)
 {
 	LogicalRepRelMapEntry *rel;
+	LogicalRepRelMapEntry *targetrel;
 	LogicalRepTupleData oldtup;
 	LogicalRepRelId relid;
 	UserContext ucxt;
@@ -2709,6 +2677,7 @@ apply_handle_delete(StringInfo s)
 	TupleTableSlot *remoteslot;
 	MemoryContext oldctx;
 	bool		run_as_owner;
+	ResultRelInfo *targetRelInfo;
 
 	/*
 	 * Quick return if we are skipping data modification changes or handling
@@ -2736,9 +2705,6 @@ apply_handle_delete(StringInfo s)
 	/* Set relation for error callback */
 	apply_error_callback_arg.rel = rel;
 
-	/* Check if we can do the delete. */
-	check_relation_updatable(rel);
-
 	/*
 	 * Make sure that any user-supplied code runs as the table owner, unless
 	 * the user has opted out of that behavior.
@@ -2759,13 +2725,26 @@ apply_handle_delete(StringInfo s)
 	slot_store_data(remoteslot, rel, &oldtup);
 	MemoryContextSwitchTo(oldctx);
 
+	targetRelInfo = edata->targetRelInfo;
+	targetrel = rel;
+
 	/* For a partitioned table, apply delete to correct partition. */
 	if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
-		apply_handle_tuple_routing(edata,
-								   remoteslot, NULL, CMD_DELETE);
-	else
-		apply_handle_delete_internal(edata, edata->targetRelInfo,
-									 remoteslot, rel->localindexoid);
+	{
+		TupleConversionMap *map;
+
+		remoteslot = apply_handle_tuple_routing(edata, CMD_DELETE, remoteslot,
+												&targetRelInfo);
+		map = ExecGetRootToChildMap(targetRelInfo, estate);
+		targetrel = logicalrep_partition_open(rel, targetRelInfo->ri_RelationDesc,
+											  map ? map->attrMap : NULL);
+	}
+
+	/* Check if we can do the delete. */
+	check_relation_updatable(targetrel);
+
+	apply_handle_delete_internal(edata, targetRelInfo, remoteslot,
+								 targetrel->localindexoid);
 
 	finish_edata(edata);
 
@@ -2782,7 +2761,7 @@ apply_handle_delete(StringInfo s)
 
 /*
  * Workhorse for apply_handle_delete()
- * relinfo is for the relation we're actually deleting from
+ * relinfo and relentry are for the relation we're actually deleting from
  * (could be a child partition of edata->targetRelInfo)
  */
 static void
@@ -2814,18 +2793,7 @@ apply_handle_delete_internal(ApplyExecutionData *edata,
 		ExecSimpleRelationDelete(relinfo, estate, &epqstate, localslot);
 	}
 	else
-	{
-		/*
-		 * The tuple to be deleted could not be found.  Do nothing except for
-		 * emitting a log message.
-		 *
-		 * XXX should this be promoted to ereport(LOG) perhaps?
-		 */
-		elog(DEBUG1,
-			 "logical replication did not find row to be deleted "
-			 "in replication target relation \"%s\"",
-			 RelationGetRelationName(localrel));
-	}
+		report_tuple_not_found(CMD_DELETE, localrel, edata->proute);
 
 	/* Cleanup. */
 	ExecCloseIndices(relinfo);
@@ -2884,46 +2852,45 @@ FindReplTupleInLocalRel(ApplyExecutionData *edata, Relation localrel,
 }
 
 /*
- * This handles insert, update, delete on a partitioned table.
+ * Determine the partition in which the tuple in slot is to be inserted, and
+ * return its ResultRelInfo in *targetrelinfo.  The return value is a slot
+ * holding the tuple of the partition rowtype.
  */
-static void
-apply_handle_tuple_routing(ApplyExecutionData *edata,
+static TupleTableSlot *
+apply_handle_tuple_routing(ApplyExecutionData *edata, CmdType operation,
 						   TupleTableSlot *remoteslot,
-						   LogicalRepTupleData *newtup,
-						   CmdType operation)
+						   ResultRelInfo **targetrelinfo)
 {
 	EState	   *estate = edata->estate;
-	LogicalRepRelMapEntry *relmapentry = edata->targetRel;
+	TupleTableSlot *remoteslot_part;
 	ResultRelInfo *relinfo = edata->targetRelInfo;
-	Relation	parentrel = relinfo->ri_RelationDesc;
-	ModifyTableState *mtstate;
-	PartitionTupleRouting *proute;
 	ResultRelInfo *partrelinfo;
+	Relation	parentrel = relinfo->ri_RelationDesc;
 	Relation	partrel;
-	TupleTableSlot *remoteslot_part;
 	TupleConversionMap *map;
 	MemoryContext oldctx;
-	LogicalRepRelMapEntry *part_entry = NULL;
-	AttrMap    *attrmap = NULL;
+
+	Assert(remoteslot != NULL);
 
 	/* ModifyTableState is needed for ExecFindPartition(). */
-	edata->mtstate = mtstate = makeNode(ModifyTableState);
-	mtstate->ps.plan = NULL;
-	mtstate->ps.state = estate;
-	mtstate->operation = operation;
-	mtstate->resultRelInfo = relinfo;
+	if (edata->mtstate == NULL)
+	{
+		edata->mtstate = makeNode(ModifyTableState);
+		edata->mtstate->ps.state = estate;
+		edata->mtstate->operation = operation;
+		edata->mtstate->resultRelInfo = relinfo;
+	}
 
 	/* ... as is PartitionTupleRouting. */
-	edata->proute = proute = ExecSetupPartitionTupleRouting(estate, parentrel);
+	if (edata->proute == NULL)
+		edata->proute = ExecSetupPartitionTupleRouting(estate, parentrel);
 
 	/*
 	 * Find the partition to which the "search tuple" belongs.
 	 */
-	Assert(remoteslot != NULL);
 	oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
-	partrelinfo = ExecFindPartition(mtstate, relinfo, proute,
+	partrelinfo = ExecFindPartition(edata->mtstate, relinfo, edata->proute,
 									remoteslot, estate);
-	Assert(partrelinfo != NULL);
 	partrel = partrelinfo->ri_RelationDesc;
 
 	/*
@@ -2943,11 +2910,11 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 	remoteslot_part = partrelinfo->ri_PartitionTupleSlot;
 	if (remoteslot_part == NULL)
 		remoteslot_part = table_slot_create(partrel, &estate->es_tupleTable);
+
 	map = ExecGetRootToChildMap(partrelinfo, estate);
 	if (map != NULL)
 	{
-		attrmap = map->attrMap;
-		remoteslot_part = execute_attr_map_slot(attrmap, remoteslot,
+		remoteslot_part = execute_attr_map_slot(map->attrMap, remoteslot,
 												remoteslot_part);
 	}
 	else
@@ -2955,186 +2922,125 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 		remoteslot_part = ExecCopySlot(remoteslot_part, remoteslot);
 		slot_getallattrs(remoteslot_part);
 	}
+
 	MemoryContextSwitchTo(oldctx);
 
-	/* Check if we can do the update or delete on the leaf partition. */
-	if (operation == CMD_UPDATE || operation == CMD_DELETE)
-	{
-		part_entry = logicalrep_partition_open(relmapentry, partrel,
-											   attrmap);
-		check_relation_updatable(part_entry);
-	}
+	*targetrelinfo = partrelinfo;
 
-	switch (operation)
-	{
-		case CMD_INSERT:
-			apply_handle_insert_internal(edata, partrelinfo,
-										 remoteslot_part);
-			break;
+	return remoteslot_part;
+}
 
-		case CMD_DELETE:
-			apply_handle_delete_internal(edata, partrelinfo,
-										 remoteslot_part,
-										 part_entry->localindexoid);
-			break;
+/*
+ * Workhorse for apply_handle_update()
+ *
+ * relinfo and relentry are for the relation we're actually updating (could be
+ * a child partition of edata->targetRelInfo).
+ *
+ * remoteslot holds the tuple corresponding to the rowtype of the relation to
+ * be updated. remoteslot_root holds the tuple of the logical replication
+ * target relation's (edata->targetRelInfo) rowtype.
+ */
+static void
+apply_handle_update_internal(ApplyExecutionData *edata,
+							 ResultRelInfo *relinfo,
+							 TupleTableSlot *remoteslot_root,
+							 TupleTableSlot *remoteslot,
+							 LogicalRepTupleData *newtup,
+							 LogicalRepRelMapEntry *relentry)
+{
+	EState	   *estate = edata->estate;
+	Relation	targetrel = relinfo->ri_RelationDesc;
+	Relation	rootrel = edata->targetRelInfo->ri_RelationDesc;
+	MemoryContext oldctx;
+	TupleTableSlot *localslot,
+			   *newslot;
+	EPQState	epqstate;
+	bool		found;
 
-		case CMD_UPDATE:
+	found = FindReplTupleInLocalRel(edata, targetrel, &relentry->remoterel,
+									relentry->localindexoid,
+									remoteslot, &localslot);
+	if (!found)
+	{
+		report_tuple_not_found(CMD_UPDATE, targetrel, edata->proute);
+		return;
+	}
 
-			/*
-			 * For UPDATE, depending on whether or not the updated tuple
-			 * satisfies the partition's constraint, perform a simple UPDATE
-			 * of the partition or move the updated tuple into a different
-			 * suitable partition.
-			 */
-			{
-				TupleTableSlot *localslot;
-				ResultRelInfo *partrelinfo_new;
-				Relation	partrel_new;
-				bool		found;
-				EPQState	epqstate;
-
-				/* Get the matching local tuple from the partition. */
-				found = FindReplTupleInLocalRel(edata, partrel,
-												&part_entry->remoterel,
-												part_entry->localindexoid,
-												remoteslot_part, &localslot);
-				if (!found)
-				{
-					/*
-					 * The tuple to be updated could not be found.  Do nothing
-					 * except for emitting a log message.
-					 *
-					 * XXX should this be promoted to ereport(LOG) perhaps?
-					 */
-					elog(DEBUG1,
-						 "logical replication did not find row to be updated "
-						 "in replication target relation's partition \"%s\"",
-						 RelationGetRelationName(partrel));
-					return;
-				}
+	/* Store the new tuple into the slot. */
+	newslot = remoteslot;
+	oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
+	slot_modify_data(newslot, localslot, relentry, newtup);
+	MemoryContextSwitchTo(oldctx);
 
-				/*
-				 * Apply the update to the local tuple, putting the result in
-				 * remoteslot_part.
-				 */
-				oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
-				slot_modify_data(remoteslot_part, localslot, part_entry,
-								 newtup);
-				MemoryContextSwitchTo(oldctx);
+	EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
+	ExecOpenIndices(relinfo, false);
 
-				EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
-				ExecOpenIndices(partrelinfo, false);
+	/*
+	 * If the target table is a partition of the original table
+	 * (edata->targetRelInfo) and the updated tuple still satisfies the
+	 * constraint of that partition, or if the original table is not a
+	 * partitioned table at all, a simple UPDATE operation is performed.
+	 * Otherwise, the updated tuple is moved to a different partition that is
+	 * suitable for it.
+	 */
+	if (rootrel->rd_rel->relkind != RELKIND_PARTITIONED_TABLE ||
+		!targetrel->rd_rel->relispartition ||
+		ExecPartitionCheck(relinfo, newslot, estate, false))
+	{
+		/* Simply UPDATE the partition. */
+		EvalPlanQualSetSlot(&epqstate, newslot);
+		TargetPrivilegesCheck(relinfo->ri_RelationDesc, ACL_UPDATE);
+		ExecSimpleRelationUpdate(relinfo, estate, &epqstate,
+								 localslot, newslot);
+	}
+	else
+	{
+		/* Move the tuple into the new partition. */
+		ResultRelInfo *partrelinfo_new;
 
-				/*
-				 * Does the updated tuple still satisfy the current
-				 * partition's constraint?
-				 */
-				if (!partrel->rd_rel->relispartition ||
-					ExecPartitionCheck(partrelinfo, remoteslot_part, estate,
-									   false))
-				{
-					/*
-					 * Yes, so simply UPDATE the partition.  We don't call
-					 * apply_handle_update_internal() here, which would
-					 * normally do the following work, to avoid repeating some
-					 * work already done above to find the local tuple in the
-					 * partition.
-					 */
-					EvalPlanQualSetSlot(&epqstate, remoteslot_part);
-					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc,
-										  ACL_UPDATE);
-					ExecSimpleRelationUpdate(partrelinfo, estate, &epqstate,
-											 localslot, remoteslot_part);
-				}
-				else
-				{
-					/* Move the tuple into the new partition. */
-
-					/*
-					 * New partition will be found using tuple routing, which
-					 * can only occur via the parent table.  We might need to
-					 * convert the tuple to the parent's rowtype.  Note that
-					 * this is the tuple found in the partition, not the
-					 * original search tuple received by this function.
-					 */
-					if (map)
-					{
-						TupleConversionMap *PartitionToRootMap =
-							convert_tuples_by_name(RelationGetDescr(partrel),
-												   RelationGetDescr(parentrel));
+		/*
+		 * Simply DELETE old tuple found in the old partition. We don't call
+		 * apply_handle_delete_internal() here to avoid repeating some work
+		 * already done above to find the local tuple in the partition.
+		 */
+		EvalPlanQualSetSlot(&epqstate, localslot);
+		TargetPrivilegesCheck(relinfo->ri_RelationDesc, ACL_DELETE);
+		ExecSimpleRelationDelete(relinfo, estate, &epqstate, localslot);
 
-						remoteslot =
-							execute_attr_map_slot(PartitionToRootMap->attrMap,
-												  remoteslot_part, remoteslot);
-					}
-					else
-					{
-						remoteslot = ExecCopySlot(remoteslot, remoteslot_part);
-						slot_getallattrs(remoteslot);
-					}
+		/*
+		 * New partition will be found using tuple routing, which can only
+		 * occur via the parent table, so we need to convert the tuple to the
+		 * parent's rowtype if the column order of the partition differs from
+		 * that of the parent table.
+		 */
+		if (ExecGetRootToChildMap(relinfo, estate) != NULL)
+		{
+			TupleConversionMap *PartitionToRootMap =
+				convert_tuples_by_name(RelationGetDescr(targetrel),
+									   RelationGetDescr(rootrel));
 
-					/* Find the new partition. */
-					oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
-					partrelinfo_new = ExecFindPartition(mtstate, relinfo,
-														proute, remoteslot,
-														estate);
-					MemoryContextSwitchTo(oldctx);
-					Assert(partrelinfo_new != partrelinfo);
-					partrel_new = partrelinfo_new->ri_RelationDesc;
-
-					/* Check that new partition also has supported relkind. */
-					CheckSubscriptionRelkind(partrel_new->rd_rel->relkind,
-											 get_namespace_name(RelationGetNamespace(partrel_new)),
-											 RelationGetRelationName(partrel_new));
-
-					/*
-					 * Simply DELETE old tuple found in the old partition. We
-					 * don't call apply_handle_delete_internal() here to avoid
-					 * repeating some work already done above to find the
-					 * local tuple in the partition.
-					 */
-					EvalPlanQualSetSlot(&epqstate, localslot);
-					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc, ACL_DELETE);
-					ExecSimpleRelationDelete(partrelinfo, estate, &epqstate, localslot);
-
-					/* INSERT new tuple into the new partition. */
-
-					/*
-					 * Convert the replacement tuple to match the destination
-					 * partition rowtype.
-					 */
-					oldctx = MemoryContextSwitchTo(GetPerTupleMemoryContext(estate));
-					remoteslot_part = partrelinfo_new->ri_PartitionTupleSlot;
-					if (remoteslot_part == NULL)
-						remoteslot_part = table_slot_create(partrel_new,
-															&estate->es_tupleTable);
-					map = ExecGetRootToChildMap(partrelinfo_new, estate);
-					if (map != NULL)
-					{
-						remoteslot_part = execute_attr_map_slot(map->attrMap,
-																remoteslot,
-																remoteslot_part);
-					}
-					else
-					{
-						remoteslot_part = ExecCopySlot(remoteslot_part,
-													   remoteslot);
-						slot_getallattrs(remoteslot);
-					}
-					MemoryContextSwitchTo(oldctx);
-					apply_handle_insert_internal(edata, partrelinfo_new,
-												 remoteslot_part);
-				}
+			remoteslot_root =
+				execute_attr_map_slot(PartitionToRootMap->attrMap,
+									  newslot, remoteslot_root);
+		}
+		else
+		{
+			remoteslot_root = ExecCopySlot(remoteslot_root, newslot);
+			slot_getallattrs(remoteslot_root);
+		}
 
-				ExecCloseIndices(partrelinfo);
-				EvalPlanQualEnd(&epqstate);
-			}
-			break;
+		/* Find the partition and convert the tuple to its rowtype. */
+		newslot = apply_handle_tuple_routing(edata, CMD_INSERT,
+											 remoteslot_root,
+											 &partrelinfo_new);
 
-		default:
-			elog(ERROR, "unrecognized CmdType: %d", (int) operation);
-			break;
+		/* INSERT new tuple into the new partition. */
+		apply_handle_insert_internal(edata, partrelinfo_new, newslot);
 	}
+
+	/* Cleanup. */
+	ExecCloseIndices(relinfo);
+	EvalPlanQualEnd(&epqstate);
 }
 
 /*
diff --git a/src/test/subscription/t/013_partition.pl b/src/test/subscription/t/013_partition.pl
index 29580525a9..d1e21daaca 100644
--- a/src/test/subscription/t/013_partition.pl
+++ b/src/test/subscription/t/013_partition.pl
@@ -378,7 +378,7 @@ ok( $logfile =~
 	  qr/logical replication did not find row to be deleted in replication target relation "tab1_1"/,
 	'delete target row is missing in tab1_1');
 ok( $logfile =~
-	  qr/logical replication did not find row to be deleted in replication target relation "tab1_2_2"/,
+	  qr/logical replication did not find row to be deleted in replication target relation's partition "tab1_2_2"/,
 	'delete target row is missing in tab1_2_2');
 ok( $logfile =~
 	  qr/logical replication did not find row to be deleted in replication target relation "tab1_def"/,
@@ -799,7 +799,7 @@ ok( $logfile =~
 	  qr/logical replication did not find row to be updated in replication target relation's partition "tab2_1"/,
 	'update target row is missing in tab2_1');
 ok( $logfile =~
-	  qr/logical replication did not find row to be deleted in replication target relation "tab2_1"/,
+	  qr/logical replication did not find row to be deleted in replication target relation's partition "tab2_1"/,
 	'delete target row is missing in tab2_1');
 
 $node_subscriber1->append_conf('postgresql.conf',
-- 
2.31.1

perftest.conf (application/octet-stream)
v1-0001-avoid-duplicate-table-scan-for-cross-partition-up.patch (application/octet-stream)
From 71d668ee94571693283e2be1a57bf43400fc7ace Mon Sep 17 00:00:00 2001
From: Hou Zhijie <houzj.fnst@cn.fujitsu.com>
Date: Thu, 25 Jul 2024 15:36:47 +0800
Subject: [PATCH v1 1/2] avoid duplicate table scan for cross partition update

When performing a cross-partition update action in apply worker, it
accidentally scans the old partition twice, resulting in noticeable overhead.
This commit optimizes it by removing the redundant table scan.
---
 src/backend/replication/logical/worker.c | 27 ++++++++++++++----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index ec96b5fe85..9a61679130 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2991,6 +2991,7 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 				ResultRelInfo *partrelinfo_new;
 				Relation	partrel_new;
 				bool		found;
+				EPQState	epqstate;
 
 				/* Get the matching local tuple from the partition. */
 				found = FindReplTupleInLocalRel(edata, partrel,
@@ -3021,6 +3022,9 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 								 newtup);
 				MemoryContextSwitchTo(oldctx);
 
+				EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
+				ExecOpenIndices(partrelinfo, false);
+
 				/*
 				 * Does the updated tuple still satisfy the current
 				 * partition's constraint?
@@ -3036,18 +3040,11 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 					 * work already done above to find the local tuple in the
 					 * partition.
 					 */
-					EPQState	epqstate;
-
-					EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
-					ExecOpenIndices(partrelinfo, false);
-
 					EvalPlanQualSetSlot(&epqstate, remoteslot_part);
 					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc,
 										  ACL_UPDATE);
 					ExecSimpleRelationUpdate(partrelinfo, estate, &epqstate,
 											 localslot, remoteslot_part);
-					ExecCloseIndices(partrelinfo);
-					EvalPlanQualEnd(&epqstate);
 				}
 				else
 				{
@@ -3090,10 +3087,15 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 											 get_namespace_name(RelationGetNamespace(partrel_new)),
 											 RelationGetRelationName(partrel_new));
 
-					/* DELETE old tuple found in the old partition. */
-					apply_handle_delete_internal(edata, partrelinfo,
-												 localslot,
-												 part_entry->localindexoid);
+					/*
+					 * Simply DELETE old tuple found in the old partition. We
+					 * don't call apply_handle_delete_internal() here to avoid
+					 * repeating some work already done above to find the
+					 * local tuple in the partition.
+					 */
+					EvalPlanQualSetSlot(&epqstate, localslot);
+					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc, ACL_DELETE);
+					ExecSimpleRelationDelete(partrelinfo, estate, &epqstate, localslot);
 
 					/* INSERT new tuple into the new partition. */
 
@@ -3123,6 +3125,9 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 					apply_handle_insert_internal(edata, partrelinfo_new,
 												 remoteslot_part);
 				}
+
+				ExecCloseIndices(partrelinfo);
+				EvalPlanQualEnd(&epqstate);
 			}
 			break;
 
-- 
2.31.1

#2 Kirill Reshke
reshkekirill@gmail.com
In reply to: Zhijie Hou (Fujitsu) (#1)
Re: Remove duplicate table scan in logical apply worker and code refactoring

Hi!

When reviewing the code in logical/worker.c, I noticed that when applying a
cross-partition update, the apply worker scans the old partition twice.

Nice catch!

-/*
- * Workhorse for apply_handle_update()
- * relinfo is for the relation we're actually updating in
- * (could be a child partition of edata->targetRelInfo)
- */
-static void
-apply_handle_update_internal(ApplyExecutionData *edata,
-							 ResultRelInfo *relinfo,
-							 TupleTableSlot *remoteslot,
-							 LogicalRepTupleData *newtup,
-							 Oid localindexoid)
-{

What's the necessity of this change? Can we modify the function in place
instead of removing and rewriting it in the same file?
That would reduce the diff, making the patch a bit clearer.

+/*
+ * If the tuple to be modified could not be found, a log message is emitted.
+ */
+static void
+report_tuple_not_found(CmdType cmd, Relation targetrel, bool is_partition)
+{
+	Assert(cmd == CMD_UPDATE || cmd == CMD_DELETE);
+
+	/* XXX should this be promoted to ereport(LOG) perhaps? */
+	elog(DEBUG1,
+		 "logical replication did not find row to be %s in replication target relation%s \"%s\"",
+		 cmd == CMD_UPDATE ? "updated" : "deleted",
+		 is_partition ? "'s partition" : "",
+		 RelationGetRelationName(targetrel));
+}

Encapsulating the report logic inside a function is OK, but the double
ternary expression is a bit out of line. I do not see similar places within
PostgreSQL, so it probably violates the code style.
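
Not part of the patch, just a minimal sketch of a shape that keeps each
message string whole, assuming the helper is kept at all (the message
strings are copied from the existing elog calls; everything else is
illustrative):

```c
static void
report_tuple_not_found(CmdType cmd, Relation targetrel, bool is_partition)
{
	Assert(cmd == CMD_UPDATE || cmd == CMD_DELETE);

	/* One complete message per case; repetitive, but translatable. */
	if (cmd == CMD_UPDATE && is_partition)
		elog(DEBUG1,
			 "logical replication did not find row to be updated "
			 "in replication target relation's partition \"%s\"",
			 RelationGetRelationName(targetrel));
	else if (cmd == CMD_UPDATE)
		elog(DEBUG1,
			 "logical replication did not find row to be updated "
			 "in replication target relation \"%s\"",
			 RelationGetRelationName(targetrel));
	else if (is_partition)
		elog(DEBUG1,
			 "logical replication did not find row to be deleted "
			 "in replication target relation's partition \"%s\"",
			 RelationGetRelationName(targetrel));
	else
		elog(DEBUG1,
			 "logical replication did not find row to be deleted "
			 "in replication target relation \"%s\"",
			 RelationGetRelationName(targetrel));
}
```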

#3 Ashutosh Bapat
ashutosh.bapat.oss@gmail.com
In reply to: Kirill Reshke (#2)
Re: Remove duplicate table scan in logical apply worker and code refactoring

On Thu, Jul 25, 2024 at 5:56 PM Kirill Reshke <reshkekirill@gmail.com> wrote:

+/*
+ * If the tuple to be modified could not be found, a log message is emitted.
+ */
+static void
+report_tuple_not_found(CmdType cmd, Relation targetrel, bool is_partition)
+{
+	Assert(cmd == CMD_UPDATE || cmd == CMD_DELETE);
+
+	/* XXX should this be promoted to ereport(LOG) perhaps? */
+	elog(DEBUG1,
+		 "logical replication did not find row to be %s in replication target relation%s \"%s\"",
+		 cmd == CMD_UPDATE ? "updated" : "deleted",
+		 is_partition ? "'s partition" : "",
+		 RelationGetRelationName(targetrel));
+}

Encapsulating the report logic inside a function is OK, but the double
ternary expression is a bit out of line. I do not see similar places within
PostgreSQL, so it probably violates the code style.

The way it's written, it would also make things hard for translations.
See [1], which redirects to [2].

[1]: https://www.postgresql.org/docs/current/error-style-guide.html
[2]: https://www.postgresql.org/docs/current/nls-programmer.html#NLS-GUIDELINES

--
Best Wishes,
Ashutosh Bapat

#4 Amit Kapila
amit.kapila16@gmail.com
In reply to: Zhijie Hou (Fujitsu) (#1)
Re: Remove duplicate table scan in logical apply worker and code refactoring

On Thu, Jul 25, 2024 at 4:00 PM Zhijie Hou (Fujitsu)
<houzj.fnst@fujitsu.com> wrote:

When reviewing the code in logical/worker.c, I noticed that when applying a
cross-partition update, the apply worker scans the old partition twice.
I am attaching patch 0001 to remove this duplicate table scan.

The test shows that it brings noticeable improvement:

Steps
-----
Pub:
create table tab (a int not null, b int);
alter table tab replica identity full;
insert into tab select 1,generate_series(1, 1000000, 1);

Sub:
create table tab (a int not null, b int) partition by range (b);
create table tab_1 partition of tab for values from (minvalue) to (5000000);
create table tab_2 partition of tab for values from (5000000) to (maxvalue);
alter table tab replica identity full;

Test query:
update tab set b = 6000000 where b > 999900; -- UPDATE 100

Results (time spent by the apply worker to apply all the UPDATEs):
Before: 14s
After: 7s
-----

The idea sounds good to me. BTW, we don't need the following comment
in the 0001 patch:
We
+					 * don't call apply_handle_delete_internal() here to avoid
+					 * repeating some work already done above to find the
+					 * local tuple in the partition.

It is implied by the change and we already follow the same for the update.

--
With Regards,
Amit Kapila.

#5 Amit Kapila
amit.kapila16@gmail.com
In reply to: Kirill Reshke (#2)
Re: Remove duplicate table scan in logical apply worker and code refactoring

On Thu, Jul 25, 2024 at 5:56 PM Kirill Reshke <reshkekirill@gmail.com> wrote:

+/*
+ * If the tuple to be modified could not be found, a log message is emitted.
+ */
+static void
+report_tuple_not_found(CmdType cmd, Relation targetrel, bool is_partition)
+{
+	Assert(cmd == CMD_UPDATE || cmd == CMD_DELETE);
+
+	/* XXX should this be promoted to ereport(LOG) perhaps? */
+	elog(DEBUG1,
+		 "logical replication did not find row to be %s in replication target relation%s \"%s\"",
+		 cmd == CMD_UPDATE ? "updated" : "deleted",
+		 is_partition ? "'s partition" : "",
+		 RelationGetRelationName(targetrel));
+}

Encapsulating the report logic inside a function is OK,

This could even be a separate patch, as it is not directly related to the
other parts of the 0002 patch. BTW, the problem statement for 0002 is not
explicitly stated, i.e., which part of the code we want to optimize by
removing the duplication. Also, as proposed, the name
apply_handle_tuple_routing() doesn't seem suitable, as the function no
longer remains similar to the other apply_handle_* functions, where we
perform the required operation like inserting or updating the tuple. How
about naming it apply_tuple_routing()?

--
With Regards,
Amit Kapila.

#6 Hayato Kuroda (Fujitsu)
kuroda.hayato@fujitsu.com
In reply to: Zhijie Hou (Fujitsu) (#1)
RE: Remove duplicate table scan in logical apply worker and code refactoring

Dear Hou,

Thanks for creating a patch!

When reviewing the code in logical/worker.c, I noticed that when applying a
cross-partition update, the apply worker scans the old partition twice.
I am attaching patch 0001 to remove this duplicate table scan.

Just to clarify: you mean that FindReplTupleInLocalRel() is called both in
apply_handle_tuple_routing() and in
apply_handle_tuple_routing()->apply_handle_delete_internal(), each call
requiring an index or sequential scan, right? LGTM.

Apart from the above, I found quite a lot of duplicated code related to
partition handling (e.g., apply_handle_tuple_routing), so I tried to extract
some common logic to simplify the code. Please see 0002 for this refactoring.

IIUC, you wanted to remove the data-modification code from
apply_handle_tuple_routing() and keep only the partition-detection part. Is
that right? Anyway, here are some comments.

01. apply_handle_insert()

```
+    targetRelInfo = edata->targetRelInfo;
+
     /* For a partitioned table, insert the tuple into a partition. */
     if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
-        apply_handle_tuple_routing(edata,
-                                   remoteslot, NULL, CMD_INSERT);
-    else
-        apply_handle_insert_internal(edata, edata->targetRelInfo,
-                                     remoteslot);
+        remoteslot = apply_handle_tuple_routing(edata, CMD_INSERT, remoteslot,
+                                                &targetRelInfo);
+
+    /* For a partitioned table, insert the tuple into a partition. */
+    apply_handle_insert_internal(edata, targetRelInfo, remoteslot);
```

This part repeats the same comment, and the substitution is unnecessary in
the case of a normal table. How about:

```
-    /* For a partitioned table, insert the tuple into a partition. */
+    /*
+     * Find the actual target table if the table is partitioned. Otherwise, use
+     * the same table as the remote one.
+     */
     if (rel->localrel->rd_rel->relkind == RELKIND_PARTITIONED_TABLE)
-        apply_handle_tuple_routing(edata,
-                                   remoteslot, NULL, CMD_INSERT);
+        remoteslot = apply_handle_tuple_routing(edata, CMD_INSERT, remoteslot,
+                                                &targetRelInfo);
     else
-        apply_handle_insert_internal(edata, edata->targetRelInfo,
-                                     remoteslot);
+        targetRelInfo = edata->targetRelInfo;
+
+    /* Insert a tuple to the target table */
+    apply_handle_insert_internal(edata, targetRelInfo, remoteslot);
```

02. apply_handle_tuple_routing()

```
 /*
- * This handles insert, update, delete on a partitioned table.
+ * Determine the partition in which the tuple in slot is to be inserted, and
...
```

But this function is called from delete_internal(). How about "Determine the
partition to which the tuple in the slot belongs."?

03. apply_handle_tuple_routing()

Do you have a reason why this returns `TupleTableSlot *` rather than
`ResultRelInfo *`? I am not sure, but returning the relation info seems more
natural to me, since this is a routing function.
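
For concreteness, the alternative shape this comment points at might look
like the following hypothetical signature (not part of the patch):

```c
/*
 * Hypothetical alternative: return the routed partition's ResultRelInfo
 * and hand the converted tuple back through an out parameter.
 */
static ResultRelInfo *
apply_handle_tuple_routing(ApplyExecutionData *edata, CmdType operation,
						   TupleTableSlot *remoteslot,
						   TupleTableSlot **remoteslot_part);
```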

04. apply_handle_update()

```
+    targetRelInfo = edata->targetRelInfo;
+    targetrel = rel;
+    remoteslot_root = remoteslot;
```

Here I can say the same thing as in comment 01.

05. apply_handle_update_internal()

It looks like the ordering of the function implementations has changed. Is
that intentional?

before

apply_handle_update
apply_handle_update_internal
apply_handle_delete
apply_handle_delete_internal
FindReplTupleInLocalRel
apply_handle_tuple_routing

after

apply_handle_update
apply_handle_delete
apply_handle_delete_internal
FindReplTupleInLocalRel
apply_handle_tuple_routing
apply_handle_update_internal

06. apply_handle_delete_internal()

```
+    targetRelInfo = edata->targetRelInfo;
+    targetrel = rel;
+
```

Here I can say the same thing as in comment 01.

Best regards,
Hayato Kuroda
FUJITSU LIMITED

#7 Zhijie Hou (Fujitsu)
houzj.fnst@fujitsu.com
In reply to: Hayato Kuroda (Fujitsu) (#6)
1 attachment(s)
RE: Remove duplicate table scan in logical apply worker and code refactoring

On Wednesday, July 31, 2024 5:07 PM Kuroda, Hayato/黒田 隼人 <kuroda.hayato@fujitsu.com> wrote:

Dear Hou,

When reviewing the code in logical/worker.c, I noticed that when
applying a cross-partition update, the apply worker scans the old partition twice.
I am attaching patch 0001 to remove this duplicate table scan.

Just to clarify: you mean that FindReplTupleInLocalRel() is called both in
apply_handle_tuple_routing() and in
apply_handle_tuple_routing()->apply_handle_delete_internal(), each call
requiring an index or sequential scan, right? LGTM.

Thanks for reviewing the patch, and your understanding is correct.

Here is the updated patch 0001. I removed the comments as suggested by Amit.

Since the 0002 patch only refactors the code and I need some time to go
through the review comments for it, I will hold it until 0001 is committed.

Best Regards,
Hou zj

Attachments:

v2-0001-avoid-duplicate-table-scan-for-cross-partition-up.patch (application/octet-stream)
From 7ff41153969a805dcaf6195fed0aacaf30ea51af Mon Sep 17 00:00:00 2001
From: Hou Zhijie <houzj.fnst@cn.fujitsu.com>
Date: Thu, 25 Jul 2024 15:36:47 +0800
Subject: [PATCH v2] avoid duplicate table scan for cross partition update

When performing a cross-partition update action in apply worker, it
accidentally scans the old partition twice, resulting in noticeable overhead.
This commit optimizes it by removing the redundant table scan.
---
 src/backend/replication/logical/worker.c | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/backend/replication/logical/worker.c b/src/backend/replication/logical/worker.c
index ec96b5fe85..6dc54c7283 100644
--- a/src/backend/replication/logical/worker.c
+++ b/src/backend/replication/logical/worker.c
@@ -2991,6 +2991,7 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 				ResultRelInfo *partrelinfo_new;
 				Relation	partrel_new;
 				bool		found;
+				EPQState	epqstate;
 
 				/* Get the matching local tuple from the partition. */
 				found = FindReplTupleInLocalRel(edata, partrel,
@@ -3021,6 +3022,9 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 								 newtup);
 				MemoryContextSwitchTo(oldctx);
 
+				EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
+				ExecOpenIndices(partrelinfo, false);
+
 				/*
 				 * Does the updated tuple still satisfy the current
 				 * partition's constraint?
@@ -3036,18 +3040,11 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 					 * work already done above to find the local tuple in the
 					 * partition.
 					 */
-					EPQState	epqstate;
-
-					EvalPlanQualInit(&epqstate, estate, NULL, NIL, -1, NIL);
-					ExecOpenIndices(partrelinfo, false);
-
 					EvalPlanQualSetSlot(&epqstate, remoteslot_part);
 					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc,
 										  ACL_UPDATE);
 					ExecSimpleRelationUpdate(partrelinfo, estate, &epqstate,
 											 localslot, remoteslot_part);
-					ExecCloseIndices(partrelinfo);
-					EvalPlanQualEnd(&epqstate);
 				}
 				else
 				{
@@ -3091,9 +3088,9 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 											 RelationGetRelationName(partrel_new));
 
 					/* DELETE old tuple found in the old partition. */
-					apply_handle_delete_internal(edata, partrelinfo,
-												 localslot,
-												 part_entry->localindexoid);
+					EvalPlanQualSetSlot(&epqstate, localslot);
+					TargetPrivilegesCheck(partrelinfo->ri_RelationDesc, ACL_DELETE);
+					ExecSimpleRelationDelete(partrelinfo, estate, &epqstate, localslot);
 
 					/* INSERT new tuple into the new partition. */
 
@@ -3123,6 +3120,9 @@ apply_handle_tuple_routing(ApplyExecutionData *edata,
 					apply_handle_insert_internal(edata, partrelinfo_new,
 												 remoteslot_part);
 				}
+
+				ExecCloseIndices(partrelinfo);
+				EvalPlanQualEnd(&epqstate);
 			}
 			break;
 
-- 
2.30.0.windows.2

#8 Hayato Kuroda (Fujitsu)
kuroda.hayato@fujitsu.com
In reply to: Zhijie Hou (Fujitsu) (#7)
1 attachment(s)
RE: Remove duplicate table scan in logical apply worker and code refactoring

Dear Hou,

Thanks for reviewing the patch, and your understanding is correct.

Here is the updated patch 0001. I removed the comments as suggested by Amit.

Since the 0002 patch only refactors the code and I need some time to go
through the review comments for it, I will hold it until 0001 is committed.

Thanks for updating the patch. I did performance testing with v2-0001.

Before: 15.553 [s]
After: 7.593 [s]

I used the attached script for setup, with almost the same settings as
above, and synchronous replication was used.

[machine]
CPU(s): 120
Model name: Intel(R) Xeon(R) CPU E7-4890 v2 @ 2.80GHz
Core(s) per socket: 15
Socket(s): 4

Best regards,
Hayato Kuroda
FUJITSU LIMITED

Attachments:

test.sh (application/octet-stream)
#9 Amit Kapila
amit.kapila16@gmail.com
In reply to: Hayato Kuroda (Fujitsu) (#8)
Re: Remove duplicate table scan in logical apply worker and code refactoring

On Thu, Aug 1, 2024 at 7:29 AM Hayato Kuroda (Fujitsu)
<kuroda.hayato@fujitsu.com> wrote:

Thanks for reviewing the patch, and your understanding is correct.

Here is the updated patch 0001. I removed the comments as suggested by Amit.

Since the 0002 patch only refactors the code and I need some time to go
through the review comments for it, I will hold it until 0001 is committed.

Thanks for updating the patch. I did performance testing with v2-0001.

Before: 15.553 [s]
After: 7.593 [s]

Thanks for the testing. I have pushed the patch.

--
With Regards,
Amit Kapila.