From 17fb22142ade65fdbe8c90889e49d0be60ba45e4 Mon Sep 17 00:00:00 2001
From: Melanie Plageman <melanieplageman@gmail.com>
Date: Fri, 4 Mar 2022 15:53:05 -0500
Subject: [PATCH v6 3/4] BTree index use unbuffered IO optimization

While building a btree index, the backend can avoid fsync'ing all of the
pages if it uses the optimization introduced in a prior commit.

This can substantially improve performance when many indexes are being
built during DDL operations.
---
 src/backend/access/nbtree/nbtree.c  | 2 +-
 src/backend/access/nbtree/nbtsort.c | 6 +++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c
index 6b78acefbe..fc5cce4603 100644
--- a/src/backend/access/nbtree/nbtree.c
+++ b/src/backend/access/nbtree/nbtree.c
@@ -161,7 +161,7 @@ btbuildempty(Relation index)
 	 * internally. However, were this to be replaced with unbuffered_extend(),
 	 * do_wal must be true to ensure the data is logged and fsync'd.
 	 */
-	unbuffered_prep(&wstate, true, false);
+	unbuffered_prep(&wstate, true, true);
 
 	/* Construct metapage. */
 	metapage = (Page) palloc(BLCKSZ);
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index d6d0d4b361..f1b9e2e24e 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -1189,7 +1189,11 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 	int64		tuples_done = 0;
 	bool		deduplicate;
 
-	unbuffered_prep(&wstate->ub_wstate, wstate->btws_use_wal, false);
+	/*
+	 * The fsync optimization done by directmgr is only relevant when the
+	 * index build is WAL-logged, so pass btws_use_wal as the third
+	 * parameter as well.
+	 */
+	unbuffered_prep(&wstate->ub_wstate, wstate->btws_use_wal, wstate->btws_use_wal);
 
 	deduplicate = wstate->inskey->allequalimage && !btspool->isunique &&
 		BTGetDeduplicateItems(wstate->index);
-- 
2.30.2

