From e0f8a3c4bd6863c90d4169b5cd6a222c7045d6ad Mon Sep 17 00:00:00 2001
From: Peter Geoghegan <peter.geoghegan86@gmail.com>
Date: Wed, 29 Jul 2015 15:38:12 -0700
Subject: [PATCH 1/2] Quicksort when only one initial run is produced

Problem
=======

Previously, in the event of only producing a single run, by the time
tuplesort_performsort() is called tuplesort had only incrementally
spilled tuples from the memtuples array (which is a heap at that stage)
onto tape.  The incremental spilling occurs by consuming from the heap,
meaning that on-tape tuples are sorted.  Following dumping all tuples at
once (in a similar fashion to how tuples were incrementally spilled
before that point) and observing that there was only ever one run,
tuplesort realized its "one run" optimization was applicable (In
general, tuplesort imagined that once a sort cannot fit entirely in
memory, all tuples must be dumped within tuplesort_performsort()).

All tuples end up sorted and on tape (tuple comparisons occur as the
heap invariant is maintained as the memtuples heap is spilled).  This
approach is useful because no final merge step is required, unlike the
"merge sort" case where more than one run is used and multiple tapes
must be merged.  However, it had a number of significant disadvantages,
including:

* The tuples often don't actually need to be written out to tape/disk,
but that happens anyway.

* The sort step is a degenerate heapsort.  Heapsort is an
algorithm that performs very poorly on modern CPUs with fast caches.

* Since we are not quicksorting, that means we're unable to use the
"onlyKey" optimization.  Single-attribute heaptuple and datumtuple sorts
that don't use abbreviated keys benefit from this optimization
considerably.

Solution
========

We can often (but not always) do better. In the event of:

1. Having only a single conventional run (or, more specifically, only
one run that made it to disk/tape -- a new, broader criterion for
applying a "one run" optimization)

2. The caller not requiring random access (a new, more restrictive
criterion for applying a "one run" optimization)

Then:

1. Split input tuples into two conceptual "subruns":  one on-disk, and
the other in-memory

2. Quicksort the memtuples array subrun (the other subrun is already
sorted when the decision to apply the new optimization is made)

3. Finally, merge the new subruns on-the-fly, much like the existing
"external merge" case

Testing shows that this can perform significantly better than the
existing "one run" optimization.
---
 src/backend/commands/explain.c     |  13 +-
 src/backend/utils/sort/tuplesort.c | 332 +++++++++++++++++++++++++++++++++----
 src/include/utils/tuplesort.h      |   3 +-
 3 files changed, 309 insertions(+), 39 deletions(-)

diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index 5d06fa4..4f61ee2 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -2178,20 +2178,27 @@ show_sort_info(SortState *sortstate, ExplainState *es)
 		const char *sortMethod;
 		const char *spaceType;
 		long		spaceUsed;
+		int			rowsQuicksorted;
 
-		tuplesort_get_stats(state, &sortMethod, &spaceType, &spaceUsed);
+		tuplesort_get_stats(state, &sortMethod, &spaceType, &spaceUsed,
+							&rowsQuicksorted);
 
 		if (es->format == EXPLAIN_FORMAT_TEXT)
 		{
 			appendStringInfoSpaces(es->str, es->indent * 2);
-			appendStringInfo(es->str, "Sort Method: %s  %s: %ldkB\n",
-							 sortMethod, spaceType, spaceUsed);
+			appendStringInfo(es->str,
+							 "Sort Method: %s  %s: %ldkB  Rows Quicksorted: %d\n",
+							 sortMethod,
+							 spaceType,
+							 spaceUsed,
+							 rowsQuicksorted);
 		}
 		else
 		{
 			ExplainPropertyText("Sort Method", sortMethod, es);
 			ExplainPropertyLong("Sort Space Used", spaceUsed, es);
 			ExplainPropertyText("Sort Space Type", spaceType, es);
+			ExplainPropertyInteger("Rows Quicksorted", rowsQuicksorted, es);
 		}
 	}
 }
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index 435041a..5371f4f 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -72,6 +72,15 @@
  * to one run per logical tape.  The final merge is then performed
  * on-the-fly as the caller repeatedly calls tuplesort_getXXX; this
  * saves one cycle of writing all the data out to disk and reading it in.
+ * Also, if only one run is spilled to tape so far when
+ * tuplesort_performsort() is reached, and if the caller does not require
+ * random access, then the merge step can take place between still
+ * in-memory tuples, and tuples stored on tape (a second run may exist
+ * in the memtuples array; what matters is only whether a second run
+ * has spilled to tape).  This ensures that spilling to disk only occurs for a number of
+ * tuples approximately equal to the number of tuples read in after
+ * work_mem was reached and it became apparent that an external sort is
+ * required.
  *
  * Before Postgres 8.2, we always used a seven-tape polyphase merge, on the
  * grounds that 7 is the "sweet spot" on the tapes-to-passes curve according
@@ -186,6 +195,7 @@ typedef enum
 	TSS_BUILDRUNS,				/* Loading tuples; writing to tape */
 	TSS_SORTEDINMEM,			/* Sort completed entirely in memory */
 	TSS_SORTEDONTAPE,			/* Sort completed, final run is on tape */
+	TSS_MEMTAPEMERGE,			/* Performing memory/tape merge on-the-fly */
 	TSS_FINALMERGE				/* Performing final merge on-the-fly */
 } TupSortStatus;
 
@@ -327,12 +337,22 @@ struct Tuplesortstate
 	int			activeTapes;	/* # of active input tapes in merge pass */
 
 	/*
+	 * These variables are used after tuplesort_performsort() for the
+	 * TSS_MEMTAPEMERGE case.  This is a special, optimized final on-the-fly
+	 * merge pass involving merging the result tape with memtuples that were
+	 * quicksorted (but never made it out to a tape).
+	 */
+	SortTuple	tape_cache;		/* cached tape tuple from prior call */
+	bool		cached;			/* tape_cache holds pending tape tuple */
+	bool		just_memtuples;	/* merge only fetching from memtuples */
+
+	/*
 	 * These variables are used after completion of sorting to keep track of
 	 * the next tuple to return.  (In the tape case, the tape's current read
 	 * position is also critical state.)
 	 */
 	int			result_tape;	/* actual tape number of finished output */
-	int			current;		/* array index (only used if SORTEDINMEM) */
+	int			current;		/* array index (only used if quicksorted) */
 	bool		eof_reached;	/* reached EOF (needed for cursors) */
 
 	/* markpos_xxx holds marked position for mark and restore */
@@ -460,6 +480,7 @@ struct Tuplesortstate
 static Tuplesortstate *tuplesort_begin_common(int workMem, bool randomAccess);
 static void puttuple_common(Tuplesortstate *state, SortTuple *tuple);
 static bool consider_abort_common(Tuplesortstate *state);
+static void tuplesort_quicksort(Tuplesortstate *state);
 static void inittapes(Tuplesortstate *state);
 static void selectnewtape(Tuplesortstate *state);
 static void mergeruns(Tuplesortstate *state);
@@ -1549,6 +1570,23 @@ consider_abort_common(Tuplesortstate *state)
 	return false;
 }
 
+static void
+tuplesort_quicksort(Tuplesortstate *state)
+{
+	if (state->memtupcount > 1)
+	{
+		/* Can we use the single-key sort function? */
+		if (state->onlyKey != NULL)
+			qsort_ssup(state->memtuples, state->memtupcount,
+					   state->onlyKey);
+		else
+			qsort_tuple(state->memtuples,
+						state->memtupcount,
+						state->comparetup,
+						state);
+	}
+}
+
 /*
  * All tuples have been provided; finish the sort.
  */
@@ -1569,20 +1607,9 @@ tuplesort_performsort(Tuplesortstate *state)
 
 			/*
 			 * We were able to accumulate all the tuples within the allowed
-			 * amount of memory.  Just qsort 'em and we're done.
+			 * amount of memory.  Just quicksort 'em and we're done.
 			 */
-			if (state->memtupcount > 1)
-			{
-				/* Can we use the single-key sort function? */
-				if (state->onlyKey != NULL)
-					qsort_ssup(state->memtuples, state->memtupcount,
-							   state->onlyKey);
-				else
-					qsort_tuple(state->memtuples,
-								state->memtupcount,
-								state->comparetup,
-								state);
-			}
+			tuplesort_quicksort(state);
 			state->current = 0;
 			state->eof_reached = false;
 			state->markpos_offset = 0;
@@ -1609,12 +1636,111 @@ tuplesort_performsort(Tuplesortstate *state)
 
 			/*
 			 * Finish tape-based sort.  First, flush all tuples remaining in
-			 * memory out to tape; then merge until we have a single remaining
-			 * run (or, if !randomAccess, one run per tape). Note that
-			 * mergeruns sets the correct state->status.
+			 * memory out to tape where that's required (it's required when
+			 * more than one run is represented on tape, or when the caller
+			 * required random access).
+			 *
+			 * We do this before considering which of the three cases are
+			 * taken, because dumping tuples can increase currentRun,
+			 * altering the outcome.
 			 */
-			dumptuples(state, true);
-			mergeruns(state);
+			if (state->currentRun > 0 || state->randomAccess)
+				dumptuples(state, true);
+
+			if (state->currentRun > 1)
+			{
+				/*
+				 * Merge until we have a single remaining run (or, if
+				 * !randomAccess, one run per tape).  Note that mergeruns sets
+				 * the correct state->status.
+				 */
+				mergeruns(state);
+			}
+			else if (state->currentRun == 0)
+			{
+				/*
+				 * Avoid dumping most tuples.
+				 *
+				 * Since we produced only one initial run and the caller
+				 * does not require random access, we can use our result
+				 * tape as part of the finished output.  We quicksort the
+				 * remaining unsorted (although heapified) tuples in the
+				 * memtuples array, and merge that with the sorted-on-tape
+				 * contents of this run.
+				 *
+				 * This is similar to TSS_FINALMERGE, which also has an
+				 * on-the-fly merge step;  the major difference is that we
+				 * merge memtuples with a tape (each is therefore a "subrun").
+				 * Merging is fairly fast when we merge from memory to a
+				 * single tape, and this avoids significant I/O.
+				 *
+				 * Note that there might actually be 2 runs, but only the
+				 * contents of one of them went to tape, and so we can
+				 * safely "pretend" that there is only 1 run (since we're
+				 * about to give up on the idea of the memtuples array being
+				 * a heap).  This means that if our sort happened to require
+				 * random access, the similar "single run" optimization
+				 * below (which sets TSS_SORTEDONTAPE) might not be used at
+				 * all.  This is because dumping all tuples out might have
+				 * forced an otherwise equivalent randomAccess case to
+				 * acknowledge a second run, which we can avoid.
+				 *
+				 * And so, this optimization is more effective than the
+				 * similar 1 run TSS_SORTEDONTAPE optimization below because
+				 * it will be used more frequently.  A further advantage is
+				 * that quicksort is very fast.  Modern CPUs tend to be
+				 * enormously bottlenecked on memory access, which heap sort
+				 * exacerbates, whereas quicksort is a cache-oblivious
+				 * algorithm, and so uses the cache optimally.  The fact
+				 * that the memtuples array has already been heapified is no
+				 * reason to commit to the path of unnecessarily dumping and
+				 * heap-sorting input tuples.
+				 *
+				 * Effectively, our "single" run is "split" into an on-disk
+				 * subrun and a memtuples subrun that stores a significant
+				 * proportion of all tuples to be sorted.  Often, this
+				 * subrun is much larger than the tape subrun, which is
+				 * where the optimization is most effective.
+				 */
+				Assert(!state->randomAccess);
+				markrunend(state, state->currentRun);
+
+				/*
+				 * Note that the quicksort may use abbreviated keys, which
+				 * are no longer available for the tuples that spilled to
+				 * tape.  This is something that the final on-the-fly merge
+				 * step accounts for.
+				 */
+				tuplesort_quicksort(state);
+				state->current = 0;
+
+#ifdef TRACE_SORT
+				if (trace_sort)
+					elog(LOG, "finished quicksort of %d tuples, a large portion of final run: %s",
+						 state->memtupcount,
+						 pg_rusage_show(&state->ru_start));
+#endif
+
+				state->result_tape = state->tp_tapenum[state->destTape];
+				/* Must freeze and rewind the finished output tape */
+				LogicalTapeFreeze(state->tapeset, state->result_tape);
+				state->status = TSS_MEMTAPEMERGE;
+			}
+			else
+			{
+				/*
+				 * Since we produced only one initial run (quite likely if
+				 * the total data volume is between 1X and 2X workMem), we
+				 * can just use that tape as the finished output, rather
+				 * than doing a useless merge.  (This obvious optimization
+				 * is not in Knuth's algorithm.)
+				 */
+				Assert(state->randomAccess);
+				state->result_tape = state->tp_tapenum[state->destTape];
+				/* Must freeze and rewind the finished output tape */
+				LogicalTapeFreeze(state->tapeset, state->result_tape);
+				state->status = TSS_SORTEDONTAPE;
+			}
 			state->eof_reached = false;
 			state->markpos_block = 0L;
 			state->markpos_offset = 0;
@@ -1633,6 +1759,9 @@ tuplesort_performsort(Tuplesortstate *state)
 			elog(LOG, "performsort done (except %d-way final merge): %s",
 				 state->activeTapes,
 				 pg_rusage_show(&state->ru_start));
+		else if (state->status == TSS_MEMTAPEMERGE)
+			elog(LOG, "performsort done (except memory/tape final merge): %s",
+				 pg_rusage_show(&state->ru_start));
 		else
 			elog(LOG, "performsort done: %s",
 				 pg_rusage_show(&state->ru_start));
@@ -1784,6 +1913,138 @@ tuplesort_gettuple_common(Tuplesortstate *state, bool forward,
 			READTUP(state, stup, state->result_tape, tuplen);
 			return true;
 
+		case TSS_MEMTAPEMERGE:
+			Assert(forward);
+			/* For now, assume tuple should not be freed by caller */
+			*should_free = false;
+
+			if (state->eof_reached)
+				return false;
+
+			if (state->just_memtuples)
+				goto just_memtuples;
+
+			/*
+			 * Should be at least one memtuple, since no "all" dump of
+			 * memtuples occurred (avoiding that is a big reason for the
+			 * existence of the TSS_MEMTAPEMERGE case)
+			 */
+			Assert(state->memtupcount > 0);
+
+			/*
+			 * Merge together quicksorted memtuples array, and sorted tape.
+			 *
+			 * When the decision was taken to apply this optimization, the
+			 * array was heapified.  Some number of tuples were spilled to
+			 * disk from the top of the heap, and are read from tape here in
+			 * fully sorted order.  That does not imply that there are no
+			 * tuples in the memtuples array that are less than any tuple
+			 * stored on tape;  that's why this merge step is necessary.
+			 *
+			 * "stup" is always the current tape tuple, which may be cached
+			 * from previous call, or read from tape in this call (and
+			 * cached for next).
+			 *
+			 * Exhaust the supply of tape tuples first.
+			 */
+			if (state->cached)
+			{
+				*stup = state->tape_cache;
+			}
+			else if ((tuplen = getlen(state, state->result_tape, true)) != 0)
+			{
+				READTUP(state, stup, state->result_tape, tuplen);
+				state->tape_cache = *stup;
+			}
+			else
+			{
+				/*
+				 * Initially, we mostly only returned tape tuples, with a
+				 * few interspersed memtuples.  From here on, we just fetch
+				 * from memtuples array.
+				 *
+				 * We may end up here rather quickly, particularly when
+				 * there are proportionally fewer tape tuples, which is
+				 * common.  This is because the tape tuples overall sorted
+				 * order skews very low (they're not guaranteed to be
+				 * strictly lower than any memtuples tuple, but since they
+				 * were written to tape from the top of a then-heapified
+				 * memtuples array, many will be).
+				 */
+				state->just_memtuples = true;
+just_memtuples:
+				*should_free = false;
+
+				/*
+				 * XXX:  If this routine ever supports memory prefetching,
+				 * it ought to happen here too.  Often, the majority of
+				 * tuples will be returned to caller from here, in a manner
+				 * very similar to the TSS_SORTEDINMEM case.
+				 */
+				if (state->current < state->memtupcount)
+				{
+					*stup = state->memtuples[state->current++];
+					return true;
+				}
+				state->eof_reached = true;
+				return false;
+			}
+
+			/*
+			 * Kludge:  Trigger abbreviated tie-breaker if in-memory tuples
+			 * use abbreviation (writing tuples to tape never preserves
+			 * abbreviated keys).  Do this by assigning in-memory
+			 * abbreviated tuple to tape tuple directly.
+			 *
+			 * It doesn't seem worth generating a new abbreviated key for
+			 * the tape tuple, and this approach is simpler than
+			 * "unabbreviating" the memtuple tuple from a "common" routine
+			 * like this.
+			 *
+			 * In the future, this routine could offer an API that allows
+			 * certain clients (like ordered set aggregate callers) to
+			 * cheaply test *inequality* across adjacent pairs of sorted
+			 * tuples on the basis of simple abbreviated key binary
+			 * inequality.  Another advantage of this approach is that this
+			 * can still work without reporting to clients that abbreviation
+			 * wasn't used.  The tape tuples might only be a small minority
+			 * of all tuples returned.
+			 */
+			if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
+				stup->datum1 = state->memtuples[state->current].datum1;
+
+			/*
+			 * Compare a tape tuple to a memtuple.
+			 *
+			 * Since we always start with at least one memtuple, and since tape
+			 * tuples are always returned before equal memtuples, it follows
+			 * that there must be at least one memtuple left to return here.
+			 */
+			Assert(state->current < state->memtupcount);
+
+			if (COMPARETUP(state, stup, &state->memtuples[state->current]) <= 0)
+			{
+				/*
+				 * Tape tuple less than or equal to memtuple array current
+				 * iterator position.  Return it.
+				 */
+				state->cached = false;
+				*should_free = true;
+			}
+			else
+			{
+				/*
+				 * Tape tuple greater than memtuple array's current tuple.
+				 *
+				 * Return current memtuple tuple, and cache tape tuple for
+				 * next call, where it may be returned.
+				 */
+				state->cached = true;
+				*should_free = false;
+				*stup = state->memtuples[state->current++];
+			}
+			return true;
+
 		case TSS_FINALMERGE:
 			Assert(forward);
 			*should_free = true;
@@ -2120,6 +2381,8 @@ inittapes(Tuplesortstate *state)
 	state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
 	state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
 	state->tp_tapenum = (int *) palloc0(maxTapes * sizeof(int));
+	state->cached = false;
+	state->just_memtuples = false;
 
 	/*
 	 * Convert the unsorted contents of memtuples[] into a heap. Each tuple is
@@ -2213,21 +2476,6 @@ mergeruns(Tuplesortstate *state)
 	Assert(state->status == TSS_BUILDRUNS);
 	Assert(state->memtupcount == 0);
 
-	/*
-	 * If we produced only one initial run (quite likely if the total data
-	 * volume is between 1X and 2X workMem), we can just use that tape as the
-	 * finished output, rather than doing a useless merge.  (This obvious
-	 * optimization is not in Knuth's algorithm.)
-	 */
-	if (state->currentRun == 1)
-	{
-		state->result_tape = state->tp_tapenum[state->destTape];
-		/* must freeze and rewind the finished output tape */
-		LogicalTapeFreeze(state->tapeset, state->result_tape);
-		state->status = TSS_SORTEDONTAPE;
-		return;
-	}
-
 	if (state->sortKeys != NULL && state->sortKeys->abbrev_converter != NULL)
 	{
 		/*
@@ -2770,7 +3018,8 @@ void
 tuplesort_get_stats(Tuplesortstate *state,
 					const char **sortMethod,
 					const char **spaceType,
-					long *spaceUsed)
+					long *spaceUsed,
+					int *rowsQuicksorted)
 {
 	/*
 	 * Note: it might seem we should provide both memory and disk usage for a
@@ -2780,6 +3029,10 @@ tuplesort_get_stats(Tuplesortstate *state,
 	 * rely on availMem in a disk sort.  This does not seem worth the overhead
 	 * to fix.  Is it worth creating an API for the memory context code to
 	 * tell us how much is actually used in sortcontext?
+	 *
+	 * XXX:  Revisit this restriction.  TSS_MEMTAPEMERGE may require both
+	 * memory and disk space usage (although since memory used should always
+	 * be work_mem, perhaps not).
 	 */
 	if (state->tapeset)
 	{
@@ -2792,17 +3045,26 @@ tuplesort_get_stats(Tuplesortstate *state,
 		*spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
 	}
 
+	*rowsQuicksorted = 0;
+
 	switch (state->status)
 	{
 		case TSS_SORTEDINMEM:
 			if (state->boundUsed)
 				*sortMethod = "top-N heapsort";
 			else
+			{
 				*sortMethod = "quicksort";
+				*rowsQuicksorted = state->memtupcount;
+			}
 			break;
 		case TSS_SORTEDONTAPE:
 			*sortMethod = "external sort";
 			break;
+		case TSS_MEMTAPEMERGE:
+			*sortMethod = "quicksort with spillover";
+			*rowsQuicksorted = state->memtupcount;
+			break;
 		case TSS_FINALMERGE:
 			*sortMethod = "external merge";
 			break;
diff --git a/src/include/utils/tuplesort.h b/src/include/utils/tuplesort.h
index de6fc56..e5f5501 100644
--- a/src/include/utils/tuplesort.h
+++ b/src/include/utils/tuplesort.h
@@ -109,7 +109,8 @@ extern void tuplesort_end(Tuplesortstate *state);
 extern void tuplesort_get_stats(Tuplesortstate *state,
 					const char **sortMethod,
 					const char **spaceType,
-					long *spaceUsed);
+					long *spaceUsed,
+					int *rowsQuicksorted);
 
 extern int	tuplesort_merge_order(int64 allowedMem);
 
-- 
1.9.1

