From 2a545e9e949ca1daad6f724ceaaae20ce281d5a6 Mon Sep 17 00:00:00 2001
From: Heikki Linnakangas <heikki.linnakangas@iki.fi>
Date: Tue, 12 Jun 2018 15:31:39 +0300
Subject: [PATCH 2/4] Move GROUP BY planning code from planner.c to separate
 file, aggpath.c

---
 src/backend/optimizer/path/Makefile  |    4 +-
 src/backend/optimizer/path/aggpath.c | 1967 ++++++++++++
 src/backend/optimizer/plan/Makefile  |    4 +-
 src/backend/optimizer/plan/planner.c | 5420 +++++++++++-----------------------
 src/include/nodes/relation.h         |   19 +
 src/include/optimizer/paths.h        |   13 +
 src/include/optimizer/planmain.h     |    1 +
 src/include/optimizer/planner.h      |    2 +
 8 files changed, 3741 insertions(+), 3689 deletions(-)
 create mode 100644 src/backend/optimizer/path/aggpath.c

diff --git a/src/backend/optimizer/path/Makefile b/src/backend/optimizer/path/Makefile
index 6864a62132..21216a621d 100644
--- a/src/backend/optimizer/path/Makefile
+++ b/src/backend/optimizer/path/Makefile
@@ -12,7 +12,7 @@ subdir = src/backend/optimizer/path
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = allpaths.o clausesel.o costsize.o equivclass.o indxpath.o \
-       joinpath.o joinrels.o pathkeys.o tidpath.o
+OBJS = aggpath.o allpaths.o clausesel.o costsize.o equivclass.o \
+       indxpath.o joinpath.o joinrels.o pathkeys.o tidpath.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/path/aggpath.c b/src/backend/optimizer/path/aggpath.c
new file mode 100644
index 0000000000..618171b148
--- /dev/null
+++ b/src/backend/optimizer/path/aggpath.c
@@ -0,0 +1,1967 @@
+/*-------------------------------------------------------------------------
+ *
+ * aggpath.c
+ *	  Routines to generate paths for processing GROUP BY and aggregates.
+ *
+ * These routines were originally part of the main planner code in planner.c.
+ *
+ *
+ * Portions Copyright (c) 1996-2018, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ *
+ * IDENTIFICATION
+ *	  src/backend/optimizer/path/aggpath.c
+ *
+ *-------------------------------------------------------------------------
+ */
+#include "postgres.h"
+
+#include <math.h>
+
+#include "access/htup_details.h"
+#include "catalog/pg_aggregate.h"
+#include "catalog/pg_type.h"
+#include "executor/nodeAgg.h"
+#include "foreign/fdwapi.h"
+#include "lib/knapsack.h"
+#include "miscadmin.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "optimizer/clauses.h"
+#include "optimizer/cost.h"
+#include "optimizer/pathnode.h"
+#include "optimizer/paths.h"
+#include "optimizer/planmain.h"
+#include "optimizer/planner.h"
+#include "optimizer/prep.h"
+#include "optimizer/tlist.h"
+#include "optimizer/var.h"
+#include "parser/parsetree.h"
+#include "parser/parse_agg.h"
+#include "parser/parse_clause.h"
+#include "rewrite/rewriteManip.h"
+#include "utils/lsyscache.h"
+#include "utils/selfuncs.h"
+#include "utils/syscache.h"
+
+static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+				  PathTarget *target, bool target_parallel_safe,
+				  Node *havingQual);
+static bool is_degenerate_grouping(PlannerInfo *root);
+static void create_degenerate_grouping_paths(PlannerInfo *root,
+								 RelOptInfo *input_rel,
+								 RelOptInfo *grouped_rel);
+static void create_ordinary_grouping_paths(PlannerInfo *root,
+							   RelOptInfo *input_rel,
+							   RelOptInfo *grouped_rel,
+							   const AggClauseCosts *agg_costs,
+							   grouping_sets_data *gd,
+							   GroupPathExtraData *extra,
+							   RelOptInfo **partially_grouped_rel_p);
+static bool can_partial_agg(PlannerInfo *root,
+				const AggClauseCosts *agg_costs);
+static void create_partitionwise_grouping_paths(PlannerInfo *root,
+									RelOptInfo *input_rel,
+									RelOptInfo *grouped_rel,
+									RelOptInfo *partially_grouped_rel,
+									const AggClauseCosts *agg_costs,
+									grouping_sets_data *gd,
+									PartitionwiseAggregateType patype,
+									GroupPathExtraData *extra);
+static bool group_by_has_partkey(RelOptInfo *input_rel,
+					 List *targetList,
+					 List *groupClause);
+static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
+							  RelOptInfo *grouped_rel,
+							  RelOptInfo *input_rel,
+							  grouping_sets_data *gd,
+							  GroupPathExtraData *extra,
+							  bool force_rel_creation);
+static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
+static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+						  RelOptInfo *grouped_rel,
+						  RelOptInfo *partially_grouped_rel,
+						  const AggClauseCosts *agg_costs,
+						  grouping_sets_data *gd,
+						  double dNumGroups,
+						  GroupPathExtraData *extra);
+static PathTarget *make_partial_grouping_target(PlannerInfo *root,
+							 PathTarget *grouping_target,
+							 Node *havingQual);
+static void consider_groupingsets_paths(PlannerInfo *root,
+							RelOptInfo *grouped_rel,
+							Path *path,
+							bool is_sorted,
+							bool can_hash,
+							grouping_sets_data *gd,
+							const AggClauseCosts *agg_costs,
+							double dNumGroups);
+
+/*
+ * Estimate number of groups produced by grouping clauses (1 if not grouping)
+ *
+ * path_rows: number of output rows from scan/join step
+ * gd: grouping sets data including list of grouping sets and their clauses
+ * target_list: target list containing group clause references
+ *
+ * If doing grouping sets, we also annotate the gsets data with the estimates
+ * for each set and each individual rollup list, with a view to later
+ * determining whether some combination of them could be hashed instead.
+ */
+static double
+get_number_of_groups(PlannerInfo *root,
+					 double path_rows,
+					 grouping_sets_data *gd,
+					 List *target_list)
+{
+	Query	   *parse = root->parse;
+	double		dNumGroups;
+
+	if (parse->groupClause)
+	{
+		List	   *groupExprs;
+
+		if (parse->groupingSets)
+		{
+			/* Add up the estimates for each grouping set */
+			ListCell   *lc;
+			ListCell   *lc2;
+
+			Assert(gd);			/* keep Coverity happy */
+
+			dNumGroups = 0;
+
+			foreach(lc, gd->rollups)
+			{
+				RollupData *rollup = lfirst_node(RollupData, lc);
+				ListCell   *lc;
+
+				groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
+													 target_list);
+
+				rollup->numGroups = 0.0;
+
+				forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
+				{
+					List	   *gset = (List *) lfirst(lc);
+					GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
+					double		numGroups = estimate_num_groups(root,
+																groupExprs,
+																path_rows,
+																&gset);
+
+					gs->numGroups = numGroups;
+					rollup->numGroups += numGroups;
+				}
+
+				dNumGroups += rollup->numGroups;
+			}
+
+			if (gd->hash_sets_idx)
+			{
+				ListCell   *lc;
+
+				gd->dNumHashGroups = 0;
+
+				groupExprs = get_sortgrouplist_exprs(parse->groupClause,
+													 target_list);
+
+				forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
+				{
+					List	   *gset = (List *) lfirst(lc);
+					GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
+					double		numGroups = estimate_num_groups(root,
+																groupExprs,
+																path_rows,
+																&gset);
+
+					gs->numGroups = numGroups;
+					gd->dNumHashGroups += numGroups;
+				}
+
+				dNumGroups += gd->dNumHashGroups;
+			}
+		}
+		else
+		{
+			/* Plain GROUP BY */
+			groupExprs = get_sortgrouplist_exprs(parse->groupClause,
+												 target_list);
+
+			dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
+											 NULL);
+		}
+	}
+	else if (parse->groupingSets)
+	{
+		/* Empty grouping sets ... one result row for each one */
+		dNumGroups = list_length(parse->groupingSets);
+	}
+	else if (parse->hasAggs || root->hasHavingQual)
+	{
+		/* Plain aggregation, one result row */
+		dNumGroups = 1;
+	}
+	else
+	{
+		/* Not grouping */
+		dNumGroups = 1;
+	}
+
+	return dNumGroups;
+}
+
+/*
+ * estimate_hashagg_tablesize
+ *	  estimate the number of bytes that a hash aggregate hashtable will
+ *	  require based on the agg_costs, path width and dNumGroups.
+ *
+ * XXX this may be over-estimating the size now that hashagg knows to omit
+ * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
+ * grouping columns not in the hashed set are counted here even though hashagg
+ * won't store them. Is this a problem?
+ */
+static Size
+estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
+						   double dNumGroups)
+{
+	Size		hashentrysize;
+
+	/* Estimate per-hash-entry space at tuple width... */
+	hashentrysize = MAXALIGN(path->pathtarget->width) +
+		MAXALIGN(SizeofMinimalTupleHeader);
+
+	/* plus space for pass-by-ref transition values... */
+	hashentrysize += agg_costs->transitionSpace;
+	/* plus the per-hash-entry overhead */
+	hashentrysize += hash_agg_entry_size(agg_costs->numAggs);
+
+	/*
+	 * Note that this disregards the effect of fill-factor and growth policy
+	 * of the hash-table. That's probably ok, given that the default
+	 * fill-factor is relatively high. It'd be hard to meaningfully factor in
+	 * "double-in-size" growth policies here.
+	 */
+	return hashentrysize * dNumGroups;
+}
+
+/*
+ * create_grouping_paths
+ *
+ * Build a new upperrel containing Paths for grouping and/or aggregation.
+ * Along the way, we also build an upperrel for Paths which are partially
+ * grouped and/or aggregated.  A partially grouped and/or aggregated path
+ * needs a FinalizeAggregate node to complete the aggregation.  Currently,
+ * the only partially grouped paths we build are also partial paths; that
+ * is, they need a Gather and then a FinalizeAggregate.
+ *
+ * input_rel: contains the source-data Paths
+ * target: the pathtarget for the result Paths to compute
+ * agg_costs: cost info about all aggregates in query (in AGGSPLIT_SIMPLE mode)
+ * gd: grouping sets data including list of grouping sets and their clauses
+ *
+ * Note: all Paths in input_rel are expected to return the target computed
+ * by make_group_input_target.
+ */
+RelOptInfo *
+create_grouping_paths(PlannerInfo *root,
+					  RelOptInfo *input_rel,
+					  PathTarget *target,
+					  bool target_parallel_safe,
+					  const AggClauseCosts *agg_costs,
+					  grouping_sets_data *gd)
+{
+	Query	   *parse = root->parse;
+	RelOptInfo *grouped_rel;
+	RelOptInfo *partially_grouped_rel;
+
+	/*
+	 * Create grouping relation to hold fully aggregated grouping and/or
+	 * aggregation paths.
+	 */
+	grouped_rel = make_grouping_rel(root, input_rel, target,
+									target_parallel_safe, parse->havingQual);
+
+	/*
+	 * Create either paths for a degenerate grouping or paths for ordinary
+	 * grouping, as appropriate.
+	 */
+	if (is_degenerate_grouping(root))
+		create_degenerate_grouping_paths(root, input_rel, grouped_rel);
+	else
+	{
+		int			flags = 0;
+		GroupPathExtraData extra;
+
+		/*
+		 * Determine whether it's possible to perform sort-based
+		 * implementations of grouping.  (Note that if groupClause is empty,
+		 * grouping_is_sortable() is trivially true, and all the
+		 * pathkeys_contained_in() tests will succeed too, so that we'll
+		 * consider every surviving input path.)
+		 *
+		 * If we have grouping sets, we might be able to sort some but not all
+		 * of them; in this case, we need can_sort to be true as long as we
+		 * must consider any sorted-input plan.
+		 */
+		if ((gd && gd->rollups != NIL)
+			|| grouping_is_sortable(parse->groupClause))
+			flags |= GROUPING_CAN_USE_SORT;
+
+		/*
+		 * Determine whether we should consider hash-based implementations of
+		 * grouping.
+		 *
+		 * Hashed aggregation only applies if we're grouping. If we have
+		 * grouping sets, some groups might be hashable but others not; in
+		 * this case we set can_hash true as long as there is nothing globally
+		 * preventing us from hashing (and we should therefore consider plans
+		 * with hashes).
+		 *
+		 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
+		 * BY aggregates.  (Doing so would imply storing *all* the input
+		 * values in the hash table, and/or running many sorts in parallel,
+		 * either of which seems like a certain loser.)  We similarly don't
+		 * support ordered-set aggregates in hashed aggregation, but that case
+		 * is also included in the numOrderedAggs count.
+		 *
+		 * Note: grouping_is_hashable() is much more expensive to check than
+		 * the other gating conditions, so we want to do it last.
+		 */
+		if ((parse->groupClause != NIL &&
+			 agg_costs->numOrderedAggs == 0 &&
+			 (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
+			flags |= GROUPING_CAN_USE_HASH;
+
+		/*
+		 * Determine whether partial aggregation is possible.
+		 */
+		if (can_partial_agg(root, agg_costs))
+			flags |= GROUPING_CAN_PARTIAL_AGG;
+
+		extra.flags = flags;
+		extra.target_parallel_safe = target_parallel_safe;
+		extra.havingQual = parse->havingQual;
+		extra.targetList = parse->targetList;
+		extra.partial_costs_set = false;
+
+		/*
+		 * Determine whether partitionwise aggregation is in theory possible.
+		 * It can be disabled by the user, and for now, we don't try to
+		 * support grouping sets.  create_ordinary_grouping_paths() will check
+		 * additional conditions, such as whether input_rel is partitioned.
+		 */
+		if (enable_partitionwise_aggregate && !parse->groupingSets)
+			extra.patype = PARTITIONWISE_AGGREGATE_FULL;
+		else
+			extra.patype = PARTITIONWISE_AGGREGATE_NONE;
+
+		create_ordinary_grouping_paths(root, input_rel, grouped_rel,
+									   agg_costs, gd, &extra,
+									   &partially_grouped_rel);
+	}
+
+	set_cheapest(grouped_rel);
+	return grouped_rel;
+}
+
+/*
+ * make_grouping_rel
+ *
+ * Create a new grouping rel and set basic properties.
+ *
+ * input_rel represents the underlying scan/join relation.
+ * target is the output expected from the grouping relation.
+ */
+static RelOptInfo *
+make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+				  PathTarget *target, bool target_parallel_safe,
+				  Node *havingQual)
+{
+	RelOptInfo *grouped_rel;
+
+	if (IS_OTHER_REL(input_rel))
+	{
+		grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
+									  input_rel->relids);
+		grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
+	}
+	else
+	{
+		/*
+		 * By tradition, the relids set for the main grouping relation is
+		 * NULL.  (This could be changed, but might require adjustments
+		 * elsewhere.)
+		 */
+		grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
+	}
+
+	/* Set target. */
+	grouped_rel->reltarget = target;
+
+	/*
+	 * If the input relation is not parallel-safe, then the grouped relation
+	 * can't be parallel-safe, either.  Otherwise, it's parallel-safe if the
+	 * target list and HAVING quals are parallel-safe.
+	 */
+	if (input_rel->consider_parallel && target_parallel_safe &&
+		is_parallel_safe(root, (Node *) havingQual))
+		grouped_rel->consider_parallel = true;
+
+	/*
+	 * If the input rel belongs to a single FDW, so does the grouped rel.
+	 */
+	grouped_rel->serverid = input_rel->serverid;
+	grouped_rel->userid = input_rel->userid;
+	grouped_rel->useridiscurrent = input_rel->useridiscurrent;
+	grouped_rel->fdwroutine = input_rel->fdwroutine;
+
+	return grouped_rel;
+}
+
+/*
+ * is_degenerate_grouping
+ *
+ * A degenerate grouping is one in which the query has a HAVING qual and/or
+ * grouping sets, but no aggregates and no GROUP BY (which implies that the
+ * grouping sets are all empty).
+ */
+static bool
+is_degenerate_grouping(PlannerInfo *root)
+{
+	Query	   *parse = root->parse;
+
+	return (root->hasHavingQual || parse->groupingSets) &&
+		!parse->hasAggs && parse->groupClause == NIL;
+}
+
+/*
+ * create_degenerate_grouping_paths
+ *
+ * When the grouping is degenerate (see is_degenerate_grouping), we are
+ * supposed to emit either zero or one row for each grouping set depending on
+ * whether HAVING succeeds.  Furthermore, there cannot be any variables in
+ * either HAVING or the targetlist, so we actually do not need the FROM table
+ * at all! We can just throw away the plan-so-far and generate a Result node.
+ * This is a sufficiently unusual corner case that it's not worth contorting
+ * the structure of this module to avoid having to generate the earlier paths
+ * in the first place.
+ */
+static void
+create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
+								 RelOptInfo *grouped_rel)
+{
+	Query	   *parse = root->parse;
+	int			nrows;
+	Path	   *path;
+
+	nrows = list_length(parse->groupingSets);
+	if (nrows > 1)
+	{
+		/*
+		 * Doesn't seem worthwhile writing code to cons up a generate_series
+		 * or a values scan to emit multiple rows. Instead just make N clones
+		 * and append them.  (With a volatile HAVING clause, this means you
+		 * might get between 0 and N output rows. Offhand I think that's
+		 * desired.)
+		 */
+		List	   *paths = NIL;
+
+		while (--nrows >= 0)
+		{
+			path = (Path *)
+				create_result_path(root, grouped_rel,
+								   grouped_rel->reltarget,
+								   (List *) parse->havingQual);
+			paths = lappend(paths, path);
+		}
+		path = (Path *)
+			create_append_path(root,
+							   grouped_rel,
+							   paths,
+							   NIL,
+							   NULL,
+							   0,
+							   false,
+							   NIL,
+							   -1);
+	}
+	else
+	{
+		/* No grouping sets, or just one, so one output row */
+		path = (Path *)
+			create_result_path(root, grouped_rel,
+							   grouped_rel->reltarget,
+							   (List *) parse->havingQual);
+	}
+
+	add_path(grouped_rel, path);
+}
+
+/*
+ * create_ordinary_grouping_paths
+ *
+ * Create grouping paths for the ordinary (that is, non-degenerate) case.
+ *
+ * We need to consider sorted and hashed aggregation in the same function,
+ * because otherwise (1) it would be harder to throw an appropriate error
+ * message if neither way works, and (2) we should not allow hashtable size
+ * considerations to dissuade us from using hashing if sorting is not possible.
+ *
+ * *partially_grouped_rel_p will be set to the partially grouped rel which this
+ * function creates, or to NULL if it doesn't create one.
+ */
+static void
+create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
+							   RelOptInfo *grouped_rel,
+							   const AggClauseCosts *agg_costs,
+							   grouping_sets_data *gd,
+							   GroupPathExtraData *extra,
+							   RelOptInfo **partially_grouped_rel_p)
+{
+	Path	   *cheapest_path = input_rel->cheapest_total_path;
+	RelOptInfo *partially_grouped_rel = NULL;
+	double		dNumGroups;
+	PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
+
+	/*
+	 * If this is the topmost grouping relation or if the parent relation is
+	 * doing some form of partitionwise aggregation, then we may be able to do
+	 * it at this level also.  However, if the input relation is not
+	 * partitioned, partitionwise aggregate is impossible, and if it is dummy,
+	 * partitionwise aggregate is pointless.
+	 */
+	if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
+		input_rel->part_scheme && input_rel->part_rels &&
+		!IS_DUMMY_REL(input_rel))
+	{
+		/*
+		 * If this is the topmost relation or if the parent relation is doing
+		 * full partitionwise aggregation, then we can do full partitionwise
+		 * aggregation provided that the GROUP BY clause contains all of the
+		 * partitioning columns at this level.  Otherwise, we can do at most
+		 * partial partitionwise aggregation.  But if partial aggregation is
+		 * not supported in general then we can't use it for partitionwise
+		 * aggregation either.
+		 */
+		if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
+			group_by_has_partkey(input_rel, extra->targetList,
+								 root->parse->groupClause))
+			patype = PARTITIONWISE_AGGREGATE_FULL;
+		else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
+			patype = PARTITIONWISE_AGGREGATE_PARTIAL;
+		else
+			patype = PARTITIONWISE_AGGREGATE_NONE;
+	}
+
+	/*
+	 * Before generating paths for grouped_rel, we first generate any possible
+	 * partially grouped paths; that way, later code can easily consider both
+	 * parallel and non-parallel approaches to grouping.
+	 */
+	if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
+	{
+		bool		force_rel_creation;
+
+		/*
+		 * If we're doing partitionwise aggregation at this level, force
+		 * creation of a partially_grouped_rel so we can add partitionwise
+		 * paths to it.
+		 */
+		force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
+
+		partially_grouped_rel =
+			create_partial_grouping_paths(root,
+										  grouped_rel,
+										  input_rel,
+										  gd,
+										  extra,
+										  force_rel_creation);
+	}
+
+	/* Set out parameter. */
+	*partially_grouped_rel_p = partially_grouped_rel;
+
+	/* Apply partitionwise aggregation technique, if possible. */
+	if (patype != PARTITIONWISE_AGGREGATE_NONE)
+		create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
+											partially_grouped_rel, agg_costs,
+											gd, patype, extra);
+
+	/* If we are doing partial aggregation only, return. */
+	if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
+	{
+		Assert(partially_grouped_rel);
+
+		if (partially_grouped_rel->pathlist)
+			set_cheapest(partially_grouped_rel);
+
+		return;
+	}
+
+	/* Gather any partially grouped partial paths. */
+	if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
+	{
+		gather_grouping_paths(root, partially_grouped_rel);
+		set_cheapest(partially_grouped_rel);
+	}
+
+	/*
+	 * Estimate number of groups.
+	 */
+	dNumGroups = get_number_of_groups(root,
+									  cheapest_path->rows,
+									  gd,
+									  extra->targetList);
+
+	/* Build final grouping paths */
+	add_paths_to_grouping_rel(root, input_rel, grouped_rel,
+							  partially_grouped_rel, agg_costs, gd,
+							  dNumGroups, extra);
+
+	/* Give a helpful error if we failed to find any implementation */
+	if (grouped_rel->pathlist == NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("could not implement GROUP BY"),
+				 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+
+	/*
+	 * If there is an FDW that's responsible for all baserels of the query,
+	 * let it consider adding ForeignPaths.
+	 */
+	if (grouped_rel->fdwroutine &&
+		grouped_rel->fdwroutine->GetForeignUpperPaths)
+		grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
+													  input_rel, grouped_rel,
+													  extra);
+
+	/* Let extensions possibly add some more paths */
+	if (create_upper_paths_hook)
+		(*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
+									input_rel, grouped_rel,
+									extra);
+}
+
+/*
+ * add_paths_to_grouping_rel
+ *
+ * Add non-partial paths to grouping relation.
+ */
+static void
+add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
+						  RelOptInfo *grouped_rel,
+						  RelOptInfo *partially_grouped_rel,
+						  const AggClauseCosts *agg_costs,
+						  grouping_sets_data *gd, double dNumGroups,
+						  GroupPathExtraData *extra)
+{
+	Query	   *parse = root->parse;
+	Path	   *cheapest_path = input_rel->cheapest_total_path;
+	ListCell   *lc;
+	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
+	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
+	List	   *havingQual = (List *) extra->havingQual;
+	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
+
+	if (can_sort)
+	{
+		/*
+		 * Use any available suitably-sorted path as input, and also consider
+		 * sorting the cheapest-total path.
+		 */
+		foreach(lc, input_rel->pathlist)
+		{
+			Path	   *path = (Path *) lfirst(lc);
+			bool		is_sorted;
+
+			is_sorted = pathkeys_contained_in(root->group_pathkeys,
+											  path->pathkeys);
+			if (path == cheapest_path || is_sorted)
+			{
+				/* Sort the cheapest-total path if it isn't already sorted */
+				if (!is_sorted)
+					path = (Path *) create_sort_path(root,
+													 grouped_rel,
+													 path,
+													 root->group_pathkeys,
+													 -1.0);
+
+				/* Now decide what to stick atop it */
+				if (parse->groupingSets)
+				{
+					consider_groupingsets_paths(root, grouped_rel,
+												path, true, can_hash,
+												gd, agg_costs, dNumGroups);
+				}
+				else if (parse->hasAggs)
+				{
+					/*
+					 * We have aggregation, possibly with plain GROUP BY. Make
+					 * an AggPath.
+					 */
+					add_path(grouped_rel, (Path *)
+							 create_agg_path(root,
+											 grouped_rel,
+											 path,
+											 grouped_rel->reltarget,
+											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+											 AGGSPLIT_SIMPLE,
+											 parse->groupClause,
+											 havingQual,
+											 agg_costs,
+											 dNumGroups));
+				}
+				else if (parse->groupClause)
+				{
+					/*
+					 * We have GROUP BY without aggregation or grouping sets.
+					 * Make a GroupPath.
+					 */
+					add_path(grouped_rel, (Path *)
+							 create_group_path(root,
+											   grouped_rel,
+											   path,
+											   parse->groupClause,
+											   havingQual,
+											   dNumGroups));
+				}
+				else
+				{
+					/* Other cases should have been handled above */
+					Assert(false);
+				}
+			}
+		}
+
+		/*
+		 * Instead of operating directly on the input relation, we can
+		 * consider finalizing a partially aggregated path.
+		 */
+		if (partially_grouped_rel != NULL)
+		{
+			foreach(lc, partially_grouped_rel->pathlist)
+			{
+				Path	   *path = (Path *) lfirst(lc);
+
+				/*
+				 * Insert a Sort node, if required.  But there's no point in
+				 * sorting anything but the cheapest path.
+				 */
+				if (!pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
+				{
+					if (path != partially_grouped_rel->cheapest_total_path)
+						continue;
+					path = (Path *) create_sort_path(root,
+													 grouped_rel,
+													 path,
+													 root->group_pathkeys,
+													 -1.0);
+				}
+
+				if (parse->hasAggs)
+					add_path(grouped_rel, (Path *)
+							 create_agg_path(root,
+											 grouped_rel,
+											 path,
+											 grouped_rel->reltarget,
+											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+											 AGGSPLIT_FINAL_DESERIAL,
+											 parse->groupClause,
+											 havingQual,
+											 agg_final_costs,
+											 dNumGroups));
+				else
+					add_path(grouped_rel, (Path *)
+							 create_group_path(root,
+											   grouped_rel,
+											   path,
+											   parse->groupClause,
+											   havingQual,
+											   dNumGroups));
+			}
+		}
+	}
+
+	if (can_hash)
+	{
+		Size		hashaggtablesize;
+
+		if (parse->groupingSets)
+		{
+			/*
+			 * Try for a hash-only groupingsets path over unsorted input.
+			 */
+			consider_groupingsets_paths(root, grouped_rel,
+										cheapest_path, false, true,
+										gd, agg_costs, dNumGroups);
+		}
+		else
+		{
+			hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
+														  agg_costs,
+														  dNumGroups);
+
+			/*
+			 * Provided that the estimated size of the hashtable does not
+			 * exceed work_mem, we'll generate a HashAgg Path, although if we
+			 * were unable to sort above, then we'd better generate a Path, so
+			 * that we at least have one.
+			 */
+			if (hashaggtablesize < work_mem * 1024L ||
+				grouped_rel->pathlist == NIL)
+			{
+				/*
+				 * We just need an Agg over the cheapest-total input path,
+				 * since input order won't matter.
+				 */
+				add_path(grouped_rel, (Path *)
+						 create_agg_path(root, grouped_rel,
+										 cheapest_path,
+										 grouped_rel->reltarget,
+										 AGG_HASHED,
+										 AGGSPLIT_SIMPLE,
+										 parse->groupClause,
+										 havingQual,
+										 agg_costs,
+										 dNumGroups));
+			}
+		}
+
+		/*
+		 * Generate a Finalize HashAgg Path atop of the cheapest partially
+		 * grouped path, assuming there is one. Once again, we'll only do this
+		 * if it looks as though the hash table won't exceed work_mem.
+		 */
+		if (partially_grouped_rel && partially_grouped_rel->pathlist)
+		{
+			Path	   *path = partially_grouped_rel->cheapest_total_path;
+
+			hashaggtablesize = estimate_hashagg_tablesize(path,
+														  agg_final_costs,
+														  dNumGroups);
+
+			if (hashaggtablesize < work_mem * 1024L)
+				add_path(grouped_rel, (Path *)
+						 create_agg_path(root,
+										 grouped_rel,
+										 path,
+										 grouped_rel->reltarget,
+										 AGG_HASHED,
+										 AGGSPLIT_FINAL_DESERIAL,
+										 parse->groupClause,
+										 havingQual,
+										 agg_final_costs,
+										 dNumGroups));
+		}
+	}
+
+	/*
+	 * When partitionwise aggregate is used, we might have fully aggregated
+	 * paths in the partial pathlist, because add_paths_to_append_rel() will
+	 * consider a path for grouped_rel consisting of a Parallel Append of
+	 * non-partial paths from each child.
+	 */
+	if (grouped_rel->partial_pathlist != NIL)
+		gather_grouping_paths(root, grouped_rel);
+}
+
+/*
+ * can_partial_agg
+ *
+ * Determines whether or not partial grouping and/or aggregation is possible.
+ * Returns true when possible, false otherwise.
+ */
+static bool
+can_partial_agg(PlannerInfo *root, const AggClauseCosts *agg_costs)
+{
+	Query	   *parse = root->parse;
+
+	if (!parse->hasAggs && parse->groupClause == NIL)
+	{
+		/*
+		 * We don't know how to do parallel aggregation unless we have either
+		 * some aggregates or a grouping clause.
+		 */
+		return false;
+	}
+	else if (parse->groupingSets)
+	{
+		/* We don't know how to do grouping sets in parallel. */
+		return false;
+	}
+	else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
+	{
+		/* Insufficient support for partial mode. */
+		return false;
+	}
+
+	/* Everything looks good. */
+	return true;
+}
+
+
+/*
+ * make_partial_grouping_target
+ *	  Generate appropriate PathTarget for output of partial aggregate
+ *	  (or partial grouping, if there are no aggregates) nodes.
+ *
+ * A partial aggregation node needs to emit all the same aggregates that
+ * a regular aggregation node would, plus any aggregates used in HAVING;
+ * except that the Aggref nodes should be marked as partial aggregates.
+ *
+ * In addition, we'd better emit any Vars and PlaceholderVars that are
+ * used outside of Aggrefs in the aggregation tlist and HAVING.  (Presumably,
+ * these would be Vars that are grouped by or used in grouping expressions.)
+ *
+ * grouping_target is the tlist to be emitted by the topmost aggregation step.
+ * havingQual represents the HAVING clause.
+ */
+static PathTarget *
+make_partial_grouping_target(PlannerInfo *root,
+							 PathTarget *grouping_target,
+							 Node *havingQual)
+{
+	Query	   *parse = root->parse;
+	PathTarget *partial_target;
+	List	   *non_group_cols;
+	List	   *non_group_exprs;
+	int			i;
+	ListCell   *lc;
+
+	partial_target = create_empty_pathtarget();
+	non_group_cols = NIL;
+
+	/*
+	 * Scan the grouping target: grouping columns are copied to the partial
+	 * target as-is, while everything else is collected for the
+	 * pull_var_clause pass below.
+	 */
+	i = 0;
+	foreach(lc, grouping_target->exprs)
+	{
+		Expr	   *expr = (Expr *) lfirst(lc);
+		Index		sgref = get_pathtarget_sortgroupref(grouping_target, i);
+
+		if (sgref && parse->groupClause &&
+			get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
+		{
+			/*
+			 * It's a grouping column, so add it to the partial_target as-is.
+			 * (This allows the upper agg step to repeat the grouping calcs.)
+			 */
+			add_column_to_pathtarget(partial_target, expr, sgref);
+		}
+		else
+		{
+			/*
+			 * Non-grouping column, so just remember the expression for later
+			 * call to pull_var_clause.
+			 */
+			non_group_cols = lappend(non_group_cols, expr);
+		}
+
+		i++;
+	}
+
+	/*
+	 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
+	 */
+	if (havingQual)
+		non_group_cols = lappend(non_group_cols, havingQual);
+
+	/*
+	 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
+	 * non-group cols (plus HAVING), and add them to the partial_target if not
+	 * already present.  (An expression used directly as a GROUP BY item will
+	 * be present already.)  Note this includes Vars used in resjunk items, so
+	 * we are covering the needs of ORDER BY and window specifications.
+	 */
+	non_group_exprs = pull_var_clause((Node *) non_group_cols,
+									  PVC_INCLUDE_AGGREGATES |
+									  PVC_RECURSE_WINDOWFUNCS |
+									  PVC_INCLUDE_PLACEHOLDERS);
+
+	add_new_columns_to_pathtarget(partial_target, non_group_exprs);
+
+	/*
+	 * Adjust Aggrefs to put them in partial mode.  At this point all Aggrefs
+	 * are at the top level of the target list, so we can just scan the list
+	 * rather than recursing through the expression trees.
+	 */
+	foreach(lc, partial_target->exprs)
+	{
+		Aggref	   *aggref = (Aggref *) lfirst(lc);
+
+		/* Non-Aggref entries (grouping columns, Vars, PHVs) need no change */
+		if (IsA(aggref, Aggref))
+		{
+			Aggref	   *newaggref;
+
+			/*
+			 * We shouldn't need to copy the substructure of the Aggref node,
+			 * but flat-copy the node itself to avoid damaging other trees.
+			 */
+			newaggref = makeNode(Aggref);
+			memcpy(newaggref, aggref, sizeof(Aggref));
+
+			/* For now, assume serialization is required */
+			mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
+
+			lfirst(lc) = newaggref;
+		}
+	}
+
+	/* clean up cruft */
+	list_free(non_group_exprs);
+	list_free(non_group_cols);
+
+	/* XXX this causes some redundant cost calculation ... */
+	return set_pathtarget_cost_width(root, partial_target);
+}
+
+/*
+ * create_partial_grouping_paths
+ *
+ * Create a new upper relation representing the result of partial aggregation
+ * and populate it with appropriate paths.  Note that we don't finalize the
+ * lists of paths here, so the caller can add additional partial or non-partial
+ * paths and must afterward call gather_grouping_paths and set_cheapest on
+ * the returned upper relation.
+ *
+ * All paths for this new upper relation -- both partial and non-partial --
+ * have been partially aggregated but require a subsequent FinalizeAggregate
+ * step.
+ *
+ * NB: This function is allowed to return NULL if it determines that there is
+ * no real need to create a new RelOptInfo.
+ */
+static RelOptInfo *
+create_partial_grouping_paths(PlannerInfo *root,
+							  RelOptInfo *grouped_rel,
+							  RelOptInfo *input_rel,
+							  grouping_sets_data *gd,
+							  GroupPathExtraData *extra,
+							  bool force_rel_creation)
+{
+	Query	   *parse = root->parse;
+	RelOptInfo *partially_grouped_rel;
+	AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
+	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
+	Path	   *cheapest_partial_path = NULL;
+	Path	   *cheapest_total_path = NULL;
+	double		dNumPartialGroups = 0;
+	double		dNumPartialPartialGroups = 0;
+	ListCell   *lc;
+	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
+	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
+
+	/*
+	 * Consider whether we should generate partially aggregated non-partial
+	 * paths.  We can only do this if we have a non-partial path, and only if
+	 * the parent of the input rel is performing partial partitionwise
+	 * aggregation.  (Note that extra->patype is the type of partitionwise
+	 * aggregation being used at the parent level, not this level.)
+	 */
+	if (input_rel->pathlist != NIL &&
+		extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
+		cheapest_total_path = input_rel->cheapest_total_path;
+
+	/*
+	 * If parallelism is possible for grouped_rel, then we should consider
+	 * generating partially-grouped partial paths.  However, if the input rel
+	 * has no partial paths, then we can't.
+	 */
+	if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
+		cheapest_partial_path = linitial(input_rel->partial_pathlist);
+
+	/*
+	 * If we can't partially aggregate partial paths, and we can't partially
+	 * aggregate non-partial paths, then don't bother creating the new
+	 * RelOptInfo at all, unless the caller specified force_rel_creation.
+	 */
+	if (cheapest_total_path == NULL &&
+		cheapest_partial_path == NULL &&
+		!force_rel_creation)
+		return NULL;
+
+	/*
+	 * Build a new upper relation to represent the result of partially
+	 * aggregating the rows from the input relation.
+	 */
+	partially_grouped_rel = fetch_upper_rel(root,
+											UPPERREL_PARTIAL_GROUP_AGG,
+											grouped_rel->relids);
+	partially_grouped_rel->consider_parallel =
+		grouped_rel->consider_parallel;
+	partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
+	partially_grouped_rel->serverid = grouped_rel->serverid;
+	partially_grouped_rel->userid = grouped_rel->userid;
+	partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
+	partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
+
+	/*
+	 * Build target list for partial aggregate paths.  These paths cannot just
+	 * emit the same tlist as regular aggregate paths, because (1) we must
+	 * include Vars and Aggrefs needed in HAVING, which might not appear in
+	 * the result tlist, and (2) the Aggrefs must be set in partial mode.
+	 */
+	partially_grouped_rel->reltarget =
+		make_partial_grouping_target(root, grouped_rel->reltarget,
+									 extra->havingQual);
+
+	if (!extra->partial_costs_set)
+	{
+		/*
+		 * Collect statistics about aggregates for estimating costs of
+		 * performing aggregation in parallel.
+		 */
+		MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
+		MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
+		if (parse->hasAggs)
+		{
+			List	   *partial_target_exprs;
+
+			/* partial phase */
+			partial_target_exprs = partially_grouped_rel->reltarget->exprs;
+			get_agg_clause_costs(root, (Node *) partial_target_exprs,
+								 AGGSPLIT_INITIAL_SERIAL,
+								 agg_partial_costs);
+
+			/* final phase */
+			get_agg_clause_costs(root, (Node *) grouped_rel->reltarget->exprs,
+								 AGGSPLIT_FINAL_DESERIAL,
+								 agg_final_costs);
+			get_agg_clause_costs(root, extra->havingQual,
+								 AGGSPLIT_FINAL_DESERIAL,
+								 agg_final_costs);
+		}
+
+		extra->partial_costs_set = true;
+	}
+
+	/* Estimate number of partial groups. */
+	if (cheapest_total_path != NULL)
+		dNumPartialGroups =
+			get_number_of_groups(root,
+								 cheapest_total_path->rows,
+								 gd,
+								 extra->targetList);
+	if (cheapest_partial_path != NULL)
+		dNumPartialPartialGroups =
+			get_number_of_groups(root,
+								 cheapest_partial_path->rows,
+								 gd,
+								 extra->targetList);
+
+	if (can_sort && cheapest_total_path != NULL)
+	{
+		/* This should have been checked previously */
+		Assert(parse->hasAggs || parse->groupClause);
+
+		/*
+		 * Use any available suitably-sorted path as input, and also consider
+		 * sorting the cheapest partial path.
+		 */
+		foreach(lc, input_rel->pathlist)
+		{
+			Path	   *path = (Path *) lfirst(lc);
+			bool		is_sorted;
+
+			is_sorted = pathkeys_contained_in(root->group_pathkeys,
+											  path->pathkeys);
+			if (path == cheapest_total_path || is_sorted)
+			{
+				/* Sort the path, if it isn't suitably sorted already */
+				if (!is_sorted)
+					path = (Path *) create_sort_path(root,
+													 partially_grouped_rel,
+													 path,
+													 root->group_pathkeys,
+													 -1.0);
+
+				if (parse->hasAggs)
+					add_path(partially_grouped_rel, (Path *)
+							 create_agg_path(root,
+											 partially_grouped_rel,
+											 path,
+											 partially_grouped_rel->reltarget,
+											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+											 AGGSPLIT_INITIAL_SERIAL,
+											 parse->groupClause,
+											 NIL,
+											 agg_partial_costs,
+											 dNumPartialGroups));
+				else
+					add_path(partially_grouped_rel, (Path *)
+							 create_group_path(root,
+											   partially_grouped_rel,
+											   path,
+											   parse->groupClause,
+											   NIL,
+											   dNumPartialGroups));
+			}
+		}
+	}
+
+	if (can_sort && cheapest_partial_path != NULL)
+	{
+		/* Similar to above logic, but for partial paths. */
+		foreach(lc, input_rel->partial_pathlist)
+		{
+			Path	   *path = (Path *) lfirst(lc);
+			bool		is_sorted;
+
+			is_sorted = pathkeys_contained_in(root->group_pathkeys,
+											  path->pathkeys);
+			if (path == cheapest_partial_path || is_sorted)
+			{
+				/* Sort the cheapest partial path, if it isn't already */
+				if (!is_sorted)
+					path = (Path *) create_sort_path(root,
+													 partially_grouped_rel,
+													 path,
+													 root->group_pathkeys,
+													 -1.0);
+
+				if (parse->hasAggs)
+					add_partial_path(partially_grouped_rel, (Path *)
+									 create_agg_path(root,
+													 partially_grouped_rel,
+													 path,
+													 partially_grouped_rel->reltarget,
+													 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
+													 AGGSPLIT_INITIAL_SERIAL,
+													 parse->groupClause,
+													 NIL,
+													 agg_partial_costs,
+													 dNumPartialPartialGroups));
+				else
+					add_partial_path(partially_grouped_rel, (Path *)
+									 create_group_path(root,
+													   partially_grouped_rel,
+													   path,
+													   parse->groupClause,
+													   NIL,
+													   dNumPartialPartialGroups));
+			}
+		}
+	}
+
+	if (can_hash && cheapest_total_path != NULL)
+	{
+		Size		hashaggtablesize;
+
+		/* Checked above */
+		Assert(parse->hasAggs || parse->groupClause);
+
+		hashaggtablesize =
+			estimate_hashagg_tablesize(cheapest_total_path,
+									   agg_partial_costs,
+									   dNumPartialGroups);
+
+		/*
+		 * Tentatively produce a partial HashAgg Path, depending on if it
+		 * looks as if the hash table will fit in work_mem.
+		 */
+		if (hashaggtablesize < work_mem * 1024L)
+		{
+			add_path(partially_grouped_rel, (Path *)
+					 create_agg_path(root,
+									 partially_grouped_rel,
+									 cheapest_total_path,
+									 partially_grouped_rel->reltarget,
+									 AGG_HASHED,
+									 AGGSPLIT_INITIAL_SERIAL,
+									 parse->groupClause,
+									 NIL,
+									 agg_partial_costs,
+									 dNumPartialGroups));
+		}
+	}
+
+	if (can_hash && cheapest_partial_path != NULL)
+	{
+		Size		hashaggtablesize;
+
+		hashaggtablesize =
+			estimate_hashagg_tablesize(cheapest_partial_path,
+									   agg_partial_costs,
+									   dNumPartialPartialGroups);
+
+		/* Do the same for partial paths. */
+		if (hashaggtablesize < work_mem * 1024L)
+		{
+			add_partial_path(partially_grouped_rel, (Path *)
+							 create_agg_path(root,
+											 partially_grouped_rel,
+											 cheapest_partial_path,
+											 partially_grouped_rel->reltarget,
+											 AGG_HASHED,
+											 AGGSPLIT_INITIAL_SERIAL,
+											 parse->groupClause,
+											 NIL,
+											 agg_partial_costs,
+											 dNumPartialPartialGroups));
+		}
+	}
+
+	/*
+	 * If there is an FDW that's responsible for all baserels of the query,
+	 * let it consider adding partially grouped ForeignPaths.
+	 */
+	if (partially_grouped_rel->fdwroutine &&
+		partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
+	{
+		FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
+
+		fdwroutine->GetForeignUpperPaths(root,
+										 UPPERREL_PARTIAL_GROUP_AGG,
+										 input_rel, partially_grouped_rel,
+										 extra);
+	}
+
+	return partially_grouped_rel;
+}
+
+/*
+ * Generate Gather and Gather Merge paths for a grouping relation or partial
+ * grouping relation.
+ *
+ * generate_gather_paths does most of the work, but we also consider a special
+ * case: we could try sorting the data by the group_pathkeys and then applying
+ * Gather Merge.
+ *
+ * NB: This function shouldn't be used for anything other than a grouped or
+ * partially grouped relation, not only because it explicitly references
+ * group_pathkeys but also because we pass "true" as the third argument to
+ * generate_gather_paths().
+ */
+static void
+gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
+{
+	Path	   *cheapest_partial_path;
+
+	/* If there are no partial paths, there is nothing to gather. */
+	if (rel->partial_pathlist == NIL)
+		return;
+
+	/* Try Gather for unordered paths and Gather Merge for ordered ones. */
+	generate_gather_paths(root, rel, true);
+
+	/* Try cheapest partial path + explicit Sort + Gather Merge. */
+	cheapest_partial_path = linitial(rel->partial_pathlist);
+	if (!pathkeys_contained_in(root->group_pathkeys,
+							   cheapest_partial_path->pathkeys))
+	{
+		Path	   *path;
+		double		total_groups;
+
+		/* Estimate the total number of rows emitted across all workers. */
+		total_groups =
+			cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
+		path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
+										 root->group_pathkeys,
+										 -1.0);
+		path = (Path *)
+			create_gather_merge_path(root,
+									 rel,
+									 path,
+									 rel->reltarget,
+									 root->group_pathkeys,
+									 NULL,
+									 &total_groups);
+
+		add_path(rel, path);
+	}
+}
+
+/*
+ * create_partitionwise_grouping_paths
+ *
+ * If the partition keys of input relation are part of the GROUP BY clause, all
+ * the rows belonging to a given group come from a single partition.  This
+ * allows aggregation/grouping over a partitioned relation to be broken down
+ * into aggregation/grouping on each partition.  This should be no worse, and
+ * often better, than the normal approach.
+ *
+ * However, if the GROUP BY clause does not contain all the partition keys,
+ * rows from a given group may be spread across multiple partitions. In that
+ * case, we perform partial aggregation for each group, append the results,
+ * and then finalize aggregation.  This is less certain to win than the
+ * previous case.  It may win if the PartialAggregate stage greatly reduces
+ * the number of groups, because fewer rows will pass through the Append node.
+ * It may lose if we have lots of small groups.
+ */
+static void
+create_partitionwise_grouping_paths(PlannerInfo *root,
+									RelOptInfo *input_rel,
+									RelOptInfo *grouped_rel,
+									RelOptInfo *partially_grouped_rel,
+									const AggClauseCosts *agg_costs,
+									grouping_sets_data *gd,
+									PartitionwiseAggregateType patype,
+									GroupPathExtraData *extra)
+{
+	int			nparts = input_rel->nparts;
+	int			cnt_parts;
+	List	   *grouped_live_children = NIL;
+	List	   *partially_grouped_live_children = NIL;
+	PathTarget *target = grouped_rel->reltarget;
+
+	Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
+	Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
+		   partially_grouped_rel != NULL);
+
+	/* Add paths for partitionwise aggregation/grouping. */
+	for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++)
+	{
+		RelOptInfo *child_input_rel = input_rel->part_rels[cnt_parts];
+		PathTarget *child_target = copy_pathtarget(target);
+		AppendRelInfo **appinfos;
+		int			nappinfos;
+		GroupPathExtraData child_extra;
+		RelOptInfo *child_grouped_rel;
+		RelOptInfo *child_partially_grouped_rel;
+
+		/* Input child rel must have a path */
+		Assert(child_input_rel->pathlist != NIL);
+
+		/*
+		 * Copy the given "extra" structure as is and then override the
+		 * members specific to this child.
+		 */
+		memcpy(&child_extra, extra, sizeof(child_extra));
+
+		appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
+										   &nappinfos);
+
+		/* Translate the target expressions to this child's Vars. */
+		child_target->exprs = (List *)
+			adjust_appendrel_attrs(root,
+								   (Node *) target->exprs,
+								   nappinfos, appinfos);
+
+		/* Translate havingQual and targetList. */
+		child_extra.havingQual = (Node *)
+			adjust_appendrel_attrs(root,
+								   extra->havingQual,
+								   nappinfos, appinfos);
+		child_extra.targetList = (List *)
+			adjust_appendrel_attrs(root,
+								   (Node *) extra->targetList,
+								   nappinfos, appinfos);
+
+		/*
+		 * extra->patype was the value computed for our parent rel; patype is
+		 * the value for this relation.  For the child, our value is its
+		 * parent rel's value.
+		 */
+		child_extra.patype = patype;
+
+		/*
+		 * Create grouping relation to hold fully aggregated grouping and/or
+		 * aggregation paths for the child.
+		 */
+		child_grouped_rel = make_grouping_rel(root, child_input_rel,
+											  child_target,
+											  extra->target_parallel_safe,
+											  child_extra.havingQual);
+
+		/* Ignore empty children: mark the child grouped rel dummy and move on. */
+		if (IS_DUMMY_REL(child_input_rel))
+		{
+			mark_dummy_rel(child_grouped_rel);
+
+			continue;
+		}
+
+		/* Create grouping paths for this child relation. */
+		create_ordinary_grouping_paths(root, child_input_rel,
+									   child_grouped_rel,
+									   agg_costs, gd, &child_extra,
+									   &child_partially_grouped_rel);
+
+		if (child_partially_grouped_rel)
+		{
+			partially_grouped_live_children =
+				lappend(partially_grouped_live_children,
+						child_partially_grouped_rel);
+		}
+
+		if (patype == PARTITIONWISE_AGGREGATE_FULL)
+		{
+			set_cheapest(child_grouped_rel);
+			grouped_live_children = lappend(grouped_live_children,
+											child_grouped_rel);
+		}
+
+		pfree(appinfos);
+	}
+
+	/*
+	 * If all children were dummy, the parent would have been marked dummy
+	 * too, so we should have found at least one live child above.
+	 */
+	Assert(grouped_live_children != NIL ||
+		   partially_grouped_live_children != NIL);
+
+	/*
+	 * Try to create append paths for partially grouped children. For full
+	 * partitionwise aggregation, we might have paths in the partial_pathlist
+	 * if parallel aggregation is possible.  For partial partitionwise
+	 * aggregation, we may have paths in both pathlist and partial_pathlist.
+	 */
+	if (partially_grouped_rel)
+	{
+		add_paths_to_append_rel(root, partially_grouped_rel,
+								partially_grouped_live_children);
+
+		/*
+		 * We must call set_cheapest here, since the finalization step will
+		 * use the cheapest path from the rel.
+		 */
+		if (partially_grouped_rel->pathlist)
+			set_cheapest(partially_grouped_rel);
+	}
+
+	/* If possible, create append paths for fully grouped children. */
+	if (patype == PARTITIONWISE_AGGREGATE_FULL)
+		add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
+}
+
+/*
+ * group_by_has_partkey
+ *
+ * Returns true if all the partition keys of the given relation appear in
+ * the GROUP BY clauses, false otherwise.
+ */
+static bool
+group_by_has_partkey(RelOptInfo *input_rel,
+					 List *targetList,
+					 List *groupClause)
+{
+	List	   *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
+	int			cnt;
+	int			partnatts;
+
+	/* Input relation should be partitioned. */
+	Assert(input_rel->part_scheme);
+
+	/* Rule out early, if there are no partition keys present. */
+	if (!input_rel->partexprs)
+		return false;
+
+	partnatts = input_rel->part_scheme->partnatts;
+
+	for (cnt = 0; cnt < partnatts; cnt++)
+	{
+		List	   *partexprs = input_rel->partexprs[cnt];
+		ListCell   *lc;
+		bool		found = false;
+
+		/* Check whether any expression of this partition key is grouped on. */
+		foreach(lc, partexprs)
+		{
+			Expr	   *partexpr = lfirst(lc);
+
+			if (list_member(groupexprs, partexpr))
+			{
+				found = true;
+				break;
+			}
+		}
+
+		/*
+		 * If none of the partition key expressions matches any of the GROUP
+		 * BY expressions, return false.
+		 */
+		if (!found)
+			return false;
+	}
+
+	return true;
+}
+
+
+/*
+ * For a given input path, consider the possible ways of doing grouping sets on
+ * it, by combinations of hashing and sorting.  This can be called multiple
+ * times, so it's important that it not scribble on input.  No result is
+ * returned, but any generated paths are added to grouped_rel.
+ */
+static void
+consider_groupingsets_paths(PlannerInfo *root,
+							RelOptInfo *grouped_rel,
+							Path *path,
+							bool is_sorted,
+							bool can_hash,
+							grouping_sets_data *gd,
+							const AggClauseCosts *agg_costs,
+							double dNumGroups)
+{
+	Query	   *parse = root->parse;
+
+	/*
+	 * If we're not being offered sorted input, then only consider plans that
+	 * can be done entirely by hashing.
+	 *
+	 * We can hash everything if it looks like it'll fit in work_mem. But if
+	 * the input is actually sorted despite not being advertised as such, we
+	 * prefer to make use of that in order to use less memory.
+	 *
+	 * If none of the grouping sets are sortable, then ignore the work_mem
+	 * limit and generate a path anyway, since otherwise we'll just fail.
+	 */
+	if (!is_sorted)
+	{
+		List	   *new_rollups = NIL;
+		RollupData *unhashed_rollup = NULL;
+		List	   *sets_data;
+		List	   *empty_sets_data = NIL;
+		List	   *empty_sets = NIL;
+		ListCell   *lc;
+		ListCell   *l_start = list_head(gd->rollups);
+		AggStrategy strat = AGG_HASHED;
+		Size		hashsize;
+		double		exclude_groups = 0.0;
+
+		Assert(can_hash);
+
+		/*
+		 * If the input is coincidentally sorted usefully (which can happen
+		 * even if is_sorted is false, since that only means that our caller
+		 * has set up the sorting for us), then save some hashtable space by
+		 * making use of that. But we need to watch out for degenerate cases:
+		 *
+		 * 1) If there are any empty grouping sets, then group_pathkeys might
+		 * be NIL if all non-empty grouping sets are unsortable. In this case,
+		 * there will be a rollup containing only empty groups, and the
+		 * pathkeys_contained_in test is vacuously true; this is ok.
+		 *
+		 * XXX: the above relies on the fact that group_pathkeys is generated
+		 * from the first rollup. If we add the ability to consider multiple
+		 * sort orders for grouping input, this assumption might fail.
+		 *
+		 * 2) If there are no empty sets and only unsortable sets, then the
+		 * rollups list will be empty (and thus l_start == NULL), and
+		 * group_pathkeys will be NIL; we must ensure that the vacuously-true
+		 * pathkeys_contained_in test doesn't cause us to crash.
+		 */
+		if (l_start != NULL &&
+			pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
+		{
+			unhashed_rollup = lfirst_node(RollupData, l_start);
+			exclude_groups = unhashed_rollup->numGroups;
+			l_start = lnext(l_start);
+		}
+
+		hashsize = estimate_hashagg_tablesize(path,
+											  agg_costs,
+											  dNumGroups - exclude_groups);
+
+		/*
+		 * gd->rollups is empty if we have only unsortable columns to work
+		 * with.  Override work_mem in that case; otherwise, we'll rely on the
+		 * sorted-input case to generate usable mixed paths.
+		 */
+		if (hashsize > work_mem * 1024L && gd->rollups)
+			return;				/* nope, won't fit */
+
+		/*
+		 * We need to burst the existing rollups list into individual grouping
+		 * sets and recompute a groupClause for each set.
+		 */
+		sets_data = list_copy(gd->unsortable_sets);
+
+		for_each_cell(lc, l_start)
+		{
+			RollupData *rollup = lfirst_node(RollupData, lc);
+
+			/*
+			 * If we find an unhashable rollup that's not been skipped by the
+			 * "actually sorted" check above, we can't cope; we'd need sorted
+			 * input (with a different sort order) but we can't get that here.
+			 * So bail out; we'll get a valid path from the is_sorted case
+			 * instead.
+			 *
+			 * The mere presence of empty grouping sets doesn't make a rollup
+			 * unhashable (see preprocess_grouping_sets), we handle those
+			 * specially below.
+			 */
+			if (!rollup->hashable)
+				return;
+			else
+				sets_data = list_concat(sets_data, list_copy(rollup->gsets_data));
+		}
+		foreach(lc, sets_data)
+		{
+			GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+			List	   *gset = gs->set;
+			RollupData *rollup;
+
+			if (gset == NIL)
+			{
+				/* Empty grouping sets can't be hashed. */
+				empty_sets_data = lappend(empty_sets_data, gs);
+				empty_sets = lappend(empty_sets, NIL);
+			}
+			else
+			{
+				/* Make a one-set rollup that we can hash. */
+				rollup = makeNode(RollupData);
+
+				rollup->groupClause = preprocess_groupclause(root, gset);
+				rollup->gsets_data = list_make1(gs);
+				rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
+														 rollup->gsets_data,
+														 gd->tleref_to_colnum_map);
+				rollup->numGroups = gs->numGroups;
+				rollup->hashable = true;
+				rollup->is_hashed = true;
+				new_rollups = lappend(new_rollups, rollup);
+			}
+		}
+
+		/*
+		 * If we didn't find anything nonempty to hash, then bail.  We'll
+		 * generate a path from the is_sorted case.
+		 */
+		if (new_rollups == NIL)
+			return;
+
+		/*
+		 * If there were empty grouping sets they should have been in the
+		 * first rollup.
+		 */
+		Assert(!unhashed_rollup || !empty_sets);
+
+		if (unhashed_rollup)
+		{
+			new_rollups = lappend(new_rollups, unhashed_rollup);
+			strat = AGG_MIXED;
+		}
+		else if (empty_sets)
+		{
+			RollupData *rollup = makeNode(RollupData);
+
+			rollup->groupClause = NIL;
+			rollup->gsets_data = empty_sets_data;
+			rollup->gsets = empty_sets;
+			rollup->numGroups = list_length(empty_sets);
+			rollup->hashable = false;
+			rollup->is_hashed = false;
+			new_rollups = lappend(new_rollups, rollup);
+			strat = AGG_MIXED;
+		}
+
+		add_path(grouped_rel, (Path *)
+				 create_groupingsets_path(root,
+										  grouped_rel,
+										  path,
+										  (List *) parse->havingQual,
+										  strat,
+										  new_rollups,
+										  agg_costs,
+										  dNumGroups));
+		return;
+	}
+
+	/*
+	 * If we have sorted input but nothing we can do with it, bail.
+	 */
+	if (list_length(gd->rollups) == 0)
+		return;
+
+	/*
+	 * Given sorted input, we try to make two paths: one sorted and one mixed
+	 * sort/hash. (We need to try both because hashagg might be disabled, or
+	 * some columns might not be sortable.)
+	 *
+	 * can_hash is passed in as false if some obstacle elsewhere (such as
+	 * ordered aggs) means that we shouldn't consider hashing at all.
+	 */
+	if (can_hash && gd->any_hashable)
+	{
+		List	   *rollups = NIL;
+		List	   *hash_sets = list_copy(gd->unsortable_sets);
+		double		availspace = (work_mem * 1024.0);
+		ListCell   *lc;
+
+		/*
+		 * Account first for space needed for groups we can't sort at all.
+		 */
+		availspace -= (double) estimate_hashagg_tablesize(path,
+														  agg_costs,
+														  gd->dNumHashGroups);
+
+		if (availspace > 0 && list_length(gd->rollups) > 1)
+		{
+			double		scale;
+			int			num_rollups = list_length(gd->rollups);
+			int			k_capacity;
+			int		   *k_weights = palloc(num_rollups * sizeof(int));
+			Bitmapset  *hash_items = NULL;
+			int			i;
+
+			/*
+			 * We treat this as a knapsack problem: the knapsack capacity
+			 * represents work_mem, the item weights are the estimated memory
+			 * usage of the hashtables needed to implement a single rollup,
+			 * and we really ought to use the cost saving as the item value;
+			 * however, currently the costs assigned to sort nodes don't
+			 * reflect the comparison costs well, and so we treat all items as
+			 * of equal value (each rollup we hash instead saves us one sort).
+			 *
+			 * To use the discrete knapsack, we need to scale the values to a
+			 * reasonably small bounded range.  We choose to allow a 5% error
+			 * margin; we have no more than 4096 rollups in the worst possible
+			 * case, which with a 5% error margin will require a bit over 42MB
+			 * of workspace. (Anyone wanting to plan queries that complex had
+			 * better have the memory for it.  In more reasonable cases, with
+			 * no more than a couple of dozen rollups, the memory usage will
+			 * be negligible.)
+			 *
+			 * k_capacity is naturally bounded, but we clamp the values for
+			 * scale and weight (below) to avoid overflows or underflows (or
+			 * uselessly trying to use a scale factor less than 1 byte).
+			 */
+			scale = Max(availspace / (20.0 * num_rollups), 1.0);
+			k_capacity = (int) floor(availspace / scale);
+
+			/*
+			 * We leave the first rollup out of consideration since it's the
+			 * one that matches the input sort order.  We assign indexes "i"
+			 * to only those entries considered for hashing; the second loop,
+			 * below, must use the same condition.
+			 */
+			i = 0;
+			for_each_cell(lc, lnext(list_head(gd->rollups)))
+			{
+				RollupData *rollup = lfirst_node(RollupData, lc);
+
+				if (rollup->hashable)
+				{
+					double		sz = estimate_hashagg_tablesize(path,
+																agg_costs,
+																rollup->numGroups);
+
+					/*
+					 * If sz is enormous, but work_mem (and hence scale) is
+					 * small, avoid integer overflow here.
+					 */
+					k_weights[i] = (int) Min(floor(sz / scale),
+											 k_capacity + 1.0);
+					++i;
+				}
+			}
+
+			/*
+			 * Apply knapsack algorithm; compute the set of items which
+			 * maximizes the value stored (in this case the number of sorts
+			 * saved) while keeping the total size (approximately) within
+			 * capacity.
+			 */
+			if (i > 0)
+				hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
+
+			if (!bms_is_empty(hash_items))
+			{
+				/* Keep the input-order rollup sorted; hash the chosen ones. */
+				rollups = list_make1(linitial(gd->rollups));
+
+				i = 0;
+				for_each_cell(lc, lnext(list_head(gd->rollups)))
+				{
+					RollupData *rollup = lfirst_node(RollupData, lc);
+
+					if (rollup->hashable)
+					{
+						if (bms_is_member(i, hash_items))
+							hash_sets = list_concat(hash_sets,
+													list_copy(rollup->gsets_data));
+						else
+							rollups = lappend(rollups, rollup);
+						++i;
+					}
+					else
+						rollups = lappend(rollups, rollup);
+				}
+			}
+		}
+
+		if (!rollups && hash_sets)
+			rollups = list_copy(gd->rollups);
+
+		foreach(lc, hash_sets)
+		{
+			GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+			RollupData *rollup = makeNode(RollupData);
+
+			Assert(gs->set != NIL);
+
+			rollup->groupClause = preprocess_groupclause(root, gs->set);
+			rollup->gsets_data = list_make1(gs);
+			rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
+													 rollup->gsets_data,
+													 gd->tleref_to_colnum_map);
+			rollup->numGroups = gs->numGroups;
+			rollup->hashable = true;
+			rollup->is_hashed = true;
+			rollups = lcons(rollup, rollups);
+		}
+
+		if (rollups)
+		{
+			add_path(grouped_rel, (Path *)
+					 create_groupingsets_path(root,
+											  grouped_rel,
+											  path,
+											  (List *) parse->havingQual,
+											  AGG_MIXED,
+											  rollups,
+											  agg_costs,
+											  dNumGroups));
+		}
+	}
+
+	/*
+	 * Now try the simple sorted case.
+	 */
+	if (!gd->unsortable_sets)
+		add_path(grouped_rel, (Path *)
+				 create_groupingsets_path(root,
+										  grouped_rel,
+										  path,
+										  (List *) parse->havingQual,
+										  AGG_SORTED,
+										  gd->rollups,
+										  agg_costs,
+										  dNumGroups));
+}
+
+
+/*
+ * Given a groupclause and a list of GroupingSetData, return equivalent sets
+ * (without annotation) mapped to indexes into the given groupclause.
+ */
+List *
+remap_to_groupclause_idx(List *groupClause,
+						 List *gsets,
+						 int *tleref_to_colnum_map)
+{
+	List	   *result = NIL;
+	int			colnum = 0;
+	ListCell   *cell;
+
+	/* First, record each sortgroupref's column position in groupClause. */
+	foreach(cell, groupClause)
+	{
+		SortGroupClause *sgc = lfirst_node(SortGroupClause, cell);
+
+		tleref_to_colnum_map[sgc->tleSortGroupRef] = colnum++;
+	}
+
+	/* Then translate each grouping set into a list of column indexes. */
+	foreach(cell, gsets)
+	{
+		GroupingSetData *gs = lfirst_node(GroupingSetData, cell);
+		List	   *set = NIL;
+		ListCell   *refcell;
+
+		foreach(refcell, gs->set)
+			set = lappend_int(set, tleref_to_colnum_map[lfirst_int(refcell)]);
+
+		result = lappend(result, set);
+	}
+
+	return result;
+}
diff --git a/src/backend/optimizer/plan/Makefile b/src/backend/optimizer/plan/Makefile
index 88a9f7ff8c..e3c96b704e 100644
--- a/src/backend/optimizer/plan/Makefile
+++ b/src/backend/optimizer/plan/Makefile
@@ -12,7 +12,7 @@ subdir = src/backend/optimizer/plan
 top_builddir = ../../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = analyzejoins.o createplan.o initsplan.o planagg.o planmain.o planner.o \
-	setrefs.o subselect.o
+OBJS = analyzejoins.o createplan.o initsplan.o planagg.o planmain.o \
+	planner.o setrefs.o subselect.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index e73007f3ba..0d31eded37 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -28,10 +28,9 @@
 #include "executor/executor.h"
 #include "executor/nodeAgg.h"
 #include "foreign/fdwapi.h"
+#include "lib/bipartite_match.h"
 #include "miscadmin.h"
 #include "jit/jit.h"
-#include "lib/bipartite_match.h"
-#include "lib/knapsack.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #ifdef OPTIMIZER_DEBUG
@@ -94,22 +93,6 @@ typedef struct
 	List	   *groupClause;	/* overrides parse->groupClause */
 } standard_qp_extra;
 
-/*
- * Data specific to grouping sets
- */
-
-typedef struct
-{
-	List	   *rollups;
-	List	   *hash_sets_idx;
-	double		dNumHashGroups;
-	bool		any_hashable;
-	Bitmapset  *unsortable_refs;
-	Bitmapset  *unhashable_refs;
-	List	   *unsortable_sets;
-	int		   *tleref_to_colnum_map;
-} grouping_sets_data;
-
 /* Local functions */
 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
@@ -117,53 +100,12 @@ static void inheritance_planner(PlannerInfo *root);
 static void grouping_planner(PlannerInfo *root, bool inheritance_update,
 				 double tuple_fraction);
 static void compute_pathkeys(PlannerInfo *root, List *tlist, List *activeWindows, List *groupClause);
-static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
-static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
-						 int *tleref_to_colnum_map);
 static void preprocess_rowmarks(PlannerInfo *root);
 static double preprocess_limit(PlannerInfo *root,
 				 double tuple_fraction,
 				 int64 *offset_est, int64 *count_est);
 static bool limit_needed(Query *parse);
 static void remove_useless_groupby_columns(PlannerInfo *root);
-static List *preprocess_groupclause(PlannerInfo *root, List *force);
-static List *extract_rollup_sets(List *groupingSets);
-static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
-static double get_number_of_groups(PlannerInfo *root,
-					 double path_rows,
-					 grouping_sets_data *gd,
-					 List *target_list);
-static Size estimate_hashagg_tablesize(Path *path,
-						   const AggClauseCosts *agg_costs,
-						   double dNumGroups);
-static RelOptInfo *create_grouping_paths(PlannerInfo *root,
-					  RelOptInfo *input_rel,
-					  PathTarget *target,
-					  bool target_parallel_safe,
-					  const AggClauseCosts *agg_costs,
-					  grouping_sets_data *gd);
-static bool is_degenerate_grouping(PlannerInfo *root);
-static void create_degenerate_grouping_paths(PlannerInfo *root,
-								 RelOptInfo *input_rel,
-								 RelOptInfo *grouped_rel);
-static RelOptInfo *make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
-				  PathTarget *target, bool target_parallel_safe,
-				  Node *havingQual);
-static void create_ordinary_grouping_paths(PlannerInfo *root,
-							   RelOptInfo *input_rel,
-							   RelOptInfo *grouped_rel,
-							   const AggClauseCosts *agg_costs,
-							   grouping_sets_data *gd,
-							   GroupPathExtraData *extra,
-							   RelOptInfo **partially_grouped_rel_p);
-static void consider_groupingsets_paths(PlannerInfo *root,
-							RelOptInfo *grouped_rel,
-							Path *path,
-							bool is_sorted,
-							bool can_hash,
-							grouping_sets_data *gd,
-							const AggClauseCosts *agg_costs,
-							double dNumGroups);
 static RelOptInfo *create_window_paths(PlannerInfo *root,
 					RelOptInfo *input_rel,
 					PathTarget *input_target,
@@ -189,9 +131,6 @@ static RelOptInfo *create_ordered_paths(PlannerInfo *root,
 					 double limit_tuples);
 static PathTarget *make_group_input_target(PlannerInfo *root,
 						PathTarget *final_target);
-static PathTarget *make_partial_grouping_target(PlannerInfo *root,
-							 PathTarget *grouping_target,
-							 Node *havingQual);
 static List *postprocess_setop_tlist(List *new_tlist, List *orig_tlist);
 static List *select_active_windows(PlannerInfo *root, WindowFuncLists *wflists);
 static PathTarget *make_window_input_target(PlannerInfo *root,
@@ -204,39 +143,14 @@ static PathTarget *make_sort_input_target(PlannerInfo *root,
 					   bool *have_postponed_srfs);
 static void adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
 					  List *targets, List *targets_contain_srfs);
-static void add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
-						  RelOptInfo *grouped_rel,
-						  RelOptInfo *partially_grouped_rel,
-						  const AggClauseCosts *agg_costs,
-						  grouping_sets_data *gd,
-						  double dNumGroups,
-						  GroupPathExtraData *extra);
-static RelOptInfo *create_partial_grouping_paths(PlannerInfo *root,
-							  RelOptInfo *grouped_rel,
-							  RelOptInfo *input_rel,
-							  grouping_sets_data *gd,
-							  GroupPathExtraData *extra,
-							  bool force_rel_creation);
-static void gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel);
-static bool can_partial_agg(PlannerInfo *root,
-				const AggClauseCosts *agg_costs);
 static void apply_scanjoin_target_to_paths(PlannerInfo *root,
 							   RelOptInfo *rel,
 							   List *scanjoin_targets,
 							   List *scanjoin_targets_contain_srfs,
 							   bool scanjoin_target_parallel_safe,
 							   bool tlist_same_exprs);
-static void create_partitionwise_grouping_paths(PlannerInfo *root,
-									RelOptInfo *input_rel,
-									RelOptInfo *grouped_rel,
-									RelOptInfo *partially_grouped_rel,
-									const AggClauseCosts *agg_costs,
-									grouping_sets_data *gd,
-									PartitionwiseAggregateType patype,
-									GroupPathExtraData *extra);
-static bool group_by_has_partkey(RelOptInfo *input_rel,
-					 List *targetList,
-					 List *groupClause);
+static List *extract_rollup_sets(List *groupingSets);
+static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
 
 
 /*****************************************************************************
@@ -2252,13 +2166,121 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 	/* Note: currently, we leave it to callers to do set_cheapest() */
 }
 
+
+
+
+/*
+ * preprocess_groupclause - do preparatory work on GROUP BY clause
+ *
+ * The idea here is to adjust the ordering of the GROUP BY elements
+ * (which in itself is semantically insignificant) to match ORDER BY,
+ * thereby allowing a single sort operation to both implement the ORDER BY
+ * requirement and set up for a Unique step that implements GROUP BY.
+ *
+ * In principle it might be interesting to consider other orderings of the
+ * GROUP BY elements, which could match the sort ordering of other
+ * possible plans (eg an indexscan) and thereby reduce cost.  We don't
+ * bother with that, though.  Hashed grouping will frequently win anyway.
+ *
+ * Note: we need no comparable processing of the distinctClause because
+ * the parser already enforced that that matches ORDER BY.
+ *
+ * For grouping sets, the order of items is instead forced to agree with that
+ * of the grouping set (and items not in the grouping set are skipped). The
+ * work of sorting the order of grouping set elements to match the ORDER BY if
+ * possible is done elsewhere.
+ *
+ * 'force', if non-NIL, is a list of sortgroupref values (integers); the
+ * result is then the matching SortGroupClauses in exactly that order.
+ * NOTE(review): this function is no longer static — presumably so the
+ * grouping-sets code moved to aggpath.c can call it; confirm against that
+ * file's callers.
+ */
+List *
+preprocess_groupclause(PlannerInfo *root, List *force)
+{
+	Query	   *parse = root->parse;
+	List	   *new_groupclause = NIL;
+	bool		partial_match;
+	ListCell   *sl;
+	ListCell   *gl;
+
+	/* For grouping sets, we need to force the ordering */
+	if (force)
+	{
+		foreach(sl, force)
+		{
+			Index		ref = lfirst_int(sl);
+			SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
+
+			new_groupclause = lappend(new_groupclause, cl);
+		}
+
+		return new_groupclause;
+	}
+
+	/* If no ORDER BY, nothing useful to do here */
+	if (parse->sortClause == NIL)
+		return parse->groupClause;
+
+	/*
+	 * Scan the ORDER BY clause and construct a list of matching GROUP BY
+	 * items, but only as far as we can make a matching prefix.
+	 *
+	 * This code assumes that the sortClause contains no duplicate items.
+	 */
+	foreach(sl, parse->sortClause)
+	{
+		SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
+
+		foreach(gl, parse->groupClause)
+		{
+			SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
+
+			if (equal(gc, sc))
+			{
+				new_groupclause = lappend(new_groupclause, gc);
+				break;
+			}
+		}
+		/* gl == NULL means the inner loop ran off the end without a match */
+		if (gl == NULL)
+			break;				/* no match, so stop scanning */
+	}
+
+	/* Did we match all of the ORDER BY list, or just some of it? */
+	partial_match = (sl != NULL);
+
+	/* If no match at all, no point in reordering GROUP BY */
+	if (new_groupclause == NIL)
+		return parse->groupClause;
+
+	/*
+	 * Add any remaining GROUP BY items to the new list, but only if we were
+	 * able to make a complete match.  In other words, we only rearrange the
+	 * GROUP BY list if the result is that one list is a prefix of the other
+	 * --- otherwise there's no possibility of a common sort.  Also, give up
+	 * if there are any non-sortable GROUP BY items, since then there's no
+	 * hope anyway.
+	 */
+	foreach(gl, parse->groupClause)
+	{
+		SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
+
+		if (list_member_ptr(new_groupclause, gc))
+			continue;			/* it matched an ORDER BY item */
+		if (partial_match)
+			return parse->groupClause;	/* give up, no common sort possible */
+		if (!OidIsValid(gc->sortop))
+			return parse->groupClause;	/* give up, GROUP BY can't be sorted */
+		new_groupclause = lappend(new_groupclause, gc);
+	}
+
+	/* Success --- install the rearranged GROUP BY list */
+	Assert(list_length(parse->groupClause) == list_length(new_groupclause));
+	return new_groupclause;
+}
+
 /*
  * Do preprocessing for groupingSets clause and related data.  This handles the
  * preliminary steps of expanding the grouping sets, organizing them into lists
  * of rollups, and preparing annotations which will later be filled in with
  * size estimates.
  */
-static grouping_sets_data *
+grouping_sets_data *
 preprocess_grouping_sets(PlannerInfo *root)
 {
 	Query	   *parse = root->parse;
@@ -2430,65 +2452,307 @@ preprocess_grouping_sets(PlannerInfo *root)
 }
 
 /*
- * Given a groupclause and a list of GroupingSetData, return equivalent sets
- * (without annotation) mapped to indexes into the given groupclause.
+ * Extract lists of grouping sets that can be implemented using a single
+ * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
+ *
+ * Input must be sorted with smallest sets first. Result has each sublist
+ * sorted with smallest sets first.
+ *
+ * We want to produce the absolute minimum possible number of lists here to
+ * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
+ * of finding the minimal partition of a partially-ordered set into chains
+ * (which is what we need, taking the list of grouping sets as a poset ordered
+ * by set inclusion) can be mapped to the problem of finding the maximum
+ * cardinality matching on a bipartite graph, which is solvable in polynomial
+ * time with a worst case of no worse than O(n^2.5) and usually much
+ * better. Since our N is at most 4096, we don't need to consider fallbacks to
+ * heuristic or approximate methods.  (Planning time for a 12-d cube is under
+ * half a second on my modest system even with optimization off and assertions
+ * on.)
  */
 static List *
-remap_to_groupclause_idx(List *groupClause,
-						 List *gsets,
-						 int *tleref_to_colnum_map)
+extract_rollup_sets(List *groupingSets)
 {
-	int			ref = 0;
+	int			num_sets_raw = list_length(groupingSets);
+	int			num_empty = 0;
+	int			num_sets = 0;	/* distinct sets */
+	int			num_chains = 0;
 	List	   *result = NIL;
+	List	  **results;
+	List	  **orig_sets;
+	Bitmapset **set_masks;
+	int		   *chains;
+	short	  **adjacency;
+	short	   *adjacency_buf;
+	BipartiteMatchState *state;
+	int			i;
+	int			j;
+	int			j_size;
+	ListCell   *lc1 = list_head(groupingSets);
 	ListCell   *lc;
 
-	foreach(lc, groupClause)
+	/*
+	 * Start by stripping out empty sets.  The algorithm doesn't require this,
+	 * but the planner currently needs all empty sets to be returned in the
+	 * first list, so we strip them here and add them back after.
+	 */
+	while (lc1 && lfirst(lc1) == NIL)
 	{
-		SortGroupClause *gc = lfirst_node(SortGroupClause, lc);
-
-		tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
+		++num_empty;
+		lc1 = lnext(lc1);
 	}
 
-	foreach(lc, gsets)
+	/* bail out now if it turns out that all we had were empty sets. */
+	if (!lc1)
+		return list_make1(groupingSets);
+
+	/*----------
+	 * We don't strictly need to remove duplicate sets here, but if we don't,
+	 * they tend to become scattered through the result, which is a bit
+	 * confusing (and irritating if we ever decide to optimize them out).
+	 * So we remove them here and add them back after.
+	 *
+	 * For each non-duplicate set, we fill in the following:
+	 *
+	 * orig_sets[i] = list of the original set lists
+	 * set_masks[i] = bitmapset for testing inclusion
+	 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
+	 *
+	 * chains[i] will be the result group this set is assigned to.
+	 *
+	 * We index all of these from 1 rather than 0 because it is convenient
+	 * to leave 0 free for the NIL node in the graph algorithm.
+	 *----------
+	 */
+	orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
+	set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
+	adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
+	adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
+
+	j_size = 0;
+	j = 0;
+	i = 1;
+
+	for_each_cell(lc, lc1)
 	{
-		List	   *set = NIL;
+		List	   *candidate = (List *) lfirst(lc);
+		Bitmapset  *candidate_set = NULL;
 		ListCell   *lc2;
-		GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
+		int			dup_of = 0;
 
-		foreach(lc2, gs->set)
+		foreach(lc2, candidate)
 		{
-			set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
+			candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
 		}
 
-		result = lappend(result, set);
-	}
+		/* we can only be a dup if we're the same length as a previous set */
+		if (j_size == list_length(candidate))
+		{
+			int			k;
 
-	return result;
-}
+			for (k = j; k < i; ++k)
+			{
+				if (bms_equal(set_masks[k], candidate_set))
+				{
+					dup_of = k;
+					break;
+				}
+			}
+		}
+		else if (j_size < list_length(candidate))
+		{
+			j_size = list_length(candidate);
+			j = i;
+		}
 
+		if (dup_of > 0)
+		{
+			orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
+			bms_free(candidate_set);
+		}
+		else
+		{
+			int			k;
+			int			n_adj = 0;
 
+			orig_sets[i] = list_make1(candidate);
+			set_masks[i] = candidate_set;
 
-/*
- * Detect whether a plan node is a "dummy" plan created when a relation
- * is deemed not to need scanning due to constraint exclusion.
- *
- * Currently, such dummy plans are Result nodes with constant FALSE
- * filter quals (see set_dummy_rel_pathlist and create_append_plan).
- *
- * XXX this probably ought to be somewhere else, but not clear where.
- */
-bool
-is_dummy_plan(Plan *plan)
-{
-	if (IsA(plan, Result))
-	{
-		List	   *rcqual = (List *) ((Result *) plan)->resconstantqual;
+			/* fill in adjacency list; no need to compare equal-size sets */
 
-		if (list_length(rcqual) == 1)
-		{
-			Const	   *constqual = (Const *) linitial(rcqual);
+			for (k = j - 1; k > 0; --k)
+			{
+				if (bms_is_subset(set_masks[k], candidate_set))
+					adjacency_buf[++n_adj] = k;
+			}
 
-			if (constqual && IsA(constqual, Const))
+			if (n_adj > 0)
+			{
+				adjacency_buf[0] = n_adj;
+				adjacency[i] = palloc((n_adj + 1) * sizeof(short));
+				memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
+			}
+			else
+				adjacency[i] = NULL;
+
+			++i;
+		}
+	}
+
+	num_sets = i - 1;
+
+	/*
+	 * Apply the graph matching algorithm to do the work.
+	 */
+	state = BipartiteMatch(num_sets, num_sets, adjacency);
+
+	/*
+	 * Now, the state->pair* fields have the info we need to assign sets to
+	 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
+	 * pair_vu[v] = u (both will be true, but we check both so that we can do
+	 * it in one pass)
+	 */
+	chains = palloc0((num_sets + 1) * sizeof(int));
+
+	for (i = 1; i <= num_sets; ++i)
+	{
+		int			u = state->pair_vu[i];
+		int			v = state->pair_uv[i];
+
+		if (u > 0 && u < i)
+			chains[i] = chains[u];
+		else if (v > 0 && v < i)
+			chains[i] = chains[v];
+		else
+			chains[i] = ++num_chains;
+	}
+
+	/* build result lists. */
+	results = palloc0((num_chains + 1) * sizeof(List *));
+
+	for (i = 1; i <= num_sets; ++i)
+	{
+		int			c = chains[i];
+
+		Assert(c > 0);
+
+		results[c] = list_concat(results[c], orig_sets[i]);
+	}
+
+	/* push any empty sets back on the first list. */
+	while (num_empty-- > 0)
+		results[1] = lcons(NIL, results[1]);
+
+	/* make result list */
+	for (i = 1; i <= num_chains; ++i)
+		result = lappend(result, results[i]);
+
+	/*
+	 * Free all the things.
+	 *
+	 * (This is over-fussy for small sets but for large sets we could have
+	 * tied up a nontrivial amount of memory.)
+	 */
+	BipartiteMatchFree(state);
+	pfree(results);
+	pfree(chains);
+	for (i = 1; i <= num_sets; ++i)
+		if (adjacency[i])
+			pfree(adjacency[i]);
+	pfree(adjacency);
+	pfree(adjacency_buf);
+	pfree(orig_sets);
+	for (i = 1; i <= num_sets; ++i)
+		bms_free(set_masks[i]);
+	pfree(set_masks);
+
+	return result;
+}
+
+/*
+ * Reorder the elements of a list of grouping sets such that they have correct
+ * prefix relationships. Also inserts the GroupingSetData annotations.
+ *
+ * The input must be ordered with smallest sets first; the result is returned
+ * with largest sets first.  Note that the result shares no list substructure
+ * with the input, so it's safe for the caller to modify it later.
+ *
+ * If we're passed in a sortclause, we follow its order of columns to the
+ * extent possible, to minimize the chance that we add unnecessary sorts.
+ * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
+ * gets implemented in one pass.)
+ */
+static List *
+reorder_grouping_sets(List *groupingsets, List *sortclause)
+{
+	ListCell   *lc;
+	ListCell   *lc2;
+	List	   *previous = NIL;
+	List	   *result = NIL;
+
+	foreach(lc, groupingsets)
+	{
+		List	   *candidate = (List *) lfirst(lc);
+		/* columns in this set that were not already in the previous prefix */
+		List	   *new_elems = list_difference_int(candidate, previous);
+		GroupingSetData *gs = makeNode(GroupingSetData);
+
+		if (list_length(new_elems) > 0)
+		{
+			/*
+			 * While the sortclause still agrees with what we're emitting,
+			 * take new columns in sortclause order.
+			 */
+			while (list_length(sortclause) > list_length(previous))
+			{
+				SortGroupClause *sc = list_nth(sortclause, list_length(previous));
+				int			ref = sc->tleSortGroupRef;
+
+				if (list_member_int(new_elems, ref))
+				{
+					previous = lappend_int(previous, ref);
+					new_elems = list_delete_int(new_elems, ref);
+				}
+				else
+				{
+					/* diverged from the sortclause; give up on it */
+					sortclause = NIL;
+					break;
+				}
+			}
+
+			/* Append whatever new columns remain, in their original order. */
+			foreach(lc2, new_elems)
+			{
+				previous = lappend_int(previous, lfirst_int(lc2));
+			}
+		}
+
+		/* lcons (not lappend), so the largest sets end up first */
+		gs->set = list_copy(previous);
+		result = lcons(gs, result);
+		list_free(new_elems);
+	}
+
+	list_free(previous);
+
+	return result;
+}
+
+/*
+ * Detect whether a plan node is a "dummy" plan created when a relation
+ * is deemed not to need scanning due to constraint exclusion.
+ *
+ * Currently, such dummy plans are Result nodes with constant FALSE
+ * filter quals (see set_dummy_rel_pathlist and create_append_plan).
+ *
+ * XXX this probably ought to be somewhere else, but not clear where.
+ */
+bool
+is_dummy_plan(Plan *plan)
+{
+	if (IsA(plan, Result))
+	{
+		List	   *rcqual = (List *) ((Result *) plan)->resconstantqual;
+
+		if (list_length(rcqual) == 1)
+		{
+			Const	   *constqual = (Const *) linitial(rcqual);
+
+			if (constqual && IsA(constqual, Const))
 			{
 				if (!constqual->constisnull &&
 					!DatumGetBool(constqual->constvalue))
@@ -3058,3708 +3322,1699 @@ remove_useless_groupby_columns(PlannerInfo *root)
 }
 
 /*
- * preprocess_groupclause - do preparatory work on GROUP BY clause
- *
- * The idea here is to adjust the ordering of the GROUP BY elements
- * (which in itself is semantically insignificant) to match ORDER BY,
- * thereby allowing a single sort operation to both implement the ORDER BY
- * requirement and set up for a Unique step that implements GROUP BY.
- *
- * In principle it might be interesting to consider other orderings of the
- * GROUP BY elements, which could match the sort ordering of other
- * possible plans (eg an indexscan) and thereby reduce cost.  We don't
- * bother with that, though.  Hashed grouping will frequently win anyway.
- *
- * Note: we need no comparable processing of the distinctClause because
- * the parser already enforced that that matches ORDER BY.
- *
- * For grouping sets, the order of items is instead forced to agree with that
- * of the grouping set (and items not in the grouping set are skipped). The
- * work of sorting the order of grouping set elements to match the ORDER BY if
- * possible is done elsewhere.
+ * Compute query_pathkeys and other pathkeys, to tell query_planner() which
+ * orderings would be useful for the later planner stages.
  */
-static List *
-preprocess_groupclause(PlannerInfo *root, List *force)
+static void
+compute_pathkeys(PlannerInfo *root, List *tlist, List *activeWindows, List *groupClause)
 {
 	Query	   *parse = root->parse;
-	List	   *new_groupclause = NIL;
-	bool		partial_match;
-	ListCell   *sl;
-	ListCell   *gl;
 
-	/* For grouping sets, we need to force the ordering */
-	if (force)
-	{
-		foreach(sl, force)
-		{
-			Index		ref = lfirst_int(sl);
-			SortGroupClause *cl = get_sortgroupref_clause(ref, parse->groupClause);
+	/*
+	 * Calculate pathkeys that represent grouping/ordering requirements.  The
+	 * sortClause is certainly sort-able, but GROUP BY and DISTINCT might not
+	 * be, in which case we just leave their pathkeys empty.
+	 */
+	if (groupClause && grouping_is_sortable(groupClause))
+		root->group_pathkeys =
+			make_pathkeys_for_sortclauses(root,
+										  groupClause,
+										  tlist);
+	else
+		root->group_pathkeys = NIL;
 
-			new_groupclause = lappend(new_groupclause, cl);
-		}
+	/* We consider only the first (bottom) window in pathkeys logic */
+	if (activeWindows != NIL)
+	{
+		WindowClause *wc = linitial_node(WindowClause, activeWindows);
 
-		return new_groupclause;
+		root->window_pathkeys = make_pathkeys_for_window(root,
+														 wc,
+														 tlist);
 	}
+	else
+		root->window_pathkeys = NIL;
 
-	/* If no ORDER BY, nothing useful to do here */
-	if (parse->sortClause == NIL)
-		return parse->groupClause;
+	if (parse->distinctClause &&
+		grouping_is_sortable(parse->distinctClause))
+		root->distinct_pathkeys =
+			make_pathkeys_for_sortclauses(root,
+										  parse->distinctClause,
+										  tlist);
+	else
+		root->distinct_pathkeys = NIL;
+
+	root->sort_pathkeys =
+		make_pathkeys_for_sortclauses(root,
+									  parse->sortClause,
+									  tlist);
 
 	/*
-	 * Scan the ORDER BY clause and construct a list of matching GROUP BY
-	 * items, but only as far as we can make a matching prefix.
+	 * Figure out whether we want a sorted result from query_planner.
 	 *
-	 * This code assumes that the sortClause contains no duplicate items.
-	 */
-	foreach(sl, parse->sortClause)
-	{
-		SortGroupClause *sc = lfirst_node(SortGroupClause, sl);
-
-		foreach(gl, parse->groupClause)
-		{
-			SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
-
-			if (equal(gc, sc))
-			{
-				new_groupclause = lappend(new_groupclause, gc);
-				break;
-			}
-		}
-		if (gl == NULL)
-			break;				/* no match, so stop scanning */
-	}
-
-	/* Did we match all of the ORDER BY list, or just some of it? */
-	partial_match = (sl != NULL);
-
-	/* If no match at all, no point in reordering GROUP BY */
-	if (new_groupclause == NIL)
-		return parse->groupClause;
-
-	/*
-	 * Add any remaining GROUP BY items to the new list, but only if we were
-	 * able to make a complete match.  In other words, we only rearrange the
-	 * GROUP BY list if the result is that one list is a prefix of the other
-	 * --- otherwise there's no possibility of a common sort.  Also, give up
-	 * if there are any non-sortable GROUP BY items, since then there's no
-	 * hope anyway.
-	 */
-	foreach(gl, parse->groupClause)
-	{
-		SortGroupClause *gc = lfirst_node(SortGroupClause, gl);
-
-		if (list_member_ptr(new_groupclause, gc))
-			continue;			/* it matched an ORDER BY item */
-		if (partial_match)
-			return parse->groupClause;	/* give up, no common sort possible */
-		if (!OidIsValid(gc->sortop))
-			return parse->groupClause;	/* give up, GROUP BY can't be sorted */
-		new_groupclause = lappend(new_groupclause, gc);
-	}
-
-	/* Success --- install the rearranged GROUP BY list */
-	Assert(list_length(parse->groupClause) == list_length(new_groupclause));
-	return new_groupclause;
-}
-
-/*
- * Extract lists of grouping sets that can be implemented using a single
- * rollup-type aggregate pass each. Returns a list of lists of grouping sets.
- *
- * Input must be sorted with smallest sets first. Result has each sublist
- * sorted with smallest sets first.
- *
- * We want to produce the absolute minimum possible number of lists here to
- * avoid excess sorts. Fortunately, there is an algorithm for this; the problem
- * of finding the minimal partition of a partially-ordered set into chains
- * (which is what we need, taking the list of grouping sets as a poset ordered
- * by set inclusion) can be mapped to the problem of finding the maximum
- * cardinality matching on a bipartite graph, which is solvable in polynomial
- * time with a worst case of no worse than O(n^2.5) and usually much
- * better. Since our N is at most 4096, we don't need to consider fallbacks to
- * heuristic or approximate methods.  (Planning time for a 12-d cube is under
- * half a second on my modest system even with optimization off and assertions
- * on.)
- */
-static List *
-extract_rollup_sets(List *groupingSets)
-{
-	int			num_sets_raw = list_length(groupingSets);
-	int			num_empty = 0;
-	int			num_sets = 0;	/* distinct sets */
-	int			num_chains = 0;
-	List	   *result = NIL;
-	List	  **results;
-	List	  **orig_sets;
-	Bitmapset **set_masks;
-	int		   *chains;
-	short	  **adjacency;
-	short	   *adjacency_buf;
-	BipartiteMatchState *state;
-	int			i;
-	int			j;
-	int			j_size;
-	ListCell   *lc1 = list_head(groupingSets);
-	ListCell   *lc;
-
-	/*
-	 * Start by stripping out empty sets.  The algorithm doesn't require this,
-	 * but the planner currently needs all empty sets to be returned in the
-	 * first list, so we strip them here and add them back after.
-	 */
-	while (lc1 && lfirst(lc1) == NIL)
-	{
-		++num_empty;
-		lc1 = lnext(lc1);
-	}
-
-	/* bail out now if it turns out that all we had were empty sets. */
-	if (!lc1)
-		return list_make1(groupingSets);
-
-	/*----------
-	 * We don't strictly need to remove duplicate sets here, but if we don't,
-	 * they tend to become scattered through the result, which is a bit
-	 * confusing (and irritating if we ever decide to optimize them out).
-	 * So we remove them here and add them back after.
-	 *
-	 * For each non-duplicate set, we fill in the following:
-	 *
-	 * orig_sets[i] = list of the original set lists
-	 * set_masks[i] = bitmapset for testing inclusion
-	 * adjacency[i] = array [n, v1, v2, ... vn] of adjacency indices
-	 *
-	 * chains[i] will be the result group this set is assigned to.
-	 *
-	 * We index all of these from 1 rather than 0 because it is convenient
-	 * to leave 0 free for the NIL node in the graph algorithm.
-	 *----------
-	 */
-	orig_sets = palloc0((num_sets_raw + 1) * sizeof(List *));
-	set_masks = palloc0((num_sets_raw + 1) * sizeof(Bitmapset *));
-	adjacency = palloc0((num_sets_raw + 1) * sizeof(short *));
-	adjacency_buf = palloc((num_sets_raw + 1) * sizeof(short));
-
-	j_size = 0;
-	j = 0;
-	i = 1;
-
-	for_each_cell(lc, lc1)
-	{
-		List	   *candidate = (List *) lfirst(lc);
-		Bitmapset  *candidate_set = NULL;
-		ListCell   *lc2;
-		int			dup_of = 0;
-
-		foreach(lc2, candidate)
-		{
-			candidate_set = bms_add_member(candidate_set, lfirst_int(lc2));
-		}
-
-		/* we can only be a dup if we're the same length as a previous set */
-		if (j_size == list_length(candidate))
-		{
-			int			k;
-
-			for (k = j; k < i; ++k)
-			{
-				if (bms_equal(set_masks[k], candidate_set))
-				{
-					dup_of = k;
-					break;
-				}
-			}
-		}
-		else if (j_size < list_length(candidate))
-		{
-			j_size = list_length(candidate);
-			j = i;
-		}
-
-		if (dup_of > 0)
-		{
-			orig_sets[dup_of] = lappend(orig_sets[dup_of], candidate);
-			bms_free(candidate_set);
-		}
-		else
-		{
-			int			k;
-			int			n_adj = 0;
-
-			orig_sets[i] = list_make1(candidate);
-			set_masks[i] = candidate_set;
-
-			/* fill in adjacency list; no need to compare equal-size sets */
-
-			for (k = j - 1; k > 0; --k)
-			{
-				if (bms_is_subset(set_masks[k], candidate_set))
-					adjacency_buf[++n_adj] = k;
-			}
-
-			if (n_adj > 0)
-			{
-				adjacency_buf[0] = n_adj;
-				adjacency[i] = palloc((n_adj + 1) * sizeof(short));
-				memcpy(adjacency[i], adjacency_buf, (n_adj + 1) * sizeof(short));
-			}
-			else
-				adjacency[i] = NULL;
-
-			++i;
-		}
-	}
-
-	num_sets = i - 1;
-
-	/*
-	 * Apply the graph matching algorithm to do the work.
-	 */
-	state = BipartiteMatch(num_sets, num_sets, adjacency);
-
-	/*
-	 * Now, the state->pair* fields have the info we need to assign sets to
-	 * chains. Two sets (u,v) belong to the same chain if pair_uv[u] = v or
-	 * pair_vu[v] = u (both will be true, but we check both so that we can do
-	 * it in one pass)
-	 */
-	chains = palloc0((num_sets + 1) * sizeof(int));
-
-	for (i = 1; i <= num_sets; ++i)
-	{
-		int			u = state->pair_vu[i];
-		int			v = state->pair_uv[i];
-
-		if (u > 0 && u < i)
-			chains[i] = chains[u];
-		else if (v > 0 && v < i)
-			chains[i] = chains[v];
-		else
-			chains[i] = ++num_chains;
-	}
-
-	/* build result lists. */
-	results = palloc0((num_chains + 1) * sizeof(List *));
-
-	for (i = 1; i <= num_sets; ++i)
-	{
-		int			c = chains[i];
-
-		Assert(c > 0);
-
-		results[c] = list_concat(results[c], orig_sets[i]);
-	}
-
-	/* push any empty sets back on the first list. */
-	while (num_empty-- > 0)
-		results[1] = lcons(NIL, results[1]);
-
-	/* make result list */
-	for (i = 1; i <= num_chains; ++i)
-		result = lappend(result, results[i]);
-
-	/*
-	 * Free all the things.
-	 *
-	 * (This is over-fussy for small sets but for large sets we could have
-	 * tied up a nontrivial amount of memory.)
-	 */
-	BipartiteMatchFree(state);
-	pfree(results);
-	pfree(chains);
-	for (i = 1; i <= num_sets; ++i)
-		if (adjacency[i])
-			pfree(adjacency[i]);
-	pfree(adjacency);
-	pfree(adjacency_buf);
-	pfree(orig_sets);
-	for (i = 1; i <= num_sets; ++i)
-		bms_free(set_masks[i]);
-	pfree(set_masks);
-
-	return result;
-}
-
-/*
- * Reorder the elements of a list of grouping sets such that they have correct
- * prefix relationships. Also inserts the GroupingSetData annotations.
- *
- * The input must be ordered with smallest sets first; the result is returned
- * with largest sets first.  Note that the result shares no list substructure
- * with the input, so it's safe for the caller to modify it later.
- *
- * If we're passed in a sortclause, we follow its order of columns to the
- * extent possible, to minimize the chance that we add unnecessary sorts.
- * (We're trying here to ensure that GROUPING SETS ((a,b,c),(c)) ORDER BY c,b,a
- * gets implemented in one pass.)
- */
-static List *
-reorder_grouping_sets(List *groupingsets, List *sortclause)
-{
-	ListCell   *lc;
-	ListCell   *lc2;
-	List	   *previous = NIL;
-	List	   *result = NIL;
-
-	foreach(lc, groupingsets)
-	{
-		List	   *candidate = (List *) lfirst(lc);
-		List	   *new_elems = list_difference_int(candidate, previous);
-		GroupingSetData *gs = makeNode(GroupingSetData);
-
-		if (list_length(new_elems) > 0)
-		{
-			while (list_length(sortclause) > list_length(previous))
-			{
-				SortGroupClause *sc = list_nth(sortclause, list_length(previous));
-				int			ref = sc->tleSortGroupRef;
-
-				if (list_member_int(new_elems, ref))
-				{
-					previous = lappend_int(previous, ref);
-					new_elems = list_delete_int(new_elems, ref);
-				}
-				else
-				{
-					/* diverged from the sortclause; give up on it */
-					sortclause = NIL;
-					break;
-				}
-			}
-
-			foreach(lc2, new_elems)
-			{
-				previous = lappend_int(previous, lfirst_int(lc2));
-			}
-		}
-
-		gs->set = list_copy(previous);
-		result = lcons(gs, result);
-		list_free(new_elems);
-	}
-
-	list_free(previous);
-
-	return result;
-}
-
-/*
- * Compute query_pathkeys and other pathkeys, to tell query_planner() which
- * orderings would be useful for the later planner stages.
- */
-static void
-compute_pathkeys(PlannerInfo *root, List *tlist, List *activeWindows, List *groupClause)
-{
-	Query	   *parse = root->parse;
-
-	/*
-	 * Calculate pathkeys that represent grouping/ordering requirements.  The
-	 * sortClause is certainly sort-able, but GROUP BY and DISTINCT might not
-	 * be, in which case we just leave their pathkeys empty.
-	 */
-	if (groupClause && grouping_is_sortable(groupClause))
-		root->group_pathkeys =
-			make_pathkeys_for_sortclauses(root,
-										  groupClause,
-										  tlist);
-	else
-		root->group_pathkeys = NIL;
-
-	/* We consider only the first (bottom) window in pathkeys logic */
-	if (activeWindows != NIL)
-	{
-		WindowClause *wc = linitial_node(WindowClause, activeWindows);
-
-		root->window_pathkeys = make_pathkeys_for_window(root,
-														 wc,
-														 tlist);
-	}
-	else
-		root->window_pathkeys = NIL;
-
-	if (parse->distinctClause &&
-		grouping_is_sortable(parse->distinctClause))
-		root->distinct_pathkeys =
-			make_pathkeys_for_sortclauses(root,
-										  parse->distinctClause,
-										  tlist);
-	else
-		root->distinct_pathkeys = NIL;
-
-	root->sort_pathkeys =
-		make_pathkeys_for_sortclauses(root,
-									  parse->sortClause,
-									  tlist);
-
-	/*
-	 * Figure out whether we want a sorted result from query_planner.
-	 *
-	 * If we have a sortable GROUP BY clause, then we want a result sorted
-	 * properly for grouping.  Otherwise, if we have window functions to
-	 * evaluate, we try to sort for the first window.  Otherwise, if there's a
-	 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
-	 * we try to produce output that's sufficiently well sorted for the
-	 * DISTINCT.  Otherwise, if there is an ORDER BY clause, we want to sort
-	 * by the ORDER BY clause.
-	 *
-	 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
-	 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
-	 * that might just leave us failing to exploit an available sort order at
-	 * all.  Needs more thought.  The choice for DISTINCT versus ORDER BY is
-	 * much easier, since we know that the parser ensured that one is a
-	 * superset of the other.
-	 */
-	if (root->group_pathkeys)
-		root->query_pathkeys = root->group_pathkeys;
-	else if (root->window_pathkeys)
-		root->query_pathkeys = root->window_pathkeys;
-	else if (list_length(root->distinct_pathkeys) >
-			 list_length(root->sort_pathkeys))
-		root->query_pathkeys = root->distinct_pathkeys;
-	else if (root->sort_pathkeys)
-		root->query_pathkeys = root->sort_pathkeys;
-	else
-		root->query_pathkeys = NIL;
-}
-
-/*
- * Estimate number of groups produced by grouping clauses (1 if not grouping)
- *
- * path_rows: number of output rows from scan/join step
- * gd: grouping sets data including list of grouping sets and their clauses
- * target_list: target list containing group clause references
- *
- * If doing grouping sets, we also annotate the gsets data with the estimates
- * for each set and each individual rollup list, with a view to later
- * determining whether some combination of them could be hashed instead.
- */
-static double
-get_number_of_groups(PlannerInfo *root,
-					 double path_rows,
-					 grouping_sets_data *gd,
-					 List *target_list)
-{
-	Query	   *parse = root->parse;
-	double		dNumGroups;
-
-	if (parse->groupClause)
-	{
-		List	   *groupExprs;
-
-		if (parse->groupingSets)
-		{
-			/* Add up the estimates for each grouping set */
-			ListCell   *lc;
-			ListCell   *lc2;
-
-			Assert(gd);			/* keep Coverity happy */
-
-			dNumGroups = 0;
-
-			foreach(lc, gd->rollups)
-			{
-				RollupData *rollup = lfirst_node(RollupData, lc);
-				ListCell   *lc;
-
-				groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
-													 target_list);
-
-				rollup->numGroups = 0.0;
-
-				forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
-				{
-					List	   *gset = (List *) lfirst(lc);
-					GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
-					double		numGroups = estimate_num_groups(root,
-																groupExprs,
-																path_rows,
-																&gset);
-
-					gs->numGroups = numGroups;
-					rollup->numGroups += numGroups;
-				}
-
-				dNumGroups += rollup->numGroups;
-			}
-
-			if (gd->hash_sets_idx)
-			{
-				ListCell   *lc;
-
-				gd->dNumHashGroups = 0;
-
-				groupExprs = get_sortgrouplist_exprs(parse->groupClause,
-													 target_list);
-
-				forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
-				{
-					List	   *gset = (List *) lfirst(lc);
-					GroupingSetData *gs = lfirst_node(GroupingSetData, lc2);
-					double		numGroups = estimate_num_groups(root,
-																groupExprs,
-																path_rows,
-																&gset);
-
-					gs->numGroups = numGroups;
-					gd->dNumHashGroups += numGroups;
-				}
-
-				dNumGroups += gd->dNumHashGroups;
-			}
-		}
-		else
-		{
-			/* Plain GROUP BY */
-			groupExprs = get_sortgrouplist_exprs(parse->groupClause,
-												 target_list);
-
-			dNumGroups = estimate_num_groups(root, groupExprs, path_rows,
-											 NULL);
-		}
-	}
-	else if (parse->groupingSets)
-	{
-		/* Empty grouping sets ... one result row for each one */
-		dNumGroups = list_length(parse->groupingSets);
-	}
-	else if (parse->hasAggs || root->hasHavingQual)
-	{
-		/* Plain aggregation, one result row */
-		dNumGroups = 1;
-	}
-	else
-	{
-		/* Not grouping */
-		dNumGroups = 1;
-	}
-
-	return dNumGroups;
-}
-
-/*
- * estimate_hashagg_tablesize
- *	  estimate the number of bytes that a hash aggregate hashtable will
- *	  require based on the agg_costs, path width and dNumGroups.
- *
- * XXX this may be over-estimating the size now that hashagg knows to omit
- * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
- * grouping columns not in the hashed set are counted here even though hashagg
- * won't store them. Is this a problem?
- */
-static Size
-estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
-						   double dNumGroups)
-{
-	Size		hashentrysize;
-
-	/* Estimate per-hash-entry space at tuple width... */
-	hashentrysize = MAXALIGN(path->pathtarget->width) +
-		MAXALIGN(SizeofMinimalTupleHeader);
-
-	/* plus space for pass-by-ref transition values... */
-	hashentrysize += agg_costs->transitionSpace;
-	/* plus the per-hash-entry overhead */
-	hashentrysize += hash_agg_entry_size(agg_costs->numAggs);
-
-	/*
-	 * Note that this disregards the effect of fill-factor and growth policy
-	 * of the hash-table. That's probably ok, given default the default
-	 * fill-factor is relatively high. It'd be hard to meaningfully factor in
-	 * "double-in-size" growth policies here.
-	 */
-	return hashentrysize * dNumGroups;
-}
-
-/*
- * create_grouping_paths
- *
- * Build a new upperrel containing Paths for grouping and/or aggregation.
- * Along the way, we also build an upperrel for Paths which are partially
- * grouped and/or aggregated.  A partially grouped and/or aggregated path
- * needs a FinalizeAggregate node to complete the aggregation.  Currently,
- * the only partially grouped paths we build are also partial paths; that
- * is, they need a Gather and then a FinalizeAggregate.
- *
- * input_rel: contains the source-data Paths
- * target: the pathtarget for the result Paths to compute
- * agg_costs: cost info about all aggregates in query (in AGGSPLIT_SIMPLE mode)
- * gd: grouping sets data including list of grouping sets and their clauses
- *
- * Note: all Paths in input_rel are expected to return the target computed
- * by make_group_input_target.
- */
-static RelOptInfo *
-create_grouping_paths(PlannerInfo *root,
-					  RelOptInfo *input_rel,
-					  PathTarget *target,
-					  bool target_parallel_safe,
-					  const AggClauseCosts *agg_costs,
-					  grouping_sets_data *gd)
-{
-	Query	   *parse = root->parse;
-	RelOptInfo *grouped_rel;
-	RelOptInfo *partially_grouped_rel;
-
-	/*
-	 * Create grouping relation to hold fully aggregated grouping and/or
-	 * aggregation paths.
-	 */
-	grouped_rel = make_grouping_rel(root, input_rel, target,
-									target_parallel_safe, parse->havingQual);
-
-	/*
-	 * Create either paths for a degenerate grouping or paths for ordinary
-	 * grouping, as appropriate.
-	 */
-	if (is_degenerate_grouping(root))
-		create_degenerate_grouping_paths(root, input_rel, grouped_rel);
-	else
-	{
-		int			flags = 0;
-		GroupPathExtraData extra;
-
-		/*
-		 * Determine whether it's possible to perform sort-based
-		 * implementations of grouping.  (Note that if groupClause is empty,
-		 * grouping_is_sortable() is trivially true, and all the
-		 * pathkeys_contained_in() tests will succeed too, so that we'll
-		 * consider every surviving input path.)
-		 *
-		 * If we have grouping sets, we might be able to sort some but not all
-		 * of them; in this case, we need can_sort to be true as long as we
-		 * must consider any sorted-input plan.
-		 */
-		if ((gd && gd->rollups != NIL)
-			|| grouping_is_sortable(parse->groupClause))
-			flags |= GROUPING_CAN_USE_SORT;
-
-		/*
-		 * Determine whether we should consider hash-based implementations of
-		 * grouping.
-		 *
-		 * Hashed aggregation only applies if we're grouping. If we have
-		 * grouping sets, some groups might be hashable but others not; in
-		 * this case we set can_hash true as long as there is nothing globally
-		 * preventing us from hashing (and we should therefore consider plans
-		 * with hashes).
-		 *
-		 * Executor doesn't support hashed aggregation with DISTINCT or ORDER
-		 * BY aggregates.  (Doing so would imply storing *all* the input
-		 * values in the hash table, and/or running many sorts in parallel,
-		 * either of which seems like a certain loser.)  We similarly don't
-		 * support ordered-set aggregates in hashed aggregation, but that case
-		 * is also included in the numOrderedAggs count.
-		 *
-		 * Note: grouping_is_hashable() is much more expensive to check than
-		 * the other gating conditions, so we want to do it last.
-		 */
-		if ((parse->groupClause != NIL &&
-			 agg_costs->numOrderedAggs == 0 &&
-			 (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause))))
-			flags |= GROUPING_CAN_USE_HASH;
-
-		/*
-		 * Determine whether partial aggregation is possible.
-		 */
-		if (can_partial_agg(root, agg_costs))
-			flags |= GROUPING_CAN_PARTIAL_AGG;
-
-		extra.flags = flags;
-		extra.target_parallel_safe = target_parallel_safe;
-		extra.havingQual = parse->havingQual;
-		extra.targetList = parse->targetList;
-		extra.partial_costs_set = false;
-
-		/*
-		 * Determine whether partitionwise aggregation is in theory possible.
-		 * It can be disabled by the user, and for now, we don't try to
-		 * support grouping sets.  create_ordinary_grouping_paths() will check
-		 * additional conditions, such as whether input_rel is partitioned.
-		 */
-		if (enable_partitionwise_aggregate && !parse->groupingSets)
-			extra.patype = PARTITIONWISE_AGGREGATE_FULL;
-		else
-			extra.patype = PARTITIONWISE_AGGREGATE_NONE;
-
-		create_ordinary_grouping_paths(root, input_rel, grouped_rel,
-									   agg_costs, gd, &extra,
-									   &partially_grouped_rel);
-	}
-
-	set_cheapest(grouped_rel);
-	return grouped_rel;
-}
-
-/*
- * make_grouping_rel
- *
- * Create a new grouping rel and set basic properties.
- *
- * input_rel represents the underlying scan/join relation.
- * target is the output expected from the grouping relation.
- */
-static RelOptInfo *
-make_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
-				  PathTarget *target, bool target_parallel_safe,
-				  Node *havingQual)
-{
-	RelOptInfo *grouped_rel;
-
-	if (IS_OTHER_REL(input_rel))
-	{
-		grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG,
-									  input_rel->relids);
-		grouped_rel->reloptkind = RELOPT_OTHER_UPPER_REL;
-	}
-	else
-	{
-		/*
-		 * By tradition, the relids set for the main grouping relation is
-		 * NULL.  (This could be changed, but might require adjustments
-		 * elsewhere.)
-		 */
-		grouped_rel = fetch_upper_rel(root, UPPERREL_GROUP_AGG, NULL);
-	}
-
-	/* Set target. */
-	grouped_rel->reltarget = target;
-
-	/*
-	 * If the input relation is not parallel-safe, then the grouped relation
-	 * can't be parallel-safe, either.  Otherwise, it's parallel-safe if the
-	 * target list and HAVING quals are parallel-safe.
-	 */
-	if (input_rel->consider_parallel && target_parallel_safe &&
-		is_parallel_safe(root, (Node *) havingQual))
-		grouped_rel->consider_parallel = true;
-
-	/*
-	 * If the input rel belongs to a single FDW, so does the grouped rel.
-	 */
-	grouped_rel->serverid = input_rel->serverid;
-	grouped_rel->userid = input_rel->userid;
-	grouped_rel->useridiscurrent = input_rel->useridiscurrent;
-	grouped_rel->fdwroutine = input_rel->fdwroutine;
-
-	return grouped_rel;
-}
-
-/*
- * is_degenerate_grouping
- *
- * A degenerate grouping is one in which the query has a HAVING qual and/or
- * grouping sets, but no aggregates and no GROUP BY (which implies that the
- * grouping sets are all empty).
- */
-static bool
-is_degenerate_grouping(PlannerInfo *root)
-{
-	Query	   *parse = root->parse;
-
-	return (root->hasHavingQual || parse->groupingSets) &&
-		!parse->hasAggs && parse->groupClause == NIL;
-}
-
-/*
- * create_degenerate_grouping_paths
- *
- * When the grouping is degenerate (see is_degenerate_grouping), we are
- * supposed to emit either zero or one row for each grouping set depending on
- * whether HAVING succeeds.  Furthermore, there cannot be any variables in
- * either HAVING or the targetlist, so we actually do not need the FROM table
- * at all! We can just throw away the plan-so-far and generate a Result node.
- * This is a sufficiently unusual corner case that it's not worth contorting
- * the structure of this module to avoid having to generate the earlier paths
- * in the first place.
- */
-static void
-create_degenerate_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
-								 RelOptInfo *grouped_rel)
-{
-	Query	   *parse = root->parse;
-	int			nrows;
-	Path	   *path;
-
-	nrows = list_length(parse->groupingSets);
-	if (nrows > 1)
-	{
-		/*
-		 * Doesn't seem worthwhile writing code to cons up a generate_series
-		 * or a values scan to emit multiple rows. Instead just make N clones
-		 * and append them.  (With a volatile HAVING clause, this means you
-		 * might get between 0 and N output rows. Offhand I think that's
-		 * desired.)
-		 */
-		List	   *paths = NIL;
-
-		while (--nrows >= 0)
-		{
-			path = (Path *)
-				create_result_path(root, grouped_rel,
-								   grouped_rel->reltarget,
-								   (List *) parse->havingQual);
-			paths = lappend(paths, path);
-		}
-		path = (Path *)
-			create_append_path(root,
-							   grouped_rel,
-							   paths,
-							   NIL,
-							   NULL,
-							   0,
-							   false,
-							   NIL,
-							   -1);
-	}
-	else
-	{
-		/* No grouping sets, or just one, so one output row */
-		path = (Path *)
-			create_result_path(root, grouped_rel,
-							   grouped_rel->reltarget,
-							   (List *) parse->havingQual);
-	}
-
-	add_path(grouped_rel, path);
-}
-
-/*
- * create_ordinary_grouping_paths
- *
- * Create grouping paths for the ordinary (that is, non-degenerate) case.
- *
- * We need to consider sorted and hashed aggregation in the same function,
- * because otherwise (1) it would be harder to throw an appropriate error
- * message if neither way works, and (2) we should not allow hashtable size
- * considerations to dissuade us from using hashing if sorting is not possible.
- *
- * *partially_grouped_rel_p will be set to the partially grouped rel which this
- * function creates, or to NULL if it doesn't create one.
- */
-static void
-create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
-							   RelOptInfo *grouped_rel,
-							   const AggClauseCosts *agg_costs,
-							   grouping_sets_data *gd,
-							   GroupPathExtraData *extra,
-							   RelOptInfo **partially_grouped_rel_p)
-{
-	Path	   *cheapest_path = input_rel->cheapest_total_path;
-	RelOptInfo *partially_grouped_rel = NULL;
-	double		dNumGroups;
-	PartitionwiseAggregateType patype = PARTITIONWISE_AGGREGATE_NONE;
-
-	/*
-	 * If this is the topmost grouping relation or if the parent relation is
-	 * doing some form of partitionwise aggregation, then we may be able to do
-	 * it at this level also.  However, if the input relation is not
-	 * partitioned, partitionwise aggregate is impossible, and if it is dummy,
-	 * partitionwise aggregate is pointless.
-	 */
-	if (extra->patype != PARTITIONWISE_AGGREGATE_NONE &&
-		input_rel->part_scheme && input_rel->part_rels &&
-		!IS_DUMMY_REL(input_rel))
-	{
-		/*
-		 * If this is the topmost relation or if the parent relation is doing
-		 * full partitionwise aggregation, then we can do full partitionwise
-		 * aggregation provided that the GROUP BY clause contains all of the
-		 * partitioning columns at this level.  Otherwise, we can do at most
-		 * partial partitionwise aggregation.  But if partial aggregation is
-		 * not supported in general then we can't use it for partitionwise
-		 * aggregation either.
-		 */
-		if (extra->patype == PARTITIONWISE_AGGREGATE_FULL &&
-			group_by_has_partkey(input_rel, extra->targetList,
-								 root->parse->groupClause))
-			patype = PARTITIONWISE_AGGREGATE_FULL;
-		else if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
-			patype = PARTITIONWISE_AGGREGATE_PARTIAL;
-		else
-			patype = PARTITIONWISE_AGGREGATE_NONE;
-	}
-
-	/*
-	 * Before generating paths for grouped_rel, we first generate any possible
-	 * partially grouped paths; that way, later code can easily consider both
-	 * parallel and non-parallel approaches to grouping.
-	 */
-	if ((extra->flags & GROUPING_CAN_PARTIAL_AGG) != 0)
-	{
-		bool		force_rel_creation;
-
-		/*
-		 * If we're doing partitionwise aggregation at this level, force
-		 * creation of a partially_grouped_rel so we can add partitionwise
-		 * paths to it.
-		 */
-		force_rel_creation = (patype == PARTITIONWISE_AGGREGATE_PARTIAL);
-
-		partially_grouped_rel =
-			create_partial_grouping_paths(root,
-										  grouped_rel,
-										  input_rel,
-										  gd,
-										  extra,
-										  force_rel_creation);
-	}
-
-	/* Set out parameter. */
-	*partially_grouped_rel_p = partially_grouped_rel;
-
-	/* Apply partitionwise aggregation technique, if possible. */
-	if (patype != PARTITIONWISE_AGGREGATE_NONE)
-		create_partitionwise_grouping_paths(root, input_rel, grouped_rel,
-											partially_grouped_rel, agg_costs,
-											gd, patype, extra);
-
-	/* If we are doing partial aggregation only, return. */
-	if (extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
-	{
-		Assert(partially_grouped_rel);
-
-		if (partially_grouped_rel->pathlist)
-			set_cheapest(partially_grouped_rel);
-
-		return;
-	}
-
-	/* Gather any partially grouped partial paths. */
-	if (partially_grouped_rel && partially_grouped_rel->partial_pathlist)
-	{
-		gather_grouping_paths(root, partially_grouped_rel);
-		set_cheapest(partially_grouped_rel);
-	}
-
-	/*
-	 * Estimate number of groups.
-	 */
-	dNumGroups = get_number_of_groups(root,
-									  cheapest_path->rows,
-									  gd,
-									  extra->targetList);
-
-	/* Build final grouping paths */
-	add_paths_to_grouping_rel(root, input_rel, grouped_rel,
-							  partially_grouped_rel, agg_costs, gd,
-							  dNumGroups, extra);
-
-	/* Give a helpful error if we failed to find any implementation */
-	if (grouped_rel->pathlist == NIL)
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("could not implement GROUP BY"),
-				 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
-
-	/*
-	 * If there is an FDW that's responsible for all baserels of the query,
-	 * let it consider adding ForeignPaths.
-	 */
-	if (grouped_rel->fdwroutine &&
-		grouped_rel->fdwroutine->GetForeignUpperPaths)
-		grouped_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_GROUP_AGG,
-													  input_rel, grouped_rel,
-													  extra);
-
-	/* Let extensions possibly add some more paths */
-	if (create_upper_paths_hook)
-		(*create_upper_paths_hook) (root, UPPERREL_GROUP_AGG,
-									input_rel, grouped_rel,
-									extra);
-}
-
-/*
- * For a given input path, consider the possible ways of doing grouping sets on
- * it, by combinations of hashing and sorting.  This can be called multiple
- * times, so it's important that it not scribble on input.  No result is
- * returned, but any generated paths are added to grouped_rel.
- */
-static void
-consider_groupingsets_paths(PlannerInfo *root,
-							RelOptInfo *grouped_rel,
-							Path *path,
-							bool is_sorted,
-							bool can_hash,
-							grouping_sets_data *gd,
-							const AggClauseCosts *agg_costs,
-							double dNumGroups)
-{
-	Query	   *parse = root->parse;
-
-	/*
-	 * If we're not being offered sorted input, then only consider plans that
-	 * can be done entirely by hashing.
-	 *
-	 * We can hash everything if it looks like it'll fit in work_mem. But if
-	 * the input is actually sorted despite not being advertised as such, we
-	 * prefer to make use of that in order to use less memory.
-	 *
-	 * If none of the grouping sets are sortable, then ignore the work_mem
-	 * limit and generate a path anyway, since otherwise we'll just fail.
-	 */
-	if (!is_sorted)
-	{
-		List	   *new_rollups = NIL;
-		RollupData *unhashed_rollup = NULL;
-		List	   *sets_data;
-		List	   *empty_sets_data = NIL;
-		List	   *empty_sets = NIL;
-		ListCell   *lc;
-		ListCell   *l_start = list_head(gd->rollups);
-		AggStrategy strat = AGG_HASHED;
-		Size		hashsize;
-		double		exclude_groups = 0.0;
-
-		Assert(can_hash);
-
-		/*
-		 * If the input is coincidentally sorted usefully (which can happen
-		 * even if is_sorted is false, since that only means that our caller
-		 * has set up the sorting for us), then save some hashtable space by
-		 * making use of that. But we need to watch out for degenerate cases:
-		 *
-		 * 1) If there are any empty grouping sets, then group_pathkeys might
-		 * be NIL if all non-empty grouping sets are unsortable. In this case,
-		 * there will be a rollup containing only empty groups, and the
-		 * pathkeys_contained_in test is vacuously true; this is ok.
-		 *
-		 * XXX: the above relies on the fact that group_pathkeys is generated
-		 * from the first rollup. If we add the ability to consider multiple
-		 * sort orders for grouping input, this assumption might fail.
-		 *
-		 * 2) If there are no empty sets and only unsortable sets, then the
-		 * rollups list will be empty (and thus l_start == NULL), and
-		 * group_pathkeys will be NIL; we must ensure that the vacuously-true
-		 * pathkeys_contain_in test doesn't cause us to crash.
-		 */
-		if (l_start != NULL &&
-			pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
-		{
-			unhashed_rollup = lfirst_node(RollupData, l_start);
-			exclude_groups = unhashed_rollup->numGroups;
-			l_start = lnext(l_start);
-		}
-
-		hashsize = estimate_hashagg_tablesize(path,
-											  agg_costs,
-											  dNumGroups - exclude_groups);
-
-		/*
-		 * gd->rollups is empty if we have only unsortable columns to work
-		 * with.  Override work_mem in that case; otherwise, we'll rely on the
-		 * sorted-input case to generate usable mixed paths.
-		 */
-		if (hashsize > work_mem * 1024L && gd->rollups)
-			return;				/* nope, won't fit */
-
-		/*
-		 * We need to burst the existing rollups list into individual grouping
-		 * sets and recompute a groupClause for each set.
-		 */
-		sets_data = list_copy(gd->unsortable_sets);
-
-		for_each_cell(lc, l_start)
-		{
-			RollupData *rollup = lfirst_node(RollupData, lc);
-
-			/*
-			 * If we find an unhashable rollup that's not been skipped by the
-			 * "actually sorted" check above, we can't cope; we'd need sorted
-			 * input (with a different sort order) but we can't get that here.
-			 * So bail out; we'll get a valid path from the is_sorted case
-			 * instead.
-			 *
-			 * The mere presence of empty grouping sets doesn't make a rollup
-			 * unhashable (see preprocess_grouping_sets), we handle those
-			 * specially below.
-			 */
-			if (!rollup->hashable)
-				return;
-			else
-				sets_data = list_concat(sets_data, list_copy(rollup->gsets_data));
-		}
-		foreach(lc, sets_data)
-		{
-			GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
-			List	   *gset = gs->set;
-			RollupData *rollup;
-
-			if (gset == NIL)
-			{
-				/* Empty grouping sets can't be hashed. */
-				empty_sets_data = lappend(empty_sets_data, gs);
-				empty_sets = lappend(empty_sets, NIL);
-			}
-			else
-			{
-				rollup = makeNode(RollupData);
-
-				rollup->groupClause = preprocess_groupclause(root, gset);
-				rollup->gsets_data = list_make1(gs);
-				rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
-														 rollup->gsets_data,
-														 gd->tleref_to_colnum_map);
-				rollup->numGroups = gs->numGroups;
-				rollup->hashable = true;
-				rollup->is_hashed = true;
-				new_rollups = lappend(new_rollups, rollup);
-			}
-		}
-
-		/*
-		 * If we didn't find anything nonempty to hash, then bail.  We'll
-		 * generate a path from the is_sorted case.
-		 */
-		if (new_rollups == NIL)
-			return;
-
-		/*
-		 * If there were empty grouping sets they should have been in the
-		 * first rollup.
-		 */
-		Assert(!unhashed_rollup || !empty_sets);
-
-		if (unhashed_rollup)
-		{
-			new_rollups = lappend(new_rollups, unhashed_rollup);
-			strat = AGG_MIXED;
-		}
-		else if (empty_sets)
-		{
-			RollupData *rollup = makeNode(RollupData);
-
-			rollup->groupClause = NIL;
-			rollup->gsets_data = empty_sets_data;
-			rollup->gsets = empty_sets;
-			rollup->numGroups = list_length(empty_sets);
-			rollup->hashable = false;
-			rollup->is_hashed = false;
-			new_rollups = lappend(new_rollups, rollup);
-			strat = AGG_MIXED;
-		}
-
-		add_path(grouped_rel, (Path *)
-				 create_groupingsets_path(root,
-										  grouped_rel,
-										  path,
-										  (List *) parse->havingQual,
-										  strat,
-										  new_rollups,
-										  agg_costs,
-										  dNumGroups));
-		return;
-	}
-
-	/*
-	 * If we have sorted input but nothing we can do with it, bail.
-	 */
-	if (list_length(gd->rollups) == 0)
-		return;
-
-	/*
-	 * Given sorted input, we try and make two paths: one sorted and one mixed
-	 * sort/hash. (We need to try both because hashagg might be disabled, or
-	 * some columns might not be sortable.)
-	 *
-	 * can_hash is passed in as false if some obstacle elsewhere (such as
-	 * ordered aggs) means that we shouldn't consider hashing at all.
-	 */
-	if (can_hash && gd->any_hashable)
-	{
-		List	   *rollups = NIL;
-		List	   *hash_sets = list_copy(gd->unsortable_sets);
-		double		availspace = (work_mem * 1024.0);
-		ListCell   *lc;
-
-		/*
-		 * Account first for space needed for groups we can't sort at all.
-		 */
-		availspace -= (double) estimate_hashagg_tablesize(path,
-														  agg_costs,
-														  gd->dNumHashGroups);
-
-		if (availspace > 0 && list_length(gd->rollups) > 1)
-		{
-			double		scale;
-			int			num_rollups = list_length(gd->rollups);
-			int			k_capacity;
-			int		   *k_weights = palloc(num_rollups * sizeof(int));
-			Bitmapset  *hash_items = NULL;
-			int			i;
-
-			/*
-			 * We treat this as a knapsack problem: the knapsack capacity
-			 * represents work_mem, the item weights are the estimated memory
-			 * usage of the hashtables needed to implement a single rollup,
-			 * and we really ought to use the cost saving as the item value;
-			 * however, currently the costs assigned to sort nodes don't
-			 * reflect the comparison costs well, and so we treat all items as
-			 * of equal value (each rollup we hash instead saves us one sort).
-			 *
-			 * To use the discrete knapsack, we need to scale the values to a
-			 * reasonably small bounded range.  We choose to allow a 5% error
-			 * margin; we have no more than 4096 rollups in the worst possible
-			 * case, which with a 5% error margin will require a bit over 42MB
-			 * of workspace. (Anyone wanting to plan queries that complex had
-			 * better have the memory for it.  In more reasonable cases, with
-			 * no more than a couple of dozen rollups, the memory usage will
-			 * be negligible.)
-			 *
-			 * k_capacity is naturally bounded, but we clamp the values for
-			 * scale and weight (below) to avoid overflows or underflows (or
-			 * uselessly trying to use a scale factor less than 1 byte).
-			 */
-			scale = Max(availspace / (20.0 * num_rollups), 1.0);
-			k_capacity = (int) floor(availspace / scale);
-
-			/*
-			 * We leave the first rollup out of consideration since it's the
-			 * one that matches the input sort order.  We assign indexes "i"
-			 * to only those entries considered for hashing; the second loop,
-			 * below, must use the same condition.
-			 */
-			i = 0;
-			for_each_cell(lc, lnext(list_head(gd->rollups)))
-			{
-				RollupData *rollup = lfirst_node(RollupData, lc);
-
-				if (rollup->hashable)
-				{
-					double		sz = estimate_hashagg_tablesize(path,
-																agg_costs,
-																rollup->numGroups);
-
-					/*
-					 * If sz is enormous, but work_mem (and hence scale) is
-					 * small, avoid integer overflow here.
-					 */
-					k_weights[i] = (int) Min(floor(sz / scale),
-											 k_capacity + 1.0);
-					++i;
-				}
-			}
-
-			/*
-			 * Apply knapsack algorithm; compute the set of items which
-			 * maximizes the value stored (in this case the number of sorts
-			 * saved) while keeping the total size (approximately) within
-			 * capacity.
-			 */
-			if (i > 0)
-				hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
-
-			if (!bms_is_empty(hash_items))
-			{
-				rollups = list_make1(linitial(gd->rollups));
-
-				i = 0;
-				for_each_cell(lc, lnext(list_head(gd->rollups)))
-				{
-					RollupData *rollup = lfirst_node(RollupData, lc);
-
-					if (rollup->hashable)
-					{
-						if (bms_is_member(i, hash_items))
-							hash_sets = list_concat(hash_sets,
-													list_copy(rollup->gsets_data));
-						else
-							rollups = lappend(rollups, rollup);
-						++i;
-					}
-					else
-						rollups = lappend(rollups, rollup);
-				}
-			}
-		}
-
-		if (!rollups && hash_sets)
-			rollups = list_copy(gd->rollups);
-
-		foreach(lc, hash_sets)
-		{
-			GroupingSetData *gs = lfirst_node(GroupingSetData, lc);
-			RollupData *rollup = makeNode(RollupData);
-
-			Assert(gs->set != NIL);
-
-			rollup->groupClause = preprocess_groupclause(root, gs->set);
-			rollup->gsets_data = list_make1(gs);
-			rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
-													 rollup->gsets_data,
-													 gd->tleref_to_colnum_map);
-			rollup->numGroups = gs->numGroups;
-			rollup->hashable = true;
-			rollup->is_hashed = true;
-			rollups = lcons(rollup, rollups);
-		}
-
-		if (rollups)
-		{
-			add_path(grouped_rel, (Path *)
-					 create_groupingsets_path(root,
-											  grouped_rel,
-											  path,
-											  (List *) parse->havingQual,
-											  AGG_MIXED,
-											  rollups,
-											  agg_costs,
-											  dNumGroups));
-		}
-	}
-
-	/*
-	 * Now try the simple sorted case.
-	 */
-	if (!gd->unsortable_sets)
-		add_path(grouped_rel, (Path *)
-				 create_groupingsets_path(root,
-										  grouped_rel,
-										  path,
-										  (List *) parse->havingQual,
-										  AGG_SORTED,
-										  gd->rollups,
-										  agg_costs,
-										  dNumGroups));
-}
-
-/*
- * create_window_paths
- *
- * Build a new upperrel containing Paths for window-function evaluation.
- *
- * input_rel: contains the source-data Paths
- * input_target: result of make_window_input_target
- * output_target: what the topmost WindowAggPath should return
- * tlist: query's target list (needed to look up pathkeys)
- * wflists: result of find_window_functions
- * activeWindows: result of select_active_windows
- *
- * Note: all Paths in input_rel are expected to return input_target.
- */
-static RelOptInfo *
-create_window_paths(PlannerInfo *root,
-					RelOptInfo *input_rel,
-					PathTarget *input_target,
-					PathTarget *output_target,
-					bool output_target_parallel_safe,
-					List *tlist,
-					WindowFuncLists *wflists,
-					List *activeWindows)
-{
-	RelOptInfo *window_rel;
-	ListCell   *lc;
-
-	/* For now, do all work in the (WINDOW, NULL) upperrel */
-	window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
-
-	/*
-	 * If the input relation is not parallel-safe, then the window relation
-	 * can't be parallel-safe, either.  Otherwise, we need to examine the
-	 * target list and active windows for non-parallel-safe constructs.
-	 */
-	if (input_rel->consider_parallel && output_target_parallel_safe &&
-		is_parallel_safe(root, (Node *) activeWindows))
-		window_rel->consider_parallel = true;
-
-	/*
-	 * If the input rel belongs to a single FDW, so does the window rel.
-	 */
-	window_rel->serverid = input_rel->serverid;
-	window_rel->userid = input_rel->userid;
-	window_rel->useridiscurrent = input_rel->useridiscurrent;
-	window_rel->fdwroutine = input_rel->fdwroutine;
-
-	/*
-	 * Consider computing window functions starting from the existing
-	 * cheapest-total path (which will likely require a sort) as well as any
-	 * existing paths that satisfy root->window_pathkeys (which won't).
-	 */
-	foreach(lc, input_rel->pathlist)
-	{
-		Path	   *path = (Path *) lfirst(lc);
-
-		if (path == input_rel->cheapest_total_path ||
-			pathkeys_contained_in(root->window_pathkeys, path->pathkeys))
-			create_one_window_path(root,
-								   window_rel,
-								   path,
-								   input_target,
-								   output_target,
-								   tlist,
-								   wflists,
-								   activeWindows);
-	}
-
-	/*
-	 * If there is an FDW that's responsible for all baserels of the query,
-	 * let it consider adding ForeignPaths.
-	 */
-	if (window_rel->fdwroutine &&
-		window_rel->fdwroutine->GetForeignUpperPaths)
-		window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
-													 input_rel, window_rel,
-													 NULL);
-
-	/* Let extensions possibly add some more paths */
-	if (create_upper_paths_hook)
-		(*create_upper_paths_hook) (root, UPPERREL_WINDOW,
-									input_rel, window_rel, NULL);
-
-	/* Now choose the best path(s) */
-	set_cheapest(window_rel);
-
-	return window_rel;
-}
-
-/*
- * Stack window-function implementation steps atop the given Path, and
- * add the result to window_rel.
- *
- * window_rel: upperrel to contain result
- * path: input Path to use (must return input_target)
- * input_target: result of make_window_input_target
- * output_target: what the topmost WindowAggPath should return
- * tlist: query's target list (needed to look up pathkeys)
- * wflists: result of find_window_functions
- * activeWindows: result of select_active_windows
- */
-static void
-create_one_window_path(PlannerInfo *root,
-					   RelOptInfo *window_rel,
-					   Path *path,
-					   PathTarget *input_target,
-					   PathTarget *output_target,
-					   List *tlist,
-					   WindowFuncLists *wflists,
-					   List *activeWindows)
-{
-	PathTarget *window_target;
-	ListCell   *l;
-
-	/*
-	 * Since each window clause could require a different sort order, we stack
-	 * up a WindowAgg node for each clause, with sort steps between them as
-	 * needed.  (We assume that select_active_windows chose a good order for
-	 * executing the clauses in.)
-	 *
-	 * input_target should contain all Vars and Aggs needed for the result.
-	 * (In some cases we wouldn't need to propagate all of these all the way
-	 * to the top, since they might only be needed as inputs to WindowFuncs.
-	 * It's probably not worth trying to optimize that though.)  It must also
-	 * contain all window partitioning and sorting expressions, to ensure
-	 * they're computed only once at the bottom of the stack (that's critical
-	 * for volatile functions).  As we climb up the stack, we'll add outputs
-	 * for the WindowFuncs computed at each level.
-	 */
-	window_target = input_target;
-
-	foreach(l, activeWindows)
-	{
-		WindowClause *wc = lfirst_node(WindowClause, l);
-		List	   *window_pathkeys;
-
-		window_pathkeys = make_pathkeys_for_window(root,
-												   wc,
-												   tlist);
-
-		/* Sort if necessary */
-		if (!pathkeys_contained_in(window_pathkeys, path->pathkeys))
-		{
-			path = (Path *) create_sort_path(root, window_rel,
-											 path,
-											 window_pathkeys,
-											 -1.0);
-		}
-
-		if (lnext(l))
-		{
-			/*
-			 * Add the current WindowFuncs to the output target for this
-			 * intermediate WindowAggPath.  We must copy window_target to
-			 * avoid changing the previous path's target.
-			 *
-			 * Note: a WindowFunc adds nothing to the target's eval costs; but
-			 * we do need to account for the increase in tlist width.
-			 */
-			ListCell   *lc2;
-
-			window_target = copy_pathtarget(window_target);
-			foreach(lc2, wflists->windowFuncs[wc->winref])
-			{
-				WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
-
-				add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
-				window_target->width += get_typavgwidth(wfunc->wintype, -1);
-			}
-		}
-		else
-		{
-			/* Install the goal target in the topmost WindowAgg */
-			window_target = output_target;
-		}
-
-		path = (Path *)
-			create_windowagg_path(root, window_rel, path, window_target,
-								  wflists->windowFuncs[wc->winref],
-								  wc,
-								  window_pathkeys);
-	}
-
-	add_path(window_rel, path);
-}
-
-/*
- * create_distinct_paths
- *
- * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
- *
- * input_rel: contains the source-data Paths
- *
- * Note: input paths should already compute the desired pathtarget, since
- * Sort/Unique won't project anything.
- */
-static RelOptInfo *
-create_distinct_paths(PlannerInfo *root,
-					  RelOptInfo *input_rel)
-{
-	Query	   *parse = root->parse;
-	Path	   *cheapest_input_path = input_rel->cheapest_total_path;
-	RelOptInfo *distinct_rel;
-	double		numDistinctRows;
-	bool		allow_hash;
-	Path	   *path;
-	ListCell   *lc;
-
-	/* For now, do all work in the (DISTINCT, NULL) upperrel */
-	distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
-
-	/*
-	 * We don't compute anything at this level, so distinct_rel will be
-	 * parallel-safe if the input rel is parallel-safe.  In particular, if
-	 * there is a DISTINCT ON (...) clause, any path for the input_rel will
-	 * output those expressions, and will not be parallel-safe unless those
-	 * expressions are parallel-safe.
-	 */
-	distinct_rel->consider_parallel = input_rel->consider_parallel;
-
-	/*
-	 * If the input rel belongs to a single FDW, so does the distinct_rel.
-	 */
-	distinct_rel->serverid = input_rel->serverid;
-	distinct_rel->userid = input_rel->userid;
-	distinct_rel->useridiscurrent = input_rel->useridiscurrent;
-	distinct_rel->fdwroutine = input_rel->fdwroutine;
-
-	/* Estimate number of distinct rows there will be */
-	if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
-		root->hasHavingQual)
-	{
-		/*
-		 * If there was grouping or aggregation, use the number of input rows
-		 * as the estimated number of DISTINCT rows (ie, assume the input is
-		 * already mostly unique).
-		 */
-		numDistinctRows = cheapest_input_path->rows;
-	}
-	else
-	{
-		/*
-		 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
-		 */
-		List	   *distinctExprs;
-
-		distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
-												parse->targetList);
-		numDistinctRows = estimate_num_groups(root, distinctExprs,
-											  cheapest_input_path->rows,
-											  NULL);
-	}
-
-	/*
-	 * Consider sort-based implementations of DISTINCT, if possible.
-	 */
-	if (grouping_is_sortable(parse->distinctClause))
-	{
-		/*
-		 * First, if we have any adequately-presorted paths, just stick a
-		 * Unique node on those.  Then consider doing an explicit sort of the
-		 * cheapest input path and Unique'ing that.
-		 *
-		 * When we have DISTINCT ON, we must sort by the more rigorous of
-		 * DISTINCT and ORDER BY, else it won't have the desired behavior.
-		 * Also, if we do have to do an explicit sort, we might as well use
-		 * the more rigorous ordering to avoid a second sort later.  (Note
-		 * that the parser will have ensured that one clause is a prefix of
-		 * the other.)
-		 */
-		List	   *needed_pathkeys;
-
-		if (parse->hasDistinctOn &&
-			list_length(root->distinct_pathkeys) <
-			list_length(root->sort_pathkeys))
-			needed_pathkeys = root->sort_pathkeys;
-		else
-			needed_pathkeys = root->distinct_pathkeys;
-
-		foreach(lc, input_rel->pathlist)
-		{
-			Path	   *path = (Path *) lfirst(lc);
-
-			if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
-			{
-				add_path(distinct_rel, (Path *)
-						 create_upper_unique_path(root, distinct_rel,
-												  path,
-												  list_length(root->distinct_pathkeys),
-												  numDistinctRows));
-			}
-		}
-
-		/* For explicit-sort case, always use the more rigorous clause */
-		if (list_length(root->distinct_pathkeys) <
-			list_length(root->sort_pathkeys))
-		{
-			needed_pathkeys = root->sort_pathkeys;
-			/* Assert checks that parser didn't mess up... */
-			Assert(pathkeys_contained_in(root->distinct_pathkeys,
-										 needed_pathkeys));
-		}
-		else
-			needed_pathkeys = root->distinct_pathkeys;
-
-		path = cheapest_input_path;
-		if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
-			path = (Path *) create_sort_path(root, distinct_rel,
-											 path,
-											 needed_pathkeys,
-											 -1.0);
-
-		add_path(distinct_rel, (Path *)
-				 create_upper_unique_path(root, distinct_rel,
-										  path,
-										  list_length(root->distinct_pathkeys),
-										  numDistinctRows));
-	}
-
-	/*
-	 * Consider hash-based implementations of DISTINCT, if possible.
-	 *
-	 * If we were not able to make any other types of path, we *must* hash or
-	 * die trying.  If we do have other choices, there are several things that
-	 * should prevent selection of hashing: if the query uses DISTINCT ON
-	 * (because it won't really have the expected behavior if we hash), or if
-	 * enable_hashagg is off, or if it looks like the hashtable will exceed
-	 * work_mem.
-	 *
-	 * Note: grouping_is_hashable() is much more expensive to check than the
-	 * other gating conditions, so we want to do it last.
-	 */
-	if (distinct_rel->pathlist == NIL)
-		allow_hash = true;		/* we have no alternatives */
-	else if (parse->hasDistinctOn || !enable_hashagg)
-		allow_hash = false;		/* policy-based decision not to hash */
-	else
-	{
-		Size		hashentrysize;
-
-		/* Estimate per-hash-entry space at tuple width... */
-		hashentrysize = MAXALIGN(cheapest_input_path->pathtarget->width) +
-			MAXALIGN(SizeofMinimalTupleHeader);
-		/* plus the per-hash-entry overhead */
-		hashentrysize += hash_agg_entry_size(0);
-
-		/* Allow hashing only if hashtable is predicted to fit in work_mem */
-		allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
-	}
-
-	if (allow_hash && grouping_is_hashable(parse->distinctClause))
-	{
-		/* Generate hashed aggregate path --- no sort needed */
-		add_path(distinct_rel, (Path *)
-				 create_agg_path(root,
-								 distinct_rel,
-								 cheapest_input_path,
-								 cheapest_input_path->pathtarget,
-								 AGG_HASHED,
-								 AGGSPLIT_SIMPLE,
-								 parse->distinctClause,
-								 NIL,
-								 NULL,
-								 numDistinctRows));
-	}
-
-	/* Give a helpful error if we failed to find any implementation */
-	if (distinct_rel->pathlist == NIL)
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("could not implement DISTINCT"),
-				 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
-
-	/*
-	 * If there is an FDW that's responsible for all baserels of the query,
-	 * let it consider adding ForeignPaths.
+	 * If we have a sortable GROUP BY clause, then we want a result sorted
+	 * properly for grouping.  Otherwise, if we have window functions to
+	 * evaluate, we try to sort for the first window.  Otherwise, if there's a
+	 * sortable DISTINCT clause that's more rigorous than the ORDER BY clause,
+	 * we try to produce output that's sufficiently well sorted for the
+	 * DISTINCT.  Otherwise, if there is an ORDER BY clause, we want to sort
+	 * by the ORDER BY clause.
+	 *
+	 * Note: if we have both ORDER BY and GROUP BY, and ORDER BY is a superset
+	 * of GROUP BY, it would be tempting to request sort by ORDER BY --- but
+	 * that might just leave us failing to exploit an available sort order at
+	 * all.  Needs more thought.  The choice for DISTINCT versus ORDER BY is
+	 * much easier, since we know that the parser ensured that one is a
+	 * superset of the other.
 	 */
-	if (distinct_rel->fdwroutine &&
-		distinct_rel->fdwroutine->GetForeignUpperPaths)
-		distinct_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_DISTINCT,
-													   input_rel, distinct_rel,
-													   NULL);
-
-	/* Let extensions possibly add some more paths */
-	if (create_upper_paths_hook)
-		(*create_upper_paths_hook) (root, UPPERREL_DISTINCT,
-									input_rel, distinct_rel, NULL);
-
-	/* Now choose the best path(s) */
-	set_cheapest(distinct_rel);
-
-	return distinct_rel;
+	if (root->group_pathkeys)
+		root->query_pathkeys = root->group_pathkeys;
+	else if (root->window_pathkeys)
+		root->query_pathkeys = root->window_pathkeys;
+	else if (list_length(root->distinct_pathkeys) >
+			 list_length(root->sort_pathkeys))
+		root->query_pathkeys = root->distinct_pathkeys;
+	else if (root->sort_pathkeys)
+		root->query_pathkeys = root->sort_pathkeys;
+	else
+		root->query_pathkeys = NIL;
 }
 
 /*
- * create_ordered_paths
- *
- * Build a new upperrel containing Paths for ORDER BY evaluation.
+ * create_window_paths
  *
- * All paths in the result must satisfy the ORDER BY ordering.
- * The only new path we need consider is an explicit sort on the
- * cheapest-total existing path.
+ * Build a new upperrel containing Paths for window-function evaluation.
  *
  * input_rel: contains the source-data Paths
- * target: the output tlist the result Paths must emit
- * limit_tuples: estimated bound on the number of output tuples,
- *		or -1 if no LIMIT or couldn't estimate
+ * input_target: result of make_window_input_target
+ * output_target: what the topmost WindowAggPath should return
+ * tlist: query's target list (needed to look up pathkeys)
+ * wflists: result of find_window_functions
+ * activeWindows: result of select_active_windows
+ *
+ * Note: all Paths in input_rel are expected to return input_target.
  */
 static RelOptInfo *
-create_ordered_paths(PlannerInfo *root,
-					 RelOptInfo *input_rel,
-					 PathTarget *target,
-					 bool target_parallel_safe,
-					 double limit_tuples)
+create_window_paths(PlannerInfo *root,
+					RelOptInfo *input_rel,
+					PathTarget *input_target,
+					PathTarget *output_target,
+					bool output_target_parallel_safe,
+					List *tlist,
+					WindowFuncLists *wflists,
+					List *activeWindows)
 {
-	Path	   *cheapest_input_path = input_rel->cheapest_total_path;
-	RelOptInfo *ordered_rel;
+	RelOptInfo *window_rel;
 	ListCell   *lc;
 
-	/* For now, do all work in the (ORDERED, NULL) upperrel */
-	ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
+	/* For now, do all work in the (WINDOW, NULL) upperrel */
+	window_rel = fetch_upper_rel(root, UPPERREL_WINDOW, NULL);
 
 	/*
-	 * If the input relation is not parallel-safe, then the ordered relation
-	 * can't be parallel-safe, either.  Otherwise, it's parallel-safe if the
-	 * target list is parallel-safe.
+	 * If the input relation is not parallel-safe, then the window relation
+	 * can't be parallel-safe, either.  Otherwise, we need to examine the
+	 * target list and active windows for non-parallel-safe constructs.
 	 */
-	if (input_rel->consider_parallel && target_parallel_safe)
-		ordered_rel->consider_parallel = true;
+	if (input_rel->consider_parallel && output_target_parallel_safe &&
+		is_parallel_safe(root, (Node *) activeWindows))
+		window_rel->consider_parallel = true;
 
 	/*
-	 * If the input rel belongs to a single FDW, so does the ordered_rel.
+	 * If the input rel belongs to a single FDW, so does the window rel.
 	 */
-	ordered_rel->serverid = input_rel->serverid;
-	ordered_rel->userid = input_rel->userid;
-	ordered_rel->useridiscurrent = input_rel->useridiscurrent;
-	ordered_rel->fdwroutine = input_rel->fdwroutine;
-
-	foreach(lc, input_rel->pathlist)
-	{
-		Path	   *path = (Path *) lfirst(lc);
-		bool		is_sorted;
-
-		is_sorted = pathkeys_contained_in(root->sort_pathkeys,
-										  path->pathkeys);
-		if (path == cheapest_input_path || is_sorted)
-		{
-			if (!is_sorted)
-			{
-				/* An explicit sort here can take advantage of LIMIT */
-				path = (Path *) create_sort_path(root,
-												 ordered_rel,
-												 path,
-												 root->sort_pathkeys,
-												 limit_tuples);
-			}
-
-			/* Add projection step if needed */
-			if (path->pathtarget != target)
-				path = apply_projection_to_path(root, ordered_rel,
-												path, target);
-
-			add_path(ordered_rel, path);
-		}
-	}
+	window_rel->serverid = input_rel->serverid;
+	window_rel->userid = input_rel->userid;
+	window_rel->useridiscurrent = input_rel->useridiscurrent;
+	window_rel->fdwroutine = input_rel->fdwroutine;
 
 	/*
-	 * generate_gather_paths() will have already generated a simple Gather
-	 * path for the best parallel path, if any, and the loop above will have
-	 * considered sorting it.  Similarly, generate_gather_paths() will also
-	 * have generated order-preserving Gather Merge plans which can be used
-	 * without sorting if they happen to match the sort_pathkeys, and the loop
-	 * above will have handled those as well.  However, there's one more
-	 * possibility: it may make sense to sort the cheapest partial path
-	 * according to the required output order and then use Gather Merge.
+	 * Consider computing window functions starting from the existing
+	 * cheapest-total path (which will likely require a sort) as well as any
+	 * existing paths that satisfy root->window_pathkeys (which won't).
 	 */
-	if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
-		input_rel->partial_pathlist != NIL)
+	foreach(lc, input_rel->pathlist)
 	{
-		Path	   *cheapest_partial_path;
-
-		cheapest_partial_path = linitial(input_rel->partial_pathlist);
-
-		/*
-		 * If cheapest partial path doesn't need a sort, this is redundant
-		 * with what's already been tried.
-		 */
-		if (!pathkeys_contained_in(root->sort_pathkeys,
-								   cheapest_partial_path->pathkeys))
-		{
-			Path	   *path;
-			double		total_groups;
-
-			path = (Path *) create_sort_path(root,
-											 ordered_rel,
-											 cheapest_partial_path,
-											 root->sort_pathkeys,
-											 limit_tuples);
-
-			total_groups = cheapest_partial_path->rows *
-				cheapest_partial_path->parallel_workers;
-			path = (Path *)
-				create_gather_merge_path(root, ordered_rel,
-										 path,
-										 path->pathtarget,
-										 root->sort_pathkeys, NULL,
-										 &total_groups);
-
-			/* Add projection step if needed */
-			if (path->pathtarget != target)
-				path = apply_projection_to_path(root, ordered_rel,
-												path, target);
+		Path	   *path = (Path *) lfirst(lc);
 
-			add_path(ordered_rel, path);
-		}
+		if (path == input_rel->cheapest_total_path ||
+			pathkeys_contained_in(root->window_pathkeys, path->pathkeys))
+			create_one_window_path(root,
+								   window_rel,
+								   path,
+								   input_target,
+								   output_target,
+								   tlist,
+								   wflists,
+								   activeWindows);
 	}
 
 	/*
 	 * If there is an FDW that's responsible for all baserels of the query,
 	 * let it consider adding ForeignPaths.
 	 */
-	if (ordered_rel->fdwroutine &&
-		ordered_rel->fdwroutine->GetForeignUpperPaths)
-		ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
-													  input_rel, ordered_rel,
-													  NULL);
+	if (window_rel->fdwroutine &&
+		window_rel->fdwroutine->GetForeignUpperPaths)
+		window_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_WINDOW,
+													 input_rel, window_rel,
+													 NULL);
 
 	/* Let extensions possibly add some more paths */
 	if (create_upper_paths_hook)
-		(*create_upper_paths_hook) (root, UPPERREL_ORDERED,
-									input_rel, ordered_rel, NULL);
-
-	/*
-	 * No need to bother with set_cheapest here; grouping_planner does not
-	 * need us to do it.
-	 */
-	Assert(ordered_rel->pathlist != NIL);
-
-	return ordered_rel;
-}
-
-
-/*
- * make_group_input_target
- *	  Generate appropriate PathTarget for initial input to grouping nodes.
- *
- * If there is grouping or aggregation, the scan/join subplan cannot emit
- * the query's final targetlist; for example, it certainly can't emit any
- * aggregate function calls.  This routine generates the correct target
- * for the scan/join subplan.
- *
- * The query target list passed from the parser already contains entries
- * for all ORDER BY and GROUP BY expressions, but it will not have entries
- * for variables used only in HAVING clauses; so we need to add those
- * variables to the subplan target list.  Also, we flatten all expressions
- * except GROUP BY items into their component variables; other expressions
- * will be computed by the upper plan nodes rather than by the subplan.
- * For example, given a query like
- *		SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
- * we want to pass this targetlist to the subplan:
- *		a+b,c,d
- * where the a+b target will be used by the Sort/Group steps, and the
- * other targets will be used for computing the final results.
- *
- * 'final_target' is the query's final target list (in PathTarget form)
+		(*create_upper_paths_hook) (root, UPPERREL_WINDOW,
+									input_rel, window_rel, NULL);
+
+	/* Now choose the best path(s) */
+	set_cheapest(window_rel);
+
+	return window_rel;
+}
+
+/*
+ * Stack window-function implementation steps atop the given Path, and
+ * add the result to window_rel.
  *
- * The result is the PathTarget to be computed by the Paths returned from
- * query_planner().
+ * window_rel: upperrel to contain result
+ * path: input Path to use (must return input_target)
+ * input_target: result of make_window_input_target
+ * output_target: what the topmost WindowAggPath should return
+ * tlist: query's target list (needed to look up pathkeys)
+ * wflists: result of find_window_functions
+ * activeWindows: result of select_active_windows
  */
-static PathTarget *
-make_group_input_target(PlannerInfo *root, PathTarget *final_target)
+static void
+create_one_window_path(PlannerInfo *root,
+					   RelOptInfo *window_rel,
+					   Path *path,
+					   PathTarget *input_target,
+					   PathTarget *output_target,
+					   List *tlist,
+					   WindowFuncLists *wflists,
+					   List *activeWindows)
 {
-	Query	   *parse = root->parse;
-	PathTarget *input_target;
-	List	   *non_group_cols;
-	List	   *non_group_vars;
-	int			i;
-	ListCell   *lc;
+	PathTarget *window_target;
+	ListCell   *l;
 
 	/*
-	 * We must build a target containing all grouping columns, plus any other
-	 * Vars mentioned in the query's targetlist and HAVING qual.
+	 * Since each window clause could require a different sort order, we stack
+	 * up a WindowAgg node for each clause, with sort steps between them as
+	 * needed.  (We assume that select_active_windows chose a good order for
+	 * executing the clauses in.)
+	 *
+	 * input_target should contain all Vars and Aggs needed for the result.
+	 * (In some cases we wouldn't need to propagate all of these all the way
+	 * to the top, since they might only be needed as inputs to WindowFuncs.
+	 * It's probably not worth trying to optimize that though.)  It must also
+	 * contain all window partitioning and sorting expressions, to ensure
+	 * they're computed only once at the bottom of the stack (that's critical
+	 * for volatile functions).  As we climb up the stack, we'll add outputs
+	 * for the WindowFuncs computed at each level.
 	 */
-	input_target = create_empty_pathtarget();
-	non_group_cols = NIL;
+	window_target = input_target;
 
-	i = 0;
-	foreach(lc, final_target->exprs)
+	foreach(l, activeWindows)
 	{
-		Expr	   *expr = (Expr *) lfirst(lc);
-		Index		sgref = get_pathtarget_sortgroupref(final_target, i);
+		WindowClause *wc = lfirst_node(WindowClause, l);
+		List	   *window_pathkeys;
 
-		if (sgref && parse->groupClause &&
-			get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
+		window_pathkeys = make_pathkeys_for_window(root,
+												   wc,
+												   tlist);
+
+		/* Sort if necessary */
+		if (!pathkeys_contained_in(window_pathkeys, path->pathkeys))
+		{
+			path = (Path *) create_sort_path(root, window_rel,
+											 path,
+											 window_pathkeys,
+											 -1.0);
+		}
+
+		if (lnext(l))
 		{
 			/*
-			 * It's a grouping column, so add it to the input target as-is.
+			 * Add the current WindowFuncs to the output target for this
+			 * intermediate WindowAggPath.  We must copy window_target to
+			 * avoid changing the previous path's target.
+			 *
+			 * Note: a WindowFunc adds nothing to the target's eval costs; but
+			 * we do need to account for the increase in tlist width.
 			 */
-			add_column_to_pathtarget(input_target, expr, sgref);
+			ListCell   *lc2;
+
+			window_target = copy_pathtarget(window_target);
+			foreach(lc2, wflists->windowFuncs[wc->winref])
+			{
+				WindowFunc *wfunc = lfirst_node(WindowFunc, lc2);
+
+				add_column_to_pathtarget(window_target, (Expr *) wfunc, 0);
+				window_target->width += get_typavgwidth(wfunc->wintype, -1);
+			}
 		}
 		else
 		{
-			/*
-			 * Non-grouping column, so just remember the expression for later
-			 * call to pull_var_clause.
-			 */
-			non_group_cols = lappend(non_group_cols, expr);
+			/* Install the goal target in the topmost WindowAgg */
+			window_target = output_target;
 		}
 
-		i++;
+		path = (Path *)
+			create_windowagg_path(root, window_rel, path, window_target,
+								  wflists->windowFuncs[wc->winref],
+								  wc,
+								  window_pathkeys);
 	}
 
-	/*
-	 * If there's a HAVING clause, we'll need the Vars it uses, too.
-	 */
-	if (parse->havingQual)
-		non_group_cols = lappend(non_group_cols, parse->havingQual);
-
-	/*
-	 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
-	 * add them to the input target if not already present.  (A Var used
-	 * directly as a GROUP BY item will be present already.)  Note this
-	 * includes Vars used in resjunk items, so we are covering the needs of
-	 * ORDER BY and window specifications.  Vars used within Aggrefs and
-	 * WindowFuncs will be pulled out here, too.
-	 */
-	non_group_vars = pull_var_clause((Node *) non_group_cols,
-									 PVC_RECURSE_AGGREGATES |
-									 PVC_RECURSE_WINDOWFUNCS |
-									 PVC_INCLUDE_PLACEHOLDERS);
-	add_new_columns_to_pathtarget(input_target, non_group_vars);
-
-	/* clean up cruft */
-	list_free(non_group_vars);
-	list_free(non_group_cols);
-
-	/* XXX this causes some redundant cost calculation ... */
-	return set_pathtarget_cost_width(root, input_target);
+	add_path(window_rel, path);
 }
 
 /*
- * make_partial_grouping_target
- *	  Generate appropriate PathTarget for output of partial aggregate
- *	  (or partial grouping, if there are no aggregates) nodes.
+ * create_distinct_paths
  *
- * A partial aggregation node needs to emit all the same aggregates that
- * a regular aggregation node would, plus any aggregates used in HAVING;
- * except that the Aggref nodes should be marked as partial aggregates.
+ * Build a new upperrel containing Paths for SELECT DISTINCT evaluation.
  *
- * In addition, we'd better emit any Vars and PlaceholderVars that are
- * used outside of Aggrefs in the aggregation tlist and HAVING.  (Presumably,
- * these would be Vars that are grouped by or used in grouping expressions.)
+ * input_rel: contains the source-data Paths
  *
- * grouping_target is the tlist to be emitted by the topmost aggregation step.
- * havingQual represents the HAVING clause.
+ * Note: input paths should already compute the desired pathtarget, since
+ * Sort/Unique won't project anything.
  */
-static PathTarget *
-make_partial_grouping_target(PlannerInfo *root,
-							 PathTarget *grouping_target,
-							 Node *havingQual)
+static RelOptInfo *
+create_distinct_paths(PlannerInfo *root,
+					  RelOptInfo *input_rel)
 {
 	Query	   *parse = root->parse;
-	PathTarget *partial_target;
-	List	   *non_group_cols;
-	List	   *non_group_exprs;
-	int			i;
+	Path	   *cheapest_input_path = input_rel->cheapest_total_path;
+	RelOptInfo *distinct_rel;
+	double		numDistinctRows;
+	bool		allow_hash;
+	Path	   *path;
 	ListCell   *lc;
 
-	partial_target = create_empty_pathtarget();
-	non_group_cols = NIL;
-
-	i = 0;
-	foreach(lc, grouping_target->exprs)
-	{
-		Expr	   *expr = (Expr *) lfirst(lc);
-		Index		sgref = get_pathtarget_sortgroupref(grouping_target, i);
-
-		if (sgref && parse->groupClause &&
-			get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
-		{
-			/*
-			 * It's a grouping column, so add it to the partial_target as-is.
-			 * (This allows the upper agg step to repeat the grouping calcs.)
-			 */
-			add_column_to_pathtarget(partial_target, expr, sgref);
-		}
-		else
-		{
-			/*
-			 * Non-grouping column, so just remember the expression for later
-			 * call to pull_var_clause.
-			 */
-			non_group_cols = lappend(non_group_cols, expr);
-		}
-
-		i++;
-	}
+	/* For now, do all work in the (DISTINCT, NULL) upperrel */
+	distinct_rel = fetch_upper_rel(root, UPPERREL_DISTINCT, NULL);
 
 	/*
-	 * If there's a HAVING clause, we'll need the Vars/Aggrefs it uses, too.
+	 * We don't compute anything at this level, so distinct_rel will be
+	 * parallel-safe if the input rel is parallel-safe.  In particular, if
+	 * there is a DISTINCT ON (...) clause, any path for the input_rel will
+	 * output those expressions, and will not be parallel-safe unless those
+	 * expressions are parallel-safe.
 	 */
-	if (havingQual)
-		non_group_cols = lappend(non_group_cols, havingQual);
+	distinct_rel->consider_parallel = input_rel->consider_parallel;
 
 	/*
-	 * Pull out all the Vars, PlaceHolderVars, and Aggrefs mentioned in
-	 * non-group cols (plus HAVING), and add them to the partial_target if not
-	 * already present.  (An expression used directly as a GROUP BY item will
-	 * be present already.)  Note this includes Vars used in resjunk items, so
-	 * we are covering the needs of ORDER BY and window specifications.
+	 * If the input rel belongs to a single FDW, so does the distinct_rel.
 	 */
-	non_group_exprs = pull_var_clause((Node *) non_group_cols,
-									  PVC_INCLUDE_AGGREGATES |
-									  PVC_RECURSE_WINDOWFUNCS |
-									  PVC_INCLUDE_PLACEHOLDERS);
+	distinct_rel->serverid = input_rel->serverid;
+	distinct_rel->userid = input_rel->userid;
+	distinct_rel->useridiscurrent = input_rel->useridiscurrent;
+	distinct_rel->fdwroutine = input_rel->fdwroutine;
+
+	/* Estimate number of distinct rows there will be */
+	if (parse->groupClause || parse->groupingSets || parse->hasAggs ||
+		root->hasHavingQual)
+	{
+		/*
+		 * If there was grouping or aggregation, use the number of input rows
+		 * as the estimated number of DISTINCT rows (ie, assume the input is
+		 * already mostly unique).
+		 */
+		numDistinctRows = cheapest_input_path->rows;
+	}
+	else
+	{
+		/*
+		 * Otherwise, the UNIQUE filter has effects comparable to GROUP BY.
+		 */
+		List	   *distinctExprs;
 
-	add_new_columns_to_pathtarget(partial_target, non_group_exprs);
+		distinctExprs = get_sortgrouplist_exprs(parse->distinctClause,
+												parse->targetList);
+		numDistinctRows = estimate_num_groups(root, distinctExprs,
+											  cheapest_input_path->rows,
+											  NULL);
+	}
 
 	/*
-	 * Adjust Aggrefs to put them in partial mode.  At this point all Aggrefs
-	 * are at the top level of the target list, so we can just scan the list
-	 * rather than recursing through the expression trees.
+	 * Consider sort-based implementations of DISTINCT, if possible.
 	 */
-	foreach(lc, partial_target->exprs)
+	if (grouping_is_sortable(parse->distinctClause))
 	{
-		Aggref	   *aggref = (Aggref *) lfirst(lc);
-
-		if (IsA(aggref, Aggref))
-		{
-			Aggref	   *newaggref;
-
-			/*
-			 * We shouldn't need to copy the substructure of the Aggref node,
-			 * but flat-copy the node itself to avoid damaging other trees.
-			 */
-			newaggref = makeNode(Aggref);
-			memcpy(newaggref, aggref, sizeof(Aggref));
+		/*
+		 * First, if we have any adequately-presorted paths, just stick a
+		 * Unique node on those.  Then consider doing an explicit sort of the
+		 * cheapest input path and Unique'ing that.
+		 *
+		 * When we have DISTINCT ON, we must sort by the more rigorous of
+		 * DISTINCT and ORDER BY, else it won't have the desired behavior.
+		 * Also, if we do have to do an explicit sort, we might as well use
+		 * the more rigorous ordering to avoid a second sort later.  (Note
+		 * that the parser will have ensured that one clause is a prefix of
+		 * the other.)
+		 */
+		List	   *needed_pathkeys;
 
-			/* For now, assume serialization is required */
-			mark_partial_aggref(newaggref, AGGSPLIT_INITIAL_SERIAL);
+		if (parse->hasDistinctOn &&
+			list_length(root->distinct_pathkeys) <
+			list_length(root->sort_pathkeys))
+			needed_pathkeys = root->sort_pathkeys;
+		else
+			needed_pathkeys = root->distinct_pathkeys;
 
-			lfirst(lc) = newaggref;
-		}
-	}
+		foreach(lc, input_rel->pathlist)
+		{
+			Path	   *path = (Path *) lfirst(lc);
 
-	/* clean up cruft */
-	list_free(non_group_exprs);
-	list_free(non_group_cols);
+			if (pathkeys_contained_in(needed_pathkeys, path->pathkeys))
+			{
+				add_path(distinct_rel, (Path *)
+						 create_upper_unique_path(root, distinct_rel,
+												  path,
+												  list_length(root->distinct_pathkeys),
+												  numDistinctRows));
+			}
+		}
 
-	/* XXX this causes some redundant cost calculation ... */
-	return set_pathtarget_cost_width(root, partial_target);
-}
+		/* For explicit-sort case, always use the more rigorous clause */
+		if (list_length(root->distinct_pathkeys) <
+			list_length(root->sort_pathkeys))
+		{
+			needed_pathkeys = root->sort_pathkeys;
+			/* Assert checks that parser didn't mess up... */
+			Assert(pathkeys_contained_in(root->distinct_pathkeys,
+										 needed_pathkeys));
+		}
+		else
+			needed_pathkeys = root->distinct_pathkeys;
 
-/*
- * mark_partial_aggref
- *	  Adjust an Aggref to make it represent a partial-aggregation step.
- *
- * The Aggref node is modified in-place; caller must do any copying required.
- */
-void
-mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
-{
-	/* aggtranstype should be computed by this point */
-	Assert(OidIsValid(agg->aggtranstype));
-	/* ... but aggsplit should still be as the parser left it */
-	Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
+		path = cheapest_input_path;
+		if (!pathkeys_contained_in(needed_pathkeys, path->pathkeys))
+			path = (Path *) create_sort_path(root, distinct_rel,
+											 path,
+											 needed_pathkeys,
+											 -1.0);
 
-	/* Mark the Aggref with the intended partial-aggregation mode */
-	agg->aggsplit = aggsplit;
+		add_path(distinct_rel, (Path *)
+				 create_upper_unique_path(root, distinct_rel,
+										  path,
+										  list_length(root->distinct_pathkeys),
+										  numDistinctRows));
+	}
 
 	/*
-	 * Adjust result type if needed.  Normally, a partial aggregate returns
-	 * the aggregate's transition type; but if that's INTERNAL and we're
-	 * serializing, it returns BYTEA instead.
+	 * Consider hash-based implementations of DISTINCT, if possible.
+	 *
+	 * If we were not able to make any other types of path, we *must* hash or
+	 * die trying.  If we do have other choices, there are several things that
+	 * should prevent selection of hashing: if the query uses DISTINCT ON
+	 * (because it won't really have the expected behavior if we hash), or if
+	 * enable_hashagg is off, or if it looks like the hashtable will exceed
+	 * work_mem.
+	 *
+	 * Note: grouping_is_hashable() is much more expensive to check than the
+	 * other gating conditions, so we want to do it last.
 	 */
-	if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
-	{
-		if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
-			agg->aggtype = BYTEAOID;
-		else
-			agg->aggtype = agg->aggtranstype;
-	}
-}
-
-/*
- * postprocess_setop_tlist
- *	  Fix up targetlist returned by plan_set_operations().
- *
- * We need to transpose sort key info from the orig_tlist into new_tlist.
- * NOTE: this would not be good enough if we supported resjunk sort keys
- * for results of set operations --- then, we'd need to project a whole
- * new tlist to evaluate the resjunk columns.  For now, just ereport if we
- * find any resjunk columns in orig_tlist.
- */
-static List *
-postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
-{
-	ListCell   *l;
-	ListCell   *orig_tlist_item = list_head(orig_tlist);
-
-	foreach(l, new_tlist)
+	if (distinct_rel->pathlist == NIL)
+		allow_hash = true;		/* we have no alternatives */
+	else if (parse->hasDistinctOn || !enable_hashagg)
+		allow_hash = false;		/* policy-based decision not to hash */
+	else
 	{
-		TargetEntry *new_tle = lfirst_node(TargetEntry, l);
-		TargetEntry *orig_tle;
+		Size		hashentrysize;
 
-		/* ignore resjunk columns in setop result */
-		if (new_tle->resjunk)
-			continue;
+		/* Estimate per-hash-entry space at tuple width... */
+		hashentrysize = MAXALIGN(cheapest_input_path->pathtarget->width) +
+			MAXALIGN(SizeofMinimalTupleHeader);
+		/* plus the per-hash-entry overhead */
+		hashentrysize += hash_agg_entry_size(0);
 
-		Assert(orig_tlist_item != NULL);
-		orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
-		orig_tlist_item = lnext(orig_tlist_item);
-		if (orig_tle->resjunk)	/* should not happen */
-			elog(ERROR, "resjunk output columns are not implemented");
-		Assert(new_tle->resno == orig_tle->resno);
-		new_tle->ressortgroupref = orig_tle->ressortgroupref;
+		/* Allow hashing only if hashtable is predicted to fit in work_mem */
+		allow_hash = (hashentrysize * numDistinctRows <= work_mem * 1024L);
 	}
-	if (orig_tlist_item != NULL)
-		elog(ERROR, "resjunk output columns are not implemented");
-	return new_tlist;
-}
-
-/*
- * select_active_windows
- *		Create a list of the "active" window clauses (ie, those referenced
- *		by non-deleted WindowFuncs) in the order they are to be executed.
- */
-static List *
-select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
-{
-	List	   *result;
-	List	   *actives;
-	ListCell   *lc;
 
-	/* First, make a list of the active windows */
-	actives = NIL;
-	foreach(lc, root->parse->windowClause)
+	if (allow_hash && grouping_is_hashable(parse->distinctClause))
 	{
-		WindowClause *wc = lfirst_node(WindowClause, lc);
-
-		/* It's only active if wflists shows some related WindowFuncs */
-		Assert(wc->winref <= wflists->maxWinRef);
-		if (wflists->windowFuncs[wc->winref] != NIL)
-			actives = lappend(actives, wc);
+		/* Generate hashed aggregate path --- no sort needed */
+		add_path(distinct_rel, (Path *)
+				 create_agg_path(root,
+								 distinct_rel,
+								 cheapest_input_path,
+								 cheapest_input_path->pathtarget,
+								 AGG_HASHED,
+								 AGGSPLIT_SIMPLE,
+								 parse->distinctClause,
+								 NIL,
+								 NULL,
+								 numDistinctRows));
 	}
 
+	/* Give a helpful error if we failed to find any implementation */
+	if (distinct_rel->pathlist == NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("could not implement DISTINCT"),
+				 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
+
 	/*
-	 * Now, ensure that windows with identical partitioning/ordering clauses
-	 * are adjacent in the list.  This is required by the SQL standard, which
-	 * says that only one sort is to be used for such windows, even if they
-	 * are otherwise distinct (eg, different names or framing clauses).
-	 *
-	 * There is room to be much smarter here, for example detecting whether
-	 * one window's sort keys are a prefix of another's (so that sorting for
-	 * the latter would do for the former), or putting windows first that
-	 * match a sort order available for the underlying query.  For the moment
-	 * we are content with meeting the spec.
+	 * If there is an FDW that's responsible for all baserels of the query,
+	 * let it consider adding ForeignPaths.
 	 */
-	result = NIL;
-	while (actives != NIL)
-	{
-		WindowClause *wc = linitial_node(WindowClause, actives);
-		ListCell   *prev;
-		ListCell   *next;
-
-		/* Move wc from actives to result */
-		actives = list_delete_first(actives);
-		result = lappend(result, wc);
+	if (distinct_rel->fdwroutine &&
+		distinct_rel->fdwroutine->GetForeignUpperPaths)
+		distinct_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_DISTINCT,
+													   input_rel, distinct_rel,
+													   NULL);
 
-		/* Now move any matching windows from actives to result */
-		prev = NULL;
-		for (lc = list_head(actives); lc; lc = next)
-		{
-			WindowClause *wc2 = lfirst_node(WindowClause, lc);
+	/* Let extensions possibly add some more paths */
+	if (create_upper_paths_hook)
+		(*create_upper_paths_hook) (root, UPPERREL_DISTINCT,
+									input_rel, distinct_rel, NULL);
 
-			next = lnext(lc);
-			/* framing options are NOT to be compared here! */
-			if (equal(wc->partitionClause, wc2->partitionClause) &&
-				equal(wc->orderClause, wc2->orderClause))
-			{
-				actives = list_delete_cell(actives, lc, prev);
-				result = lappend(result, wc2);
-			}
-			else
-				prev = lc;
-		}
-	}
+	/* Now choose the best path(s) */
+	set_cheapest(distinct_rel);
 
-	return result;
+	return distinct_rel;
 }
 
 /*
- * make_window_input_target
- *	  Generate appropriate PathTarget for initial input to WindowAgg nodes.
- *
- * When the query has window functions, this function computes the desired
- * target to be computed by the node just below the first WindowAgg.
- * This tlist must contain all values needed to evaluate the window functions,
- * compute the final target list, and perform any required final sort step.
- * If multiple WindowAggs are needed, each intermediate one adds its window
- * function results onto this base tlist; only the topmost WindowAgg computes
- * the actual desired target list.
- *
- * This function is much like make_group_input_target, though not quite enough
- * like it to share code.  As in that function, we flatten most expressions
- * into their component variables.  But we do not want to flatten window
- * PARTITION BY/ORDER BY clauses, since that might result in multiple
- * evaluations of them, which would be bad (possibly even resulting in
- * inconsistent answers, if they contain volatile functions).
- * Also, we must not flatten GROUP BY clauses that were left unflattened by
- * make_group_input_target, because we may no longer have access to the
- * individual Vars in them.
+ * create_ordered_paths
  *
- * Another key difference from make_group_input_target is that we don't
- * flatten Aggref expressions, since those are to be computed below the
- * window functions and just referenced like Vars above that.
+ * Build a new upperrel containing Paths for ORDER BY evaluation.
  *
- * 'final_target' is the query's final target list (in PathTarget form)
- * 'activeWindows' is the list of active windows previously identified by
- *			select_active_windows.
+ * All paths in the result must satisfy the ORDER BY ordering.
+ * The only new path we need consider is an explicit sort on the
+ * cheapest-total existing path.
  *
- * The result is the PathTarget to be computed by the plan node immediately
- * below the first WindowAgg node.
+ * input_rel: contains the source-data Paths
+ * target: the output tlist the result Paths must emit
+ * limit_tuples: estimated bound on the number of output tuples,
+ *		or -1 if no LIMIT or couldn't estimate
  */
-static PathTarget *
-make_window_input_target(PlannerInfo *root,
-						 PathTarget *final_target,
-						 List *activeWindows)
+static RelOptInfo *
+create_ordered_paths(PlannerInfo *root,
+					 RelOptInfo *input_rel,
+					 PathTarget *target,
+					 bool target_parallel_safe,
+					 double limit_tuples)
 {
-	Query	   *parse = root->parse;
-	PathTarget *input_target;
-	Bitmapset  *sgrefs;
-	List	   *flattenable_cols;
-	List	   *flattenable_vars;
-	int			i;
+	Path	   *cheapest_input_path = input_rel->cheapest_total_path;
+	RelOptInfo *ordered_rel;
 	ListCell   *lc;
 
-	Assert(parse->hasWindowFuncs);
+	/* For now, do all work in the (ORDERED, NULL) upperrel */
+	ordered_rel = fetch_upper_rel(root, UPPERREL_ORDERED, NULL);
 
 	/*
-	 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
-	 * into a bitmapset for convenient reference below.
+	 * If the input relation is not parallel-safe, then the ordered relation
+	 * can't be parallel-safe, either.  Otherwise, it's parallel-safe if the
+	 * target list is parallel-safe.
 	 */
-	sgrefs = NULL;
-	foreach(lc, activeWindows)
+	if (input_rel->consider_parallel && target_parallel_safe)
+		ordered_rel->consider_parallel = true;
+
+	/*
+	 * If the input rel belongs to a single FDW, so does the ordered_rel.
+	 */
+	ordered_rel->serverid = input_rel->serverid;
+	ordered_rel->userid = input_rel->userid;
+	ordered_rel->useridiscurrent = input_rel->useridiscurrent;
+	ordered_rel->fdwroutine = input_rel->fdwroutine;
+
+	foreach(lc, input_rel->pathlist)
 	{
-		WindowClause *wc = lfirst_node(WindowClause, lc);
-		ListCell   *lc2;
+		Path	   *path = (Path *) lfirst(lc);
+		bool		is_sorted;
 
-		foreach(lc2, wc->partitionClause)
+		is_sorted = pathkeys_contained_in(root->sort_pathkeys,
+										  path->pathkeys);
+		if (path == cheapest_input_path || is_sorted)
 		{
-			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
+			if (!is_sorted)
+			{
+				/* An explicit sort here can take advantage of LIMIT */
+				path = (Path *) create_sort_path(root,
+												 ordered_rel,
+												 path,
+												 root->sort_pathkeys,
+												 limit_tuples);
+			}
 
-			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
-		}
-		foreach(lc2, wc->orderClause)
-		{
-			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
+			/* Add projection step if needed */
+			if (path->pathtarget != target)
+				path = apply_projection_to_path(root, ordered_rel,
+												path, target);
 
-			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
+			add_path(ordered_rel, path);
 		}
 	}
 
-	/* Add in sortgroupref numbers of GROUP BY clauses, too */
-	foreach(lc, parse->groupClause)
-	{
-		SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
-
-		sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
-	}
-
 	/*
-	 * Construct a target containing all the non-flattenable targetlist items,
-	 * and save aside the others for a moment.
+	 * generate_gather_paths() will have already generated a simple Gather
+	 * path for the best parallel path, if any, and the loop above will have
+	 * considered sorting it.  Similarly, generate_gather_paths() will also
+	 * have generated order-preserving Gather Merge plans which can be used
+	 * without sorting if they happen to match the sort_pathkeys, and the loop
+	 * above will have handled those as well.  However, there's one more
+	 * possibility: it may make sense to sort the cheapest partial path
+	 * according to the required output order and then use Gather Merge.
 	 */
-	input_target = create_empty_pathtarget();
-	flattenable_cols = NIL;
-
-	i = 0;
-	foreach(lc, final_target->exprs)
+	if (ordered_rel->consider_parallel && root->sort_pathkeys != NIL &&
+		input_rel->partial_pathlist != NIL)
 	{
-		Expr	   *expr = (Expr *) lfirst(lc);
-		Index		sgref = get_pathtarget_sortgroupref(final_target, i);
+		Path	   *cheapest_partial_path;
+
+		cheapest_partial_path = linitial(input_rel->partial_pathlist);
 
 		/*
-		 * Don't want to deconstruct window clauses or GROUP BY items.  (Note
-		 * that such items can't contain window functions, so it's okay to
-		 * compute them below the WindowAgg nodes.)
+		 * If cheapest partial path doesn't need a sort, this is redundant
+		 * with what's already been tried.
 		 */
-		if (sgref != 0 && bms_is_member(sgref, sgrefs))
-		{
-			/*
-			 * Don't want to deconstruct this value, so add it to the input
-			 * target as-is.
-			 */
-			add_column_to_pathtarget(input_target, expr, sgref);
-		}
-		else
+		if (!pathkeys_contained_in(root->sort_pathkeys,
+								   cheapest_partial_path->pathkeys))
 		{
-			/*
-			 * Column is to be flattened, so just remember the expression for
-			 * later call to pull_var_clause.
-			 */
-			flattenable_cols = lappend(flattenable_cols, expr);
-		}
+			Path	   *path;
+			double		total_groups;
 
-		i++;
+			path = (Path *) create_sort_path(root,
+											 ordered_rel,
+											 cheapest_partial_path,
+											 root->sort_pathkeys,
+											 limit_tuples);
+
+			total_groups = cheapest_partial_path->rows *
+				cheapest_partial_path->parallel_workers;
+			path = (Path *)
+				create_gather_merge_path(root, ordered_rel,
+										 path,
+										 path->pathtarget,
+										 root->sort_pathkeys, NULL,
+										 &total_groups);
+
+			/* Add projection step if needed */
+			if (path->pathtarget != target)
+				path = apply_projection_to_path(root, ordered_rel,
+												path, target);
+
+			add_path(ordered_rel, path);
+		}
 	}
 
 	/*
-	 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
-	 * add them to the input target if not already present.  (Some might be
-	 * there already because they're used directly as window/group clauses.)
-	 *
-	 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
-	 * Aggrefs are placed in the Agg node's tlist and not left to be computed
-	 * at higher levels.  On the other hand, we should recurse into
-	 * WindowFuncs to make sure their input expressions are available.
+	 * If there is an FDW that's responsible for all baserels of the query,
+	 * let it consider adding ForeignPaths.
 	 */
-	flattenable_vars = pull_var_clause((Node *) flattenable_cols,
-									   PVC_INCLUDE_AGGREGATES |
-									   PVC_RECURSE_WINDOWFUNCS |
-									   PVC_INCLUDE_PLACEHOLDERS);
-	add_new_columns_to_pathtarget(input_target, flattenable_vars);
-
-	/* clean up cruft */
-	list_free(flattenable_vars);
-	list_free(flattenable_cols);
-
-	/* XXX this causes some redundant cost calculation ... */
-	return set_pathtarget_cost_width(root, input_target);
-}
+	if (ordered_rel->fdwroutine &&
+		ordered_rel->fdwroutine->GetForeignUpperPaths)
+		ordered_rel->fdwroutine->GetForeignUpperPaths(root, UPPERREL_ORDERED,
+													  input_rel, ordered_rel,
+													  NULL);
 
-/*
- * make_pathkeys_for_window
- *		Create a pathkeys list describing the required input ordering
- *		for the given WindowClause.
- *
- * The required ordering is first the PARTITION keys, then the ORDER keys.
- * In the future we might try to implement windowing using hashing, in which
- * case the ordering could be relaxed, but for now we always sort.
- *
- * Caution: if you change this, see createplan.c's get_column_info_for_window!
- */
-static List *
-make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
-						 List *tlist)
-{
-	List	   *window_pathkeys;
-	List	   *window_sortclauses;
+	/* Let extensions possibly add some more paths */
+	if (create_upper_paths_hook)
+		(*create_upper_paths_hook) (root, UPPERREL_ORDERED,
+									input_rel, ordered_rel, NULL);
 
-	/* Throw error if can't sort */
-	if (!grouping_is_sortable(wc->partitionClause))
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("could not implement window PARTITION BY"),
-				 errdetail("Window partitioning columns must be of sortable datatypes.")));
-	if (!grouping_is_sortable(wc->orderClause))
-		ereport(ERROR,
-				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-				 errmsg("could not implement window ORDER BY"),
-				 errdetail("Window ordering columns must be of sortable datatypes.")));
+	/*
+	 * No need to bother with set_cheapest here; grouping_planner does not
+	 * need us to do it.
+	 */
+	Assert(ordered_rel->pathlist != NIL);
 
-	/* Okay, make the combined pathkeys */
-	window_sortclauses = list_concat(list_copy(wc->partitionClause),
-									 list_copy(wc->orderClause));
-	window_pathkeys = make_pathkeys_for_sortclauses(root,
-													window_sortclauses,
-													tlist);
-	list_free(window_sortclauses);
-	return window_pathkeys;
+	return ordered_rel;
 }
 
+
 /*
- * make_sort_input_target
- *	  Generate appropriate PathTarget for initial input to Sort step.
- *
- * If the query has ORDER BY, this function chooses the target to be computed
- * by the node just below the Sort (and DISTINCT, if any, since Unique can't
- * project) steps.  This might or might not be identical to the query's final
- * output target.
- *
- * The main argument for keeping the sort-input tlist the same as the final
- * is that we avoid a separate projection node (which will be needed if
- * they're different, because Sort can't project).  However, there are also
- * advantages to postponing tlist evaluation till after the Sort: it ensures
- * a consistent order of evaluation for any volatile functions in the tlist,
- * and if there's also a LIMIT, we can stop the query without ever computing
- * tlist functions for later rows, which is beneficial for both volatile and
- * expensive functions.
- *
- * Our current policy is to postpone volatile expressions till after the sort
- * unconditionally (assuming that that's possible, ie they are in plain tlist
- * columns and not ORDER BY/GROUP BY/DISTINCT columns).  We also prefer to
- * postpone set-returning expressions, because running them beforehand would
- * bloat the sort dataset, and because it might cause unexpected output order
- * if the sort isn't stable.  However there's a constraint on that: all SRFs
- * in the tlist should be evaluated at the same plan step, so that they can
- * run in sync in nodeProjectSet.  So if any SRFs are in sort columns, we
- * mustn't postpone any SRFs.  (Note that in principle that policy should
- * probably get applied to the group/window input targetlists too, but we
- * have not done that historically.)  Lastly, expensive expressions are
- * postponed if there is a LIMIT, or if root->tuple_fraction shows that
- * partial evaluation of the query is possible (if neither is true, we expect
- * to have to evaluate the expressions for every row anyway), or if there are
- * any volatile or set-returning expressions (since once we've put in a
- * projection at all, it won't cost any more to postpone more stuff).
- *
- * Another issue that could potentially be considered here is that
- * evaluating tlist expressions could result in data that's either wider
- * or narrower than the input Vars, thus changing the volume of data that
- * has to go through the Sort.  However, we usually have only a very bad
- * idea of the output width of any expression more complex than a Var,
- * so for now it seems too risky to try to optimize on that basis.
+ * make_group_input_target
+ *	  Generate appropriate PathTarget for initial input to grouping nodes.
  *
- * Note that if we do produce a modified sort-input target, and then the
- * query ends up not using an explicit Sort, no particular harm is done:
- * we'll initially use the modified target for the preceding path nodes,
- * but then change them to the final target with apply_projection_to_path.
- * Moreover, in such a case the guarantees about evaluation order of
- * volatile functions still hold, since the rows are sorted already.
+ * If there is grouping or aggregation, the scan/join subplan cannot emit
+ * the query's final targetlist; for example, it certainly can't emit any
+ * aggregate function calls.  This routine generates the correct target
+ * for the scan/join subplan.
  *
- * This function has some things in common with make_group_input_target and
- * make_window_input_target, though the detailed rules for what to do are
- * different.  We never flatten/postpone any grouping or ordering columns;
- * those are needed before the sort.  If we do flatten a particular
- * expression, we leave Aggref and WindowFunc nodes alone, since those were
- * computed earlier.
+ * The query target list passed from the parser already contains entries
+ * for all ORDER BY and GROUP BY expressions, but it will not have entries
+ * for variables used only in HAVING clauses; so we need to add those
+ * variables to the subplan target list.  Also, we flatten all expressions
+ * except GROUP BY items into their component variables; other expressions
+ * will be computed by the upper plan nodes rather than by the subplan.
+ * For example, given a query like
+ *		SELECT a+b,SUM(c+d) FROM table GROUP BY a+b;
+ * we want to pass this targetlist to the subplan:
+ *		a+b,c,d
+ * where the a+b target will be used by the Sort/Group steps, and the
+ * other targets will be used for computing the final results.
  *
  * 'final_target' is the query's final target list (in PathTarget form)
- * 'have_postponed_srfs' is an output argument, see below
- *
- * The result is the PathTarget to be computed by the plan node immediately
- * below the Sort step (and the Distinct step, if any).  This will be
- * exactly final_target if we decide a projection step wouldn't be helpful.
  *
- * In addition, *have_postponed_srfs is set to true if we choose to postpone
- * any set-returning functions to after the Sort.
+ * The result is the PathTarget to be computed by the Paths returned from
+ * query_planner().
  */
 static PathTarget *
-make_sort_input_target(PlannerInfo *root,
-					   PathTarget *final_target,
-					   bool *have_postponed_srfs)
+make_group_input_target(PlannerInfo *root, PathTarget *final_target)
 {
 	Query	   *parse = root->parse;
 	PathTarget *input_target;
-	int			ncols;
-	bool	   *col_is_srf;
-	bool	   *postpone_col;
-	bool		have_srf;
-	bool		have_volatile;
-	bool		have_expensive;
-	bool		have_srf_sortcols;
-	bool		postpone_srfs;
-	List	   *postponable_cols;
-	List	   *postponable_vars;
+	List	   *non_group_cols;
+	List	   *non_group_vars;
 	int			i;
 	ListCell   *lc;
 
-	/* Shouldn't get here unless query has ORDER BY */
-	Assert(parse->sortClause);
-
-	*have_postponed_srfs = false;	/* default result */
-
-	/* Inspect tlist and collect per-column information */
-	ncols = list_length(final_target->exprs);
-	col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
-	postpone_col = (bool *) palloc0(ncols * sizeof(bool));
-	have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
+	/*
+	 * We must build a target containing all grouping columns, plus any other
+	 * Vars mentioned in the query's targetlist and HAVING qual.
+	 */
+	input_target = create_empty_pathtarget();
+	non_group_cols = NIL;
 
 	i = 0;
 	foreach(lc, final_target->exprs)
 	{
 		Expr	   *expr = (Expr *) lfirst(lc);
+		Index		sgref = get_pathtarget_sortgroupref(final_target, i);
 
-		/*
-		 * If the column has a sortgroupref, assume it has to be evaluated
-		 * before sorting.  Generally such columns would be ORDER BY, GROUP
-		 * BY, etc targets.  One exception is columns that were removed from
-		 * GROUP BY by remove_useless_groupby_columns() ... but those would
-		 * only be Vars anyway.  There don't seem to be any cases where it
-		 * would be worth the trouble to double-check.
-		 */
-		if (get_pathtarget_sortgroupref(final_target, i) == 0)
+		if (sgref && parse->groupClause &&
+			get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
 		{
 			/*
-			 * Check for SRF or volatile functions.  Check the SRF case first
-			 * because we must know whether we have any postponed SRFs.
+			 * It's a grouping column, so add it to the input target as-is.
 			 */
-			if (parse->hasTargetSRFs &&
-				expression_returns_set((Node *) expr))
-			{
-				/* We'll decide below whether these are postponable */
-				col_is_srf[i] = true;
-				have_srf = true;
-			}
-			else if (contain_volatile_functions((Node *) expr))
-			{
-				/* Unconditionally postpone */
-				postpone_col[i] = true;
-				have_volatile = true;
-			}
-			else
-			{
-				/*
-				 * Else check the cost.  XXX it's annoying to have to do this
-				 * when set_pathtarget_cost_width() just did it.  Refactor to
-				 * allow sharing the work?
-				 */
-				QualCost	cost;
-
-				cost_qual_eval_node(&cost, (Node *) expr, root);
-
-				/*
-				 * We arbitrarily define "expensive" as "more than 10X
-				 * cpu_operator_cost".  Note this will take in any PL function
-				 * with default cost.
-				 */
-				if (cost.per_tuple > 10 * cpu_operator_cost)
-				{
-					postpone_col[i] = true;
-					have_expensive = true;
-				}
-			}
+			add_column_to_pathtarget(input_target, expr, sgref);
 		}
 		else
 		{
-			/* For sortgroupref cols, just check if any contain SRFs */
-			if (!have_srf_sortcols &&
-				parse->hasTargetSRFs &&
-				expression_returns_set((Node *) expr))
-				have_srf_sortcols = true;
+			/*
+			 * Non-grouping column, so just remember the expression for later
+			 * call to pull_var_clause.
+			 */
+			non_group_cols = lappend(non_group_cols, expr);
 		}
 
 		i++;
 	}
 
 	/*
-	 * We can postpone SRFs if we have some but none are in sortgroupref cols.
-	 */
-	postpone_srfs = (have_srf && !have_srf_sortcols);
-
-	/*
-	 * If we don't need a post-sort projection, just return final_target.
+	 * If there's a HAVING clause, we'll need the Vars it uses, too.
 	 */
-	if (!(postpone_srfs || have_volatile ||
-		  (have_expensive &&
-		   (parse->limitCount || root->tuple_fraction > 0))))
-		return final_target;
+	if (parse->havingQual)
+		non_group_cols = lappend(non_group_cols, parse->havingQual);
 
 	/*
-	 * Report whether the post-sort projection will contain set-returning
-	 * functions.  This is important because it affects whether the Sort can
-	 * rely on the query's LIMIT (if any) to bound the number of rows it needs
-	 * to return.
+	 * Pull out all the Vars mentioned in non-group cols (plus HAVING), and
+	 * add them to the input target if not already present.  (A Var used
+	 * directly as a GROUP BY item will be present already.)  Note this
+	 * includes Vars used in resjunk items, so we are covering the needs of
+	 * ORDER BY and window specifications.  Vars used within Aggrefs and
+	 * WindowFuncs will be pulled out here, too.
 	 */
-	*have_postponed_srfs = postpone_srfs;
+	non_group_vars = pull_var_clause((Node *) non_group_cols,
+									 PVC_RECURSE_AGGREGATES |
+									 PVC_RECURSE_WINDOWFUNCS |
+									 PVC_INCLUDE_PLACEHOLDERS);
+	add_new_columns_to_pathtarget(input_target, non_group_vars);
 
-	/*
-	 * Construct the sort-input target, taking all non-postponable columns and
-	 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
-	 * the postponable ones.
-	 */
-	input_target = create_empty_pathtarget();
-	postponable_cols = NIL;
+	/* clean up cruft */
+	list_free(non_group_vars);
+	list_free(non_group_cols);
 
-	i = 0;
-	foreach(lc, final_target->exprs)
-	{
-		Expr	   *expr = (Expr *) lfirst(lc);
+	/* XXX this causes some redundant cost calculation ... */
+	return set_pathtarget_cost_width(root, input_target);
+}
 
-		if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
-			postponable_cols = lappend(postponable_cols, expr);
-		else
-			add_column_to_pathtarget(input_target, expr,
-									 get_pathtarget_sortgroupref(final_target, i));
+/*
+ * mark_partial_aggref
+ *	  Adjust an Aggref to make it represent a partial-aggregation step.
+ *
+ * The Aggref node is modified in-place; caller must do any copying required.
+ */
+void
+mark_partial_aggref(Aggref *agg, AggSplit aggsplit)
+{
+	/* aggtranstype should be computed by this point */
+	Assert(OidIsValid(agg->aggtranstype));
+	/* ... but aggsplit should still be as the parser left it */
+	Assert(agg->aggsplit == AGGSPLIT_SIMPLE);
 
-		i++;
-	}
+	/* Mark the Aggref with the intended partial-aggregation mode */
+	agg->aggsplit = aggsplit;
 
 	/*
-	 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
-	 * postponable columns, and add them to the sort-input target if not
-	 * already present.  (Some might be there already.)  We mustn't
-	 * deconstruct Aggrefs or WindowFuncs here, since the projection node
-	 * would be unable to recompute them.
+	 * Adjust result type if needed.  Normally, a partial aggregate returns
+	 * the aggregate's transition type; but if that's INTERNAL and we're
+	 * serializing, it returns BYTEA instead.
 	 */
-	postponable_vars = pull_var_clause((Node *) postponable_cols,
-									   PVC_INCLUDE_AGGREGATES |
-									   PVC_INCLUDE_WINDOWFUNCS |
-									   PVC_INCLUDE_PLACEHOLDERS);
-	add_new_columns_to_pathtarget(input_target, postponable_vars);
-
-	/* clean up cruft */
-	list_free(postponable_vars);
-	list_free(postponable_cols);
-
-	/* XXX this represents even more redundant cost calculation ... */
-	return set_pathtarget_cost_width(root, input_target);
+	if (DO_AGGSPLIT_SKIPFINAL(aggsplit))
+	{
+		if (agg->aggtranstype == INTERNALOID && DO_AGGSPLIT_SERIALIZE(aggsplit))
+			agg->aggtype = BYTEAOID;
+		else
+			agg->aggtype = agg->aggtranstype;
+	}
 }
 
 /*
- * get_cheapest_fractional_path
- *	  Find the cheapest path for retrieving a specified fraction of all
- *	  the tuples expected to be returned by the given relation.
- *
- * We interpret tuple_fraction the same way as grouping_planner.
+ * postprocess_setop_tlist
+ *	  Fix up targetlist returned by plan_set_operations().
  *
- * We assume set_cheapest() has been run on the given rel.
+ * We need to transpose sort key info from the orig_tlist into new_tlist.
+ * NOTE: this would not be good enough if we supported resjunk sort keys
+ * for results of set operations --- then, we'd need to project a whole
+ * new tlist to evaluate the resjunk columns.  For now, just elog(ERROR) if
+ * we find any resjunk columns in orig_tlist.
+ */
+static List *
+postprocess_setop_tlist(List *new_tlist, List *orig_tlist)
+{
+	ListCell   *l;
+	ListCell   *orig_tlist_item = list_head(orig_tlist);
+
+	foreach(l, new_tlist)
+	{
+		TargetEntry *new_tle = lfirst_node(TargetEntry, l);
+		TargetEntry *orig_tle;
+
+		/* ignore resjunk columns in setop result */
+		if (new_tle->resjunk)
+			continue;
+
+		Assert(orig_tlist_item != NULL);
+		orig_tle = lfirst_node(TargetEntry, orig_tlist_item);
+		orig_tlist_item = lnext(orig_tlist_item);
+		if (orig_tle->resjunk)	/* should not happen */
+			elog(ERROR, "resjunk output columns are not implemented");
+		Assert(new_tle->resno == orig_tle->resno);
+		new_tle->ressortgroupref = orig_tle->ressortgroupref;
+	}
+	if (orig_tlist_item != NULL)
+		elog(ERROR, "resjunk output columns are not implemented");
+	return new_tlist;
+}
+
+/*
+ * select_active_windows
+ *		Create a list of the "active" window clauses (ie, those referenced
+ *		by non-deleted WindowFuncs) in the order they are to be executed.
  */
-Path *
-get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
+static List *
+select_active_windows(PlannerInfo *root, WindowFuncLists *wflists)
 {
-	Path	   *best_path = rel->cheapest_total_path;
-	ListCell   *l;
+	List	   *result;
+	List	   *actives;
+	ListCell   *lc;
 
-	/* If all tuples will be retrieved, just return the cheapest-total path */
-	if (tuple_fraction <= 0.0)
-		return best_path;
+	/* First, make a list of the active windows */
+	actives = NIL;
+	foreach(lc, root->parse->windowClause)
+	{
+		WindowClause *wc = lfirst_node(WindowClause, lc);
 
-	/* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
-	if (tuple_fraction >= 1.0 && best_path->rows > 0)
-		tuple_fraction /= best_path->rows;
+		/* It's only active if wflists shows some related WindowFuncs */
+		Assert(wc->winref <= wflists->maxWinRef);
+		if (wflists->windowFuncs[wc->winref] != NIL)
+			actives = lappend(actives, wc);
+	}
 
-	foreach(l, rel->pathlist)
+	/*
+	 * Now, ensure that windows with identical partitioning/ordering clauses
+	 * are adjacent in the list.  This is required by the SQL standard, which
+	 * says that only one sort is to be used for such windows, even if they
+	 * are otherwise distinct (eg, different names or framing clauses).
+	 *
+	 * There is room to be much smarter here, for example detecting whether
+	 * one window's sort keys are a prefix of another's (so that sorting for
+	 * the latter would do for the former), or putting windows first that
+	 * match a sort order available for the underlying query.  For the moment
+	 * we are content with meeting the spec.
+	 */
+	result = NIL;
+	while (actives != NIL)
 	{
-		Path	   *path = (Path *) lfirst(l);
+		WindowClause *wc = linitial_node(WindowClause, actives);
+		ListCell   *prev;
+		ListCell   *next;
 
-		if (path == rel->cheapest_total_path ||
-			compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
-			continue;
+		/* Move wc from actives to result */
+		actives = list_delete_first(actives);
+		result = lappend(result, wc);
 
-		best_path = path;
+		/* Now move any matching windows from actives to result */
+		prev = NULL;
+		for (lc = list_head(actives); lc; lc = next)
+		{
+			WindowClause *wc2 = lfirst_node(WindowClause, lc);
+
+			next = lnext(lc);
+			/* framing options are NOT to be compared here! */
+			if (equal(wc->partitionClause, wc2->partitionClause) &&
+				equal(wc->orderClause, wc2->orderClause))
+			{
+				actives = list_delete_cell(actives, lc, prev);
+				result = lappend(result, wc2);
+			}
+			else
+				prev = lc;
+		}
 	}
 
-	return best_path;
+	return result;
 }
 
 /*
- * adjust_paths_for_srfs
- *		Fix up the Paths of the given upperrel to handle tSRFs properly.
+ * make_window_input_target
+ *	  Generate appropriate PathTarget for initial input to WindowAgg nodes.
  *
- * The executor can only handle set-returning functions that appear at the
- * top level of the targetlist of a ProjectSet plan node.  If we have any SRFs
- * that are not at top level, we need to split up the evaluation into multiple
- * plan levels in which each level satisfies this constraint.  This function
- * modifies each Path of an upperrel that (might) compute any SRFs in its
- * output tlist to insert appropriate projection steps.
+ * When the query has window functions, this function computes the desired
+ * target to be computed by the node just below the first WindowAgg.
+ * This tlist must contain all values needed to evaluate the window functions,
+ * compute the final target list, and perform any required final sort step.
+ * If multiple WindowAggs are needed, each intermediate one adds its window
+ * function results onto this base tlist; only the topmost WindowAgg computes
+ * the actual desired target list.
  *
- * The given targets and targets_contain_srfs lists are from
- * split_pathtarget_at_srfs().  We assume the existing Paths emit the first
- * target in targets.
+ * This function is much like make_group_input_target, though not quite enough
+ * like it to share code.  As in that function, we flatten most expressions
+ * into their component variables.  But we do not want to flatten window
+ * PARTITION BY/ORDER BY clauses, since that might result in multiple
+ * evaluations of them, which would be bad (possibly even resulting in
+ * inconsistent answers, if they contain volatile functions).
+ * Also, we must not flatten GROUP BY clauses that were left unflattened by
+ * make_group_input_target, because we may no longer have access to the
+ * individual Vars in them.
+ *
+ * Another key difference from make_group_input_target is that we don't
+ * flatten Aggref expressions, since those are to be computed below the
+ * window functions and just referenced like Vars above that.
+ *
+ * 'final_target' is the query's final target list (in PathTarget form)
+ * 'activeWindows' is the list of active windows previously identified by
+ *			select_active_windows.
+ *
+ * The result is the PathTarget to be computed by the plan node immediately
+ * below the first WindowAgg node.
  */
-static void
-adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
-					  List *targets, List *targets_contain_srfs)
+static PathTarget *
+make_window_input_target(PlannerInfo *root,
+						 PathTarget *final_target,
+						 List *activeWindows)
 {
+	Query	   *parse = root->parse;
+	PathTarget *input_target;
+	Bitmapset  *sgrefs;
+	List	   *flattenable_cols;
+	List	   *flattenable_vars;
+	int			i;
 	ListCell   *lc;
 
-	Assert(list_length(targets) == list_length(targets_contain_srfs));
-	Assert(!linitial_int(targets_contain_srfs));
-
-	/* If no SRFs appear at this plan level, nothing to do */
-	if (list_length(targets) == 1)
-		return;
+	Assert(parse->hasWindowFuncs);
 
 	/*
-	 * Stack SRF-evaluation nodes atop each path for the rel.
-	 *
-	 * In principle we should re-run set_cheapest() here to identify the
-	 * cheapest path, but it seems unlikely that adding the same tlist eval
-	 * costs to all the paths would change that, so we don't bother. Instead,
-	 * just assume that the cheapest-startup and cheapest-total paths remain
-	 * so.  (There should be no parameterized paths anymore, so we needn't
-	 * worry about updating cheapest_parameterized_paths.)
+	 * Collect the sortgroupref numbers of window PARTITION/ORDER BY clauses
+	 * into a bitmapset for convenient reference below.
 	 */
-	foreach(lc, rel->pathlist)
+	sgrefs = NULL;
+	foreach(lc, activeWindows)
 	{
-		Path	   *subpath = (Path *) lfirst(lc);
-		Path	   *newpath = subpath;
-		ListCell   *lc1,
-				   *lc2;
+		WindowClause *wc = lfirst_node(WindowClause, lc);
+		ListCell   *lc2;
 
-		Assert(subpath->param_info == NULL);
-		forboth(lc1, targets, lc2, targets_contain_srfs)
+		foreach(lc2, wc->partitionClause)
 		{
-			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
-			bool		contains_srfs = (bool) lfirst_int(lc2);
+			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
 
-			/* If this level doesn't contain SRFs, do regular projection */
-			if (contains_srfs)
-				newpath = (Path *) create_set_projection_path(root,
-															  rel,
-															  newpath,
-															  thistarget);
-			else
-				newpath = (Path *) apply_projection_to_path(root,
-															rel,
-															newpath,
-															thistarget);
+			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
+		}
+		foreach(lc2, wc->orderClause)
+		{
+			SortGroupClause *sortcl = lfirst_node(SortGroupClause, lc2);
+
+			sgrefs = bms_add_member(sgrefs, sortcl->tleSortGroupRef);
 		}
-		lfirst(lc) = newpath;
-		if (subpath == rel->cheapest_startup_path)
-			rel->cheapest_startup_path = newpath;
-		if (subpath == rel->cheapest_total_path)
-			rel->cheapest_total_path = newpath;
 	}
 
-	/* Likewise for partial paths, if any */
-	foreach(lc, rel->partial_pathlist)
+	/* Add in sortgroupref numbers of GROUP BY clauses, too */
+	foreach(lc, parse->groupClause)
 	{
-		Path	   *subpath = (Path *) lfirst(lc);
-		Path	   *newpath = subpath;
-		ListCell   *lc1,
-				   *lc2;
+		SortGroupClause *grpcl = lfirst_node(SortGroupClause, lc);
 
-		Assert(subpath->param_info == NULL);
-		forboth(lc1, targets, lc2, targets_contain_srfs)
-		{
-			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
-			bool		contains_srfs = (bool) lfirst_int(lc2);
+		sgrefs = bms_add_member(sgrefs, grpcl->tleSortGroupRef);
+	}
 
-			/* If this level doesn't contain SRFs, do regular projection */
-			if (contains_srfs)
-				newpath = (Path *) create_set_projection_path(root,
-															  rel,
-															  newpath,
-															  thistarget);
-			else
-			{
-				/* avoid apply_projection_to_path, in case of multiple refs */
-				newpath = (Path *) create_projection_path(root,
-														  rel,
-														  newpath,
-														  thistarget);
-			}
+	/*
+	 * Construct a target containing all the non-flattenable targetlist items,
+	 * and save aside the others for a moment.
+	 */
+	input_target = create_empty_pathtarget();
+	flattenable_cols = NIL;
+
+	i = 0;
+	foreach(lc, final_target->exprs)
+	{
+		Expr	   *expr = (Expr *) lfirst(lc);
+		Index		sgref = get_pathtarget_sortgroupref(final_target, i);
+
+		/*
+		 * Don't want to deconstruct window clauses or GROUP BY items.  (Note
+		 * that such items can't contain window functions, so it's okay to
+		 * compute them below the WindowAgg nodes.)
+		 */
+		if (sgref != 0 && bms_is_member(sgref, sgrefs))
+		{
+			/*
+			 * Don't want to deconstruct this value, so add it to the input
+			 * target as-is.
+			 */
+			add_column_to_pathtarget(input_target, expr, sgref);
 		}
-		lfirst(lc) = newpath;
+		else
+		{
+			/*
+			 * Column is to be flattened, so just remember the expression for
+			 * later call to pull_var_clause.
+			 */
+			flattenable_cols = lappend(flattenable_cols, expr);
+		}
+
+		i++;
 	}
+
+	/*
+	 * Pull out all the Vars and Aggrefs mentioned in flattenable columns, and
+	 * add them to the input target if not already present.  (Some might be
+	 * there already because they're used directly as window/group clauses.)
+	 *
+	 * Note: it's essential to use PVC_INCLUDE_AGGREGATES here, so that any
+	 * Aggrefs are placed in the Agg node's tlist and not left to be computed
+	 * at higher levels.  On the other hand, we should recurse into
+	 * WindowFuncs to make sure their input expressions are available.
+	 */
+	flattenable_vars = pull_var_clause((Node *) flattenable_cols,
+									   PVC_INCLUDE_AGGREGATES |
+									   PVC_RECURSE_WINDOWFUNCS |
+									   PVC_INCLUDE_PLACEHOLDERS);
+	add_new_columns_to_pathtarget(input_target, flattenable_vars);
+
+	/* clean up cruft */
+	list_free(flattenable_vars);
+	list_free(flattenable_cols);
+
+	/* XXX this causes some redundant cost calculation ... */
+	return set_pathtarget_cost_width(root, input_target);
 }
 
 /*
- * expression_planner
- *		Perform planner's transformations on a standalone expression.
- *
- * Various utility commands need to evaluate expressions that are not part
- * of a plannable query.  They can do so using the executor's regular
- * expression-execution machinery, but first the expression has to be fed
- * through here to transform it from parser output to something executable.
+ * make_pathkeys_for_window
+ *		Create a pathkeys list describing the required input ordering
+ *		for the given WindowClause.
  *
- * Currently, we disallow sublinks in standalone expressions, so there's no
- * real "planning" involved here.  (That might not always be true though.)
- * What we must do is run eval_const_expressions to ensure that any function
- * calls are converted to positional notation and function default arguments
- * get inserted.  The fact that constant subexpressions get simplified is a
- * side-effect that is useful when the expression will get evaluated more than
- * once.  Also, we must fix operator function IDs.
+ * The required ordering is first the PARTITION keys, then the ORDER keys.
+ * In the future we might try to implement windowing using hashing, in which
+ * case the ordering could be relaxed, but for now we always sort.
  *
- * Note: this must not make any damaging changes to the passed-in expression
- * tree.  (It would actually be okay to apply fix_opfuncids to it, but since
- * we first do an expression_tree_mutator-based walk, what is returned will
- * be a new node tree.)
+ * Caution: if you change this, see createplan.c's get_column_info_for_window!
  */
-Expr *
-expression_planner(Expr *expr)
+static List *
+make_pathkeys_for_window(PlannerInfo *root, WindowClause *wc,
+						 List *tlist)
 {
-	Node	   *result;
-
-	/*
-	 * Convert named-argument function calls, insert default arguments and
-	 * simplify constant subexprs
-	 */
-	result = eval_const_expressions(NULL, (Node *) expr);
+	List	   *window_pathkeys;
+	List	   *window_sortclauses;
 
-	/* Fill in opfuncid values if missing */
-	fix_opfuncids(result);
+	/* Throw error if can't sort */
+	if (!grouping_is_sortable(wc->partitionClause))
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("could not implement window PARTITION BY"),
+				 errdetail("Window partitioning columns must be of sortable datatypes.")));
+	if (!grouping_is_sortable(wc->orderClause))
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("could not implement window ORDER BY"),
+				 errdetail("Window ordering columns must be of sortable datatypes.")));
 
-	return (Expr *) result;
+	/* Okay, make the combined pathkeys */
+	window_sortclauses = list_concat(list_copy(wc->partitionClause),
+									 list_copy(wc->orderClause));
+	window_pathkeys = make_pathkeys_for_sortclauses(root,
+													window_sortclauses,
+													tlist);
+	list_free(window_sortclauses);
+	return window_pathkeys;
 }
 
-
 /*
- * plan_cluster_use_sort
- *		Use the planner to decide how CLUSTER should implement sorting
+ * make_sort_input_target
+ *	  Generate appropriate PathTarget for initial input to Sort step.
  *
- * tableOid is the OID of a table to be clustered on its index indexOid
- * (which is already known to be a btree index).  Decide whether it's
- * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
- * Return true to use sorting, false to use an indexscan.
+ * If the query has ORDER BY, this function chooses the target to be computed
+ * by the node just below the Sort (and DISTINCT, if any, since Unique can't
+ * project) steps.  This might or might not be identical to the query's final
+ * output target.
  *
- * Note: caller had better already hold some type of lock on the table.
+ * The main argument for keeping the sort-input tlist the same as the final tlist
+ * is that we avoid a separate projection node (which will be needed if
+ * they're different, because Sort can't project).  However, there are also
+ * advantages to postponing tlist evaluation till after the Sort: it ensures
+ * a consistent order of evaluation for any volatile functions in the tlist,
+ * and if there's also a LIMIT, we can stop the query without ever computing
+ * tlist functions for later rows, which is beneficial for both volatile and
+ * expensive functions.
+ *
+ * Our current policy is to postpone volatile expressions till after the sort
+ * unconditionally (assuming that that's possible, ie they are in plain tlist
+ * columns and not ORDER BY/GROUP BY/DISTINCT columns).  We also prefer to
+ * postpone set-returning expressions, because running them beforehand would
+ * bloat the sort dataset, and because it might cause unexpected output order
+ * if the sort isn't stable.  However there's a constraint on that: all SRFs
+ * in the tlist should be evaluated at the same plan step, so that they can
+ * run in sync in nodeProjectSet.  So if any SRFs are in sort columns, we
+ * mustn't postpone any SRFs.  (Note that in principle that policy should
+ * probably get applied to the group/window input targetlists too, but we
+ * have not done that historically.)  Lastly, expensive expressions are
+ * postponed if there is a LIMIT, or if root->tuple_fraction shows that
+ * partial evaluation of the query is possible (if neither is true, we expect
+ * to have to evaluate the expressions for every row anyway), or if there are
+ * any volatile or set-returning expressions (since once we've put in a
+ * projection at all, it won't cost any more to postpone more stuff).
+ *
+ * Another issue that could potentially be considered here is that
+ * evaluating tlist expressions could result in data that's either wider
+ * or narrower than the input Vars, thus changing the volume of data that
+ * has to go through the Sort.  However, we usually have only a very bad
+ * idea of the output width of any expression more complex than a Var,
+ * so for now it seems too risky to try to optimize on that basis.
+ *
+ * Note that if we do produce a modified sort-input target, and then the
+ * query ends up not using an explicit Sort, no particular harm is done:
+ * we'll initially use the modified target for the preceding path nodes,
+ * but then change them to the final target with apply_projection_to_path.
+ * Moreover, in such a case the guarantees about evaluation order of
+ * volatile functions still hold, since the rows are sorted already.
+ *
+ * This function has some things in common with make_group_input_target and
+ * make_window_input_target, though the detailed rules for what to do are
+ * different.  We never flatten/postpone any grouping or ordering columns;
+ * those are needed before the sort.  If we do flatten a particular
+ * expression, we leave Aggref and WindowFunc nodes alone, since those were
+ * computed earlier.
+ *
+ * 'final_target' is the query's final target list (in PathTarget form)
+ * 'have_postponed_srfs' is an output argument, see below
+ *
+ * The result is the PathTarget to be computed by the plan node immediately
+ * below the Sort step (and the Distinct step, if any).  This will be
+ * exactly final_target if we decide a projection step wouldn't be helpful.
+ *
+ * In addition, *have_postponed_srfs is set to true if we choose to postpone
+ * any set-returning functions to after the Sort.
  */
-bool
-plan_cluster_use_sort(Oid tableOid, Oid indexOid)
+static PathTarget *
+make_sort_input_target(PlannerInfo *root,
+					   PathTarget *final_target,
+					   bool *have_postponed_srfs)
 {
-	PlannerInfo *root;
-	Query	   *query;
-	PlannerGlobal *glob;
-	RangeTblEntry *rte;
-	RelOptInfo *rel;
-	IndexOptInfo *indexInfo;
-	QualCost	indexExprCost;
-	Cost		comparisonCost;
-	Path	   *seqScanPath;
-	Path		seqScanAndSortPath;
-	IndexPath  *indexScanPath;
+	Query	   *parse = root->parse;
+	PathTarget *input_target;
+	int			ncols;
+	bool	   *col_is_srf;
+	bool	   *postpone_col;
+	bool		have_srf;
+	bool		have_volatile;
+	bool		have_expensive;
+	bool		have_srf_sortcols;
+	bool		postpone_srfs;
+	List	   *postponable_cols;
+	List	   *postponable_vars;
+	int			i;
 	ListCell   *lc;
 
-	/* We can short-circuit the cost comparison if indexscans are disabled */
-	if (!enable_indexscan)
-		return true;			/* use sort */
+	/* Shouldn't get here unless query has ORDER BY */
+	Assert(parse->sortClause);
 
-	/* Set up mostly-dummy planner state */
-	query = makeNode(Query);
-	query->commandType = CMD_SELECT;
+	*have_postponed_srfs = false;	/* default result */
 
-	glob = makeNode(PlannerGlobal);
+	/* Inspect tlist and collect per-column information */
+	ncols = list_length(final_target->exprs);
+	col_is_srf = (bool *) palloc0(ncols * sizeof(bool));
+	postpone_col = (bool *) palloc0(ncols * sizeof(bool));
+	have_srf = have_volatile = have_expensive = have_srf_sortcols = false;
 
-	root = makeNode(PlannerInfo);
-	root->parse = query;
-	root->glob = glob;
-	root->query_level = 1;
-	root->planner_cxt = CurrentMemoryContext;
-	root->wt_param_id = -1;
+	i = 0;
+	foreach(lc, final_target->exprs)
+	{
+		Expr	   *expr = (Expr *) lfirst(lc);
 
-	/* Build a minimal RTE for the rel */
-	rte = makeNode(RangeTblEntry);
-	rte->rtekind = RTE_RELATION;
-	rte->relid = tableOid;
-	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
-	rte->lateral = false;
-	rte->inh = false;
-	rte->inFromCl = true;
-	query->rtable = list_make1(rte);
+		/*
+		 * If the column has a sortgroupref, assume it has to be evaluated
+		 * before sorting.  Generally such columns would be ORDER BY, GROUP
+		 * BY, etc targets.  One exception is columns that were removed from
+		 * GROUP BY by remove_useless_groupby_columns() ... but those would
+		 * only be Vars anyway.  There don't seem to be any cases where it
+		 * would be worth the trouble to double-check.
+		 */
+		if (get_pathtarget_sortgroupref(final_target, i) == 0)
+		{
+			/*
+			 * Check for SRF or volatile functions.  Check the SRF case first
+			 * because we must know whether we have any postponed SRFs.
+			 */
+			if (parse->hasTargetSRFs &&
+				expression_returns_set((Node *) expr))
+			{
+				/* We'll decide below whether these are postponable */
+				col_is_srf[i] = true;
+				have_srf = true;
+			}
+			else if (contain_volatile_functions((Node *) expr))
+			{
+				/* Unconditionally postpone */
+				postpone_col[i] = true;
+				have_volatile = true;
+			}
+			else
+			{
+				/*
+				 * Else check the cost.  XXX it's annoying to have to do this
+				 * when set_pathtarget_cost_width() just did it.  Refactor to
+				 * allow sharing the work?
+				 */
+				QualCost	cost;
 
-	/* Set up RTE/RelOptInfo arrays */
-	setup_simple_rel_arrays(root);
+				cost_qual_eval_node(&cost, (Node *) expr, root);
 
-	/* Build RelOptInfo */
-	rel = build_simple_rel(root, 1, NULL);
+				/*
+				 * We arbitrarily define "expensive" as "more than 10X
+				 * cpu_operator_cost".  Note this will take in any PL function
+				 * with default cost.
+				 */
+				if (cost.per_tuple > 10 * cpu_operator_cost)
+				{
+					postpone_col[i] = true;
+					have_expensive = true;
+				}
+			}
+		}
+		else
+		{
+			/* For sortgroupref cols, just check if any contain SRFs */
+			if (!have_srf_sortcols &&
+				parse->hasTargetSRFs &&
+				expression_returns_set((Node *) expr))
+				have_srf_sortcols = true;
+		}
 
-	/* Locate IndexOptInfo for the target index */
-	indexInfo = NULL;
-	foreach(lc, rel->indexlist)
-	{
-		indexInfo = lfirst_node(IndexOptInfo, lc);
-		if (indexInfo->indexoid == indexOid)
-			break;
+		i++;
 	}
 
 	/*
-	 * It's possible that get_relation_info did not generate an IndexOptInfo
-	 * for the desired index; this could happen if it's not yet reached its
-	 * indcheckxmin usability horizon, or if it's a system index and we're
-	 * ignoring system indexes.  In such cases we should tell CLUSTER to not
-	 * trust the index contents but use seqscan-and-sort.
+	 * We can postpone SRFs if we have some but none are in sortgroupref cols.
 	 */
-	if (lc == NULL)				/* not in the list? */
-		return true;			/* use sort */
+	postpone_srfs = (have_srf && !have_srf_sortcols);
 
 	/*
-	 * Rather than doing all the pushups that would be needed to use
-	 * set_baserel_size_estimates, just do a quick hack for rows and width.
+	 * If we don't need a post-sort projection, just return final_target.
 	 */
-	rel->rows = rel->tuples;
-	rel->reltarget->width = get_relation_data_width(tableOid, NULL);
+	if (!(postpone_srfs || have_volatile ||
+		  (have_expensive &&
+		   (parse->limitCount || root->tuple_fraction > 0))))
+		return final_target;
 
-	root->total_table_pages = rel->pages;
+	/*
+	 * Report whether the post-sort projection will contain set-returning
+	 * functions.  This is important because it affects whether the Sort can
+	 * rely on the query's LIMIT (if any) to bound the number of rows it needs
+	 * to return.
+	 */
+	*have_postponed_srfs = postpone_srfs;
 
 	/*
-	 * Determine eval cost of the index expressions, if any.  We need to
-	 * charge twice that amount for each tuple comparison that happens during
-	 * the sort, since tuplesort.c will have to re-evaluate the index
-	 * expressions each time.  (XXX that's pretty inefficient...)
+	 * Construct the sort-input target, taking all non-postponable columns and
+	 * then adding Vars, PlaceHolderVars, Aggrefs, and WindowFuncs found in
+	 * the postponable ones.
 	 */
-	cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
-	comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
+	input_target = create_empty_pathtarget();
+	postponable_cols = NIL;
 
-	/* Estimate the cost of seq scan + sort */
-	seqScanPath = create_seqscan_path(root, rel, NULL, 0);
-	cost_sort(&seqScanAndSortPath, root, NIL,
-			  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
-			  comparisonCost, maintenance_work_mem, -1.0);
+	i = 0;
+	foreach(lc, final_target->exprs)
+	{
+		Expr	   *expr = (Expr *) lfirst(lc);
 
-	/* Estimate the cost of index scan */
-	indexScanPath = create_index_path(root, indexInfo,
-									  NIL, NIL, NIL, NIL, NIL,
-									  ForwardScanDirection, false,
-									  NULL, 1.0, false);
+		if (postpone_col[i] || (postpone_srfs && col_is_srf[i]))
+			postponable_cols = lappend(postponable_cols, expr);
+		else
+			add_column_to_pathtarget(input_target, expr,
+									 get_pathtarget_sortgroupref(final_target, i));
 
-	return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
+		i++;
+	}
+
+	/*
+	 * Pull out all the Vars, Aggrefs, and WindowFuncs mentioned in
+	 * postponable columns, and add them to the sort-input target if not
+	 * already present.  (Some might be there already.)  We mustn't
+	 * deconstruct Aggrefs or WindowFuncs here, since the projection node
+	 * would be unable to recompute them.
+	 */
+	postponable_vars = pull_var_clause((Node *) postponable_cols,
+									   PVC_INCLUDE_AGGREGATES |
+									   PVC_INCLUDE_WINDOWFUNCS |
+									   PVC_INCLUDE_PLACEHOLDERS);
+	add_new_columns_to_pathtarget(input_target, postponable_vars);
+
+	/* clean up cruft */
+	list_free(postponable_vars);
+	list_free(postponable_cols);
+
+	/* XXX this represents even more redundant cost calculation ... */
+	return set_pathtarget_cost_width(root, input_target);
 }
 
 /*
- * plan_create_index_workers
- *		Use the planner to decide how many parallel worker processes
- *		CREATE INDEX should request for use
- *
- * tableOid is the table on which the index is to be built.  indexOid is the
- * OID of an index to be created or reindexed (which must be a btree index).
+ * get_cheapest_fractional_path
+ *	  Find the cheapest path for retrieving a specified fraction of all
+ *	  the tuples expected to be returned by the given relation.
  *
- * Return value is the number of parallel worker processes to request.  It
- * may be unsafe to proceed if this is 0.  Note that this does not include the
- * leader participating as a worker (value is always a number of parallel
- * worker processes).
+ * We interpret tuple_fraction the same way as grouping_planner.
  *
- * Note: caller had better already hold some type of lock on the table and
- * index.
+ * We assume set_cheapest() has been run on the given rel.
  */
-int
-plan_create_index_workers(Oid tableOid, Oid indexOid)
+Path *
+get_cheapest_fractional_path(RelOptInfo *rel, double tuple_fraction)
 {
-	PlannerInfo *root;
-	Query	   *query;
-	PlannerGlobal *glob;
-	RangeTblEntry *rte;
-	Relation	heap;
-	Relation	index;
-	RelOptInfo *rel;
-	int			parallel_workers;
-	BlockNumber heap_blocks;
-	double		reltuples;
-	double		allvisfrac;
-
-	/* Return immediately when parallelism disabled */
-	if (dynamic_shared_memory_type == DSM_IMPL_NONE ||
-		max_parallel_maintenance_workers == 0)
-		return 0;
-
-	/* Set up largely-dummy planner state */
-	query = makeNode(Query);
-	query->commandType = CMD_SELECT;
-
-	glob = makeNode(PlannerGlobal);
-
-	root = makeNode(PlannerInfo);
-	root->parse = query;
-	root->glob = glob;
-	root->query_level = 1;
-	root->planner_cxt = CurrentMemoryContext;
-	root->wt_param_id = -1;
-
-	/*
-	 * Build a minimal RTE.
-	 *
-	 * Set the target's table to be an inheritance parent.  This is a kludge
-	 * that prevents problems within get_relation_info(), which does not
-	 * expect that any IndexOptInfo is currently undergoing REINDEX.
-	 */
-	rte = makeNode(RangeTblEntry);
-	rte->rtekind = RTE_RELATION;
-	rte->relid = tableOid;
-	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
-	rte->lateral = false;
-	rte->inh = true;
-	rte->inFromCl = true;
-	query->rtable = list_make1(rte);
-
-	/* Set up RTE/RelOptInfo arrays */
-	setup_simple_rel_arrays(root);
-
-	/* Build RelOptInfo */
-	rel = build_simple_rel(root, 1, NULL);
+	Path	   *best_path = rel->cheapest_total_path;
+	ListCell   *l;
 
-	heap = heap_open(tableOid, NoLock);
-	index = index_open(indexOid, NoLock);
+	/* If all tuples will be retrieved, just return the cheapest-total path */
+	if (tuple_fraction <= 0.0)
+		return best_path;
 
-	/*
-	 * Determine if it's safe to proceed.
-	 *
-	 * Currently, parallel workers can't access the leader's temporary tables.
-	 * Furthermore, any index predicate or index expressions must be parallel
-	 * safe.
-	 */
-	if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
-		!is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
-		!is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
-	{
-		parallel_workers = 0;
-		goto done;
-	}
+	/* Convert absolute # of tuples to a fraction; no need to clamp to 0..1 */
+	if (tuple_fraction >= 1.0 && best_path->rows > 0)
+		tuple_fraction /= best_path->rows;
 
-	/*
-	 * If parallel_workers storage parameter is set for the table, accept that
-	 * as the number of parallel worker processes to launch (though still cap
-	 * at max_parallel_maintenance_workers).  Note that we deliberately do not
-	 * consider any other factor when parallel_workers is set. (e.g., memory
-	 * use by workers.)
-	 */
-	if (rel->rel_parallel_workers != -1)
+	foreach(l, rel->pathlist)
 	{
-		parallel_workers = Min(rel->rel_parallel_workers,
-							   max_parallel_maintenance_workers);
-		goto done;
-	}
-
-	/*
-	 * Estimate heap relation size ourselves, since rel->pages cannot be
-	 * trusted (heap RTE was marked as inheritance parent)
-	 */
-	estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
-
-	/*
-	 * Determine number of workers to scan the heap relation using generic
-	 * model
-	 */
-	parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
-											   max_parallel_maintenance_workers);
+		Path	   *path = (Path *) lfirst(l);
 
-	/*
-	 * Cap workers based on available maintenance_work_mem as needed.
-	 *
-	 * Note that each tuplesort participant receives an even share of the
-	 * total maintenance_work_mem budget.  Aim to leave participants
-	 * (including the leader as a participant) with no less than 32MB of
-	 * memory.  This leaves cases where maintenance_work_mem is set to 64MB
-	 * immediately past the threshold of being capable of launching a single
-	 * parallel worker to sort.
-	 */
-	while (parallel_workers > 0 &&
-		   maintenance_work_mem / (parallel_workers + 1) < 32768L)
-		parallel_workers--;
+		if (path == rel->cheapest_total_path ||
+			compare_fractional_path_costs(best_path, path, tuple_fraction) <= 0)
+			continue;
 
-done:
-	index_close(index, NoLock);
-	heap_close(heap, NoLock);
+		best_path = path;
+	}
 
-	return parallel_workers;
+	return best_path;
 }
 
 /*
- * add_paths_to_grouping_rel
+ * adjust_paths_for_srfs
+ *		Fix up the Paths of the given upperrel to handle tSRFs properly.
+ *
+ * The executor can only handle set-returning functions that appear at the
+ * top level of the targetlist of a ProjectSet plan node.  If we have any SRFs
+ * that are not at top level, we need to split up the evaluation into multiple
+ * plan levels in which each level satisfies this constraint.  This function
+ * modifies each Path of an upperrel that (might) compute any SRFs in its
+ * output tlist to insert appropriate projection steps.
  *
- * Add non-partial paths to grouping relation.
+ * The given targets and targets_contain_srfs lists are from
+ * split_pathtarget_at_srfs().  We assume the existing Paths emit the first
+ * target in targets.
  */
 static void
-add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
-						  RelOptInfo *grouped_rel,
-						  RelOptInfo *partially_grouped_rel,
-						  const AggClauseCosts *agg_costs,
-						  grouping_sets_data *gd, double dNumGroups,
-						  GroupPathExtraData *extra)
+adjust_paths_for_srfs(PlannerInfo *root, RelOptInfo *rel,
+					  List *targets, List *targets_contain_srfs)
 {
-	Query	   *parse = root->parse;
-	Path	   *cheapest_path = input_rel->cheapest_total_path;
 	ListCell   *lc;
-	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
-	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
-	List	   *havingQual = (List *) extra->havingQual;
-	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
 
-	if (can_sort)
-	{
-		/*
-		 * Use any available suitably-sorted path as input, and also consider
-		 * sorting the cheapest-total path.
-		 */
-		foreach(lc, input_rel->pathlist)
-		{
-			Path	   *path = (Path *) lfirst(lc);
-			bool		is_sorted;
+	Assert(list_length(targets) == list_length(targets_contain_srfs));
+	Assert(!linitial_int(targets_contain_srfs));
 
-			is_sorted = pathkeys_contained_in(root->group_pathkeys,
-											  path->pathkeys);
-			if (path == cheapest_path || is_sorted)
-			{
-				/* Sort the cheapest-total path if it isn't already sorted */
-				if (!is_sorted)
-					path = (Path *) create_sort_path(root,
-													 grouped_rel,
-													 path,
-													 root->group_pathkeys,
-													 -1.0);
-
-				/* Now decide what to stick atop it */
-				if (parse->groupingSets)
-				{
-					consider_groupingsets_paths(root, grouped_rel,
-												path, true, can_hash,
-												gd, agg_costs, dNumGroups);
-				}
-				else if (parse->hasAggs)
-				{
-					/*
-					 * We have aggregation, possibly with plain GROUP BY. Make
-					 * an AggPath.
-					 */
-					add_path(grouped_rel, (Path *)
-							 create_agg_path(root,
-											 grouped_rel,
-											 path,
-											 grouped_rel->reltarget,
-											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
-											 AGGSPLIT_SIMPLE,
-											 parse->groupClause,
-											 havingQual,
-											 agg_costs,
-											 dNumGroups));
-				}
-				else if (parse->groupClause)
-				{
-					/*
-					 * We have GROUP BY without aggregation or grouping sets.
-					 * Make a GroupPath.
-					 */
-					add_path(grouped_rel, (Path *)
-							 create_group_path(root,
-											   grouped_rel,
-											   path,
-											   parse->groupClause,
-											   havingQual,
-											   dNumGroups));
-				}
-				else
-				{
-					/* Other cases should have been handled above */
-					Assert(false);
-				}
-			}
-		}
+	/* If no SRFs appear at this plan level, nothing to do */
+	if (list_length(targets) == 1)
+		return;
 
-		/*
-		 * Instead of operating directly on the input relation, we can
-		 * consider finalizing a partially aggregated path.
-		 */
-		if (partially_grouped_rel != NULL)
-		{
-			foreach(lc, partially_grouped_rel->pathlist)
-			{
-				Path	   *path = (Path *) lfirst(lc);
+	/*
+	 * Stack SRF-evaluation nodes atop each path for the rel.
+	 *
+	 * In principle we should re-run set_cheapest() here to identify the
+	 * cheapest path, but it seems unlikely that adding the same tlist eval
+	 * costs to all the paths would change that, so we don't bother. Instead,
+	 * just assume that the cheapest-startup and cheapest-total paths remain
+	 * so.  (There should be no parameterized paths anymore, so we needn't
+	 * worry about updating cheapest_parameterized_paths.)
+	 */
+	foreach(lc, rel->pathlist)
+	{
+		Path	   *subpath = (Path *) lfirst(lc);
+		Path	   *newpath = subpath;
+		ListCell   *lc1,
+				   *lc2;
 
-				/*
-				 * Insert a Sort node, if required.  But there's no point in
-				 * sorting anything but the cheapest path.
-				 */
-				if (!pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
-				{
-					if (path != partially_grouped_rel->cheapest_total_path)
-						continue;
-					path = (Path *) create_sort_path(root,
-													 grouped_rel,
-													 path,
-													 root->group_pathkeys,
-													 -1.0);
-				}
+		Assert(subpath->param_info == NULL);
+		forboth(lc1, targets, lc2, targets_contain_srfs)
+		{
+			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
+			bool		contains_srfs = (bool) lfirst_int(lc2);
 
-				if (parse->hasAggs)
-					add_path(grouped_rel, (Path *)
-							 create_agg_path(root,
-											 grouped_rel,
-											 path,
-											 grouped_rel->reltarget,
-											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
-											 AGGSPLIT_FINAL_DESERIAL,
-											 parse->groupClause,
-											 havingQual,
-											 agg_final_costs,
-											 dNumGroups));
-				else
-					add_path(grouped_rel, (Path *)
-							 create_group_path(root,
-											   grouped_rel,
-											   path,
-											   parse->groupClause,
-											   havingQual,
-											   dNumGroups));
-			}
+			/* If this level doesn't contain SRFs, do regular projection */
+			if (contains_srfs)
+				newpath = (Path *) create_set_projection_path(root,
+															  rel,
+															  newpath,
+															  thistarget);
+			else
+				newpath = (Path *) apply_projection_to_path(root,
+															rel,
+															newpath,
+															thistarget);
 		}
+		lfirst(lc) = newpath;
+		if (subpath == rel->cheapest_startup_path)
+			rel->cheapest_startup_path = newpath;
+		if (subpath == rel->cheapest_total_path)
+			rel->cheapest_total_path = newpath;
 	}
 
-	if (can_hash)
+	/* Likewise for partial paths, if any */
+	foreach(lc, rel->partial_pathlist)
 	{
-		Size		hashaggtablesize;
+		Path	   *subpath = (Path *) lfirst(lc);
+		Path	   *newpath = subpath;
+		ListCell   *lc1,
+				   *lc2;
 
-		if (parse->groupingSets)
-		{
-			/*
-			 * Try for a hash-only groupingsets path over unsorted input.
-			 */
-			consider_groupingsets_paths(root, grouped_rel,
-										cheapest_path, false, true,
-										gd, agg_costs, dNumGroups);
-		}
-		else
+		Assert(subpath->param_info == NULL);
+		forboth(lc1, targets, lc2, targets_contain_srfs)
 		{
-			hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
-														  agg_costs,
-														  dNumGroups);
+			PathTarget *thistarget = lfirst_node(PathTarget, lc1);
+			bool		contains_srfs = (bool) lfirst_int(lc2);
 
-			/*
-			 * Provided that the estimated size of the hashtable does not
-			 * exceed work_mem, we'll generate a HashAgg Path, although if we
-			 * were unable to sort above, then we'd better generate a Path, so
-			 * that we at least have one.
-			 */
-			if (hashaggtablesize < work_mem * 1024L ||
-				grouped_rel->pathlist == NIL)
+			/* If this level doesn't contain SRFs, do regular projection */
+			if (contains_srfs)
+				newpath = (Path *) create_set_projection_path(root,
+															  rel,
+															  newpath,
+															  thistarget);
+			else
 			{
-				/*
-				 * We just need an Agg over the cheapest-total input path,
-				 * since input order won't matter.
-				 */
-				add_path(grouped_rel, (Path *)
-						 create_agg_path(root, grouped_rel,
-										 cheapest_path,
-										 grouped_rel->reltarget,
-										 AGG_HASHED,
-										 AGGSPLIT_SIMPLE,
-										 parse->groupClause,
-										 havingQual,
-										 agg_costs,
-										 dNumGroups));
+				/* avoid apply_projection_to_path, in case of multiple refs */
+				newpath = (Path *) create_projection_path(root,
+														  rel,
+														  newpath,
+														  thistarget);
 			}
 		}
-
-		/*
-		 * Generate a Finalize HashAgg Path atop of the cheapest partially
-		 * grouped path, assuming there is one. Once again, we'll only do this
-		 * if it looks as though the hash table won't exceed work_mem.
-		 */
-		if (partially_grouped_rel && partially_grouped_rel->pathlist)
-		{
-			Path	   *path = partially_grouped_rel->cheapest_total_path;
-
-			hashaggtablesize = estimate_hashagg_tablesize(path,
-														  agg_final_costs,
-														  dNumGroups);
-
-			if (hashaggtablesize < work_mem * 1024L)
-				add_path(grouped_rel, (Path *)
-						 create_agg_path(root,
-										 grouped_rel,
-										 path,
-										 grouped_rel->reltarget,
-										 AGG_HASHED,
-										 AGGSPLIT_FINAL_DESERIAL,
-										 parse->groupClause,
-										 havingQual,
-										 agg_final_costs,
-										 dNumGroups));
-		}
+		lfirst(lc) = newpath;
 	}
-
-	/*
-	 * When partitionwise aggregate is used, we might have fully aggregated
-	 * paths in the partial pathlist, because add_paths_to_append_rel() will
-	 * consider a path for grouped_rel consisting of a Parallel Append of
-	 * non-partial paths from each child.
-	 */
-	if (grouped_rel->partial_pathlist != NIL)
-		gather_grouping_paths(root, grouped_rel);
 }
 
 /*
- * create_partial_grouping_paths
+ * expression_planner
+ *		Perform planner's transformations on a standalone expression.
  *
- * Create a new upper relation representing the result of partial aggregation
- * and populate it with appropriate paths.  Note that we don't finalize the
- * lists of paths here, so the caller can add additional partial or non-partial
- * paths and must afterward call gather_grouping_paths and set_cheapest on
- * the returned upper relation.
+ * Various utility commands need to evaluate expressions that are not part
+ * of a plannable query.  They can do so using the executor's regular
+ * expression-execution machinery, but first the expression has to be fed
+ * through here to transform it from parser output to something executable.
  *
- * All paths for this new upper relation -- both partial and non-partial --
- * have been partially aggregated but require a subsequent FinalizeAggregate
- * step.
+ * Currently, we disallow sublinks in standalone expressions, so there's no
+ * real "planning" involved here.  (That might not always be true though.)
+ * What we must do is run eval_const_expressions to ensure that any function
+ * calls are converted to positional notation and function default arguments
+ * get inserted.  The fact that constant subexpressions get simplified is a
+ * side-effect that is useful when the expression will get evaluated more than
+ * once.  Also, we must fix operator function IDs.
  *
- * NB: This function is allowed to return NULL if it determines that there is
- * no real need to create a new RelOptInfo.
+ * Note: this must not make any damaging changes to the passed-in expression
+ * tree.  (It would actually be okay to apply fix_opfuncids to it, but since
+ * we first do an expression_tree_mutator-based walk, what is returned will
+ * be a new node tree.)
  */
-static RelOptInfo *
-create_partial_grouping_paths(PlannerInfo *root,
-							  RelOptInfo *grouped_rel,
-							  RelOptInfo *input_rel,
-							  grouping_sets_data *gd,
-							  GroupPathExtraData *extra,
-							  bool force_rel_creation)
+Expr *
+expression_planner(Expr *expr)
 {
-	Query	   *parse = root->parse;
-	RelOptInfo *partially_grouped_rel;
-	AggClauseCosts *agg_partial_costs = &extra->agg_partial_costs;
-	AggClauseCosts *agg_final_costs = &extra->agg_final_costs;
-	Path	   *cheapest_partial_path = NULL;
-	Path	   *cheapest_total_path = NULL;
-	double		dNumPartialGroups = 0;
-	double		dNumPartialPartialGroups = 0;
-	ListCell   *lc;
-	bool		can_hash = (extra->flags & GROUPING_CAN_USE_HASH) != 0;
-	bool		can_sort = (extra->flags & GROUPING_CAN_USE_SORT) != 0;
-
-	/*
-	 * Consider whether we should generate partially aggregated non-partial
-	 * paths.  We can only do this if we have a non-partial path, and only if
-	 * the parent of the input rel is performing partial partitionwise
-	 * aggregation.  (Note that extra->patype is the type of partitionwise
-	 * aggregation being used at the parent level, not this level.)
-	 */
-	if (input_rel->pathlist != NIL &&
-		extra->patype == PARTITIONWISE_AGGREGATE_PARTIAL)
-		cheapest_total_path = input_rel->cheapest_total_path;
-
-	/*
-	 * If parallelism is possible for grouped_rel, then we should consider
-	 * generating partially-grouped partial paths.  However, if the input rel
-	 * has no partial paths, then we can't.
-	 */
-	if (grouped_rel->consider_parallel && input_rel->partial_pathlist != NIL)
-		cheapest_partial_path = linitial(input_rel->partial_pathlist);
-
-	/*
-	 * If we can't partially aggregate partial paths, and we can't partially
-	 * aggregate non-partial paths, then don't bother creating the new
-	 * RelOptInfo at all, unless the caller specified force_rel_creation.
-	 */
-	if (cheapest_total_path == NULL &&
-		cheapest_partial_path == NULL &&
-		!force_rel_creation)
-		return NULL;
-
-	/*
-	 * Build a new upper relation to represent the result of partially
-	 * aggregating the rows from the input relation.
-	 */
-	partially_grouped_rel = fetch_upper_rel(root,
-											UPPERREL_PARTIAL_GROUP_AGG,
-											grouped_rel->relids);
-	partially_grouped_rel->consider_parallel =
-		grouped_rel->consider_parallel;
-	partially_grouped_rel->reloptkind = grouped_rel->reloptkind;
-	partially_grouped_rel->serverid = grouped_rel->serverid;
-	partially_grouped_rel->userid = grouped_rel->userid;
-	partially_grouped_rel->useridiscurrent = grouped_rel->useridiscurrent;
-	partially_grouped_rel->fdwroutine = grouped_rel->fdwroutine;
+	Node	   *result;
 
 	/*
-	 * Build target list for partial aggregate paths.  These paths cannot just
-	 * emit the same tlist as regular aggregate paths, because (1) we must
-	 * include Vars and Aggrefs needed in HAVING, which might not appear in
-	 * the result tlist, and (2) the Aggrefs must be set in partial mode.
+	 * Convert named-argument function calls, insert default arguments and
+	 * simplify constant subexprs
 	 */
-	partially_grouped_rel->reltarget =
-		make_partial_grouping_target(root, grouped_rel->reltarget,
-									 extra->havingQual);
+	result = eval_const_expressions(NULL, (Node *) expr);
 
-	if (!extra->partial_costs_set)
-	{
-		/*
-		 * Collect statistics about aggregates for estimating costs of
-		 * performing aggregation in parallel.
-		 */
-		MemSet(agg_partial_costs, 0, sizeof(AggClauseCosts));
-		MemSet(agg_final_costs, 0, sizeof(AggClauseCosts));
-		if (parse->hasAggs)
-		{
-			List	   *partial_target_exprs;
-
-			/* partial phase */
-			partial_target_exprs = partially_grouped_rel->reltarget->exprs;
-			get_agg_clause_costs(root, (Node *) partial_target_exprs,
-								 AGGSPLIT_INITIAL_SERIAL,
-								 agg_partial_costs);
-
-			/* final phase */
-			get_agg_clause_costs(root, (Node *) grouped_rel->reltarget->exprs,
-								 AGGSPLIT_FINAL_DESERIAL,
-								 agg_final_costs);
-			get_agg_clause_costs(root, extra->havingQual,
-								 AGGSPLIT_FINAL_DESERIAL,
-								 agg_final_costs);
-		}
+	/* Fill in opfuncid values if missing */
+	fix_opfuncids(result);
 
-		extra->partial_costs_set = true;
-	}
+	return (Expr *) result;
+}
 
-	/* Estimate number of partial groups. */
-	if (cheapest_total_path != NULL)
-		dNumPartialGroups =
-			get_number_of_groups(root,
-								 cheapest_total_path->rows,
-								 gd,
-								 extra->targetList);
-	if (cheapest_partial_path != NULL)
-		dNumPartialPartialGroups =
-			get_number_of_groups(root,
-								 cheapest_partial_path->rows,
-								 gd,
-								 extra->targetList);
-
-	if (can_sort && cheapest_total_path != NULL)
-	{
-		/* This should have been checked previously */
-		Assert(parse->hasAggs || parse->groupClause);
 
-		/*
-		 * Use any available suitably-sorted path as input, and also consider
-		 * sorting the cheapest partial path.
-		 */
-		foreach(lc, input_rel->pathlist)
-		{
-			Path	   *path = (Path *) lfirst(lc);
-			bool		is_sorted;
+/*
+ * plan_cluster_use_sort
+ *		Use the planner to decide how CLUSTER should implement sorting
+ *
+ * tableOid is the OID of a table to be clustered on its index indexOid
+ * (which is already known to be a btree index).  Decide whether it's
+ * cheaper to do an indexscan or a seqscan-plus-sort to execute the CLUSTER.
+ * Return true to use sorting, false to use an indexscan.
+ *
+ * Note: caller had better already hold some type of lock on the table.
+ */
+bool
+plan_cluster_use_sort(Oid tableOid, Oid indexOid)
+{
+	PlannerInfo *root;
+	Query	   *query;
+	PlannerGlobal *glob;
+	RangeTblEntry *rte;
+	RelOptInfo *rel;
+	IndexOptInfo *indexInfo;
+	QualCost	indexExprCost;
+	Cost		comparisonCost;
+	Path	   *seqScanPath;
+	Path		seqScanAndSortPath;
+	IndexPath  *indexScanPath;
+	ListCell   *lc;
 
-			is_sorted = pathkeys_contained_in(root->group_pathkeys,
-											  path->pathkeys);
-			if (path == cheapest_total_path || is_sorted)
-			{
-				/* Sort the cheapest partial path, if it isn't already */
-				if (!is_sorted)
-					path = (Path *) create_sort_path(root,
-													 partially_grouped_rel,
-													 path,
-													 root->group_pathkeys,
-													 -1.0);
-
-				if (parse->hasAggs)
-					add_path(partially_grouped_rel, (Path *)
-							 create_agg_path(root,
-											 partially_grouped_rel,
-											 path,
-											 partially_grouped_rel->reltarget,
-											 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
-											 AGGSPLIT_INITIAL_SERIAL,
-											 parse->groupClause,
-											 NIL,
-											 agg_partial_costs,
-											 dNumPartialGroups));
-				else
-					add_path(partially_grouped_rel, (Path *)
-							 create_group_path(root,
-											   partially_grouped_rel,
-											   path,
-											   parse->groupClause,
-											   NIL,
-											   dNumPartialGroups));
-			}
-		}
-	}
+	/* We can short-circuit the cost comparison if indexscans are disabled */
+	if (!enable_indexscan)
+		return true;			/* use sort */
 
-	if (can_sort && cheapest_partial_path != NULL)
-	{
-		/* Similar to above logic, but for partial paths. */
-		foreach(lc, input_rel->partial_pathlist)
-		{
-			Path	   *path = (Path *) lfirst(lc);
-			bool		is_sorted;
+	/* Set up mostly-dummy planner state */
+	query = makeNode(Query);
+	query->commandType = CMD_SELECT;
 
-			is_sorted = pathkeys_contained_in(root->group_pathkeys,
-											  path->pathkeys);
-			if (path == cheapest_partial_path || is_sorted)
-			{
-				/* Sort the cheapest partial path, if it isn't already */
-				if (!is_sorted)
-					path = (Path *) create_sort_path(root,
-													 partially_grouped_rel,
-													 path,
-													 root->group_pathkeys,
-													 -1.0);
-
-				if (parse->hasAggs)
-					add_partial_path(partially_grouped_rel, (Path *)
-									 create_agg_path(root,
-													 partially_grouped_rel,
-													 path,
-													 partially_grouped_rel->reltarget,
-													 parse->groupClause ? AGG_SORTED : AGG_PLAIN,
-													 AGGSPLIT_INITIAL_SERIAL,
-													 parse->groupClause,
-													 NIL,
-													 agg_partial_costs,
-													 dNumPartialPartialGroups));
-				else
-					add_partial_path(partially_grouped_rel, (Path *)
-									 create_group_path(root,
-													   partially_grouped_rel,
-													   path,
-													   parse->groupClause,
-													   NIL,
-													   dNumPartialPartialGroups));
-			}
-		}
-	}
+	glob = makeNode(PlannerGlobal);
 
-	if (can_hash && cheapest_total_path != NULL)
-	{
-		Size		hashaggtablesize;
+	root = makeNode(PlannerInfo);
+	root->parse = query;
+	root->glob = glob;
+	root->query_level = 1;
+	root->planner_cxt = CurrentMemoryContext;
+	root->wt_param_id = -1;
 
-		/* Checked above */
-		Assert(parse->hasAggs || parse->groupClause);
+	/* Build a minimal RTE for the rel */
+	rte = makeNode(RangeTblEntry);
+	rte->rtekind = RTE_RELATION;
+	rte->relid = tableOid;
+	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
+	rte->lateral = false;
+	rte->inh = false;
+	rte->inFromCl = true;
+	query->rtable = list_make1(rte);
 
-		hashaggtablesize =
-			estimate_hashagg_tablesize(cheapest_total_path,
-									   agg_partial_costs,
-									   dNumPartialGroups);
+	/* Set up RTE/RelOptInfo arrays */
+	setup_simple_rel_arrays(root);
 
-		/*
-		 * Tentatively produce a partial HashAgg Path, depending on if it
-		 * looks as if the hash table will fit in work_mem.
-		 */
-		if (hashaggtablesize < work_mem * 1024L &&
-			cheapest_total_path != NULL)
-		{
-			add_path(partially_grouped_rel, (Path *)
-					 create_agg_path(root,
-									 partially_grouped_rel,
-									 cheapest_total_path,
-									 partially_grouped_rel->reltarget,
-									 AGG_HASHED,
-									 AGGSPLIT_INITIAL_SERIAL,
-									 parse->groupClause,
-									 NIL,
-									 agg_partial_costs,
-									 dNumPartialGroups));
-		}
-	}
+	/* Build RelOptInfo */
+	rel = build_simple_rel(root, 1, NULL);
 
-	if (can_hash && cheapest_partial_path != NULL)
+	/* Locate IndexOptInfo for the target index */
+	indexInfo = NULL;
+	foreach(lc, rel->indexlist)
 	{
-		Size		hashaggtablesize;
+		indexInfo = lfirst_node(IndexOptInfo, lc);
+		if (indexInfo->indexoid == indexOid)
+			break;
+	}
+
+	/*
+	 * It's possible that get_relation_info did not generate an IndexOptInfo
+	 * for the desired index; this could happen if it's not yet reached its
+	 * indcheckxmin usability horizon, or if it's a system index and we're
+	 * ignoring system indexes.  In such cases we should tell CLUSTER to not
+	 * trust the index contents but use seqscan-and-sort.
+	 */
+	if (lc == NULL)				/* not in the list? */
+		return true;			/* use sort */
 
-		hashaggtablesize =
-			estimate_hashagg_tablesize(cheapest_partial_path,
-									   agg_partial_costs,
-									   dNumPartialPartialGroups);
+	/*
+	 * Rather than doing all the pushups that would be needed to use
+	 * set_baserel_size_estimates, just do a quick hack for rows and width.
+	 */
+	rel->rows = rel->tuples;
+	rel->reltarget->width = get_relation_data_width(tableOid, NULL);
 
-		/* Do the same for partial paths. */
-		if (hashaggtablesize < work_mem * 1024L &&
-			cheapest_partial_path != NULL)
-		{
-			add_partial_path(partially_grouped_rel, (Path *)
-							 create_agg_path(root,
-											 partially_grouped_rel,
-											 cheapest_partial_path,
-											 partially_grouped_rel->reltarget,
-											 AGG_HASHED,
-											 AGGSPLIT_INITIAL_SERIAL,
-											 parse->groupClause,
-											 NIL,
-											 agg_partial_costs,
-											 dNumPartialPartialGroups));
-		}
-	}
+	root->total_table_pages = rel->pages;
 
 	/*
-	 * If there is an FDW that's responsible for all baserels of the query,
-	 * let it consider adding partially grouped ForeignPaths.
+	 * Determine eval cost of the index expressions, if any.  We need to
+	 * charge twice that amount for each tuple comparison that happens during
+	 * the sort, since tuplesort.c will have to re-evaluate the index
+	 * expressions each time.  (XXX that's pretty inefficient...)
 	 */
-	if (partially_grouped_rel->fdwroutine &&
-		partially_grouped_rel->fdwroutine->GetForeignUpperPaths)
-	{
-		FdwRoutine *fdwroutine = partially_grouped_rel->fdwroutine;
+	cost_qual_eval(&indexExprCost, indexInfo->indexprs, root);
+	comparisonCost = 2.0 * (indexExprCost.startup + indexExprCost.per_tuple);
 
-		fdwroutine->GetForeignUpperPaths(root,
-										 UPPERREL_PARTIAL_GROUP_AGG,
-										 input_rel, partially_grouped_rel,
-										 extra);
-	}
+	/* Estimate the cost of seq scan + sort */
+	seqScanPath = create_seqscan_path(root, rel, NULL, 0);
+	cost_sort(&seqScanAndSortPath, root, NIL,
+			  seqScanPath->total_cost, rel->tuples, rel->reltarget->width,
+			  comparisonCost, maintenance_work_mem, -1.0);
+
+	/* Estimate the cost of index scan */
+	indexScanPath = create_index_path(root, indexInfo,
+									  NIL, NIL, NIL, NIL, NIL,
+									  ForwardScanDirection, false,
+									  NULL, 1.0, false);
 
-	return partially_grouped_rel;
+	return (seqScanAndSortPath.total_cost < indexScanPath->path.total_cost);
 }
 
 /*
- * Generate Gather and Gather Merge paths for a grouping relation or partial
- * grouping relation.
+ * plan_create_index_workers
+ *		Use the planner to decide how many parallel worker processes
+ *		CREATE INDEX should request for use
+ *
+ * tableOid is the table on which the index is to be built.  indexOid is the
+ * OID of an index to be created or reindexed (which must be a btree index).
  *
- * generate_gather_paths does most of the work, but we also consider a special
- * case: we could try sorting the data by the group_pathkeys and then applying
- * Gather Merge.
+ * Return value is the number of parallel worker processes to request.  It
+ * may be unsafe to proceed if this is 0.  Note that this does not include the
+ * leader participating as a worker (value is always a number of parallel
+ * worker processes).
  *
- * NB: This function shouldn't be used for anything other than a grouped or
- * partially grouped relation not only because of the fact that it explicitly
- * references group_pathkeys but we pass "true" as the third argument to
- * generate_gather_paths().
+ * Note: caller had better already hold some type of lock on the table and
+ * index.
  */
-static void
-gather_grouping_paths(PlannerInfo *root, RelOptInfo *rel)
+int
+plan_create_index_workers(Oid tableOid, Oid indexOid)
 {
-	Path	   *cheapest_partial_path;
+	PlannerInfo *root;
+	Query	   *query;
+	PlannerGlobal *glob;
+	RangeTblEntry *rte;
+	Relation	heap;
+	Relation	index;
+	RelOptInfo *rel;
+	int			parallel_workers;
+	BlockNumber heap_blocks;
+	double		reltuples;
+	double		allvisfrac;
 
-	/* Try Gather for unordered paths and Gather Merge for ordered ones. */
-	generate_gather_paths(root, rel, true);
+	/* Return immediately when parallelism disabled */
+	if (dynamic_shared_memory_type == DSM_IMPL_NONE ||
+		max_parallel_maintenance_workers == 0)
+		return 0;
 
-	/* Try cheapest partial path + explicit Sort + Gather Merge. */
-	cheapest_partial_path = linitial(rel->partial_pathlist);
-	if (!pathkeys_contained_in(root->group_pathkeys,
-							   cheapest_partial_path->pathkeys))
-	{
-		Path	   *path;
-		double		total_groups;
-
-		total_groups =
-			cheapest_partial_path->rows * cheapest_partial_path->parallel_workers;
-		path = (Path *) create_sort_path(root, rel, cheapest_partial_path,
-										 root->group_pathkeys,
-										 -1.0);
-		path = (Path *)
-			create_gather_merge_path(root,
-									 rel,
-									 path,
-									 rel->reltarget,
-									 root->group_pathkeys,
-									 NULL,
-									 &total_groups);
+	/* Set up largely-dummy planner state */
+	query = makeNode(Query);
+	query->commandType = CMD_SELECT;
 
-		add_path(rel, path);
-	}
-}
+	glob = makeNode(PlannerGlobal);
 
-/*
- * can_partial_agg
- *
- * Determines whether or not partial grouping and/or aggregation is possible.
- * Returns true when possible, false otherwise.
- */
-static bool
-can_partial_agg(PlannerInfo *root, const AggClauseCosts *agg_costs)
-{
-	Query	   *parse = root->parse;
+	root = makeNode(PlannerInfo);
+	root->parse = query;
+	root->glob = glob;
+	root->query_level = 1;
+	root->planner_cxt = CurrentMemoryContext;
+	root->wt_param_id = -1;
 
-	if (!parse->hasAggs && parse->groupClause == NIL)
-	{
-		/*
-		 * We don't know how to do parallel aggregation unless we have either
-		 * some aggregates or a grouping clause.
-		 */
-		return false;
-	}
-	else if (parse->groupingSets)
+	/*
+	 * Build a minimal RTE.
+	 *
+	 * Set the target's table to be an inheritance parent.  This is a kludge
+	 * that prevents problems within get_relation_info(), which does not
+	 * expect that any IndexOptInfo is currently undergoing REINDEX.
+	 */
+	rte = makeNode(RangeTblEntry);
+	rte->rtekind = RTE_RELATION;
+	rte->relid = tableOid;
+	rte->relkind = RELKIND_RELATION;	/* Don't be too picky. */
+	rte->lateral = false;
+	rte->inh = true;
+	rte->inFromCl = true;
+	query->rtable = list_make1(rte);
+
+	/* Set up RTE/RelOptInfo arrays */
+	setup_simple_rel_arrays(root);
+
+	/* Build RelOptInfo */
+	rel = build_simple_rel(root, 1, NULL);
+
+	heap = heap_open(tableOid, NoLock);
+	index = index_open(indexOid, NoLock);
+
+	/*
+	 * Determine if it's safe to proceed.
+	 *
+	 * Currently, parallel workers can't access the leader's temporary tables.
+	 * Furthermore, any index predicate or index expressions must be parallel
+	 * safe.
+	 */
+	if (heap->rd_rel->relpersistence == RELPERSISTENCE_TEMP ||
+		!is_parallel_safe(root, (Node *) RelationGetIndexExpressions(index)) ||
+		!is_parallel_safe(root, (Node *) RelationGetIndexPredicate(index)))
 	{
-		/* We don't know how to do grouping sets in parallel. */
-		return false;
+		parallel_workers = 0;
+		goto done;
 	}
-	else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
+
+	/*
+	 * If parallel_workers storage parameter is set for the table, accept that
+	 * as the number of parallel worker processes to launch (though still cap
+	 * at max_parallel_maintenance_workers).  Note that we deliberately do not
+	 * consider any other factor when parallel_workers is set. (e.g., memory
+	 * use by workers.)
+	 */
+	if (rel->rel_parallel_workers != -1)
 	{
-		/* Insufficient support for partial mode. */
-		return false;
+		parallel_workers = Min(rel->rel_parallel_workers,
+							   max_parallel_maintenance_workers);
+		goto done;
 	}
 
-	/* Everything looks good. */
-	return true;
+	/*
+	 * Estimate heap relation size ourselves, since rel->pages cannot be
+	 * trusted (heap RTE was marked as inheritance parent)
+	 */
+	estimate_rel_size(heap, NULL, &heap_blocks, &reltuples, &allvisfrac);
+
+	/*
+	 * Determine number of workers to scan the heap relation using generic
+	 * model
+	 */
+	parallel_workers = compute_parallel_worker(rel, heap_blocks, -1,
+											   max_parallel_maintenance_workers);
+
+	/*
+	 * Cap workers based on available maintenance_work_mem as needed.
+	 *
+	 * Note that each tuplesort participant receives an even share of the
+	 * total maintenance_work_mem budget.  Aim to leave participants
+	 * (including the leader as a participant) with no less than 32MB of
+	 * memory.  This leaves cases where maintenance_work_mem is set to 64MB
+	 * immediately past the threshold of being capable of launching a single
+	 * parallel worker to sort.
+	 */
+	while (parallel_workers > 0 &&
+		   maintenance_work_mem / (parallel_workers + 1) < 32768L)
+		parallel_workers--;
+
+done:
+	index_close(index, NoLock);
+	heap_close(heap, NoLock);
+
+	return parallel_workers;
 }
 
 /*
@@ -6980,208 +5235,3 @@ apply_scanjoin_target_to_paths(PlannerInfo *root,
 	 */
 	set_cheapest(rel);
 }
-
-/*
- * create_partitionwise_grouping_paths
- *
- * If the partition keys of input relation are part of the GROUP BY clause, all
- * the rows belonging to a given group come from a single partition.  This
- * allows aggregation/grouping over a partitioned relation to be broken down
- * into aggregation/grouping on each partition.  This should be no worse, and
- * often better, than the normal approach.
- *
- * However, if the GROUP BY clause does not contain all the partition keys,
- * rows from a given group may be spread across multiple partitions. In that
- * case, we perform partial aggregation for each group, append the results,
- * and then finalize aggregation.  This is less certain to win than the
- * previous case.  It may win if the PartialAggregate stage greatly reduces
- * the number of groups, because fewer rows will pass through the Append node.
- * It may lose if we have lots of small groups.
- */
-static void
-create_partitionwise_grouping_paths(PlannerInfo *root,
-									RelOptInfo *input_rel,
-									RelOptInfo *grouped_rel,
-									RelOptInfo *partially_grouped_rel,
-									const AggClauseCosts *agg_costs,
-									grouping_sets_data *gd,
-									PartitionwiseAggregateType patype,
-									GroupPathExtraData *extra)
-{
-	int			nparts = input_rel->nparts;
-	int			cnt_parts;
-	List	   *grouped_live_children = NIL;
-	List	   *partially_grouped_live_children = NIL;
-	PathTarget *target = grouped_rel->reltarget;
-
-	Assert(patype != PARTITIONWISE_AGGREGATE_NONE);
-	Assert(patype != PARTITIONWISE_AGGREGATE_PARTIAL ||
-		   partially_grouped_rel != NULL);
-
-	/* Add paths for partitionwise aggregation/grouping. */
-	for (cnt_parts = 0; cnt_parts < nparts; cnt_parts++)
-	{
-		RelOptInfo *child_input_rel = input_rel->part_rels[cnt_parts];
-		PathTarget *child_target = copy_pathtarget(target);
-		AppendRelInfo **appinfos;
-		int			nappinfos;
-		GroupPathExtraData child_extra;
-		RelOptInfo *child_grouped_rel;
-		RelOptInfo *child_partially_grouped_rel;
-
-		/* Input child rel must have a path */
-		Assert(child_input_rel->pathlist != NIL);
-
-		/*
-		 * Copy the given "extra" structure as is and then override the
-		 * members specific to this child.
-		 */
-		memcpy(&child_extra, extra, sizeof(child_extra));
-
-		appinfos = find_appinfos_by_relids(root, child_input_rel->relids,
-										   &nappinfos);
-
-		child_target->exprs = (List *)
-			adjust_appendrel_attrs(root,
-								   (Node *) target->exprs,
-								   nappinfos, appinfos);
-
-		/* Translate havingQual and targetList. */
-		child_extra.havingQual = (Node *)
-			adjust_appendrel_attrs(root,
-								   extra->havingQual,
-								   nappinfos, appinfos);
-		child_extra.targetList = (List *)
-			adjust_appendrel_attrs(root,
-								   (Node *) extra->targetList,
-								   nappinfos, appinfos);
-
-		/*
-		 * extra->patype was the value computed for our parent rel; patype is
-		 * the value for this relation.  For the child, our value is its
-		 * parent rel's value.
-		 */
-		child_extra.patype = patype;
-
-		/*
-		 * Create grouping relation to hold fully aggregated grouping and/or
-		 * aggregation paths for the child.
-		 */
-		child_grouped_rel = make_grouping_rel(root, child_input_rel,
-											  child_target,
-											  extra->target_parallel_safe,
-											  child_extra.havingQual);
-
-		/* Ignore empty children. They contribute nothing. */
-		if (IS_DUMMY_REL(child_input_rel))
-		{
-			mark_dummy_rel(child_grouped_rel);
-
-			continue;
-		}
-
-		/* Create grouping paths for this child relation. */
-		create_ordinary_grouping_paths(root, child_input_rel,
-									   child_grouped_rel,
-									   agg_costs, gd, &child_extra,
-									   &child_partially_grouped_rel);
-
-		if (child_partially_grouped_rel)
-		{
-			partially_grouped_live_children =
-				lappend(partially_grouped_live_children,
-						child_partially_grouped_rel);
-		}
-
-		if (patype == PARTITIONWISE_AGGREGATE_FULL)
-		{
-			set_cheapest(child_grouped_rel);
-			grouped_live_children = lappend(grouped_live_children,
-											child_grouped_rel);
-		}
-
-		pfree(appinfos);
-	}
-
-	/*
-	 * All children can't be dummy at this point. If they are, then the parent
-	 * too marked as dummy.
-	 */
-	Assert(grouped_live_children != NIL ||
-		   partially_grouped_live_children != NIL);
-
-	/*
-	 * Try to create append paths for partially grouped children. For full
-	 * partitionwise aggregation, we might have paths in the partial_pathlist
-	 * if parallel aggregation is possible.  For partial partitionwise
-	 * aggregation, we may have paths in both pathlist and partial_pathlist.
-	 */
-	if (partially_grouped_rel)
-	{
-		add_paths_to_append_rel(root, partially_grouped_rel,
-								partially_grouped_live_children);
-
-		/*
-		 * We need call set_cheapest, since the finalization step will use the
-		 * cheapest path from the rel.
-		 */
-		if (partially_grouped_rel->pathlist)
-			set_cheapest(partially_grouped_rel);
-	}
-
-	/* If possible, create append paths for fully grouped children. */
-	if (patype == PARTITIONWISE_AGGREGATE_FULL)
-		add_paths_to_append_rel(root, grouped_rel, grouped_live_children);
-}
-
-/*
- * group_by_has_partkey
- *
- * Returns true, if all the partition keys of the given relation are part of
- * the GROUP BY clauses, false otherwise.
- */
-static bool
-group_by_has_partkey(RelOptInfo *input_rel,
-					 List *targetList,
-					 List *groupClause)
-{
-	List	   *groupexprs = get_sortgrouplist_exprs(groupClause, targetList);
-	int			cnt = 0;
-	int			partnatts;
-
-	/* Input relation should be partitioned. */
-	Assert(input_rel->part_scheme);
-
-	/* Rule out early, if there are no partition keys present. */
-	if (!input_rel->partexprs)
-		return false;
-
-	partnatts = input_rel->part_scheme->partnatts;
-
-	for (cnt = 0; cnt < partnatts; cnt++)
-	{
-		List	   *partexprs = input_rel->partexprs[cnt];
-		ListCell   *lc;
-		bool		found = false;
-
-		foreach(lc, partexprs)
-		{
-			Expr	   *partexpr = lfirst(lc);
-
-			if (list_member(groupexprs, partexpr))
-			{
-				found = true;
-				break;
-			}
-		}
-
-		/*
-		 * If none of the partition key expressions match with any of the
-		 * GROUP BY expression, return false.
-		 */
-		if (!found)
-			return false;
-	}
-
-	return true;
-}
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 6bf9e84c4a..ad77416050 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -2424,4 +2424,23 @@ typedef struct JoinCostWorkspace
 	double		inner_rows_total;
 } JoinCostWorkspace;
 
+
+
+/*
+ * Data about the query's grouping sets, as collected by preprocess_grouping_sets()
+ */
+
+typedef struct
+{
+	List	   *rollups;
+	List	   *hash_sets_idx;
+	double		dNumHashGroups;
+	bool		any_hashable;
+	Bitmapset  *unsortable_refs;
+	Bitmapset  *unhashable_refs;
+	List	   *unsortable_sets;
+	int		   *tleref_to_colnum_map;
+} grouping_sets_data;
+
+
 #endif							/* RELATION_H */
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index cafde307ad..89835ad259 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -120,6 +120,19 @@ extern bool have_partkey_equi_join(RelOptInfo *joinrel,
 					   JoinType jointype, List *restrictlist);
 
 /*
+ * aggpath.c
+ *	  routines to create grouping paths
+ */
+extern RelOptInfo *create_grouping_paths(PlannerInfo *root,
+					  RelOptInfo *input_rel,
+					  PathTarget *target,
+					  bool target_parallel_safe,
+					  const AggClauseCosts *agg_costs,
+					  grouping_sets_data *gd);
+extern List *remap_to_groupclause_idx(List *groupClause, List *gsets,
+						 int *tleref_to_colnum_map);
+
+/*
  * equivclass.c
  *	  routines for managing EquivalenceClasses
  */
diff --git a/src/include/optimizer/planmain.h b/src/include/optimizer/planmain.h
index 4e61dff241..dd6e912373 100644
--- a/src/include/optimizer/planmain.h
+++ b/src/include/optimizer/planmain.h
@@ -42,6 +42,7 @@ extern RelOptInfo *query_planner(PlannerInfo *root);
  */
 extern void preprocess_minmax_aggregates(PlannerInfo *root, List *tlist);
 
+
 /*
  * prototypes for plan/createplan.c
  */
diff --git a/src/include/optimizer/planner.h b/src/include/optimizer/planner.h
index c090396e13..497a8c0581 100644
--- a/src/include/optimizer/planner.h
+++ b/src/include/optimizer/planner.h
@@ -55,6 +55,8 @@ extern Path *get_cheapest_fractional_path(RelOptInfo *rel,
 extern Expr *expression_planner(Expr *expr);
 
 extern Expr *preprocess_phv_expression(PlannerInfo *root, Expr *expr);
+extern List *preprocess_groupclause(PlannerInfo *root, List *force);
+extern grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
 
 extern bool plan_cluster_use_sort(Oid tableOid, Oid indexOid);
 extern int	plan_create_index_workers(Oid tableOid, Oid indexOid);
-- 
2.11.0

