diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c
index e42ef98..6a730ca 100644
--- a/src/backend/optimizer/path/allpaths.c
+++ b/src/backend/optimizer/path/allpaths.c
@@ -2200,20 +2200,90 @@ standard_join_search(PlannerInfo *root, int levels_needed, List *initial_rels)
 
 			/* Find and save the cheapest paths for this rel */
 			set_cheapest(rel);
 
 #ifdef OPTIMIZER_DEBUG
 			debug_print_rel(root, rel);
 #endif
 		}
 	}
 
+	/*
+	 * Release the memory consumed by paths that nothing references: a path
+	 * whose only remaining reference is the pathlist itself can no longer
+	 * be used by any higher-level join.
+	 *
+	 * NOTE(review): the cheapest_*_path pointers set by set_cheapest() are
+	 * not reference-counted and may be left dangling by this pass -- confirm
+	 * that lower-level rels are never re-inspected after this point.
+	 */
+	for (lev = levels_needed - 1; lev >= 1; lev--)
+	{
+		ListCell   *lc;
+
+		foreach(lc, root->join_rel_level[lev])
+		{
+			RelOptInfo *rel = (RelOptInfo *) lfirst(lc);
+			ListCell   *lc_path;
+
+			elog(DEBUG1, "path list length before deleting: %d",
+				 list_length(rel->pathlist));
+
+			lc_path = list_head(rel->pathlist);
+			while (lc_path)
+			{
+				Path	   *path = (Path *) lfirst(lc_path);
+
+				/* advance before possibly deleting the current cell */
+				lc_path = lnext(lc_path);
+
+				/* Free the path if the pathlist holds the only reference. */
+				if (path->num_refs == 1)
+				{
+					elog(DEBUG1, "freed unreferenced path %p of type %d",
+						 (void *) path, (int) path->pathtype);
+
+					rel->pathlist = list_delete_ptr(rel->pathlist, path);
+					free_path(path);
+				}
+			}
+
+			elog(DEBUG1, "path list length after deleting: %d",
+				 list_length(rel->pathlist));
+
+			/* Do the same for the partial pathlist. */
+			elog(DEBUG1, "partial path list length before deleting: %d",
+				 list_length(rel->partial_pathlist));
+
+			lc_path = list_head(rel->partial_pathlist);
+			while (lc_path)
+			{
+				Path	   *path = (Path *) lfirst(lc_path);
+
+				lc_path = lnext(lc_path);
+
+				if (path->num_refs == 1)
+				{
+					elog(DEBUG1, "freed unreferenced partial path %p of type %d",
+						 (void *) path, (int) path->pathtype);
+
+					rel->partial_pathlist =
+						list_delete_ptr(rel->partial_pathlist, path);
+					free_path(path);
+				}
+			}
+
+			elog(DEBUG1, "partial path list length after deleting: %d",
+				 list_length(rel->partial_pathlist));
+		}
+	}
+
 	/*
 	 * We should have a single rel at the final level.
 	 */
 	if (root->join_rel_level[levels_needed] == NIL)
 		elog(ERROR, "failed to build any %d-way joins", levels_needed);
 	Assert(list_length(root->join_rel_level[levels_needed]) == 1);
 
 	rel = (RelOptInfo *) linitial(root->join_rel_level[levels_needed]);
 
 	root->join_rel_level = NULL;
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index abb7507..6b34f6a 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -39,21 +39,21 @@ typedef enum
 } PathCostComparison;
 
 /*
  * STD_FUZZ_FACTOR is the normal fuzz factor for compare_path_costs_fuzzily.
  * XXX is it worth making this user-controllable?  It provides a tradeoff
  * between planner runtime and the accuracy of path cost comparisons.
  */
 #define STD_FUZZ_FACTOR 1.01
 
 static List *translate_sub_tlist(List *tlist, int relid);
-
+static void unref_path(Path *path);
 
 /*****************************************************************************
  *		MISC. PATH UTILITIES
  *****************************************************************************/
 
 /*
  * compare_path_costs
  *	  Return -1, 0, or +1 according as path1 is cheaper, the same cost,
  *	  or more expensive than path2 for the specified criterion.
  */
@@ -581,22 +581,21 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
 		 * Remove current element from pathlist if dominated by new.
 		 */
 		if (remove_old)
 		{
 			parent_rel->pathlist = list_delete_cell(parent_rel->pathlist,
 													p1, p1_prev);
 
 			/*
 			 * Delete the data pointed-to by the deleted cell, if possible
 			 */
-			if (!IsA(old_path, IndexPath))
-				pfree(old_path);
+			unref_path(old_path);
 			/* p1_prev does not advance */
 		}
 		else
 		{
 			/* new belongs after this old path if it has cost >= old's */
 			if (new_path->total_cost >= old_path->total_cost)
 				insert_after = p1;
 			/* p1_prev advances */
 			p1_prev = p1;
 		}
@@ -610,26 +609,26 @@ add_path(RelOptInfo *parent_rel, Path *new_path)
 			break;
 	}
 
 	if (accept_new)
 	{
 		/* Accept the new path: insert it at proper place in pathlist */
 		if (insert_after)
 			lappend_cell(parent_rel->pathlist, insert_after, new_path);
 		else
 			parent_rel->pathlist = lcons(new_path, parent_rel->pathlist);
+		new_path->num_refs++;
 	}
 	else
 	{
 		/* Reject and recycle the new path */
-		if (!IsA(new_path, IndexPath))
-			pfree(new_path);
+		free_path(new_path);
 	}
 }
 
 /*
  * add_path_precheck
  *	  Check whether a proposed new path could possibly get accepted.
  *	  We assume we know the path's pathkeys and parameterization accurately,
  *	  and have lower bounds for its costs.
  *
  * Note that we do not know the path's rowcount, since getting an estimate for
@@ -821,21 +820,21 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
 
 		/*
 		 * Remove current element from partial_pathlist if dominated by new.
 		 */
 		if (remove_old)
 		{
 			parent_rel->partial_pathlist =
 				list_delete_cell(parent_rel->partial_pathlist, p1, p1_prev);
 			/* we should not see IndexPaths here, so always safe to delete */
 			Assert(!IsA(old_path, IndexPath));
-			pfree(old_path);
+			unref_path(old_path);
 			/* p1_prev does not advance */
 		}
 		else
 		{
 			/* new belongs after this old path if it has cost >= old's */
 			if (new_path->total_cost >= old_path->total_cost)
 				insert_after = p1;
 			/* p1_prev advances */
 			p1_prev = p1;
 		}
@@ -850,27 +849,28 @@ add_partial_path(RelOptInfo *parent_rel, Path *new_path)
 	}
 
 	if (accept_new)
 	{
 		/* Accept the new path: insert it at proper place */
 		if (insert_after)
 			lappend_cell(parent_rel->partial_pathlist, insert_after, new_path);
 		else
 			parent_rel->partial_pathlist =
 				lcons(new_path, parent_rel->partial_pathlist);
+		new_path->num_refs++;
 	}
 	else
 	{
 		/* we should not see IndexPaths here, so always safe to delete */
 		Assert(!IsA(new_path, IndexPath));
 		/* Reject and recycle the new path */
-		pfree(new_path);
+		free_path(new_path);
 	}
 }
 
 /*
  * add_partial_path_precheck
  *	  Check whether a proposed new partial path could possibly get accepted.
  *
  * Unlike add_path_precheck, we can ignore startup cost and parameterization,
  * since they don't matter for partial paths (see add_partial_path).  But
  * we do want to make sure we don't add a partial path if there's already
@@ -1079,20 +1079,21 @@ create_bitmap_heap_path(PlannerInfo *root,
 	pathnode->path.parent = rel;
 	pathnode->path.pathtarget = rel->reltarget;
 	pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
 														  required_outer);
 	pathnode->path.parallel_aware = false;
 	pathnode->path.parallel_safe = rel->consider_parallel;
 	pathnode->path.parallel_workers = 0;
 	pathnode->path.pathkeys = NIL;		/* always unordered */
 
 	pathnode->bitmapqual = bitmapqual;
+	bitmapqual->num_refs++;
 
 	cost_bitmap_heap_scan(&pathnode->path, root, rel,
 						  pathnode->path.param_info,
 						  bitmapqual, loop_count);
 
 	return pathnode;
 }
 
 /*
  * create_bitmap_and_path
@@ -1228,20 +1229,22 @@ create_append_path(RelOptInfo *rel, List *subpaths, Relids required_outer,
 	 * but since it doesn't do any selection or projection, it is a pretty
 	 * cheap node.
 	 */
 	pathnode->path.rows = 0;
 	pathnode->path.startup_cost = 0;
 	pathnode->path.total_cost = 0;
 	foreach(l, subpaths)
 	{
 		Path	   *subpath = (Path *) lfirst(l);
 
+		subpath->num_refs++;
+
 		pathnode->path.rows += subpath->rows;
 
 		if (l == list_head(subpaths))	/* first node? */
 			pathnode->path.startup_cost = subpath->startup_cost;
 		pathnode->path.total_cost += subpath->total_cost;
 		pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
 			subpath->parallel_safe;
 
 		/* All child paths must have same parameterization */
 		Assert(bms_equal(PATH_REQ_OUTER(subpath), required_outer));
@@ -1290,20 +1293,21 @@ create_merge_append_path(PlannerInfo *root,
 	/*
 	 * Add up the sizes and costs of the input paths.
 	 */
 	pathnode->path.rows = 0;
 	input_startup_cost = 0;
 	input_total_cost = 0;
 	foreach(l, subpaths)
 	{
 		Path	   *subpath = (Path *) lfirst(l);
 
+		subpath->num_refs++;
 		pathnode->path.rows += subpath->rows;
 		pathnode->path.parallel_safe = pathnode->path.parallel_safe &&
 			subpath->parallel_safe;
 
 		if (pathkeys_contained_in(pathkeys, subpath->pathkeys))
 		{
 			/* Subpath is adequately ordered, we won't need to sort it */
 			input_startup_cost += subpath->startup_cost;
 			input_total_cost += subpath->total_cost;
 		}
@@ -1678,20 +1682,21 @@ create_gather_path(PlannerInfo *root, RelOptInfo *rel, Path *subpath,
 	pathnode->path.parent = rel;
 	pathnode->path.pathtarget = target;
 	pathnode->path.param_info = get_baserel_parampathinfo(root, rel,
 														  required_outer);
 	pathnode->path.parallel_aware = false;
 	pathnode->path.parallel_safe = false;
 	pathnode->path.parallel_workers = subpath->parallel_workers;
 	pathnode->path.pathkeys = NIL;		/* Gather has unordered result */
 
 	pathnode->subpath = subpath;
+	subpath->num_refs++;
 	pathnode->single_copy = false;
 
 	if (pathnode->path.parallel_workers == 0)
 	{
 		pathnode->path.parallel_workers = 1;
 		pathnode->path.pathkeys = subpath->pathkeys;
 		pathnode->single_copy = true;
 	}
 
 	cost_gather(pathnode, root, rel, pathnode->path.param_info, rows);
@@ -1999,21 +2004,23 @@ create_nestloop_path(PlannerInfo *root,
 								  required_outer,
 								  &restrict_clauses);
 	pathnode->path.parallel_aware = false;
 	pathnode->path.parallel_safe = joinrel->consider_parallel &&
 		outer_path->parallel_safe && inner_path->parallel_safe;
 	/* This is a foolish way to estimate parallel_workers, but for now... */
 	pathnode->path.parallel_workers = outer_path->parallel_workers;
 	pathnode->path.pathkeys = pathkeys;
 	pathnode->jointype = jointype;
 	pathnode->outerjoinpath = outer_path;
+	outer_path->num_refs++;
 	pathnode->innerjoinpath = inner_path;
+	inner_path->num_refs++;
 	pathnode->joinrestrictinfo = restrict_clauses;
 
 	final_cost_nestloop(root, pathnode, workspace, sjinfo, semifactors);
 
 	return pathnode;
 }
 
 /*
  * create_mergejoin_path
  *	  Creates a pathnode corresponding to a mergejoin join between
@@ -2062,21 +2069,23 @@ create_mergejoin_path(PlannerInfo *root,
 								  required_outer,
 								  &restrict_clauses);
 	pathnode->jpath.path.parallel_aware = false;
 	pathnode->jpath.path.parallel_safe = joinrel->consider_parallel &&
 		outer_path->parallel_safe && inner_path->parallel_safe;
 	/* This is a foolish way to estimate parallel_workers, but for now... */
 	pathnode->jpath.path.parallel_workers = outer_path->parallel_workers;
 	pathnode->jpath.path.pathkeys = pathkeys;
 	pathnode->jpath.jointype = jointype;
 	pathnode->jpath.outerjoinpath = outer_path;
+	outer_path->num_refs++;
 	pathnode->jpath.innerjoinpath = inner_path;
+	inner_path->num_refs++;
 	pathnode->jpath.joinrestrictinfo = restrict_clauses;
 	pathnode->path_mergeclauses = mergeclauses;
 	pathnode->outersortkeys = outersortkeys;
 	pathnode->innersortkeys = innersortkeys;
 	/* pathnode->materialize_inner will be set by final_cost_mergejoin */
 
 	final_cost_mergejoin(root, pathnode, workspace, sjinfo);
 
 	return pathnode;
 }
@@ -2136,21 +2145,23 @@ create_hashjoin_path(PlannerInfo *root,
 	 * and then we could assume that the output inherits the outer relation's
 	 * ordering, which might save a sort step.  However there is considerable
 	 * downside if our estimate of the inner relation size is badly off. For
 	 * the moment we don't risk it.  (Note also that if we wanted to take this
 	 * seriously, joinpath.c would have to consider many more paths for the
 	 * outer rel than it does now.)
 	 */
 	pathnode->jpath.path.pathkeys = NIL;
 	pathnode->jpath.jointype = jointype;
 	pathnode->jpath.outerjoinpath = outer_path;
+	outer_path->num_refs++;
 	pathnode->jpath.innerjoinpath = inner_path;
+	inner_path->num_refs++;
 	pathnode->jpath.joinrestrictinfo = restrict_clauses;
 	pathnode->path_hashclauses = hashclauses;
 	/* final_cost_hashjoin will fill in pathnode->num_batches */
 
 	final_cost_hashjoin(root, pathnode, workspace, sjinfo, semifactors);
 
 	return pathnode;
 }
 
 /*
@@ -3202,10 +3213,109 @@ reparameterize_path(PlannerInfo *root, Path *path,
 														 rel,
 														 spath->subpath,
 														 spath->path.pathkeys,
 														 required_outer);
 			}
 		default:
 			break;
 	}
 	return NULL;
 }
+
+/*
+ * free_path
+ *	  Release the memory consumed by a path, first dropping the references
+ *	  it holds on its immediate sub-path(s).
+ *
+ * Only the sub-path links whose creation incremented num_refs (see the
+ * create_xxx_path functions) are unreferenced here; keep the two sides
+ * in sync when adding new path types.
+ */
+void
+free_path(Path *path)
+{
+	switch (path->pathtype)
+	{
+		case T_SeqScan:
+		case T_IndexScan:
+		case T_IndexOnlyScan:
+			/* Leaf paths hold no references to other paths. */
+			break;
+
+		case T_BitmapAnd:
+		case T_BitmapOr:
+			/*
+			 * Child bitmapquals are not reference-counted by their
+			 * creation functions, so there is nothing to release here.
+			 */
+			break;
+
+		case T_MergeJoin:
+		case T_HashJoin:
+		case T_NestLoop:
+			{
+				JoinPath   *jpath = (JoinPath *) path;
+
+				unref_path(jpath->outerjoinpath);
+				unref_path(jpath->innerjoinpath);
+			}
+			break;
+
+		case T_Append:
+			{
+				AppendPath *apath = (AppendPath *) path;
+				ListCell   *lc;
+
+				foreach(lc, apath->subpaths)
+					unref_path((Path *) lfirst(lc));
+			}
+			break;
+
+		case T_MergeAppend:
+			{
+				/* MergeAppendPath is not layout-compatible with AppendPath */
+				MergeAppendPath *mpath = (MergeAppendPath *) path;
+				ListCell   *lc;
+
+				foreach(lc, mpath->subpaths)
+					unref_path((Path *) lfirst(lc));
+			}
+			break;
+
+		case T_Gather:
+			unref_path(((GatherPath *) path)->subpath);
+			break;
+
+		case T_BitmapHeapScan:
+			unref_path(((BitmapHeapPath *) path)->bitmapqual);
+			break;
+
+		default:
+			elog(ERROR, "unrecognized path type %d", (int) path->pathtype);
+			break;
+	}
+
+	/* IndexPaths may still be referenced by BitmapHeapPaths; keep them. */
+	if (!IsA(path, IndexPath))
+		pfree(path);
+}
+
+/*
+ * unref_path
+ *	  Drop one reference to a path, freeing it when no references remain.
+ */
+static void
+unref_path(Path *path)
+{
+	if (path == NULL)
+		return;
+
+	Assert(path->num_refs > 0);
+
+	if (--path->num_refs == 0)
+	{
+		elog(DEBUG1, "freeing unreferenced path %p of type %d",
+			 (void *) path, (int) path->pathtype);
+
+		free_path(path);
+	}
+}
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 3a1255a..a58d71b 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -891,20 +891,21 @@ typedef struct Path
 	int			parallel_workers;		/* desired # of workers; 0 = not
 										 * parallel */
 
 	/* estimated size/costs for path (see costsize.c for more info) */
 	double		rows;			/* estimated number of result tuples */
 	Cost		startup_cost;	/* cost expended before fetching any tuples */
 	Cost		total_cost;		/* total cost (assuming all tuples fetched) */
 
 	List	   *pathkeys;		/* sort ordering of path's output */
 	/* pathkeys is a List of PathKey nodes; see above */
+	int			num_refs;		/* Number of objects referencing this path. */
 } Path;
 
 /* Macro for extracting a path's parameterization relids; beware double eval */
 #define PATH_REQ_OUTER(path)  \
 	((path)->param_info ? (path)->param_info->ppi_req_outer : (Relids) NULL)
 
 /*----------
  * IndexPath represents an index scan over a single index.
  *
  * This struct is used for both regular indexscans and index-only scans;
diff --git a/src/include/optimizer/paths.h b/src/include/optimizer/paths.h
index 44abe83..d658cd3 100644
--- a/src/include/optimizer/paths.h
+++ b/src/include/optimizer/paths.h
@@ -211,12 +211,13 @@ extern List *select_outer_pathkeys_for_merge(PlannerInfo *root,
 extern List *make_inner_pathkeys_for_merge(PlannerInfo *root,
 							  List *mergeclauses,
 							  List *outer_pathkeys);
 extern List *truncate_useless_pathkeys(PlannerInfo *root,
 						  RelOptInfo *rel,
 						  List *pathkeys);
 extern bool has_useful_pathkeys(PlannerInfo *root, RelOptInfo *rel);
 extern PathKey *make_canonical_pathkey(PlannerInfo *root,
 					   EquivalenceClass *eclass, Oid opfamily,
 					   int strategy, bool nulls_first);
+extern void free_path(Path *path);
 
 #endif   /* PATHS_H */
