diff --git a/src/backend/optimizer/path/joinpath.c b/src/backend/optimizer/path/joinpath.c
index c75408f552b..252cb712943 100644
--- a/src/backend/optimizer/path/joinpath.c
+++ b/src/backend/optimizer/path/joinpath.c
@@ -728,14 +728,16 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
 		single_mode = true;
 
 	/*
-	 * Memoize normally marks cache entries as complete when it runs out of
-	 * tuples to read from its subplan.  However, with unique joins, Nested
-	 * Loop will skip to the next outer tuple after finding the first matching
-	 * inner tuple. Another case is a semi or anti join. If number of join
-	 * clauses, pushed to the inner as parameterised filter no less than the
-	 * number of join clauses, that means all the clauses have been pushed to
-	 * the inner and any tuple coming from the inner side will be successfully
-	 * used to build the join result.
+	 * Normally, memoize marks cache entries as complete when it exhausts
+	 * all tuples from its subplan.  However, in unique joins, Nested Loop
+	 * will skip to the next outer tuple after finding the first matching
+	 * inner tuple.
+	 * Another case is a SEMI or ANTI join.  If the number of join clauses
+	 * pushed down to the inner side as a parameterised filter is equal to
+	 * or greater than the total number of join clauses, then all relevant
+	 * join conditions have been applied on the inner side, so any returned
+	 * inner tuple is guaranteed to satisfy the join condition, making it
+	 * safe to memoize.
 	 * This means that we may not read the inner side of the
 	 * join to completion which leaves no opportunity to mark the cache entry
 	 * as complete.  To work around that, when the join is unique we
@@ -808,6 +810,8 @@ get_memoize_path(PlannerInfo *root, RelOptInfo *innerrel,
 									&hash_operators,
 									&binary_mode))
 	{
+		Assert(!(extra->inner_unique && single_mode));
+
 		return (Path *) create_memoize_path(root,
 											innerrel,
 											inner_path,
