diff --git a/contrib/intarray/bench/create_test.pl b/contrib/intarray/bench/create_test.pl
index f3262df..1323b31 100755
--- a/contrib/intarray/bench/create_test.pl
+++ b/contrib/intarray/bench/create_test.pl
@@ -15,8 +15,8 @@ create table message_section_map (
 
 EOT
 
-open(my $msg, '>', "message.tmp")             || die;
-open(my $map, '>', "message_section_map.tmp") || die;
+open(MSG, ">message.tmp")             || die;
+open(MAP, ">message_section_map.tmp") || die;
 
 srand(1);
 
@@ -42,16 +42,16 @@ foreach my $i (1 .. 200000)
 	}
 	if ($#sect < 0 || rand() < 0.1)
 	{
-		print $msg "$i\t\\N\n";
+		print MSG "$i\t\\N\n";
 	}
 	else
 	{
-		print $msg "$i\t{" . join(',', @sect) . "}\n";
-		map { print $map "$i\t$_\n" } @sect;
+		print MSG "$i\t{" . join(',', @sect) . "}\n";
+		map { print MAP "$i\t$_\n" } @sect;
 	}
 }
-close $map;
-close $msg;
+close MAP;
+close MSG;
 
 copytable('message');
 copytable('message_section_map');
@@ -79,8 +79,8 @@ sub copytable
 	my $t = shift;
 
 	print "COPY $t from stdin;\n";
-	open(my $fff, '<', "$t.tmp") || die;
-	while (<$fff>) { print; }
-	close $fff;
+	open(FFF, "$t.tmp") || die;
+	while (<FFF>) { print; }
+	close FFF;
 	print "\\.\n";
 }
diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c
index 42f4323..484cade 100644
--- a/contrib/pg_stat_statements/pg_stat_statements.c
+++ b/contrib/pg_stat_statements/pg_stat_statements.c
@@ -2513,14 +2513,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
 				JumbleExpr(jstate, (Node *) expr->aggfilter);
 			}
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *aref = (ArrayRef *) node;
-
-				JumbleExpr(jstate, (Node *) aref->refupperindexpr);
-				JumbleExpr(jstate, (Node *) aref->reflowerindexpr);
-				JumbleExpr(jstate, (Node *) aref->refexpr);
-				JumbleExpr(jstate, (Node *) aref->refassgnexpr);
+				SubscriptingRef   *sbsref = (SubscriptingRef *) node;
+
+				JumbleExpr(jstate, (Node *) sbsref->refupperindexpr);
+				JumbleExpr(jstate, (Node *) sbsref->reflowerindexpr);
+				JumbleExpr(jstate, (Node *) sbsref->refexpr);
+				JumbleExpr(jstate, (Node *) sbsref->refassgnexpr);
+				APP_JUMB(sbsref->refevalfunc);
+				APP_JUMB(sbsref->refnestedfunc);
 			}
 			break;
 		case T_FuncExpr:
diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c
index 1d7ec28..26cd8f0 100644
--- a/contrib/postgres_fdw/deparse.c
+++ b/contrib/postgres_fdw/deparse.c
@@ -147,7 +147,7 @@ static void deparseExpr(Expr *expr, deparse_expr_cxt *context);
 static void deparseVar(Var *node, deparse_expr_cxt *context);
 static void deparseConst(Const *node, deparse_expr_cxt *context, int showtype);
 static void deparseParam(Param *node, deparse_expr_cxt *context);
-static void deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context);
+static void deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context);
 static void deparseFuncExpr(FuncExpr *node, deparse_expr_cxt *context);
 static void deparseOpExpr(OpExpr *node, deparse_expr_cxt *context);
 static void deparseOperatorName(StringInfo buf, Form_pg_operator opform);
@@ -398,9 +398,9 @@ foreign_expr_walker(Node *node,
 					state = FDW_COLLATE_UNSAFE;
 			}
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *ar = (ArrayRef *) node;
+				SubscriptingRef   *ar = (SubscriptingRef *) node;
 
 				/* Assignment should not be in restrictions. */
 				if (ar->refassgnexpr != NULL)
@@ -2137,8 +2137,8 @@ deparseExpr(Expr *node, deparse_expr_cxt *context)
 		case T_Param:
 			deparseParam((Param *) node, context);
 			break;
-		case T_ArrayRef:
-			deparseArrayRef((ArrayRef *) node, context);
+		case T_SubscriptingRef:
+			deparseSubscriptingRef((SubscriptingRef *) node, context);
 			break;
 		case T_FuncExpr:
 			deparseFuncExpr((FuncExpr *) node, context);
@@ -2387,7 +2387,7 @@ deparseParam(Param *node, deparse_expr_cxt *context)
  * Deparse an array subscript expression.
  */
 static void
-deparseArrayRef(ArrayRef *node, deparse_expr_cxt *context)
+deparseSubscriptingRef(SubscriptingRef *node, deparse_expr_cxt *context)
 {
 	StringInfo	buf = context->buf;
 	ListCell   *lowlist_item;
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index a466bf2..059c5c3 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -3276,19 +3276,16 @@ select sum(q.a), count(q.b) from ft4 left join (select 13, avg(ft1.c1), sum(ft2.
 -- Grouping sets
 explain (verbose, costs off)
 select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
-                                  QUERY PLAN                                  
-------------------------------------------------------------------------------
- Sort
-   Output: c2, (sum(c1))
-   Sort Key: ft1.c2
-   ->  MixedAggregate
-         Output: c2, sum(c1)
-         Hash Key: ft1.c2
-         Group Key: ()
-         ->  Foreign Scan on public.ft1
-               Output: c2, c1
-               Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+                                            QUERY PLAN                                             
+---------------------------------------------------------------------------------------------------
+ GroupAggregate
+   Output: c2, sum(c1)
+   Group Key: ft1.c2
+   Group Key: ()
+   ->  Foreign Scan on public.ft1
+         Output: c2, c1
+         Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3)) ORDER BY c2 ASC NULLS LAST
+(7 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3301,19 +3298,16 @@ select c2, sum(c1) from ft1 where c2 < 3 group by rollup(c2) order by 1 nulls la
 
 explain (verbose, costs off)
 select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
-                                  QUERY PLAN                                  
-------------------------------------------------------------------------------
- Sort
-   Output: c2, (sum(c1))
-   Sort Key: ft1.c2
-   ->  MixedAggregate
-         Output: c2, sum(c1)
-         Hash Key: ft1.c2
-         Group Key: ()
-         ->  Foreign Scan on public.ft1
-               Output: c2, c1
-               Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+                                            QUERY PLAN                                             
+---------------------------------------------------------------------------------------------------
+ GroupAggregate
+   Output: c2, sum(c1)
+   Group Key: ft1.c2
+   Group Key: ()
+   ->  Foreign Scan on public.ft1
+         Output: c2, c1
+         Remote SQL: SELECT "C 1", c2 FROM "S 1"."T 1" WHERE ((c2 < 3)) ORDER BY c2 ASC NULLS LAST
+(7 rows)
 
 select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last;
  c2 |  sum   
@@ -3326,19 +3320,20 @@ select c2, sum(c1) from ft1 where c2 < 3 group by cube(c2) order by 1 nulls last
 
 explain (verbose, costs off)
 select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
-                                    QUERY PLAN                                    
-----------------------------------------------------------------------------------
+                                                 QUERY PLAN                                                  
+-------------------------------------------------------------------------------------------------------------
  Sort
    Output: c2, c6, (sum(c1))
    Sort Key: ft1.c2, ft1.c6
-   ->  HashAggregate
+   ->  GroupAggregate
          Output: c2, c6, sum(c1)
-         Hash Key: ft1.c2
-         Hash Key: ft1.c6
+         Group Key: ft1.c2
+         Sort Key: ft1.c6
+           Group Key: ft1.c6
          ->  Foreign Scan on public.ft1
                Output: c2, c6, c1
-               Remote SQL: SELECT "C 1", c2, c6 FROM "S 1"."T 1" WHERE ((c2 < 3))
-(10 rows)
+               Remote SQL: SELECT "C 1", c2, c6 FROM "S 1"."T 1" WHERE ((c2 < 3)) ORDER BY c2 ASC NULLS LAST
+(11 rows)
 
 select c2, c6, sum(c1) from ft1 where c2 < 3 group by grouping sets(c2, c6) order by 1 nulls last, 2 nulls last;
  c2 | c6 |  sum  
diff --git a/doc/src/sgml/biblio.sgml b/doc/src/sgml/biblio.sgml
index 5462bc3..ab5af16 100644
--- a/doc/src/sgml/biblio.sgml
+++ b/doc/src/sgml/biblio.sgml
@@ -12,7 +12,8 @@
    Some white papers and technical reports from the original
    <productname>POSTGRES</productname> development team
    are available at the University of California, Berkeley, Computer Science
-   Department <ulink url="http://db.cs.berkeley.edu/papers/">web site</ulink>.
+   Department <ulink url="http://db.cs.berkeley.edu/papers/">
+   web site</ulink>.
   </para>
 
   <bibliodiv>
@@ -20,6 +21,7 @@
 
    <biblioentry id="BOWMAN01">
     <title>The Practical <acronym>SQL</acronym> Handbook</title>
+    <titleabbrev>Bowman et al, 2001</titleabbrev>
     <subtitle>Using SQL Variants</subtitle>
     <edition>Fourth Edition</edition>
     <authorgroup>
@@ -37,14 +39,18 @@
      </author>
     </authorgroup>
     <isbn>0-201-70309-2</isbn>
+    <pubdate>2001</pubdate>
     <publisher>
      <publishername>Addison-Wesley Professional</publishername>
     </publisher>
-    <pubdate>2001</pubdate>
+    <copyright>
+     <year>2001</year>
+    </copyright>
    </biblioentry>
 
    <biblioentry id="DATE97">
     <title>A Guide to the <acronym>SQL</acronym> Standard</title>
+    <titleabbrev>Date and Darwen, 1997</titleabbrev>
     <subtitle>A user's guide to the standard database language <acronym>SQL</acronym></subtitle>
     <edition>Fourth Edition</edition>
     <authorgroup>
@@ -58,14 +64,19 @@
      </author>
     </authorgroup>
     <isbn>0-201-96426-0</isbn>
+    <pubdate>1997</pubdate>
     <publisher>
      <publishername>Addison-Wesley</publishername>
     </publisher>
-    <pubdate>1997</pubdate>
+    <copyright>
+     <year>1997</year>
+     <holder>Addison-Wesley Longman, Inc.</holder>
+    </copyright>
    </biblioentry>
 
    <biblioentry id="DATE04">
     <title>An Introduction to Database Systems</title>
+    <titleabbrev>Date, 2004</titleabbrev>
     <edition>Eighth Edition</edition>
     <authorgroup>
      <author>
@@ -74,10 +85,14 @@
      </author>
     </authorgroup>
     <isbn>0-321-19784-4</isbn>
+    <pubdate>2003</pubdate>
     <publisher>
      <publishername>Addison-Wesley</publishername>
     </publisher>
-    <pubdate>2003</pubdate>
+    <copyright>
+     <year>2004</year>
+     <holder>Pearson Education, Inc.</holder>
+    </copyright>
    </biblioentry>
 
   <biblioentry id="ELMA04">
@@ -94,14 +109,18 @@
     </author>
    </authorgroup>
    <isbn>0-321-12226-7</isbn>
+   <pubdate>2003</pubdate>
    <publisher>
     <publishername>Addison-Wesley</publishername>
    </publisher>
-   <pubdate>2003</pubdate>
+   <copyright>
+    <year>2004</year>
+   </copyright>
   </biblioentry>
 
    <biblioentry id="MELT93">
     <title>Understanding the New <acronym>SQL</acronym></title>
+    <titleabbrev>Melton and Simon, 1993</titleabbrev>
     <subtitle>A complete guide</subtitle>
     <authorgroup>
      <author>
@@ -114,15 +133,20 @@
      </author>
     </authorgroup>
     <isbn>1-55860-245-3</isbn>
+    <pubdate>1993</pubdate>
     <publisher>
      <publishername>Morgan Kaufmann</publishername>
     </publisher>
-    <pubdate>1993</pubdate>
+    <copyright>
+     <year>1993</year>
+     <holder>Morgan Kaufmann Publishers, Inc.</holder>
+    </copyright>
    </biblioentry>
 
    <biblioentry id="ULL88">
     <title>Principles of Database and Knowledge</title>
     <subtitle>Base Systems</subtitle>
+    <titleabbrev>Ullman, 1988</titleabbrev>
     <authorgroup>
      <author>
       <firstname>Jeffrey D.</firstname>
@@ -143,6 +167,7 @@
 
    <biblioentry id="SIM98">
     <title>Enhancement of the ANSI SQL Implementation of PostgreSQL</title>
+    <titleabbrev>Simkovics, 1998</titleabbrev>
     <authorgroup>
      <author>
       <firstname>Stefan</firstname>
@@ -178,15 +203,16 @@ ssimkovi@ag.or.at
      </para>
     </abstract>
 
+    <pubdate>November 29, 1998</pubdate>
     <publisher>
      <publishername>Department of Information Systems, Vienna University of Technology</publishername>
      <address>Vienna, Austria</address>
     </publisher>
-    <pubdate>November 29, 1998</pubdate>
    </biblioentry>
 
    <biblioentry id="YU95">
     <title>The <productname>Postgres95</productname> User Manual</title>
+    <titleabbrev>Yu and Chen, 1995</titleabbrev>
     <authorgroup>
      <author>
       <firstname>A.</firstname>
@@ -197,17 +223,24 @@ ssimkovi@ag.or.at
       <surname>Chen</surname>
      </author>
     </authorgroup>
+    <authorgroup>
+     <collab>
+      <collabname>The POSTGRES Group</collabname>
+     </collab>
+    </authorgroup>
+
+    <pubdate>Sept. 5, 1995</pubdate>
     <publisher>
      <publishername>University  of  California</publishername>
      <address>Berkeley, California</address>
     </publisher>
-    <pubdate>Sept. 5, 1995</pubdate>
    </biblioentry>
 
   <biblioentry id="FONG">
-   <title><ulink url="http://db.cs.berkeley.edu/papers/UCB-MS-zfong.pdf">The
-   design and implementation of the <productname>POSTGRES</productname> query
-   optimizer</ulink></title>
+   <title>
+   <ulink url="http://db.cs.berkeley.edu/papers/UCB-MS-zfong.pdf">
+   The design and implementation of the <productname>POSTGRES</productname> query optimizer
+   </ulink></title>
    <author>
     <firstname>Zelaine</firstname>
     <surname>Fong</surname>
@@ -224,23 +257,25 @@ ssimkovi@ag.or.at
 
    <biblioentry id="OLSON93">
     <title>Partial indexing in POSTGRES: research project</title>
+    <titleabbrev>Olson, 1993</titleabbrev>
     <authorgroup>
      <author>
       <firstname>Nels</firstname>
       <surname>Olson</surname>
      </author>
     </authorgroup>
+    <pubdate>1993</pubdate>
     <pubsnumber>UCB Engin T7.49.1993 O676</pubsnumber>
     <publisher>
      <publishername>University  of  California</publishername>
      <address>Berkeley, California</address>
     </publisher>
-    <pubdate>1993</pubdate>
    </biblioentry>
 
    <biblioentry id="ONG90">
    <biblioset relation="article">
     <title>A Unified Framework for Version Modeling Using Production Rules in a Database System</title>
+    <titleabbrev>Ong and Goh, 1990</titleabbrev>
     <authorgroup>
      <author>
       <firstname>L.</firstname>
@@ -254,18 +289,20 @@ ssimkovi@ag.or.at
    </biblioset>
    <biblioset relation="journal">
     <title>ERL Technical Memorandum M90/33</title>
+    <pubdate>April, 1990</pubdate>
     <publisher>
      <publishername>University  of  California</publishername>
      <address>Berkeley, California</address>
     </publisher>
-    <pubdate>April, 1990</pubdate>
    </biblioset>
    </biblioentry>
 
    <biblioentry id="ROWE87">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-13.pdf">The <productname>POSTGRES</productname>
-    data model</ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-13.pdf">
+    The <productname>POSTGRES</productname> data model
+    </ulink></title>
+    <titleabbrev>Rowe and Stonebraker, 1987</titleabbrev>
     <authorgroup>
      <author>
       <firstname>L.</firstname>
@@ -286,8 +323,14 @@ ssimkovi@ag.or.at
 
    <biblioentry id="SESHADRI95">
    <biblioset relation="article">
-    <title><ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">Generalized
-    Partial Indexes</ulink></title>
+    <title>Generalized Partial Indexes
+    <ulink url="http://citeseer.ist.psu.edu/seshadri95generalized.html">(cached version)
+<!--
+     Original URL:  http://citeseer.ist.psu.edu/seshadri95generalized.html
+-->
+    </ulink>
+    </title>
+    <titleabbrev>Seshadri, 1995</titleabbrev>
     <authorgroup>
      <author>
       <firstname>P.</firstname>
@@ -304,19 +347,21 @@ ssimkovi@ag.or.at
      <confdates>6-10 March 1995</confdates>
      <address>Taipeh, Taiwan</address>
     </confgroup>
+    <pubdate>1995</pubdate>
     <pubsnumber>Cat. No.95CH35724</pubsnumber>
     <publisher>
      <publishername>IEEE Computer Society Press</publishername>
      <address>Los Alamitos, California</address>
     </publisher>
-    <pubdate>1995</pubdate>
     <pagenums>420-7</pagenums>
    </biblioentry>
 
    <biblioentry id="STON86">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M85-95.pdf">The
-    design of <productname>POSTGRES</productname></ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M85-95.pdf">
+    The design of <productname>POSTGRES</productname>
+    </ulink></title>
+    <titleabbrev>Stonebraker and Rowe, 1986</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -338,6 +383,7 @@ ssimkovi@ag.or.at
    <biblioentry id="STON87a">
    <biblioset relation="article">
     <title>The design of the <productname>POSTGRES</productname> rules system</title>
+    <titleabbrev>Stonebraker, Hanson, Hong, 1987</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -362,9 +408,10 @@ ssimkovi@ag.or.at
 
    <biblioentry id="STON87b">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-06.pdf">The
-    design of the <productname>POSTGRES</productname> storage
-    system</ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M87-06.pdf">
+    The design of the <productname>POSTGRES</productname> storage system
+    </ulink></title>
+    <titleabbrev>Stonebraker, 1987</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -381,9 +428,10 @@ ssimkovi@ag.or.at
 
    <biblioentry id="STON89">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-82.pdf">A
-    commentary on the <productname>POSTGRES</productname> rules
-    system</ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-82.pdf">
+    A commentary on the <productname>POSTGRES</productname> rules system
+    </ulink></title>
+    <titleabbrev>Stonebraker et al, 1989</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -407,8 +455,10 @@ ssimkovi@ag.or.at
 
    <biblioentry id="STON89b">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-17.pdf">The
-    case for partial indexes</ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M89-17.pdf">
+    The case for partial indexes
+    </ulink></title>
+    <titleabbrev>Stonebraker, M, 1989b</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -418,15 +468,17 @@ ssimkovi@ag.or.at
    </biblioset>
    <biblioset relation="journal">
     <title>SIGMOD Record 18(4)</title>
-    <date>Dec. 1989</date>
     <pagenums>4-11</pagenums>
+    <date>Dec. 1989</date>
    </biblioset>
    </biblioentry>
 
    <biblioentry id="STON90a">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-34.pdf">The
-    implementation of <productname>POSTGRES</productname></ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-34.pdf">
+    The implementation of <productname>POSTGRES</productname>
+    </ulink></title>
+    <titleabbrev>Stonebraker, Rowe, Hirohama, 1990</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
@@ -453,8 +505,10 @@ ssimkovi@ag.or.at
 
    <biblioentry id="STON90b">
    <biblioset relation="article">
-    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-36.pdf">On
-    Rules, Procedures, Caching and Views in Database Systems</ulink></title>
+    <title><ulink url="http://db.cs.berkeley.edu/papers/ERL-M90-36.pdf">
+    On Rules, Procedures, Caching and Views in Database Systems
+    </ulink></title>
+    <titleabbrev>Stonebraker et al, ACM, 1990</titleabbrev>
     <authorgroup>
      <author>
       <firstname>M.</firstname>
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index ac39c63..6f0a6f2 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -7753,6 +7753,13 @@
      </row>
 
      <row>
+      <entry><structfield>typsubscripting</structfield></entry>
+      <entry><type>regproc</type></entry>
+      <entry><literal><link linkend="catalog-pg-proc"><structname>pg_proc</structname></link>.oid</literal></entry>
+      <entry>Custom subscripting function with type-specific logic, or 0 if this type doesn't support subscripting.</entry>
+     </row>
+
+     <row>
       <entry><structfield>typdefaultbin</structfield></entry>
       <entry><type>pg_node_tree</type></entry>
       <entry></entry>
diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index ac339fb..2de3540 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -4363,8 +4363,8 @@ SELECT * FROM parent WHERE key = 2400;
         find the logs currently in use by the instance. Here is an example of
         this file's content:
 <programlisting>
-stderr log/postgresql.log
-csvlog log/postgresql.csv
+stderr pg_log/postgresql.log
+csvlog pg_log/postgresql.csv
 </programlisting>
 
         <filename>current_logfiles</filename> is recreated when a new log file
@@ -4466,7 +4466,7 @@ local0.*    /var/log/postgresql
         cluster data directory.
         This parameter can only be set in the <filename>postgresql.conf</>
         file or on the server command line.
-        The default is <literal>log</literal>.
+        The default is <literal>pg_log</literal>.
        </para>
       </listitem>
      </varlistentry>
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml
index d1e915c11..09b5b3f 100644
--- a/doc/src/sgml/ddl.sgml
+++ b/doc/src/sgml/ddl.sgml
@@ -3854,12 +3854,8 @@ ANALYZE measurement;
 
     <listitem>
      <para>
-      Using the <literal>ON CONFLICT</literal> clause with partitioned tables
-      will cause an error if <literal>DO UPDATE</literal> is specified as the
-      alternative action, because unique or exclusion constraints can only be
-      created on individual partitions.  There is no support for enforcing
-      uniqueness (or an exclusion constraint) across an entire partitioning
-      hierarchy.
+      <command>INSERT</command> statements with <literal>ON CONFLICT</>
+      clause are currently not allowed on partitioned tables.
      </para>
     </listitem>
 
diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml
index c4f211b..5b77da4 100644
--- a/doc/src/sgml/extend.sgml
+++ b/doc/src/sgml/extend.sgml
@@ -35,6 +35,11 @@
     </listitem>
     <listitem>
      <para>
+      subscripting procedure (starting in <xref linkend="xsubscripting">)
+     </para>
+    </listitem>
+    <listitem>
+     <para>
       operator classes for indexes (starting in <xref linkend="xindex">)
      </para>
     </listitem>
@@ -283,6 +288,7 @@
   &xaggr;
   &xtypes;
   &xoper;
+  &xsubscripting;
   &xindex;
 
 
diff --git a/doc/src/sgml/file-fdw.sgml b/doc/src/sgml/file-fdw.sgml
index 74941a6..309a303 100644
--- a/doc/src/sgml/file-fdw.sgml
+++ b/doc/src/sgml/file-fdw.sgml
@@ -262,7 +262,7 @@ CREATE FOREIGN TABLE pglog (
   location text,
   application_name text
 ) SERVER pglog
-OPTIONS ( filename '/home/josh/data/log/pglog.csv', format 'csv' );
+OPTIONS ( filename '/home/josh/9.1/data/pg_log/pglog.csv', format 'csv' );
 </programlisting>
   </para>
 
diff --git a/doc/src/sgml/filelist.sgml b/doc/src/sgml/filelist.sgml
index 6782f07..14051c7 100644
--- a/doc/src/sgml/filelist.sgml
+++ b/doc/src/sgml/filelist.sgml
@@ -72,6 +72,7 @@
 <!ENTITY xplang     SYSTEM "xplang.sgml">
 <!ENTITY xoper      SYSTEM "xoper.sgml">
 <!ENTITY xtypes     SYSTEM "xtypes.sgml">
+<!ENTITY xsubscripting SYSTEM "xsubscripting.sgml">
 <!ENTITY plperl     SYSTEM "plperl.sgml">
 <!ENTITY plpython   SYSTEM "plpython.sgml">
 <!ENTITY plsql      SYSTEM "plpgsql.sgml">
diff --git a/doc/src/sgml/generate-errcodes-table.pl b/doc/src/sgml/generate-errcodes-table.pl
index 01fc616..66be811 100644
--- a/doc/src/sgml/generate-errcodes-table.pl
+++ b/doc/src/sgml/generate-errcodes-table.pl
@@ -9,7 +9,7 @@ use strict;
 print
   "<!-- autogenerated from src/backend/utils/errcodes.txt, do not edit -->\n";
 
-open my $errcodes, '<', $ARGV[0] or die;
+open my $errcodes, $ARGV[0] or die;
 
 while (<$errcodes>)
 {
diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml
index 3cf78d6..efffd5c 100644
--- a/doc/src/sgml/json.sgml
+++ b/doc/src/sgml/json.sgml
@@ -569,4 +569,29 @@ SELECT jdoc-&gt;'guid', jdoc-&gt;'name' FROM api WHERE jdoc @&gt; '{"tags": ["qu
       compared using the default database collation.
   </para>
  </sect2>
+
+ <sect2 id="json-subscripting">
+  <title>JSON subscripting</title>
+  <para>
+   The JSONB data type supports array-style subscripting expressions to extract or update a particular element. An example of subscripting syntax:
+<programlisting>
+-- Extract value by key
+SELECT ('{"a": 1}'::jsonb)['a'];
+
+-- Extract nested value by key path
+SELECT ('{"a": {"b": {"c": 1}}}'::jsonb)['a']['b']['c'];
+
+-- Extract element by index
+SELECT ('[1, "2", null]'::jsonb)['1'];
+
+-- Update value by key
+UPDATE table_name SET jsonb_field['key'] = 1;
+
+-- Select records using where clause with subscripting
+SELECT * FROM table_name WHERE jsonb_field['key'] = '"value"';
+</programlisting>
+  </para>
+ </sect2>
+
+
 </sect1>
diff --git a/doc/src/sgml/mk_feature_tables.pl b/doc/src/sgml/mk_feature_tables.pl
index 9b111b8..93dab21 100644
--- a/doc/src/sgml/mk_feature_tables.pl
+++ b/doc/src/sgml/mk_feature_tables.pl
@@ -6,11 +6,11 @@ use strict;
 
 my $yesno = $ARGV[0];
 
-open my $pack, '<', $ARGV[1] or die;
+open PACK, $ARGV[1] or die;
 
 my %feature_packages;
 
-while (<$pack>)
+while (<PACK>)
 {
 	chomp;
 	my ($fid, $pname) = split /\t/;
@@ -24,13 +24,13 @@ while (<$pack>)
 	}
 }
 
-close $pack;
+close PACK;
 
-open my $feat, '<', $ARGV[2] or die;
+open FEAT, $ARGV[2] or die;
 
 print "<tbody>\n";
 
-while (<$feat>)
+while (<FEAT>)
 {
 	chomp;
 	my ($feature_id,      $feature_name, $subfeature_id,
@@ -69,4 +69,4 @@ while (<$feat>)
 
 print "</tbody>\n";
 
-close $feat;
+close FEAT;
diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml
index 9856968..e930731 100644
--- a/doc/src/sgml/monitoring.sgml
+++ b/doc/src/sgml/monitoring.sgml
@@ -620,8 +620,8 @@ postgres   27093  0.0  0.0  30096  2752 ?        Ss   11:34   0:00 postgres: ser
     <row>
      <entry><structfield>backend_start</></entry>
      <entry><type>timestamp with time zone</></entry>
-     <entry>Time when this process was started.  For client backends,
-      this is the time the client connected to the server.
+     <entry>Time when this process was started, i.e., when the
+      client connected to the server
      </entry>
     </row>
     <row>
@@ -797,17 +797,6 @@ postgres   27093  0.0  0.0  30096  2752 ?        Ss   11:34   0:00 postgres: ser
       <xref linkend="guc-track-activity-query-size">.
      </entry>
     </row>
-    <row>
-     <entry><structfield>backend_type</structfield></entry>
-     <entry><type>text</type></entry>
-     <entry>Type of current backend. Possible types are 
-      <literal>autovacuum launcher</>, <literal>autovacuum worker</>,
-      <literal>background worker</>, <literal>background writer</>,
-      <literal>client backend</>, <literal>checkpointer</>,
-      <literal>startup</>, <literal>walreceiver</>,
-      <literal>walsender</> and <literal>walwriter</>.
-     </entry>
-    </row>
    </tbody>
    </tgroup>
   </table>
diff --git a/doc/src/sgml/plpython.sgml b/doc/src/sgml/plpython.sgml
index 777a7ef..fb5d336 100644
--- a/doc/src/sgml/plpython.sgml
+++ b/doc/src/sgml/plpython.sgml
@@ -1047,14 +1047,6 @@ rv = plpy.execute(plan, ["name"], 5)
      </para>
 
      <para>
-      Alternatively, you can call the <function>execute</function> method on
-      the plan object:
-<programlisting>
-rv = plan.execute(["name"], 5)
-</programlisting>
-     </para>
-
-     <para>
       Query parameters and result row fields are converted between PostgreSQL
       and Python data types as described in <xref linkend="plpython-data">.
      </para>
@@ -1089,9 +1081,7 @@ $$ LANGUAGE plpythonu;
       as <literal>plpy.execute</literal> (except for the row limit) and returns
       a cursor object, which allows you to process large result sets in smaller
       chunks.  As with <literal>plpy.execute</literal>, either a query string
-      or a plan object along with a list of arguments can be used, or
-      the <function>cursor</function> function can be called as a method of
-      the plan object.
+      or a plan object along with a list of arguments can be used.
      </para>
 
      <para>
@@ -1135,7 +1125,7 @@ $$ LANGUAGE plpythonu;
 CREATE FUNCTION count_odd_prepared() RETURNS integer AS $$
 odd = 0
 plan = plpy.prepare("select num from largetable where num % $1 &lt;&gt; 0", ["integer"])
-rows = list(plpy.cursor(plan, [2]))  # or: = list(plan.cursor([2]))
+rows = list(plpy.cursor(plan, [2]))
 
 return len(rows)
 $$ LANGUAGE plpythonu;
diff --git a/doc/src/sgml/ref/alter_collation.sgml b/doc/src/sgml/ref/alter_collation.sgml
index 71cf4de..bf934ce 100644
--- a/doc/src/sgml/ref/alter_collation.sgml
+++ b/doc/src/sgml/ref/alter_collation.sgml
@@ -93,8 +93,7 @@ ALTER COLLATION <replaceable>name</replaceable> SET SCHEMA <replaceable>new_sche
     <listitem>
      <para>
       Updated the collation version.
-      See <xref linkend="sql-altercollation-notes"
-      endterm="sql-altercollation-notes-title"> below.
+      See <xref linkend="sql-altercollation-notes"> below.
      </para>
     </listitem>
    </varlistentry>
@@ -102,7 +101,7 @@ ALTER COLLATION <replaceable>name</replaceable> SET SCHEMA <replaceable>new_sche
  </refsect1>
 
  <refsect1 id="sql-altercollation-notes">
-  <title id="sql-altercollation-notes-title">Notes</title>
+  <title>Notes</title>
 
   <para>
    When using collations provided by the ICU library, the ICU-specific version
diff --git a/doc/src/sgml/ref/alter_table.sgml b/doc/src/sgml/ref/alter_table.sgml
index 7829f37..75de226 100644
--- a/doc/src/sgml/ref/alter_table.sgml
+++ b/doc/src/sgml/ref/alter_table.sgml
@@ -175,14 +175,9 @@ ALTER TABLE [ IF EXISTS ] <replaceable class="PARAMETER">name</replaceable>
      </para>
 
      <para>
-      If this table is a partition, one cannot perform <literal>DROP NOT NULL</literal>
+      If this table is a partition, one cannot perform <literal>DROP NOT NULL</>
       on a column if it is marked <literal>NOT NULL</literal> in the parent
-      table.  To drop the <literal>NOT NULL</literal> constraint from all the
-      partitions, perform <literal>DROP NOT NULL</literal> on the parent
-      table.  Even if there is no <literal>NOT NULL</> constraint on the
-      parent, such a constraint can still be added to individual partitions,
-      if desired; that is, the children can disallow nulls even if the parent
-      allows them, but not the other way around. 
+      table.
      </para>
     </listitem>
    </varlistentry>
diff --git a/doc/src/sgml/ref/create_table.sgml b/doc/src/sgml/ref/create_table.sgml
index 283d53e..9ed25c0 100644
--- a/doc/src/sgml/ref/create_table.sgml
+++ b/doc/src/sgml/ref/create_table.sgml
@@ -261,43 +261,43 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
       any existing partition of that parent.
      </para>
 
-     <para>
-      Each of the values specified in the partition bound specification is
-      a literal, <literal>NULL</literal>, or <literal>UNBOUNDED</literal>.
-      A literal is either a numeric constant or a string constant that is
-      coercible to the corresponding partition key column's type.
-     </para>
-
-     <para>
-      When creating a range partition, the lower bound specified with
-      <literal>FROM</literal> is an inclusive bound, whereas the upper
-      bound specified with <literal>TO</literal> is an exclusive bound.
-      That is, the values specified in the <literal>FROM</literal> list
-      are accepted values of the corresponding partition key columns in a
-      given partition, whereas those in the <literal>TO</literal> list are
-      not.  To be precise, this applies only to the first of the partition
-      key columns for which the corresponding values in the <literal>FROM</literal>
-      and <literal>TO</literal> lists are not equal.  All rows in a given
-      partition contain the same values for all preceding columns, equal to
-      those specified in <literal>FROM</literal> and <literal>TO</literal>
-      lists.  On the other hand, any subsequent columns are insignificant
-      as far as implicit partition constraint is concerned.
-     </para>
-
-     <para>
-      Specifying <literal>UNBOUNDED</literal> in <literal>FROM</literal>
-      signifies <literal>-infinity</literal> as the lower bound of the
-      corresponding column, whereas it signifies <literal>+infinity</literal>
-      as the upper bound when specified in <literal>TO</literal>.
-     </para>
-
-     <para>
-      When creating a list partition, <literal>NULL</literal> can be
-      specified to signify that the partition allows the partition key
-      column to be null.  However, there cannot be more than one such
-      list partition for a given parent table.  <literal>NULL</literal>
-      cannot be specified for range partitions.
-     </para>
+     <note>
+      <para>
+       Each of the values specified in the partition bound specification is
+       a literal, <literal>NULL</literal>, or <literal>UNBOUNDED</literal>.
+       A literal is either a numeric constant or a string constant that is
+       coercible to the corresponding partition key column's type.
+      </para>
+
+      <para>
+       When creating a range partition, the lower bound specified with
+       <literal>FROM</literal> is an inclusive bound, whereas the upper bound
+       specified with <literal>TO</literal> is an exclusive bound.  That is,
+       the values specified in the <literal>FROM</literal> list are accepted
+       values of the corresponding partition key columns in a given partition,
+       whereas those in the <literal>TO</literal> list are not.  To be precise,
+       this applies only to the first of the partition key columns for which
+       the corresponding values in the <literal>FROM</literal> and
+       <literal>TO</literal> lists are not equal.  All rows in a given
+       partition contain the same values for all preceding columns, equal to
+       those specified in <literal>FROM</literal> and <literal>TO</literal>
+       lists.  On the other hand, any subsequent columns are insignificant
+       as far as implicit partition constraint is concerned.
+
+       Specifying <literal>UNBOUNDED</literal> in <literal>FROM</literal>
+       signifies <literal>-infinity</literal> as the lower bound of the
+       corresponding column, whereas it signifies <literal>+infinity</literal>
+       as the upper bound when specified in <literal>TO</literal>.
+      </para>
+
+      <para>
+       When creating a list partition, <literal>NULL</literal> can be specified
+       to signify that the partition allows the partition key column to be null.
+       However, there cannot be more than one such list partition for a given
+       parent table.  <literal>NULL</literal> cannot be specified for range
+       partitions.
+      </para>
+     </note>
 
      <para>
       A partition must have the same column names and types as the partitioned
diff --git a/doc/src/sgml/ref/create_type.sgml b/doc/src/sgml/ref/create_type.sgml
index 7146c4a..c3df0f9 100644
--- a/doc/src/sgml/ref/create_type.sgml
+++ b/doc/src/sgml/ref/create_type.sgml
@@ -54,6 +54,7 @@ CREATE TYPE <replaceable class="parameter">name</replaceable> (
     [ , ELEMENT = <replaceable class="parameter">element</replaceable> ]
     [ , DELIMITER = <replaceable class="parameter">delimiter</replaceable> ]
     [ , COLLATABLE = <replaceable class="parameter">collatable</replaceable> ]
+    [ , SUBSCRIPTING = <replaceable class="parameter">subscripting_function</replaceable> ]
 )
 
 CREATE TYPE <replaceable class="parameter">name</replaceable>
@@ -194,7 +195,8 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
    <replaceable class="parameter">send_function</replaceable>,
    <replaceable class="parameter">type_modifier_input_function</replaceable>,
    <replaceable class="parameter">type_modifier_output_function</replaceable> and
-   <replaceable class="parameter">analyze_function</replaceable>
+   <replaceable class="parameter">analyze_function</replaceable>,
+   <replaceable class="parameter">subscripting_function</replaceable>
    are optional.  Generally these functions have to be coded in C
    or another low-level language.
   </para>
@@ -451,6 +453,22 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
    make use of the collation information; this does not happen
    automatically merely by marking the type collatable.
   </para>
+
+  <para>
+   The optional
+   <replaceable class="parameter">subscripting_function</replaceable>
+   contains type-specific logic for subscripting of the data type.
+   By default, there is no such function, which means that the data
+   type doesn't support subscripting. The subscripting function must be
+   declared to take a single argument of type <type>internal</>, and return
+   an <type>internal</> result. There are two example implementations of
+   subscripting functions, for the array
+   (<replaceable class="parameter">array_subscripting</replaceable>)
+   and jsonb
+   (<replaceable class="parameter">jsonb_subscripting</replaceable>)
+   types, in <filename>src/backend/utils/adt/arrayfuncs.c</> and
+   <filename>src/backend/utils/adt/jsonfuncs.c</> respectively.
+  </para>
   </refsect2>
 
   <refsect2>
@@ -766,6 +784,16 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
      </para>
     </listitem>
    </varlistentry>
+
+   <varlistentry>
+    <term><replaceable class="parameter">subscripting_function</replaceable></term>
+    <listitem>
+     <para>
+      The name of a function that contains type-specific subscripting logic for
+      the data type.
+     </para>
+    </listitem>
+   </varlistentry>
   </variablelist>
  </refsect1>
 
diff --git a/doc/src/sgml/stylesheet-fo.xsl b/doc/src/sgml/stylesheet-fo.xsl
index 8b555d1..434e69d 100644
--- a/doc/src/sgml/stylesheet-fo.xsl
+++ b/doc/src/sgml/stylesheet-fo.xsl
@@ -18,43 +18,12 @@
   <xsl:attribute name="wrap-option">wrap</xsl:attribute>
 </xsl:attribute-set>
 
-<xsl:attribute-set name="nongraphical.admonition.properties">
-  <xsl:attribute name="border-style">solid</xsl:attribute>
-  <xsl:attribute name="border-width">1pt</xsl:attribute>
-  <xsl:attribute name="border-color">black</xsl:attribute>
-  <xsl:attribute name="padding-start">12pt</xsl:attribute>
-  <xsl:attribute name="padding-end">12pt</xsl:attribute>
-  <xsl:attribute name="padding-top">6pt</xsl:attribute>
-  <xsl:attribute name="padding-bottom">6pt</xsl:attribute>
-</xsl:attribute-set>
-
-<xsl:attribute-set name="admonition.title.properties">
-  <xsl:attribute name="text-align">center</xsl:attribute>
-</xsl:attribute-set>
-
 <!-- Change display of some elements -->
 
 <xsl:template match="command">
   <xsl:call-template name="inline.monoseq"/>
 </xsl:template>
 
-<xsl:template match="confgroup" mode="bibliography.mode">
-  <fo:inline>
-    <xsl:apply-templates select="conftitle/text()" mode="bibliography.mode"/>
-    <xsl:text>, </xsl:text>
-    <xsl:apply-templates select="confdates/text()" mode="bibliography.mode"/>
-    <xsl:value-of select="$biblioentry.item.separator"/>
-  </fo:inline>
-</xsl:template>
-
-<xsl:template match="isbn" mode="bibliography.mode">
-  <fo:inline>
-    <xsl:text>ISBN </xsl:text>
-    <xsl:apply-templates mode="bibliography.mode"/>
-    <xsl:value-of select="$biblioentry.item.separator"/>
-  </fo:inline>
-</xsl:template>
-
 <!-- bug fix from <https://sourceforge.net/p/docbook/bugs/1360/#831b> -->
 
 <xsl:template match="varlistentry/term" mode="xref-to">
diff --git a/doc/src/sgml/stylesheet.xsl b/doc/src/sgml/stylesheet.xsl
index e36e8cc..efcb80f 100644
--- a/doc/src/sgml/stylesheet.xsl
+++ b/doc/src/sgml/stylesheet.xsl
@@ -40,27 +40,6 @@
   <xsl:call-template name="inline.monoseq"/>
 </xsl:template>
 
-<xsl:template match="confgroup" mode="bibliography.mode">
-  <span>
-    <xsl:call-template name="common.html.attributes"/>
-    <xsl:call-template name="id.attribute"/>
-    <xsl:apply-templates select="conftitle/text()" mode="bibliography.mode"/>
-    <xsl:text>, </xsl:text>
-    <xsl:apply-templates select="confdates/text()" mode="bibliography.mode"/>
-    <xsl:copy-of select="$biblioentry.item.separator"/>
-  </span>
-</xsl:template>
-
-<xsl:template match="isbn" mode="bibliography.mode">
-  <span>
-    <xsl:call-template name="common.html.attributes"/>
-    <xsl:call-template name="id.attribute"/>
-    <xsl:text>ISBN </xsl:text>
-    <xsl:apply-templates mode="bibliography.mode"/>
-    <xsl:copy-of select="$biblioentry.item.separator"/>
-  </span>
-</xsl:template>
-
 
 <!-- table of contents configuration -->
 
diff --git a/doc/src/sgml/xsubscripting.sgml b/doc/src/sgml/xsubscripting.sgml
new file mode 100644
index 0000000..6fd4283
--- /dev/null
+++ b/doc/src/sgml/xsubscripting.sgml
@@ -0,0 +1,102 @@
+<!-- doc/src/sgml/xsubscripting.sgml -->
+
+ <sect1 id="xsubscripting">
+  <title>User-defined subscripting procedure</title>
+
+  <indexterm zone="xsubscripting">
+    <primary>custom subscripting</primary>
+  </indexterm>
+  <para>
+  When you define a new base type, you can also specify a custom procedure
+  to handle subscripting expressions. It should contain logic to verify
+  the expression and decide which function must be used to evaluate it.
+  For instance:
+</para>
+<programlisting><![CDATA[
+typedef struct Custom
+{
+    int first;
+    int second;
+}   Custom;
+
+Datum
+custom_subscripting_extract(PG_FUNCTION_ARGS)
+{
+    Custom                      *result = (Custom *) sbsdata->containerSource;
+    ExprEvalStep                *step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+
+    // Some extraction logic based on sbsdata
+}
+
+Datum
+custom_subscripting_assign(PG_FUNCTION_ARGS)
+{
+    Custom                  *containerSource = (Custom *) PG_GETARG_DATUM(0);
+    ExprEvalStep            *step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+
+    // Some assignment logic based on sbsdata
+}
+
+PG_FUNCTION_INFO_V1(custom_subscripting);
+
+Datum
+custom_subscript_parse(PG_FUNCTION_ARGS)
+{
+    bool                isAssignment = PG_GETARG_BOOL(0);
+    SubscriptingRef    *sbsref = (SubscriptingRef *) PG_GETARG_POINTER(0);
+    Datum               assign_proc = CStringGetTextDatum("custom_subscripting_assign");
+    Datum               extract_proc = CStringGetTextDatum("custom_subscripting_extract");
+
+    // Some verifications or type coercion
+
+    if (isAssignment)
+        sbsref->refevalfunc = DirectFunctionCall1(to_regproc, assign_proc);
+    else
+        sbsref->refevalfunc = DirectFunctionCall1(to_regproc, extract_proc);
+
+    PG_RETURN_POINTER(sbsref);
+}]]>
+</programlisting>
+
+<para>
+    Then you can define a subscripting procedure and a custom data type:
+</para>
+<programlisting>
+CREATE FUNCTION custom_subscript_parse(internal)
+    RETURNS internal
+    AS '<replaceable>filename</replaceable>'
+    LANGUAGE C IMMUTABLE STRICT;
+
+CREATE TYPE custom (
+   internallength = 4,
+   input = custom_in,
+   output = custom_out,
+   subscripting = custom_subscript_parse
+);
+</programlisting>
+
+<para>
+    and use it as usual:
+</para>
+<programlisting>
+CREATE TABLE test_subscripting (
+    data    custom
+);
+
+INSERT INTO test_subscripting VALUES ('(1, 2)');
+
+SELECT data[0] from test_subscripting;
+
+UPDATE test_subscripting SET data[1] = 3;
+</programlisting>
+
+
+  <para>
+   The examples of custom subscripting implementation can be found in
+   <filename>subscripting.sql</filename> and <filename>subscripting.c</filename>
+   in the <filename>src/tutorial</> directory of the source distribution.
+   See the <filename>README</> file in that directory for instructions
+   about running the examples.
+  </para>
+
+</sect1>
diff --git a/src/backend/access/hash/hash_xlog.c b/src/backend/access/hash/hash_xlog.c
index d9ac42c..de7522e 100644
--- a/src/backend/access/hash/hash_xlog.c
+++ b/src/backend/access/hash/hash_xlog.c
@@ -957,6 +957,8 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 	OffsetNumber	hoffnum;
 	TransactionId	latestRemovedXid = InvalidTransactionId;
 	int		i;
+	char *ptr;
+	Size len;
 
 	xlrec = (xl_hash_vacuum_one_page *) XLogRecGetData(record);
 
@@ -975,20 +977,12 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 		return latestRemovedXid;
 
 	/*
-	 * Check if WAL replay has reached a consistent database state. If not,
-	 * we must PANIC. See the definition of btree_xlog_delete_get_latestRemovedXid
-	 * for more details.
-	 */
-	if (!reachedConsistency)
-		elog(PANIC, "hash_xlog_vacuum_get_latestRemovedXid: cannot operate with inconsistent data");
-
-	/*
 	 * Get index page.  If the DB is consistent, this should not fail, nor
 	 * should any of the heap page fetches below.  If one does, we return
 	 * InvalidTransactionId to cancel all HS transactions.  That's probably
 	 * overkill, but it's safe, and certainly better than panicking here.
 	 */
-	XLogRecGetBlockTag(record, 0, &rnode, NULL, &blkno);
+	XLogRecGetBlockTag(record, 1, &rnode, NULL, &blkno);
 	ibuffer = XLogReadBufferExtended(rnode, MAIN_FORKNUM, blkno, RBM_NORMAL);
 
 	if (!BufferIsValid(ibuffer))
@@ -1000,7 +994,9 @@ hash_xlog_vacuum_get_latestRemovedXid(XLogReaderState *record)
 	 * Loop through the deleted index items to obtain the TransactionId from
 	 * the heap items they point to.
 	 */
-	unused = (OffsetNumber *) ((char *) xlrec + SizeOfHashVacuumOnePage);
+	ptr = XLogRecGetBlockData(record, 1, &len);
+
+	unused = (OffsetNumber *) ptr;
 
 	for (i = 0; i < xlrec->ntuples; i++)
 	{
@@ -1125,15 +1121,23 @@ hash_xlog_vacuum_one_page(XLogReaderState *record)
 
 	if (action == BLK_NEEDS_REDO)
 	{
+		char *ptr;
+		Size len;
+
+		ptr = XLogRecGetBlockData(record, 0, &len);
+
 		page = (Page) BufferGetPage(buffer);
 
-		if (XLogRecGetDataLen(record) > SizeOfHashVacuumOnePage)
+		if (len > 0)
 		{
 			OffsetNumber *unused;
+			OffsetNumber *unend;
 
-			unused = (OffsetNumber *) ((char *) xldata + SizeOfHashVacuumOnePage);
+			unused = (OffsetNumber *) ptr;
+			unend = (OffsetNumber *) ((char *) ptr + len);
 
-			PageIndexMultiDelete(page, unused, xldata->ntuples);
+			if ((unend - unused) > 0)
+				PageIndexMultiDelete(page, unused, unend - unused);
 		}
 
 		/*
diff --git a/src/backend/access/hash/hashinsert.c b/src/backend/access/hash/hashinsert.c
index 8699b5b..8640e85 100644
--- a/src/backend/access/hash/hashinsert.c
+++ b/src/backend/access/hash/hashinsert.c
@@ -344,6 +344,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 	Page	page = BufferGetPage(buf);
 	HashPageOpaque	pageopaque;
 	HashMetaPage	metap;
+	double tuples_removed = 0;
 
 	/* Scan each tuple in page to see if it is marked as LP_DEAD */
 	maxoff = PageGetMaxOffsetNumber(page);
@@ -354,7 +355,10 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		ItemId	itemId = PageGetItemId(page, offnum);
 
 		if (ItemIdIsDead(itemId))
+		{
 			deletable[ndeletable++] = offnum;
+			tuples_removed += 1;
+		}
 	}
 
 	if (ndeletable > 0)
@@ -382,7 +386,7 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 		pageopaque->hasho_flag &= ~LH_PAGE_HAS_DEAD_TUPLES;
 
 		metap = HashPageGetMeta(BufferGetPage(metabuf));
-		metap->hashm_ntuples -= ndeletable;
+		metap->hashm_ntuples -= tuples_removed;
 
 		MarkBufferDirty(buf);
 		MarkBufferDirty(metabuf);
@@ -394,18 +398,13 @@ _hash_vacuum_one_page(Relation rel, Buffer metabuf, Buffer buf,
 			XLogRecPtr	recptr;
 
 			xlrec.hnode = hnode;
-			xlrec.ntuples = ndeletable;
+			xlrec.ntuples = tuples_removed;
 
 			XLogBeginInsert();
-			XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
 			XLogRegisterData((char *) &xlrec, SizeOfHashVacuumOnePage);
 
-			/*
-			 * We need the target-offsets array whether or not we store the whole
-			 * buffer, to allow us to find the latestRemovedXid on a standby
-			 * server.
-			 */
-			XLogRegisterData((char *) deletable,
+			XLogRegisterBuffer(0, buf, REGBUF_STANDARD);
+			XLogRegisterBufData(0, (char *) deletable,
 						ndeletable * sizeof(OffsetNumber));
 
 			XLogRegisterBuffer(1, metabuf, REGBUF_STANDARD);
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 61ca2ec..622cc4b 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -1002,8 +1002,8 @@ _hash_alloc_buckets(Relation rel, BlockNumber firstblock, uint32 nblocks)
 	page = (Page) zerobuf;
 
 	/*
-	 * Initialize the page.  Just zeroing the page won't work; see
-	 * _hash_freeovflpage for similar usage.
+	 * Initialize the freed overflow page.  Just zeroing the page won't work;
+	 * see _hash_freeovflpage for similar usage.
 	 */
 	_hash_pageinit(page, BLCKSZ);
 
diff --git a/src/backend/access/heap/tuptoaster.c b/src/backend/access/heap/tuptoaster.c
index aa5a45d..19e7048 100644
--- a/src/backend/access/heap/tuptoaster.c
+++ b/src/backend/access/heap/tuptoaster.c
@@ -1289,74 +1289,6 @@ toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 
 
 /* ----------
- * toast_build_flattened_tuple -
- *
- *	Build a tuple containing no out-of-line toasted fields.
- *	(This does not eliminate compressed or short-header datums.)
- *
- *	This is essentially just like heap_form_tuple, except that it will
- *	expand any external-data pointers beforehand.
- *
- *	It's not very clear whether it would be preferable to decompress
- *	in-line compressed datums while at it.  For now, we don't.
- * ----------
- */
-HeapTuple
-toast_build_flattened_tuple(TupleDesc tupleDesc,
-							Datum *values,
-							bool *isnull)
-{
-	HeapTuple	new_tuple;
-	Form_pg_attribute *att = tupleDesc->attrs;
-	int			numAttrs = tupleDesc->natts;
-	int			num_to_free;
-	int			i;
-	Datum		new_values[MaxTupleAttributeNumber];
-	Pointer		freeable_values[MaxTupleAttributeNumber];
-
-	/*
-	 * We can pass the caller's isnull array directly to heap_form_tuple, but
-	 * we potentially need to modify the values array.
-	 */
-	Assert(numAttrs <= MaxTupleAttributeNumber);
-	memcpy(new_values, values, numAttrs * sizeof(Datum));
-
-	num_to_free = 0;
-	for (i = 0; i < numAttrs; i++)
-	{
-		/*
-		 * Look at non-null varlena attributes
-		 */
-		if (!isnull[i] && att[i]->attlen == -1)
-		{
-			struct varlena *new_value;
-
-			new_value = (struct varlena *) DatumGetPointer(new_values[i]);
-			if (VARATT_IS_EXTERNAL(new_value))
-			{
-				new_value = heap_tuple_fetch_attr(new_value);
-				new_values[i] = PointerGetDatum(new_value);
-				freeable_values[num_to_free++] = (Pointer) new_value;
-			}
-		}
-	}
-
-	/*
-	 * Form the reconfigured tuple.
-	 */
-	new_tuple = heap_form_tuple(tupleDesc, new_values, isnull);
-
-	/*
-	 * Free allocated temp values
-	 */
-	for (i = 0; i < num_to_free; i++)
-		pfree(freeable_values[i]);
-
-	return new_tuple;
-}
-
-
-/* ----------
  * toast_compress_datum -
  *
  *	Create a compressed version of a varlena datum
diff --git a/src/backend/access/rmgrdesc/hashdesc.c b/src/backend/access/rmgrdesc/hashdesc.c
index 35d86dc..5f5f4a0 100644
--- a/src/backend/access/rmgrdesc/hashdesc.c
+++ b/src/backend/access/rmgrdesc/hashdesc.c
@@ -113,7 +113,7 @@ hash_desc(StringInfo buf, XLogReaderState *record)
 			{
 				xl_hash_vacuum_one_page *xlrec = (xl_hash_vacuum_one_page *) rec;
 
-				appendStringInfo(buf, "ntuples %d",
+				appendStringInfo(buf, "ntuples %g",
 								 xlrec->ntuples);
 				break;
 			}
diff --git a/src/backend/access/transam/clog.c b/src/backend/access/transam/clog.c
index 7a007a6..2d33510 100644
--- a/src/backend/access/transam/clog.c
+++ b/src/backend/access/transam/clog.c
@@ -577,13 +577,6 @@ ShutdownCLOG(void)
 	/* Flush dirty CLOG pages to disk */
 	TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(false);
 	SimpleLruFlush(ClogCtl, false);
-
-	/*
-	 * fsync pg_xact to ensure that any files flushed previously are durably
-	 * on disk.
-	 */
-	fsync_fname("pg_xact", true);
-
 	TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(false);
 }
 
@@ -596,13 +589,6 @@ CheckPointCLOG(void)
 	/* Flush dirty CLOG pages to disk */
 	TRACE_POSTGRESQL_CLOG_CHECKPOINT_START(true);
 	SimpleLruFlush(ClogCtl, true);
-
-	/*
-	 * fsync pg_xact to ensure that any files flushed previously are durably
-	 * on disk.
-	 */
-	fsync_fname("pg_xact", true);
-
 	TRACE_POSTGRESQL_CLOG_CHECKPOINT_DONE(true);
 }
 
diff --git a/src/backend/access/transam/commit_ts.c b/src/backend/access/transam/commit_ts.c
index 03ffa20..8e1df6e 100644
--- a/src/backend/access/transam/commit_ts.c
+++ b/src/backend/access/transam/commit_ts.c
@@ -746,12 +746,6 @@ ShutdownCommitTs(void)
 {
 	/* Flush dirty CommitTs pages to disk */
 	SimpleLruFlush(CommitTsCtl, false);
-
-	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
-	 */
-	fsync_fname("pg_commit_ts", true);
 }
 
 /*
@@ -762,12 +756,6 @@ CheckPointCommitTs(void)
 {
 	/* Flush dirty CommitTs pages to disk */
 	SimpleLruFlush(CommitTsCtl, true);
-
-	/*
-	 * fsync pg_commit_ts to ensure that any files flushed previously are durably
-	 * on disk.
-	 */
-	fsync_fname("pg_commit_ts", true);
 }
 
 /*
diff --git a/src/backend/access/transam/twophase.c b/src/backend/access/transam/twophase.c
index 83169cc..4b4999f 100644
--- a/src/backend/access/transam/twophase.c
+++ b/src/backend/access/transam/twophase.c
@@ -1650,14 +1650,6 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
 	}
 	LWLockRelease(TwoPhaseStateLock);
 
-	/*
-	 * Flush unconditionally the parent directory to make any information
-	 * durable on disk.  Two-phase files could have been removed and those
-	 * removals need to be made persistent as well as any files newly created
-	 * previously since the last checkpoint.
-	 */
-	fsync_fname(TWOPHASE_DIR, true);
-
 	TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_DONE();
 
 	if (log_checkpoints && serialized_xacts > 0)
diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c
index 61ca81d..58790e0 100644
--- a/src/backend/access/transam/xlog.c
+++ b/src/backend/access/transam/xlog.c
@@ -3475,7 +3475,7 @@ InstallXLogFileSegment(XLogSegNo *segno, char *tmppath,
 	if (!find_free)
 	{
 		/* Force installation: get rid of any pre-existing segment file */
-		durable_unlink(path, DEBUG1);
+		unlink(path);
 	}
 	else
 	{
@@ -4026,13 +4026,16 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr)
 					  path)));
 			return;
 		}
-		rc = durable_unlink(newpath, LOG);
+		rc = unlink(newpath);
 #else
-		rc = durable_unlink(path, LOG);
+		rc = unlink(path);
 #endif
 		if (rc != 0)
 		{
-			/* Message already logged by durable_unlink() */
+			ereport(LOG,
+					(errcode_for_file_access(),
+			   errmsg("could not remove old transaction log file \"%s\": %m",
+					  path)));
 			return;
 		}
 		CheckpointStats.ckpt_segs_removed++;
@@ -10768,13 +10771,17 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
 						(errcode_for_file_access(),
 						 errmsg("could not read file \"%s\": %m",
 								BACKUP_LABEL_FILE)));
-			durable_unlink(BACKUP_LABEL_FILE, ERROR);
+			if (unlink(BACKUP_LABEL_FILE) != 0)
+				ereport(ERROR,
+						(errcode_for_file_access(),
+						 errmsg("could not remove file \"%s\": %m",
+								BACKUP_LABEL_FILE)));
 
 			/*
 			 * Remove tablespace_map file if present, it is created only if there
 			 * are tablespaces.
 			 */
-			durable_unlink(TABLESPACE_MAP, DEBUG1);
+			unlink(TABLESPACE_MAP);
 		}
 		PG_END_ENSURE_ERROR_CLEANUP(pg_stop_backup_callback, (Datum) BoolGetDatum(exclusive));
 	}
diff --git a/src/backend/bootstrap/bootstrap.c b/src/backend/bootstrap/bootstrap.c
index d8efdb5..6cfce4f 100644
--- a/src/backend/bootstrap/bootstrap.c
+++ b/src/backend/bootstrap/bootstrap.c
@@ -387,10 +387,6 @@ AuxiliaryProcessMain(int argc, char *argv[])
 		/* finish setting up bufmgr.c */
 		InitBufferPoolBackend();
 
-		/* Initialize backend status information */
-		pgstat_initialize();
-		pgstat_bestart();
-
 		/* register a before-shutdown callback for LWLock cleanup */
 		before_shmem_exit(ShutdownAuxiliaryProcess, 0);
 	}
diff --git a/src/backend/catalog/Catalog.pm b/src/backend/catalog/Catalog.pm
index 6ffd5f9..bccbc51 100644
--- a/src/backend/catalog/Catalog.pm
+++ b/src/backend/catalog/Catalog.pm
@@ -44,13 +44,13 @@ sub Catalogs
 		$catalog{columns} = [];
 		$catalog{data}    = [];
 
-		open(my $ifh, '<', $input_file) || die "$input_file: $!";
+		open(INPUT_FILE, '<', $input_file) || die "$input_file: $!";
 
 		my ($filename) = ($input_file =~ m/(\w+)\.h$/);
 		my $natts_pat = "Natts_$filename";
 
 		# Scan the input file.
-		while (<$ifh>)
+		while (<INPUT_FILE>)
 		{
 
 			# Strip C-style comments.
@@ -59,7 +59,7 @@ sub Catalogs
 			{
 
 				# handle multi-line comments properly.
-				my $next_line = <$ifh>;
+				my $next_line = <INPUT_FILE>;
 				die "$input_file: ends within C-style comment\n"
 				  if !defined $next_line;
 				$_ .= $next_line;
@@ -211,7 +211,7 @@ sub Catalogs
 			}
 		}
 		$catalogs{$catname} = \%catalog;
-		close $ifh;
+		close INPUT_FILE;
 	}
 	return \%catalogs;
 }
diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c
index ee27cae..3ade3b1 100644
--- a/src/backend/catalog/dependency.c
+++ b/src/backend/catalog/dependency.c
@@ -1647,6 +1647,14 @@ find_expr_references_walker(Node *node,
 						   context->addrs);
 		/* fall through to examine arguments */
 	}
+	else if (IsA(node, SubscriptingRef))
+	{
+		SubscriptingRef   *sbsref = (SubscriptingRef *) node;
+
+		add_object_address(OCLASS_PROC, sbsref->refevalfunc, 0,
+						   context->addrs);
+		/* fall through to examine arguments */
+	}
 	else if (IsA(node, OpExpr))
 	{
 		OpExpr	   *opexpr = (OpExpr *) node;
diff --git a/src/backend/catalog/genbki.pl b/src/backend/catalog/genbki.pl
index f9ecb02..079516c 100644
--- a/src/backend/catalog/genbki.pl
+++ b/src/backend/catalog/genbki.pl
@@ -66,16 +66,16 @@ if ($output_path ne '' && substr($output_path, -1) ne '/')
 # Open temp files
 my $tmpext  = ".tmp$$";
 my $bkifile = $output_path . 'postgres.bki';
-open my $bki, '>', $bkifile . $tmpext
+open BKI, '>', $bkifile . $tmpext
   or die "can't open $bkifile$tmpext: $!";
 my $schemafile = $output_path . 'schemapg.h';
-open my $schemapg, '>', $schemafile . $tmpext
+open SCHEMAPG, '>', $schemafile . $tmpext
   or die "can't open $schemafile$tmpext: $!";
 my $descrfile = $output_path . 'postgres.description';
-open my $descr, '>', $descrfile . $tmpext
+open DESCR, '>', $descrfile . $tmpext
   or die "can't open $descrfile$tmpext: $!";
 my $shdescrfile = $output_path . 'postgres.shdescription';
-open my $shdescr, '>', $shdescrfile . $tmpext
+open SHDESCR, '>', $shdescrfile . $tmpext
   or die "can't open $shdescrfile$tmpext: $!";
 
 # Fetch some special data that we will substitute into the output file.
@@ -97,7 +97,7 @@ my $catalogs = Catalog::Catalogs(@input_files);
 # Generate postgres.bki, postgres.description, and postgres.shdescription
 
 # version marker for .bki file
-print $bki "# PostgreSQL $major_version\n";
+print BKI "# PostgreSQL $major_version\n";
 
 # vars to hold data needed for schemapg.h
 my %schemapg_entries;
@@ -110,7 +110,7 @@ foreach my $catname (@{ $catalogs->{names} })
 
 	# .bki CREATE command for this catalog
 	my $catalog = $catalogs->{$catname};
-	print $bki "create $catname $catalog->{relation_oid}"
+	print BKI "create $catname $catalog->{relation_oid}"
 	  . $catalog->{shared_relation}
 	  . $catalog->{bootstrap}
 	  . $catalog->{without_oids}
@@ -120,7 +120,7 @@ foreach my $catname (@{ $catalogs->{names} })
 	my @attnames;
 	my $first = 1;
 
-	print $bki " (\n";
+	print BKI " (\n";
 	foreach my $column (@{ $catalog->{columns} })
 	{
 		my $attname = $column->{name};
@@ -130,27 +130,27 @@ foreach my $catname (@{ $catalogs->{names} })
 
 		if (!$first)
 		{
-			print $bki " ,\n";
+			print BKI " ,\n";
 		}
 		$first = 0;
 
-		print $bki " $attname = $atttype";
+		print BKI " $attname = $atttype";
 
 		if (defined $column->{forcenotnull})
 		{
-			print $bki " FORCE NOT NULL";
+			print BKI " FORCE NOT NULL";
 		}
 		elsif (defined $column->{forcenull})
 		{
-			print $bki " FORCE NULL";
+			print BKI " FORCE NULL";
 		}
 	}
-	print $bki "\n )\n";
+	print BKI "\n )\n";
 
    # open it, unless bootstrap case (create bootstrap does this automatically)
 	if ($catalog->{bootstrap} eq '')
 	{
-		print $bki "open $catname\n";
+		print BKI "open $catname\n";
 	}
 
 	if (defined $catalog->{data})
@@ -175,17 +175,17 @@ foreach my $catname (@{ $catalogs->{names} })
 
 			# Write to postgres.bki
 			my $oid = $row->{oid} ? "OID = $row->{oid} " : '';
-			printf $bki "insert %s( %s)\n", $oid, $row->{bki_values};
+			printf BKI "insert %s( %s)\n", $oid, $row->{bki_values};
 
 		   # Write comments to postgres.description and postgres.shdescription
 			if (defined $row->{descr})
 			{
-				printf $descr "%s\t%s\t0\t%s\n", $row->{oid}, $catname,
+				printf DESCR "%s\t%s\t0\t%s\n", $row->{oid}, $catname,
 				  $row->{descr};
 			}
 			if (defined $row->{shdescr})
 			{
-				printf $shdescr "%s\t%s\t%s\n", $row->{oid}, $catname,
+				printf SHDESCR "%s\t%s\t%s\n", $row->{oid}, $catname,
 				  $row->{shdescr};
 			}
 		}
@@ -267,7 +267,7 @@ foreach my $catname (@{ $catalogs->{names} })
 		}
 	}
 
-	print $bki "close $catname\n";
+	print BKI "close $catname\n";
 }
 
 # Any information needed for the BKI that is not contained in a pg_*.h header
@@ -276,19 +276,19 @@ foreach my $catname (@{ $catalogs->{names} })
 # Write out declare toast/index statements
 foreach my $declaration (@{ $catalogs->{toasting}->{data} })
 {
-	print $bki $declaration;
+	print BKI $declaration;
 }
 
 foreach my $declaration (@{ $catalogs->{indexing}->{data} })
 {
-	print $bki $declaration;
+	print BKI $declaration;
 }
 
 
 # Now generate schemapg.h
 
 # Opening boilerplate for schemapg.h
-print $schemapg <<EOM;
+print SCHEMAPG <<EOM;
 /*-------------------------------------------------------------------------
  *
  * schemapg.h
@@ -313,19 +313,19 @@ EOM
 # Emit schemapg declarations
 foreach my $table_name (@tables_needing_macros)
 {
-	print $schemapg "\n#define Schema_$table_name \\\n";
-	print $schemapg join ", \\\n", @{ $schemapg_entries{$table_name} };
-	print $schemapg "\n";
+	print SCHEMAPG "\n#define Schema_$table_name \\\n";
+	print SCHEMAPG join ", \\\n", @{ $schemapg_entries{$table_name} };
+	print SCHEMAPG "\n";
 }
 
 # Closing boilerplate for schemapg.h
-print $schemapg "\n#endif /* SCHEMAPG_H */\n";
+print SCHEMAPG "\n#endif /* SCHEMAPG_H */\n";
 
 # We're done emitting data
-close $bki;
-close $schemapg;
-close $descr;
-close $shdescr;
+close BKI;
+close SCHEMAPG;
+close DESCR;
+close SHDESCR;
 
 # Finally, rename the completed files into place.
 Catalog::RenameTempFile($bkifile,     $tmpext);
@@ -425,7 +425,7 @@ sub bki_insert
 	my @attnames   = @_;
 	my $oid        = $row->{oid} ? "OID = $row->{oid} " : '';
 	my $bki_values = join ' ', map $row->{$_}, @attnames;
-	printf $bki "insert %s( %s)\n", $oid, $bki_values;
+	printf BKI "insert %s( %s)\n", $oid, $bki_values;
 }
 
 # The field values of a Schema_pg_xxx declaration are similar, but not
@@ -472,15 +472,15 @@ sub find_defined_symbol
 		}
 		my $file = $path . $catalog_header;
 		next if !-f $file;
-		open(my $find_defined_symbol, '<', $file) || die "$file: $!";
-		while (<$find_defined_symbol>)
+		open(FIND_DEFINED_SYMBOL, '<', $file) || die "$file: $!";
+		while (<FIND_DEFINED_SYMBOL>)
 		{
 			if (/^#define\s+\Q$symbol\E\s+(\S+)/)
 			{
 				return $1;
 			}
 		}
-		close $find_defined_symbol;
+		close FIND_DEFINED_SYMBOL;
 		die "$file: no definition found for $symbol\n";
 	}
 	die "$catalog_header: not found in any include directory\n";
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index eee5e2f..b01be1a 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -976,7 +976,8 @@ AddNewRelationType(const char *typeName,
 				   -1,			/* typmod */
 				   0,			/* array dimensions for typBaseType */
 				   false,		/* Type NOT NULL */
-				   InvalidOid); /* rowtypes never have a collation */
+				   InvalidOid,  /* rowtypes never have a collation */
+				   InvalidOid);	/* typsbsparse - none */
 }
 
 /* --------------------------------
@@ -1246,7 +1247,8 @@ heap_create_with_catalog(const char *relname,
 				   -1,			/* typmod */
 				   0,			/* array dimensions for typBaseType */
 				   false,		/* Type NOT NULL */
-				   InvalidOid); /* rowtypes never have a collation */
+				   InvalidOid,  /* rowtypes never have a collation */
+				   F_ARRAY_SUBSCRIPT_PARSE);	/* array implementation */
 
 		pfree(relarrayname);
 	}
diff --git a/src/backend/catalog/partition.c b/src/backend/catalog/partition.c
index f9fd136..2b5b8e8 100644
--- a/src/backend/catalog/partition.c
+++ b/src/backend/catalog/partition.c
@@ -921,12 +921,8 @@ get_qual_from_partbound(Relation rel, Relation parent, Node *bound)
  * map_partition_varattnos - maps varattno of any Vars in expr from the
  * parent attno to partition attno.
  *
- * We must allow for cases where physical attnos of a partition can be
+ * We must allow for a case where physical attnos of a partition can be
  * different from the parent's.
- *
- * Note: this will work on any node tree, so really the argument and result
- * should be declared "Node *".  But a substantial majority of the callers
- * are working on Lists, so it's less messy to do the casts internally.
  */
 List *
 map_partition_varattnos(List *expr, int target_varno,
@@ -1729,14 +1725,10 @@ get_partition_for_tuple(PartitionDispatch *pd,
 						errmsg("range partition key of row contains null")));
 		}
 
-		/*
-		 * A null partition key is only acceptable if null-accepting list
-		 * partition exists.
-		 */
-		cur_index = -1;
-		if (isnull[0] && partdesc->boundinfo->has_null)
+		if (partdesc->boundinfo->has_null && isnull[0])
+			/* Tuple maps to the null-accepting list partition */
 			cur_index = partdesc->boundinfo->null_index;
-		else if (!isnull[0])
+		else
 		{
 			/* Else bsearch in partdesc->boundinfo */
 			bool		equal = false;
diff --git a/src/backend/catalog/pg_inherits.c b/src/backend/catalog/pg_inherits.c
index 827ad2a..9bd2cd1 100644
--- a/src/backend/catalog/pg_inherits.c
+++ b/src/backend/catalog/pg_inherits.c
@@ -31,16 +31,7 @@
 #include "utils/fmgroids.h"
 #include "utils/syscache.h"
 #include "utils/tqual.h"
-#include "utils/memutils.h"
 
-/*
- * Entry of a hash table used in find_all_inheritors. See below.
- */
-typedef struct SeenRelsEntry
-{
-	Oid			 rel_id;			/* relation oid */
-	ListCell	*numparents_cell;	/* corresponding list cell */
-} SeenRelsEntry;
 
 /*
  * find_inheritance_children
@@ -166,34 +157,11 @@ find_inheritance_children(Oid parentrelId, LOCKMODE lockmode)
 List *
 find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 {
-	/* hash table for O(1) rel_oid -> rel_numparents cell lookup */
-	HTAB		   *seen_rels;
-	HASHCTL			ctl;
-	MemoryContext	new_ctx;
 	List	   *rels_list,
 			   *rel_numparents;
 	ListCell   *l;
 
 	/*
-	 * We need a separate memory context for a hash table. This is because
-	 * hash table is used only in this procedure. To free a memory we need to
-	 * call hash_destroy which is just a wrapper around MemoryContextDelete.
-	 */
-	new_ctx = AllocSetContextCreate(CurrentMemoryContext,
-									"FindAllInheritorsSeenRelsContext",
-									ALLOCSET_DEFAULT_SIZES);
-
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(SeenRelsEntry);
-	ctl.hcxt = new_ctx;
-
-	seen_rels = hash_create(
-		"find_all_inheritors temporary table",
-		32, /* start small and extend */
-		&ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-
-	/*
 	 * We build a list starting with the given rel and adding all direct and
 	 * indirect children.  We can use a single list as both the record of
 	 * already-found rels and the agenda of rels yet to be scanned for more
@@ -222,21 +190,26 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 		foreach(lc, currentchildren)
 		{
 			Oid			child_oid = lfirst_oid(lc);
-			bool			found;
-			SeenRelsEntry	*hash_entry;
+			bool		found = false;
+			ListCell   *lo;
+			ListCell   *li;
 
-			hash_entry = hash_search(seen_rels, &child_oid, HASH_ENTER, &found);
-			if (found)
+			/* if the rel is already there, bump number-of-parents counter */
+			forboth(lo, rels_list, li, rel_numparents)
 			{
-				/* if the rel is already there, bump number-of-parents counter */
-				lfirst_int(hash_entry->numparents_cell)++;
+				if (lfirst_oid(lo) == child_oid)
+				{
+					lfirst_int(li)++;
+					found = true;
+					break;
+				}
 			}
-			else
+
+			/* if it's not there, add it. expect 1 parent, initially. */
+			if (!found)
 			{
-				/* if it's not there, add it. expect 1 parent, initially. */
 				rels_list = lappend_oid(rels_list, child_oid);
 				rel_numparents = lappend_int(rel_numparents, 1);
-				hash_entry->numparents_cell = rel_numparents->tail;
 			}
 		}
 	}
@@ -245,9 +218,6 @@ find_all_inheritors(Oid parentrelId, LOCKMODE lockmode, List **numparents)
 		*numparents = rel_numparents;
 	else
 		list_free(rel_numparents);
-
-	hash_destroy(seen_rels);
-
 	return rels_list;
 }
 
diff --git a/src/backend/catalog/pg_type.c b/src/backend/catalog/pg_type.c
index 04c10c6..b8d2bed 100644
--- a/src/backend/catalog/pg_type.c
+++ b/src/backend/catalog/pg_type.c
@@ -118,6 +118,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
 	values[Anum_pg_type_typtypmod - 1] = Int32GetDatum(-1);
 	values[Anum_pg_type_typndims - 1] = Int32GetDatum(0);
 	values[Anum_pg_type_typcollation - 1] = ObjectIdGetDatum(InvalidOid);
+	values[Anum_pg_type_typsbsparse - 1] = ObjectIdGetDatum(InvalidOid);
 	nulls[Anum_pg_type_typdefaultbin - 1] = true;
 	nulls[Anum_pg_type_typdefault - 1] = true;
 	nulls[Anum_pg_type_typacl - 1] = true;
@@ -164,6 +165,7 @@ TypeShellMake(const char *typeName, Oid typeNamespace, Oid ownerId)
 								 false,
 								 InvalidOid,
 								 InvalidOid,
+								 InvalidOid,
 								 NULL,
 								 false);
 
@@ -222,7 +224,8 @@ TypeCreate(Oid newTypeOid,
 		   int32 typeMod,
 		   int32 typNDims,		/* Array dimensions for baseType */
 		   bool typeNotNull,
-		   Oid typeCollation)
+		   Oid typeCollation,
+		   Oid subscriptingProcedure)
 {
 	Relation	pg_type_desc;
 	Oid			typeObjectId;
@@ -362,6 +365,7 @@ TypeCreate(Oid newTypeOid,
 	values[Anum_pg_type_typtypmod - 1] = Int32GetDatum(typeMod);
 	values[Anum_pg_type_typndims - 1] = Int32GetDatum(typNDims);
 	values[Anum_pg_type_typcollation - 1] = ObjectIdGetDatum(typeCollation);
+	values[Anum_pg_type_typsbsparse - 1] = ObjectIdGetDatum(subscriptingProcedure);
 
 	/*
 	 * initialize the default binary value for this type.  Check for nulls of
@@ -479,6 +483,7 @@ TypeCreate(Oid newTypeOid,
 								 isImplicitArray,
 								 baseType,
 								 typeCollation,
+								 subscriptingProcedure,
 								 (defaultTypeBin ?
 								  stringToNode(defaultTypeBin) :
 								  NULL),
@@ -525,6 +530,7 @@ GenerateTypeDependencies(Oid typeNamespace,
 						 bool isImplicitArray,
 						 Oid baseType,
 						 Oid typeCollation,
+						 Oid subscriptingProcedure,
 						 Node *defaultExpr,
 						 bool rebuild)
 {
@@ -677,6 +683,14 @@ GenerateTypeDependencies(Oid typeNamespace,
 	/* Normal dependency on the default expression. */
 	if (defaultExpr)
 		recordDependencyOnExpr(&myself, defaultExpr, NIL, DEPENDENCY_NORMAL);
+
+	if (OidIsValid(subscriptingProcedure))
+	{
+		referenced.classId = ProcedureRelationId;
+		referenced.objectId = subscriptingProcedure;
+		referenced.objectSubId = 0;
+		recordDependencyOn(&myself, &referenced, DEPENDENCY_NORMAL);
+	}
 }
 
 /*
diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql
index d357c8b..d8b762e 100644
--- a/src/backend/catalog/system_views.sql
+++ b/src/backend/catalog/system_views.sql
@@ -716,8 +716,7 @@ CREATE VIEW pg_stat_activity AS
             S.state,
             S.backend_xid,
             s.backend_xmin,
-            S.query,
-            S.backend_type
+            S.query
     FROM pg_stat_get_activity(NULL) AS S
         LEFT JOIN pg_database AS D ON (S.datid = D.oid)
         LEFT JOIN pg_authid AS U ON (S.usesysid = U.oid);
diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c
index ea19ba6..1036b96 100644
--- a/src/backend/commands/explain.c
+++ b/src/backend/commands/explain.c
@@ -1015,10 +1015,6 @@ ExplainNode(PlanState *planstate, List *ancestors,
 						pname = "HashAggregate";
 						strategy = "Hashed";
 						break;
-					case AGG_MIXED:
-						pname = "MixedAggregate";
-						strategy = "Mixed";
-						break;
 					default:
 						pname = "Aggregate ???";
 						strategy = "???";
@@ -1982,19 +1978,6 @@ show_grouping_set_keys(PlanState *planstate,
 	ListCell   *lc;
 	List	   *gsets = aggnode->groupingSets;
 	AttrNumber *keycols = aggnode->grpColIdx;
-	const char *keyname;
-	const char *keysetname;
-
-	if (aggnode->aggstrategy == AGG_HASHED || aggnode->aggstrategy == AGG_MIXED)
-	{
-		keyname = "Hash Key";
-		keysetname = "Hash Keys";
-	}
-	else
-	{
-		keyname = "Group Key";
-		keysetname = "Group Keys";
-	}
 
 	ExplainOpenGroup("Grouping Set", NULL, true, es);
 
@@ -2009,7 +1992,7 @@ show_grouping_set_keys(PlanState *planstate,
 			es->indent++;
 	}
 
-	ExplainOpenGroup(keysetname, keysetname, false, es);
+	ExplainOpenGroup("Group Keys", "Group Keys", false, es);
 
 	foreach(lc, gsets)
 	{
@@ -2033,12 +2016,12 @@ show_grouping_set_keys(PlanState *planstate,
 		}
 
 		if (!result && es->format == EXPLAIN_FORMAT_TEXT)
-			ExplainPropertyText(keyname, "()", es);
+			ExplainPropertyText("Group Key", "()", es);
 		else
-			ExplainPropertyListNested(keyname, result, es);
+			ExplainPropertyListNested("Group Key", result, es);
 	}
 
-	ExplainCloseGroup(keysetname, keysetname, false, es);
+	ExplainCloseGroup("Group Keys", "Group Keys", false, es);
 
 	if (sortnode && es->format == EXPLAIN_FORMAT_TEXT)
 		es->indent--;
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 4cf2efb..96cf42a 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -167,7 +167,7 @@ typedef struct AlteredTableInfo
 	Oid			newTableSpace;	/* new tablespace; 0 means no change */
 	bool		chgPersistence; /* T if SET LOGGED/UNLOGGED is used */
 	char		newrelpersistence;		/* if above is true */
-	Expr	   *partition_constraint;	/* for attach partition validation */
+	List	   *partition_constraint;	/* for attach partition validation */
 	/* Objects to rebuild after completing ALTER TYPE operations */
 	List	   *changedConstraintOids;	/* OIDs of constraints to rebuild */
 	List	   *changedConstraintDefs;	/* string definitions of same */
@@ -3740,7 +3740,7 @@ ATRewriteCatalogs(List **wqueue, LOCKMODE lockmode)
 		 */
 		if (((tab->relkind == RELKIND_RELATION ||
 			  tab->relkind == RELKIND_PARTITIONED_TABLE) &&
-			 tab->partition_constraint == NULL) ||
+			 tab->partition_constraint == NIL) ||
 			tab->relkind == RELKIND_MATVIEW)
 			AlterTableCreateToastTable(tab->relid, (Datum) 0, lockmode);
 	}
@@ -4182,7 +4182,7 @@ ATRewriteTables(AlterTableStmt *parsetree, List **wqueue, LOCKMODE lockmode)
 			 * generated by ALTER TABLE commands, but don't rebuild data.
 			 */
 			if (tab->constraints != NIL || tab->new_notnull ||
-				tab->partition_constraint != NULL)
+				tab->partition_constraint != NIL)
 				ATRewriteTable(tab, InvalidOid, lockmode);
 
 			/*
@@ -4330,7 +4330,7 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 	if (tab->partition_constraint)
 	{
 		needscan = true;
-		partqualstate = ExecPrepareExpr(tab->partition_constraint, estate);
+		partqualstate = ExecPrepareCheck(tab->partition_constraint, estate);
 	}
 
 	foreach(l, tab->newvals)
@@ -13354,9 +13354,9 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
 						RelationGetRelationName(attachRel))));
 
 	/*
-	 * Set up to have the table be scanned to validate the partition
+	 * Set up to have the table be scanned to validate the partition
 	 * constraint (see partConstraint above).  If it's a partitioned table, we
-	 * instead schedule its leaf partitions to be scanned.
+	 * instead schedule its leaf partitions to be scanned.
 	 */
 	if (!skip_validate)
 	{
@@ -13376,6 +13376,7 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
 			Oid			part_relid = lfirst_oid(lc);
 			Relation	part_rel;
 			Expr	   *constr;
+			List	   *my_constr;
 
 			/* Lock already taken */
 			if (part_relid != RelationGetRelid(attachRel))
@@ -13397,11 +13398,12 @@ ATExecAttachPartition(List **wqueue, Relation rel, PartitionCmd *cmd)
 			/* Grab a work queue entry */
 			tab = ATGetQueueEntry(wqueue, part_rel);
 
-			/* Adjust constraint to match this partition */
 			constr = linitial(partConstraint);
-			tab->partition_constraint = (Expr *)
-				map_partition_varattnos((List *) constr, 1,
-										part_rel, rel);
+			my_constr = make_ands_implicit((Expr *) constr);
+			tab->partition_constraint = map_partition_varattnos(my_constr,
+																1,
+																part_rel,
+																rel);
 			/* keep our lock until commit */
 			if (part_rel != attachRel)
 				heap_close(part_rel, NoLock);
diff --git a/src/backend/commands/typecmds.c b/src/backend/commands/typecmds.c
index c765e97..07b2f99 100644
--- a/src/backend/commands/typecmds.c
+++ b/src/backend/commands/typecmds.c
@@ -94,6 +94,7 @@ static Oid	findTypeSendFunction(List *procname, Oid typeOid);
 static Oid	findTypeTypmodinFunction(List *procname);
 static Oid	findTypeTypmodoutFunction(List *procname);
 static Oid	findTypeAnalyzeFunction(List *procname, Oid typeOid);
+static Oid	findTypeSubscriptingFunction(List *procname);
 static Oid	findRangeSubOpclass(List *opcname, Oid subtype);
 static Oid	findRangeCanonicalFunction(List *procname, Oid typeOid);
 static Oid	findRangeSubtypeDiffFunction(List *procname, Oid subtype);
@@ -125,6 +126,7 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 	List	   *typmodinName = NIL;
 	List	   *typmodoutName = NIL;
 	List	   *analyzeName = NIL;
+	List	   *subscriptingName = NIL;
 	char		category = TYPCATEGORY_USER;
 	bool		preferred = false;
 	char		delimiter = DEFAULT_TYPDELIM;
@@ -143,6 +145,7 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 	DefElem    *typmodinNameEl = NULL;
 	DefElem    *typmodoutNameEl = NULL;
 	DefElem    *analyzeNameEl = NULL;
+	DefElem    *subscriptingNameEl = NULL;
 	DefElem    *categoryEl = NULL;
 	DefElem    *preferredEl = NULL;
 	DefElem    *delimiterEl = NULL;
@@ -165,6 +168,7 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 	Oid			resulttype;
 	ListCell   *pl;
 	ObjectAddress address;
+	Oid			subscriptingOid = InvalidOid;
 
 	/*
 	 * As of Postgres 8.4, we require superuser privilege to create a base
@@ -264,6 +268,9 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 		else if (pg_strcasecmp(defel->defname, "analyze") == 0 ||
 				 pg_strcasecmp(defel->defname, "analyse") == 0)
 			defelp = &analyzeNameEl;
+		else if (pg_strcasecmp(defel->defname,
+							   "subscripting") == 0)
+			defelp = &subscriptingNameEl;
 		else if (pg_strcasecmp(defel->defname, "category") == 0)
 			defelp = &categoryEl;
 		else if (pg_strcasecmp(defel->defname, "preferred") == 0)
@@ -334,6 +341,8 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 		typmodoutName = defGetQualifiedName(typmodoutNameEl);
 	if (analyzeNameEl)
 		analyzeName = defGetQualifiedName(analyzeNameEl);
+	if (subscriptingNameEl)
+		subscriptingName = defGetQualifiedName(subscriptingNameEl);
 	if (categoryEl)
 	{
 		char	   *p = defGetString(categoryEl);
@@ -515,6 +524,9 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 	if (analyzeName)
 		analyzeOid = findTypeAnalyzeFunction(analyzeName, typoid);
 
+	if (subscriptingName)
+		subscriptingOid = findTypeSubscriptingFunction(subscriptingName);
+
 	/*
 	 * Check permissions on functions.  We choose to require the creator/owner
 	 * of a type to also own the underlying functions.  Since creating a type
@@ -634,7 +646,8 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 				   -1,			/* typMod (Domains only) */
 				   0,			/* Array Dimensions of typbasetype */
 				   false,		/* Type NOT NULL */
-				   collation);	/* type's collation */
+				   collation,	/* type's collation */
+				   subscriptingOid);	/* subscripting procedure */
 	Assert(typoid == address.objectId);
 
 	/*
@@ -675,7 +688,8 @@ DefineType(ParseState *pstate, List *names, List *parameters)
 			   -1,				/* typMod (Domains only) */
 			   0,				/* Array dimensions of typbasetype */
 			   false,			/* Type NOT NULL */
-			   collation);		/* type's collation */
+			   collation,		/* type's collation */
+			   F_ARRAY_SUBSCRIPT_PARSE);
 
 	pfree(array_type);
 
@@ -737,6 +751,7 @@ DefineDomain(CreateDomainStmt *stmt)
 	Oid			receiveProcedure;
 	Oid			sendProcedure;
 	Oid			analyzeProcedure;
+	Oid			subscriptingProcedure;
 	bool		byValue;
 	char		category;
 	char		delimiter;
@@ -860,6 +875,9 @@ DefineDomain(CreateDomainStmt *stmt)
 	/* Analysis function */
 	analyzeProcedure = baseType->typanalyze;
 
+	/* Subscripting function */
+	subscriptingProcedure = baseType->typsbsparse;
+
 	/* Inherited default value */
 	datum = SysCacheGetAttr(TYPEOID, typeTup,
 							Anum_pg_type_typdefault, &isnull);
@@ -1061,7 +1079,8 @@ DefineDomain(CreateDomainStmt *stmt)
 				   basetypeMod, /* typeMod value */
 				   typNDims,	/* Array dimensions for base type */
 				   typNotNull,	/* Type NOT NULL */
-				   domaincoll); /* type's collation */
+				   domaincoll,  /* type's collation */
+				   subscriptingProcedure);	/* subscripting procedure */
 
 	/*
 	 * Process constraints which refer to the domain ID returned by TypeCreate
@@ -1173,7 +1192,8 @@ DefineEnum(CreateEnumStmt *stmt)
 				   -1,			/* typMod (Domains only) */
 				   0,			/* Array dimensions of typbasetype */
 				   false,		/* Type NOT NULL */
-				   InvalidOid); /* type's collation */
+				   InvalidOid,  /* type's collation */
+				   InvalidOid);	/* typsbsparse - none */
 
 	/* Enter the enum's values into pg_enum */
 	EnumValuesCreate(enumTypeAddr.objectId, stmt->vals);
@@ -1213,7 +1233,8 @@ DefineEnum(CreateEnumStmt *stmt)
 			   -1,				/* typMod (Domains only) */
 			   0,				/* Array dimensions of typbasetype */
 			   false,			/* Type NOT NULL */
-			   InvalidOid);		/* type's collation */
+			   InvalidOid,		/* type's collation */
+			   F_ARRAY_SUBSCRIPT_PARSE);	/* array subscripting implementation */
 
 	pfree(enumArrayName);
 
@@ -1501,7 +1522,8 @@ DefineRange(CreateRangeStmt *stmt)
 				   -1,			/* typMod (Domains only) */
 				   0,			/* Array dimensions of typbasetype */
 				   false,		/* Type NOT NULL */
-				   InvalidOid); /* type's collation (ranges never have one) */
+				   InvalidOid,  /* type's collation (ranges never have one) */
+				   InvalidOid);	/* typsbsparse - none */
 	Assert(typoid == address.objectId);
 
 	/* Create the entry in pg_range */
@@ -1543,7 +1565,8 @@ DefineRange(CreateRangeStmt *stmt)
 			   -1,				/* typMod (Domains only) */
 			   0,				/* Array dimensions of typbasetype */
 			   false,			/* Type NOT NULL */
-			   InvalidOid);		/* typcollation */
+			   InvalidOid,		/* typcollation */
+			   F_ARRAY_SUBSCRIPT_PARSE);	/* array subscripting implementation */
 
 	pfree(rangeArrayName);
 
@@ -1887,6 +1910,33 @@ findTypeAnalyzeFunction(List *procname, Oid typeOid)
 	return procOid;
 }
 
+static Oid
+findTypeSubscriptingFunction(List *procname)
+{
+	Oid			argList[1];
+	Oid			procOid;
+
+	/*
+	 * Subscripting functions always take one INTERNAL argument and return INTERNAL.
+	 */
+	argList[0] = INTERNALOID;
+
+	procOid = LookupFuncName(procname, 1, argList, true);
+	if (!OidIsValid(procOid))
+		ereport(ERROR,
+				(errcode(ERRCODE_UNDEFINED_FUNCTION),
+				 errmsg("function %s does not exist",
+						func_signature_string(procname, 1, NIL, argList))));
+
+	if (get_func_rettype(procOid) != INTERNALOID)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
+				 errmsg("type subscripting function %s must return type %s",
+						NameListToString(procname), "internal")));
+
+	return procOid;
+}
+
 /*
  * Find suitable support functions and opclasses for a range type.
  */
@@ -2240,6 +2290,7 @@ AlterDomainDefault(List *names, Node *defaultRaw)
 							 false,		/* a domain isn't an implicit array */
 							 typTup->typbasetype,
 							 typTup->typcollation,
+							 typTup->typsbsparse,
 							 defaultExpr,
 							 true);		/* Rebuild is true */
 
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index 5a84742..02f7d10 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -65,7 +65,7 @@ static void ExecInitExprSlots(ExprState *state, Node *node);
 static bool get_last_attnums_walker(Node *node, LastAttnumInfo *info);
 static void ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable,
 					PlanState *parent);
-static void ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref,
+static void ExecInitSubscriptingRef(ExprEvalStep *scratch, SubscriptingRef *aref,
 				 PlanState *parent, ExprState *state,
 				 Datum *resv, bool *resnull);
 static bool isAssignmentIndirectionExpr(Expr *expr);
@@ -785,11 +785,11 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state,
 				break;
 			}
 
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *aref = (ArrayRef *) node;
+				SubscriptingRef   *aref = (SubscriptingRef *) node;
 
-				ExecInitArrayRef(&scratch, aref, parent, state, resv, resnull);
+				ExecInitSubscriptingRef(&scratch, aref, parent, state, resv, resnull);
 				break;
 			}
 
@@ -1103,7 +1103,7 @@ ExecInitExprRec(Expr *node, PlanState *parent, ExprState *state,
 					/*
 					 * Use the CaseTestExpr mechanism to pass down the old
 					 * value of the field being replaced; this is needed in
-					 * case the newval is itself a FieldStore or ArrayRef that
+					 * case the newval is itself a FieldStore or SubscriptingRef that
 					 * has to obtain and modify the old value.  It's safe to
 					 * reuse the CASE mechanism because there cannot be a CASE
 					 * between here and where the value would be needed, and a
@@ -2286,31 +2286,40 @@ ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable, PlanState *parent)
 }
 
 /*
- * Prepare evaluation of an ArrayRef expression.
+ * Prepare evaluation of a SubscriptingRef expression.
  */
 static void
-ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
+ExecInitSubscriptingRef(ExprEvalStep *scratch, SubscriptingRef *aref, PlanState *parent,
 				 ExprState *state, Datum *resv, bool *resnull)
 {
 	bool		isAssignment = (aref->refassgnexpr != NULL);
-	ArrayRefState *arefstate = palloc0(sizeof(ArrayRefState));
+	SubscriptingRefState *arefstate = palloc0(sizeof(SubscriptingRefState));
 	List	   *adjust_jumps = NIL;
 	ListCell   *lc;
 	int			i;
+	FmgrInfo   *eval_finfo, *nested_finfo;
 
-	/* Fill constant fields of ArrayRefState */
+	eval_finfo = palloc0(sizeof(FmgrInfo));
+	nested_finfo = palloc0(sizeof(FmgrInfo));
+
+	fmgr_info(aref->refevalfunc, eval_finfo);
+	if (OidIsValid(aref->refnestedfunc))
+	{
+		fmgr_info(aref->refnestedfunc, nested_finfo);
+	}
+
+	scratch->d.sbsref.eval_finfo = eval_finfo;
+	scratch->d.sbsref.nested_finfo = nested_finfo;
+
+	/* Fill constant fields of SubscriptingRefState */
 	arefstate->isassignment = isAssignment;
 	arefstate->refelemtype = aref->refelemtype;
-	arefstate->refattrlength = get_typlen(aref->refarraytype);
-	get_typlenbyvalalign(aref->refelemtype,
-						 &arefstate->refelemlength,
-						 &arefstate->refelembyval,
-						 &arefstate->refelemalign);
+	arefstate->refattrlength = get_typlen(aref->refcontainertype);
 
 	/*
 	 * Evaluate array input.  It's safe to do so into resv/resnull, because we
 	 * won't use that as target for any of the other subexpressions, and it'll
-	 * be overwritten by the final EEOP_ARRAYREF_FETCH/ASSIGN step, which is
+	 * be overwritten by the final EEOP_SBSREF_FETCH/ASSIGN step, which is
 	 * pushed last.
 	 */
 	ExecInitExprRec(aref->refexpr, parent, state, resv, resnull);
@@ -2325,22 +2334,26 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 		scratch->opcode = EEOP_JUMP_IF_NULL;
 		scratch->d.jump.jumpdone = -1;	/* adjust later */
 		ExprEvalPushStep(state, scratch);
+
+		scratch->d.sbsref.eval_finfo = eval_finfo;
+		scratch->d.sbsref.nested_finfo = nested_finfo;
+
 		adjust_jumps = lappend_int(adjust_jumps,
 								   state->steps_len - 1);
 	}
 
 	/* Verify subscript list lengths are within limit */
-	if (list_length(aref->refupperindexpr) > MAXDIM)
+	if (list_length(aref->refupperindexpr) > MAX_SUBSCRIPT_DEPTH)
 		ereport(ERROR,
 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 				 errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
-						list_length(aref->refupperindexpr), MAXDIM)));
+						list_length(aref->refupperindexpr), MAX_SUBSCRIPT_DEPTH)));
 
-	if (list_length(aref->reflowerindexpr) > MAXDIM)
+	if (list_length(aref->reflowerindexpr) > MAX_SUBSCRIPT_DEPTH)
 		ereport(ERROR,
 				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
 				 errmsg("number of array dimensions (%d) exceeds the maximum allowed (%d)",
-						list_length(aref->reflowerindexpr), MAXDIM)));
+						list_length(aref->reflowerindexpr), MAX_SUBSCRIPT_DEPTH)));
 
 	/* Evaluate upper subscripts */
 	i = 0;
@@ -2362,13 +2375,17 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 		ExecInitExprRec(e, parent, state,
 					  &arefstate->subscriptvalue, &arefstate->subscriptnull);
 
-		/* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */
-		scratch->opcode = EEOP_ARRAYREF_SUBSCRIPT;
-		scratch->d.arrayref_subscript.state = arefstate;
-		scratch->d.arrayref_subscript.off = i;
-		scratch->d.arrayref_subscript.isupper = true;
-		scratch->d.arrayref_subscript.jumpdone = -1;	/* adjust later */
+		/* ... and then SBSREF_SUBSCRIPT saves it into step's workspace */
+		scratch->opcode = EEOP_SBSREF_SUBSCRIPT;
+		scratch->d.sbsref_subscript.state = arefstate;
+		scratch->d.sbsref_subscript.off = i;
+		scratch->d.sbsref_subscript.isupper = true;
+		scratch->d.sbsref_subscript.jumpdone = -1;	/* adjust later */
 		ExprEvalPushStep(state, scratch);
+
+		scratch->d.sbsref.eval_finfo = eval_finfo;
+		scratch->d.sbsref.nested_finfo = nested_finfo;
+
 		adjust_jumps = lappend_int(adjust_jumps,
 								   state->steps_len - 1);
 		i++;
@@ -2395,13 +2412,17 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 		ExecInitExprRec(e, parent, state,
 					  &arefstate->subscriptvalue, &arefstate->subscriptnull);
 
-		/* ... and then ARRAYREF_SUBSCRIPT saves it into step's workspace */
-		scratch->opcode = EEOP_ARRAYREF_SUBSCRIPT;
-		scratch->d.arrayref_subscript.state = arefstate;
-		scratch->d.arrayref_subscript.off = i;
-		scratch->d.arrayref_subscript.isupper = false;
-		scratch->d.arrayref_subscript.jumpdone = -1;	/* adjust later */
+		/* ... and then SBSREF_SUBSCRIPT saves it into step's workspace */
+		scratch->opcode = EEOP_SBSREF_SUBSCRIPT;
+		scratch->d.sbsref_subscript.state = arefstate;
+		scratch->d.sbsref_subscript.off = i;
+		scratch->d.sbsref_subscript.isupper = false;
+		scratch->d.sbsref_subscript.jumpdone = -1;	/* adjust later */
 		ExprEvalPushStep(state, scratch);
+
+		scratch->d.sbsref.eval_finfo = eval_finfo;
+		scratch->d.sbsref.nested_finfo = nested_finfo;
+
 		adjust_jumps = lappend_int(adjust_jumps,
 								   state->steps_len - 1);
 		i++;
@@ -2420,7 +2441,7 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 
 		/*
 		 * We might have a nested-assignment situation, in which the
-		 * refassgnexpr is itself a FieldStore or ArrayRef that needs to
+		 * refassgnexpr is itself a FieldStore or SubscriptingRef that needs to
 		 * obtain and modify the previous value of the array element or slice
 		 * being replaced.  If so, we have to extract that value from the
 		 * array and pass it down via the CaseTextExpr mechanism.  It's safe
@@ -2434,12 +2455,15 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 		 */
 		if (isAssignmentIndirectionExpr(aref->refassgnexpr))
 		{
-			scratch->opcode = EEOP_ARRAYREF_OLD;
-			scratch->d.arrayref.state = arefstate;
+			scratch->opcode = EEOP_SBSREF_OLD;
+			scratch->d.sbsref.state = arefstate;
 			ExprEvalPushStep(state, scratch);
+
+			scratch->d.sbsref.eval_finfo = eval_finfo;
+			scratch->d.sbsref.nested_finfo = nested_finfo;
 		}
 
-		/* ARRAYREF_OLD puts extracted value into prevvalue/prevnull */
+		/* SBSREF_OLD puts extracted value into prevvalue/prevnull */
 		save_innermost_caseval = state->innermost_caseval;
 		save_innermost_casenull = state->innermost_casenull;
 		state->innermost_caseval = &arefstate->prevvalue;
@@ -2453,16 +2477,24 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 		state->innermost_casenull = save_innermost_casenull;
 
 		/* and perform the assignment */
-		scratch->opcode = EEOP_ARRAYREF_ASSIGN;
-		scratch->d.arrayref.state = arefstate;
+		scratch->opcode = EEOP_SBSREF_ASSIGN;
+		scratch->d.sbsref.state = arefstate;
 		ExprEvalPushStep(state, scratch);
+
+		scratch->d.sbsref.eval_finfo = eval_finfo;
+		scratch->d.sbsref.nested_finfo = nested_finfo;
+
 	}
 	else
 	{
 		/* array fetch is much simpler */
-		scratch->opcode = EEOP_ARRAYREF_FETCH;
-		scratch->d.arrayref.state = arefstate;
+		scratch->opcode = EEOP_SBSREF_FETCH;
+		scratch->d.sbsref.state = arefstate;
 		ExprEvalPushStep(state, scratch);
+
+		scratch->d.sbsref.eval_finfo = eval_finfo;
+		scratch->d.sbsref.nested_finfo = nested_finfo;
+
 	}
 
 	/* adjust jump targets */
@@ -2470,10 +2502,10 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 	{
 		ExprEvalStep *as = &state->steps[lfirst_int(lc)];
 
-		if (as->opcode == EEOP_ARRAYREF_SUBSCRIPT)
+		if (as->opcode == EEOP_SBSREF_SUBSCRIPT)
 		{
-			Assert(as->d.arrayref_subscript.jumpdone == -1);
-			as->d.arrayref_subscript.jumpdone = state->steps_len;
+			Assert(as->d.sbsref_subscript.jumpdone == -1);
+			as->d.sbsref_subscript.jumpdone = state->steps_len;
 		}
 		else
 		{
@@ -2485,8 +2517,8 @@ ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref, PlanState *parent,
 }
 
 /*
- * Helper for preparing ArrayRef expressions for evaluation: is expr a nested
- * FieldStore or ArrayRef that might need the old element value passed down?
+ * Helper for preparing SubscriptingRef expressions for evaluation: is expr a nested
+ * FieldStore or SubscriptingRef that might need the old element value passed down?
  *
  * (We could use this in FieldStore too, but in that case passing the old
  * value is so cheap there's no need.)
@@ -2503,11 +2535,11 @@ isAssignmentIndirectionExpr(Expr *expr)
 		if (fstore->arg && IsA(fstore->arg, CaseTestExpr))
 			return true;
 	}
-	else if (IsA(expr, ArrayRef))
+	else if (IsA(expr, SubscriptingRef))
 	{
-		ArrayRef   *arrayRef = (ArrayRef *) expr;
+		SubscriptingRef   *sbsRef = (SubscriptingRef *) expr;
 
-		if (arrayRef->refexpr && IsA(arrayRef->refexpr, CaseTestExpr))
+		if (sbsRef->refexpr && IsA(sbsRef->refexpr, CaseTestExpr))
 			return true;
 	}
 	return false;
diff --git a/src/backend/executor/execExprInterp.c b/src/backend/executor/execExprInterp.c
index 982d16c..888ac19 100644
--- a/src/backend/executor/execExprInterp.c
+++ b/src/backend/executor/execExprInterp.c
@@ -58,7 +58,7 @@
  */
 #include "postgres.h"
 
-#include "access/tuptoaster.h"
+#include "access/htup_details.h"
 #include "catalog/pg_type.h"
 #include "executor/execExpr.h"
 #include "executor/nodeSubplan.h"
@@ -346,10 +346,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		&&CASE_EEOP_FIELDSELECT,
 		&&CASE_EEOP_FIELDSTORE_DEFORM,
 		&&CASE_EEOP_FIELDSTORE_FORM,
-		&&CASE_EEOP_ARRAYREF_SUBSCRIPT,
-		&&CASE_EEOP_ARRAYREF_OLD,
-		&&CASE_EEOP_ARRAYREF_ASSIGN,
-		&&CASE_EEOP_ARRAYREF_FETCH,
+		&&CASE_EEOP_SBSREF_SUBSCRIPT,
+		&&CASE_EEOP_SBSREF_OLD,
+		&&CASE_EEOP_SBSREF_ASSIGN,
+		&&CASE_EEOP_SBSREF_FETCH,
 		&&CASE_EEOP_DOMAIN_TESTVAL,
 		&&CASE_EEOP_DOMAIN_NOTNULL,
 		&&CASE_EEOP_DOMAIN_CHECK,
@@ -958,11 +958,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		EEO_CASE(EEOP_BOOLTEST_IS_TRUE)
 		{
 			if (*op->resnull)
-			{
 				*op->resvalue = BoolGetDatum(false);
-				*op->resnull = false;
-			}
-			/* else, input value is the correct output as well */
+			else
+				*op->resvalue = *op->resvalue;
+			*op->resnull = false;
 
 			EEO_NEXT();
 		}
@@ -970,12 +969,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		EEO_CASE(EEOP_BOOLTEST_IS_NOT_TRUE)
 		{
 			if (*op->resnull)
-			{
 				*op->resvalue = BoolGetDatum(true);
-				*op->resnull = false;
-			}
 			else
 				*op->resvalue = BoolGetDatum(!DatumGetBool(*op->resvalue));
+			*op->resnull = false;
 
 			EEO_NEXT();
 		}
@@ -983,12 +980,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		EEO_CASE(EEOP_BOOLTEST_IS_FALSE)
 		{
 			if (*op->resnull)
-			{
 				*op->resvalue = BoolGetDatum(false);
-				*op->resnull = false;
-			}
 			else
 				*op->resvalue = BoolGetDatum(!DatumGetBool(*op->resvalue));
+			*op->resnull = false;
 
 			EEO_NEXT();
 		}
@@ -996,11 +991,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		EEO_CASE(EEOP_BOOLTEST_IS_NOT_FALSE)
 		{
 			if (*op->resnull)
-			{
 				*op->resvalue = BoolGetDatum(true);
-				*op->resnull = false;
-			}
-			/* else, input value is the correct output as well */
+			else
+				*op->resvalue = *op->resvalue;
+			*op->resnull = false;
 
 			EEO_NEXT();
 		}
@@ -1346,43 +1340,43 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 			EEO_NEXT();
 		}
 
-		EEO_CASE(EEOP_ARRAYREF_SUBSCRIPT)
+		EEO_CASE(EEOP_SBSREF_SUBSCRIPT)
 		{
 			/* Process an array subscript */
 
 			/* too complex for an inline implementation */
-			if (ExecEvalArrayRefSubscript(state, op))
+			if (ExecEvalSubscriptingRef(state, op))
 			{
 				EEO_NEXT();
 			}
 			else
 			{
-				/* Subscript is null, short-circuit ArrayRef to NULL */
-				EEO_JUMP(op->d.arrayref_subscript.jumpdone);
+				/* Subscript is null, short-circuit SubscriptingRef to NULL */
+				EEO_JUMP(op->d.sbsref_subscript.jumpdone);
 			}
 		}
 
-		EEO_CASE(EEOP_ARRAYREF_OLD)
+		EEO_CASE(EEOP_SBSREF_OLD)
 		{
 			/*
-			 * Fetch the old value in an arrayref assignment, in case it's
+			 * Fetch the old value in an sbsref assignment, in case it's
 			 * referenced (via a CaseTestExpr) inside the assignment
 			 * expression.
 			 */
 
 			/* too complex for an inline implementation */
-			ExecEvalArrayRefOld(state, op);
+			ExecEvalSubscriptingRefOld(state, op);
 
 			EEO_NEXT();
 		}
 
 		/*
-		 * Perform ArrayRef assignment
+		 * Perform SubscriptingRef assignment
 		 */
-		EEO_CASE(EEOP_ARRAYREF_ASSIGN)
+		EEO_CASE(EEOP_SBSREF_ASSIGN)
 		{
 			/* too complex for an inline implementation */
-			ExecEvalArrayRefAssign(state, op);
+			ExecEvalSubscriptingRefAssign(state, op);
 
 			EEO_NEXT();
 		}
@@ -1390,10 +1384,10 @@ ExecInterpExpr(ExprState *state, ExprContext *econtext, bool *isnull)
 		/*
 		 * Fetch subset of an array.
 		 */
-		EEO_CASE(EEOP_ARRAYREF_FETCH)
+		EEO_CASE(EEOP_SBSREF_FETCH)
 		{
 			/* too complex for an inline implementation */
-			ExecEvalArrayRefFetch(state, op);
+			ExecEvalSubscriptingRefFetch(state, op);
 
 			EEO_NEXT();
 		}
@@ -2554,21 +2548,21 @@ ExecEvalFieldStoreForm(ExprState *state, ExprEvalStep *op, ExprContext *econtext
 }
 
 /*
- * Process a subscript in an ArrayRef expression.
+ * Process a subscript in a SubscriptingRef expression.
  *
  * If subscript is NULL, throw error in assignment case, or in fetch case
  * set result to NULL and return false (instructing caller to skip the rest
- * of the ArrayRef sequence).
+ * of the SubscriptingRef sequence).
  *
  * Subscript expression result is in subscriptvalue/subscriptnull.
  * On success, integer subscript value has been saved in upperindex[] or
  * lowerindex[] for use later.
  */
 bool
-ExecEvalArrayRefSubscript(ExprState *state, ExprEvalStep *op)
+ExecEvalSubscriptingRef(ExprState *state, ExprEvalStep *op)
 {
-	ArrayRefState *arefstate = op->d.arrayref_subscript.state;
-	int		   *indexes;
+	SubscriptingRefState *arefstate = op->d.sbsref_subscript.state;
+	Datum		*indexes;
 	int			off;
 
 	/* If any index expr yields NULL, result is NULL or error */
@@ -2583,68 +2577,43 @@ ExecEvalArrayRefSubscript(ExprState *state, ExprEvalStep *op)
 	}
 
 	/* Convert datum to int, save in appropriate place */
-	if (op->d.arrayref_subscript.isupper)
-		indexes = arefstate->upperindex;
+	if (op->d.sbsref_subscript.isupper)
+		indexes = arefstate->upper;
 	else
-		indexes = arefstate->lowerindex;
-	off = op->d.arrayref_subscript.off;
+		indexes = arefstate->lower;
+	off = op->d.sbsref_subscript.off;
 
-	indexes[off] = DatumGetInt32(arefstate->subscriptvalue);
+	indexes[off] = arefstate->subscriptvalue;
 
 	return true;
 }
 
 /*
- * Evaluate ArrayRef fetch.
+ * Evaluate SubscriptingRef fetch.
  *
  * Source array is in step's result variable.
  */
 void
-ExecEvalArrayRefFetch(ExprState *state, ExprEvalStep *op)
+ExecEvalSubscriptingRefFetch(ExprState *state, ExprEvalStep *op)
 {
-	ArrayRefState *arefstate = op->d.arrayref.state;
-
 	/* Should not get here if source array (or any subscript) is null */
 	Assert(!(*op->resnull));
 
-	if (arefstate->numlower == 0)
-	{
-		/* Scalar case */
-		*op->resvalue = array_get_element(*op->resvalue,
-										  arefstate->numupper,
-										  arefstate->upperindex,
-										  arefstate->refattrlength,
-										  arefstate->refelemlength,
-										  arefstate->refelembyval,
-										  arefstate->refelemalign,
-										  op->resnull);
-	}
-	else
-	{
-		/* Slice case */
-		*op->resvalue = array_get_slice(*op->resvalue,
-										arefstate->numupper,
-										arefstate->upperindex,
-										arefstate->lowerindex,
-										arefstate->upperprovided,
-										arefstate->lowerprovided,
-										arefstate->refattrlength,
-										arefstate->refelemlength,
-										arefstate->refelembyval,
-										arefstate->refelemalign);
-	}
+	*op->resvalue = FunctionCall2(op->d.sbsref.eval_finfo,
+				  PointerGetDatum(*op->resvalue),
+				  PointerGetDatum(op));
 }
 
 /*
- * Compute old array element/slice value for an ArrayRef assignment
+ * Compute old array element/slice value for a SubscriptingRef assignment
  * expression.  Will only be generated if the new-value subexpression
- * contains ArrayRef or FieldStore.  The value is stored into the
- * ArrayRefState's prevvalue/prevnull fields.
+ * contains SubscriptingRef or FieldStore.  The value is stored into the
+ * SubscriptingRefState's prevvalue/prevnull fields.
  */
 void
-ExecEvalArrayRefOld(ExprState *state, ExprEvalStep *op)
+ExecEvalSubscriptingRefOld(ExprState *state, ExprEvalStep *op)
 {
-	ArrayRefState *arefstate = op->d.arrayref.state;
+	SubscriptingRefState *arefstate = op->d.sbsref.state;
 
 	if (*op->resnull)
 	{
@@ -2652,99 +2621,42 @@ ExecEvalArrayRefOld(ExprState *state, ExprEvalStep *op)
 		arefstate->prevvalue = (Datum) 0;
 		arefstate->prevnull = true;
 	}
-	else if (arefstate->numlower == 0)
-	{
-		/* Scalar case */
-		arefstate->prevvalue = array_get_element(*op->resvalue,
-												 arefstate->numupper,
-												 arefstate->upperindex,
-												 arefstate->refattrlength,
-												 arefstate->refelemlength,
-												 arefstate->refelembyval,
-												 arefstate->refelemalign,
-												 &arefstate->prevnull);
-	}
 	else
 	{
-		/* Slice case */
-		/* this is currently unreachable */
-		arefstate->prevvalue = array_get_slice(*op->resvalue,
-											   arefstate->numupper,
-											   arefstate->upperindex,
-											   arefstate->lowerindex,
-											   arefstate->upperprovided,
-											   arefstate->lowerprovided,
-											   arefstate->refattrlength,
-											   arefstate->refelemlength,
-											   arefstate->refelembyval,
-											   arefstate->refelemalign);
-		arefstate->prevnull = false;
+		arefstate->prevvalue = FunctionCall2(op->d.sbsref.nested_finfo,
+					  PointerGetDatum(*op->resvalue),
+					  PointerGetDatum(op));
+
+		if (arefstate->numlower != 0)
+			arefstate->prevnull = false;
+
 	}
 }
 
 /*
- * Evaluate ArrayRef assignment.
+ * Evaluate SubscriptingRef assignment.
  *
  * Input array (possibly null) is in result area, replacement value is in
- * ArrayRefState's replacevalue/replacenull.
+ * SubscriptingRefState's replacevalue/replacenull.
  */
 void
-ExecEvalArrayRefAssign(ExprState *state, ExprEvalStep *op)
+ExecEvalSubscriptingRefAssign(ExprState *state, ExprEvalStep *op)
 {
-	ArrayRefState *arefstate = op->d.arrayref.state;
-
+	SubscriptingRefState *arefstate = op->d.sbsref.state;
 	/*
 	 * For an assignment to a fixed-length array type, both the original array
 	 * and the value to be assigned into it must be non-NULL, else we punt and
 	 * return the original array.
 	 */
-	if (arefstate->refattrlength > 0)	/* fixed-length array? */
+	if (arefstate->refattrlength > 0)
 	{
 		if (*op->resnull || arefstate->replacenull)
 			return;
 	}
 
-	/*
-	 * For assignment to varlena arrays, we handle a NULL original array by
-	 * substituting an empty (zero-dimensional) array; insertion of the new
-	 * element will result in a singleton array value.  It does not matter
-	 * whether the new element is NULL.
-	 */
-	if (*op->resnull)
-	{
-		*op->resvalue = PointerGetDatum(construct_empty_array(arefstate->refelemtype));
-		*op->resnull = false;
-	}
-
-	if (arefstate->numlower == 0)
-	{
-		/* Scalar case */
-		*op->resvalue = array_set_element(*op->resvalue,
-										  arefstate->numupper,
-										  arefstate->upperindex,
-										  arefstate->replacevalue,
-										  arefstate->replacenull,
-										  arefstate->refattrlength,
-										  arefstate->refelemlength,
-										  arefstate->refelembyval,
-										  arefstate->refelemalign);
-	}
-	else
-	{
-		/* Slice case */
-		*op->resvalue = array_set_slice(*op->resvalue,
-										arefstate->numupper,
-										arefstate->upperindex,
-										arefstate->lowerindex,
-										arefstate->upperprovided,
-										arefstate->lowerprovided,
-										arefstate->replacevalue,
-										arefstate->replacenull,
-										arefstate->refattrlength,
-										arefstate->refelemlength,
-										arefstate->refelembyval,
-										arefstate->refelemalign);
-	}
+	*op->resvalue = FunctionCall2(op->d.sbsref.eval_finfo,
+				  PointerGetDatum(*op->resvalue),
+				  PointerGetDatum(op));
 }
 
 /*
@@ -3508,24 +3420,24 @@ ExecEvalWholeRowVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext)
 	}
 
 	/*
-	 * Build a composite datum, making sure any toasted fields get detoasted.
+	 * Copy the slot tuple and make sure any toasted fields get detoasted.
 	 *
-	 * (Note: it is critical that we not change the slot's state here.)
+	 * (The intermediate copy is a tad annoying here, but we currently have no
+	 * primitive that will do the right thing.  Note it is critical that we
+	 * not change the slot's state, so we can't use ExecFetchSlotTupleDatum.)
 	 */
-	tuple = toast_build_flattened_tuple(slot->tts_tupleDescriptor,
-										slot->tts_values,
-										slot->tts_isnull);
-	dtuple = tuple->t_data;
+	tuple = ExecCopySlotTuple(slot);
+	dtuple = (HeapTupleHeader)
+		DatumGetPointer(heap_copy_tuple_as_datum(tuple,
+												 slot->tts_tupleDescriptor));
+	heap_freetuple(tuple);
 
 	/*
 	 * Label the datum with the composite type info we identified before.
-	 *
-	 * (Note: we could skip doing this by passing op->d.wholerow.tupdesc to
-	 * the tuple build step; but that seems a tad risky so let's not.)
 	 */
 	HeapTupleHeaderSetTypeId(dtuple, op->d.wholerow.tupdesc->tdtypeid);
 	HeapTupleHeaderSetTypMod(dtuple, op->d.wholerow.tupdesc->tdtypmod);
 
-	*op->resvalue = PointerGetDatum(dtuple);
 	*op->resnull = false;
+	*op->resvalue = PointerGetDatum(dtuple);
 }
diff --git a/src/backend/executor/nodeAgg.c b/src/backend/executor/nodeAgg.c
index ef35da6..471acc4 100644
--- a/src/backend/executor/nodeAgg.c
+++ b/src/backend/executor/nodeAgg.c
@@ -122,19 +122,12 @@
  *	  specific).
  *
  *	  Where more complex grouping sets are used, we break them down into
- *	  "phases", where each phase has a different sort order (except phase 0
- *	  which is reserved for hashing).  During each phase but the last, the
- *	  input tuples are additionally stored in a tuplesort which is keyed to the
- *	  next phase's sort order; during each phase but the first, the input
- *	  tuples are drawn from the previously sorted data.  (The sorting of the
- *	  data for the first phase is handled by the planner, as it might be
- *	  satisfied by underlying nodes.)
- *
- *	  Hashing can be mixed with sorted grouping.  To do this, we have an
- *	  AGG_MIXED strategy that populates the hashtables during the first sorted
- *	  phase, and switches to reading them out after completing all sort phases.
- *	  We can also support AGG_HASHED with multiple hash tables and no sorting
- *	  at all.
+ *	  "phases", where each phase has a different sort order.  During each
+ *	  phase but the last, the input tuples are additionally stored in a
+ *	  tuplesort which is keyed to the next phase's sort order; during each
+ *	  phase but the first, the input tuples are drawn from the previously
+ *	  sorted data.  (The sorting of the data for the first phase is handled by
+ *	  the planner, as it might be satisfied by underlying nodes.)
  *
  *	  From the perspective of aggregate transition and final functions, the
  *	  only issue regarding grouping sets is this: a single call site (flinfo)
@@ -146,54 +139,7 @@
  *	  sensitive to the grouping set for which the aggregate function is
  *	  currently being called.
  *
- *	  Plan structure:
- *
- *	  What we get from the planner is actually one "real" Agg node which is
- *	  part of the plan tree proper, but which optionally has an additional list
- *	  of Agg nodes hung off the side via the "chain" field.  This is because an
- *	  Agg node happens to be a convenient representation of all the data we
- *	  need for grouping sets.
- *
- *	  For many purposes, we treat the "real" node as if it were just the first
- *	  node in the chain.  The chain must be ordered such that hashed entries
- *	  come before sorted/plain entries; the real node is marked AGG_MIXED if
- *	  there are both types present (in which case the real node describes one
- *	  of the hashed groupings, other AGG_HASHED nodes may optionally follow in
- *	  the chain, followed in turn by AGG_SORTED or (one) AGG_PLAIN node).  If
- *	  the real node is marked AGG_HASHED or AGG_SORTED, then all the chained
- *	  nodes must be of the same type; if it is AGG_PLAIN, there can be no
- *	  chained nodes.
- *
- *	  We collect all hashed nodes into a single "phase", numbered 0, and create
- *	  a sorted phase (numbered 1..n) for each AGG_SORTED or AGG_PLAIN node.
- *	  Phase 0 is allocated even if there are no hashes, but remains unused in
- *	  that case.
- *
- *	  AGG_HASHED nodes actually refer to only a single grouping set each,
- *	  because for each hashed grouping we need a separate grpColIdx and
- *	  numGroups estimate.  AGG_SORTED nodes represent a "rollup", a list of
- *	  grouping sets that share a sort order.  Each AGG_SORTED node other than
- *	  the first one has an associated Sort node which describes the sort order
- *	  to be used; the first sorted node takes its input from the outer subtree,
- *	  which the planner has already arranged to provide ordered data.
- *
- *	  Memory and ExprContext usage:
- *
- *	  Because we're accumulating aggregate values across input rows, we need to
- *	  use more memory contexts than just simple input/output tuple contexts.
- *	  In fact, for a rollup, we need a separate context for each grouping set
- *	  so that we can reset the inner (finer-grained) aggregates on their group
- *	  boundaries while continuing to accumulate values for outer
- *	  (coarser-grained) groupings.  On top of this, we might be simultaneously
- *	  populating hashtables; however, we only need one context for all the
- *	  hashtables.
- *
- *	  So we create an array, aggcontexts, with an ExprContext for each grouping
- *	  set in the largest rollup that we're going to process, and use the
- *	  per-tuple memory context of those ExprContexts to store the aggregate
- *	  transition values.  hashcontext is the single context created to support
- *	  all hash tables.
- *
+ *	  TODO: AGG_HASHED doesn't support multiple grouping sets yet.
  *
  * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
@@ -486,7 +432,6 @@ typedef struct AggStatePerGroupData
  */
 typedef struct AggStatePerPhaseData
 {
-	AggStrategy aggstrategy;	/* strategy for this phase */
 	int			numsets;		/* number of grouping sets (or 0) */
 	int		   *gset_lengths;	/* lengths of grouping sets */
 	Bitmapset **grouped_cols;	/* column groupings for rollup */
@@ -495,30 +440,7 @@ typedef struct AggStatePerPhaseData
 	Sort	   *sortnode;		/* Sort node for input ordering for phase */
 }	AggStatePerPhaseData;
 
-/*
- * AggStatePerHashData - per-hashtable state
- *
- * When doing grouping sets with hashing, we have one of these for each
- * grouping set. (When doing hashing without grouping sets, we have just one of
- * them.)
- */
-typedef struct AggStatePerHashData
-{
-	TupleHashTable hashtable;	/* hash table with one entry per group */
-	TupleHashIterator hashiter; /* for iterating through hash table */
-	TupleTableSlot *hashslot;	/* slot for loading hash table */
-	FmgrInfo   *hashfunctions;	/* per-grouping-field hash fns */
-	FmgrInfo   *eqfunctions;	/* per-grouping-field equality fns */
-	int			numCols;		/* number of hash key columns */
-	int			numhashGrpCols; /* number of columns in hash table */
-	int			largestGrpColIdx;		/* largest col required for hashing */
-	AttrNumber *hashGrpColIdxInput;		/* hash col indices in input slot */
-	AttrNumber *hashGrpColIdxHash;		/* indices in hashtbl tuples */
-	Agg		   *aggnode;		/* original Agg node, for numGroups etc. */
-} AggStatePerHashData;
-
 
-static void select_current_set(AggState *aggstate, int setno, bool is_hash);
 static void initialize_phase(AggState *aggstate, int newphase);
 static TupleTableSlot *fetch_input_tuple(AggState *aggstate);
 static void initialize_aggregates(AggState *aggstate,
@@ -527,8 +449,7 @@ static void initialize_aggregates(AggState *aggstate,
 static void advance_transition_function(AggState *aggstate,
 							AggStatePerTrans pertrans,
 							AggStatePerGroup pergroupstate);
-static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup,
-				   AggStatePerGroup *pergroups);
+static void advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup);
 static void advance_combine_function(AggState *aggstate,
 						 AggStatePerTrans pertrans,
 						 AggStatePerGroup pergroupstate);
@@ -552,13 +473,14 @@ static void prepare_projection_slot(AggState *aggstate,
 						int currentSet);
 static void finalize_aggregates(AggState *aggstate,
 					AggStatePerAgg peragg,
-					AggStatePerGroup pergroup);
+					AggStatePerGroup pergroup,
+					int currentSet);
 static TupleTableSlot *project_aggregates(AggState *aggstate);
 static Bitmapset *find_unaggregated_cols(AggState *aggstate);
 static bool find_unaggregated_cols_walker(Node *node, Bitmapset **colnos);
 static void build_hash_table(AggState *aggstate);
-static TupleHashEntryData *lookup_hash_entry(AggState *aggstate);
-static AggStatePerGroup *lookup_hash_entries(AggState *aggstate);
+static TupleHashEntryData *lookup_hash_entry(AggState *aggstate,
+				  TupleTableSlot *inputslot);
 static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
 static void agg_fill_hash_table(AggState *aggstate);
 static TupleTableSlot *agg_retrieve_hash_table(AggState *aggstate);
@@ -579,31 +501,13 @@ static int find_compatible_pertrans(AggState *aggstate, Aggref *newagg,
 
 
 /*
- * Select the current grouping set; affects current_set and
- * curaggcontext.
- */
-static void
-select_current_set(AggState *aggstate, int setno, bool is_hash)
-{
-	if (is_hash)
-		aggstate->curaggcontext = aggstate->hashcontext;
-	else
-		aggstate->curaggcontext = aggstate->aggcontexts[setno];
-
-	aggstate->current_set = setno;
-}
-
-/*
- * Switch to phase "newphase", which must either be 0 or 1 (to reset) or
+ * Switch to phase "newphase", which must either be 0 (to reset) or
  * current_phase + 1. Juggle the tuplesorts accordingly.
- *
- * Phase 0 is for hashing, which we currently handle last in the AGG_MIXED
- * case, so when entering phase 0, all we need to do is drop open sorts.
  */
 static void
 initialize_phase(AggState *aggstate, int newphase)
 {
-	Assert(newphase <= 1 || newphase == aggstate->current_phase + 1);
+	Assert(newphase == 0 || newphase == aggstate->current_phase + 1);
 
 	/*
 	 * Whatever the previous state, we're now done with whatever input
@@ -615,7 +519,7 @@ initialize_phase(AggState *aggstate, int newphase)
 		aggstate->sort_in = NULL;
 	}
 
-	if (newphase <= 1)
+	if (newphase == 0)
 	{
 		/*
 		 * Discard any existing output tuplesort.
@@ -642,7 +546,7 @@ initialize_phase(AggState *aggstate, int newphase)
 	 * If this isn't the last phase, we need to sort appropriately for the
 	 * next phase in sequence.
 	 */
-	if (newphase > 0 && newphase < aggstate->numphases - 1)
+	if (newphase < aggstate->numphases - 1)
 	{
 		Sort	   *sortnode = aggstate->phases[newphase + 1].sortnode;
 		PlanState  *outerNode = outerPlanState(aggstate);
@@ -663,7 +567,7 @@ initialize_phase(AggState *aggstate, int newphase)
 }
 
 /*
- * Fetch a tuple from either the outer plan (for phase 1) or from the sorter
+ * Fetch a tuple from either the outer plan (for phase 0) or from the sorter
  * populated by the previous phase.  Copy it to the sorter for the next phase
  * if any.
  */
@@ -691,8 +595,8 @@ fetch_input_tuple(AggState *aggstate)
 /*
  * (Re)Initialize an individual aggregate.
  *
- * This function handles only one grouping set, already set in
- * aggstate->current_set.
+ * This function handles only one grouping set (already set in
+ * aggstate->current_set).
  *
  * When called, CurrentMemoryContext should be the per-query context.
  */
@@ -749,7 +653,7 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
 		MemoryContext oldContext;
 
 		oldContext = MemoryContextSwitchTo(
-							 aggstate->curaggcontext->ecxt_per_tuple_memory);
+		aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 		pergroupstate->transValue = datumCopy(pertrans->initValue,
 											  pertrans->transtypeByVal,
 											  pertrans->transtypeLen);
@@ -772,9 +676,8 @@ initialize_aggregate(AggState *aggstate, AggStatePerTrans pertrans,
  *
  * If there are multiple grouping sets, we initialize only the first numReset
  * of them (the grouping sets are ordered so that the most specific one, which
- * is reset most often, is first). As a convenience, if numReset is 0, we
- * reinitialize all sets. numReset is -1 to initialize a hashtable entry, in
- * which case the caller must have used select_current_set appropriately.
+ * is reset most often, is first). As a convenience, if numReset is < 1, we
+ * reinitialize all sets.
  *
  * When called, CurrentMemoryContext should be the per-query context.
  */
@@ -786,36 +689,24 @@ initialize_aggregates(AggState *aggstate,
 	int			transno;
 	int			numGroupingSets = Max(aggstate->phase->numsets, 1);
 	int			setno = 0;
-	int			numTrans = aggstate->numtrans;
 	AggStatePerTrans transstates = aggstate->pertrans;
 
-	if (numReset == 0)
+	if (numReset < 1)
 		numReset = numGroupingSets;
 
-	for (transno = 0; transno < numTrans; transno++)
+	for (transno = 0; transno < aggstate->numtrans; transno++)
 	{
 		AggStatePerTrans pertrans = &transstates[transno];
 
-		if (numReset < 0)
+		for (setno = 0; setno < numReset; setno++)
 		{
 			AggStatePerGroup pergroupstate;
 
-			pergroupstate = &pergroup[transno];
+			pergroupstate = &pergroup[transno + (setno * (aggstate->numtrans))];
 
-			initialize_aggregate(aggstate, pertrans, pergroupstate);
-		}
-		else
-		{
-			for (setno = 0; setno < numReset; setno++)
-			{
-				AggStatePerGroup pergroupstate;
+			aggstate->current_set = setno;
 
-				pergroupstate = &pergroup[transno + (setno * numTrans)];
-
-				select_current_set(aggstate, setno, false);
-
-				initialize_aggregate(aggstate, pertrans, pergroupstate);
-			}
+			initialize_aggregate(aggstate, pertrans, pergroupstate);
 		}
 	}
 }
@@ -866,7 +757,7 @@ advance_transition_function(AggState *aggstate,
 			 * do not need to pfree the old transValue, since it's NULL.
 			 */
 			oldContext = MemoryContextSwitchTo(
-							 aggstate->curaggcontext->ecxt_per_tuple_memory);
+											   aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 			pergroupstate->transValue = datumCopy(fcinfo->arg[1],
 												  pertrans->transtypeByVal,
 												  pertrans->transtypeLen);
@@ -916,7 +807,7 @@ advance_transition_function(AggState *aggstate,
 	{
 		if (!fcinfo->isnull)
 		{
-			MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
+			MemoryContextSwitchTo(aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 			if (DatumIsReadWriteExpandedObject(newVal,
 											   false,
 											   pertrans->transtypeLen) &&
@@ -947,21 +838,17 @@ advance_transition_function(AggState *aggstate,
 /*
  * Advance each aggregate transition state for one input tuple.  The input
  * tuple has been stored in tmpcontext->ecxt_outertuple, so that it is
- * accessible to ExecEvalExpr.
- *
- * We have two sets of transition states to handle: one for sorted aggregation
- * and one for hashed; we do them both here, to avoid multiple evaluation of
- * the inputs.
+ * accessible to ExecEvalExpr.  pergroup is the array of per-group structs to
+ * use (this might be in a hashtable entry).
  *
  * When called, CurrentMemoryContext should be the per-query context.
  */
 static void
-advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup, AggStatePerGroup *pergroups)
+advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 {
 	int			transno;
 	int			setno = 0;
 	int			numGroupingSets = Max(aggstate->phase->numsets, 1);
-	int			numHashes = aggstate->num_hashes;
 	int			numTrans = aggstate->numtrans;
 	TupleTableSlot *slot = aggstate->evalslot;
 
@@ -993,7 +880,6 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup, AggStatePerGro
 		{
 			/* DISTINCT and/or ORDER BY case */
 			Assert(slot->tts_nvalid >= (pertrans->numInputs + inputoff));
-			Assert(!pergroups);
 
 			/*
 			 * If the transfn is strict, we want to check for nullity before
@@ -1054,36 +940,13 @@ advance_aggregates(AggState *aggstate, AggStatePerGroup pergroup, AggStatePerGro
 				fcinfo->argnull[i + 1] = slot->tts_isnull[i + inputoff];
 			}
 
-			if (pergroup)
-			{
-				/* advance transition states for ordered grouping */
-
-				for (setno = 0; setno < numGroupingSets; setno++)
-				{
-					AggStatePerGroup pergroupstate;
-
-					select_current_set(aggstate, setno, false);
-
-					pergroupstate = &pergroup[transno + (setno * numTrans)];
-
-					advance_transition_function(aggstate, pertrans, pergroupstate);
-				}
-			}
-
-			if (pergroups)
+			for (setno = 0; setno < numGroupingSets; setno++)
 			{
-				/* advance transition states for hashed grouping */
-
-				for (setno = 0; setno < numHashes; setno++)
-				{
-					AggStatePerGroup pergroupstate;
-
-					select_current_set(aggstate, setno, true);
+				AggStatePerGroup pergroupstate = &pergroup[transno + (setno * numTrans)];
 
-					pergroupstate = &pergroups[setno][transno];
+				aggstate->current_set = setno;
 
-					advance_transition_function(aggstate, pertrans, pergroupstate);
-				}
+				advance_transition_function(aggstate, pertrans, pergroupstate);
 			}
 		}
 	}
@@ -1104,7 +967,7 @@ combine_aggregates(AggState *aggstate, AggStatePerGroup pergroup)
 	TupleTableSlot *slot;
 
 	/* combine not supported with grouping sets */
-	Assert(aggstate->phase->numsets <= 1);
+	Assert(aggstate->phase->numsets == 0);
 
 	/* compute input for all aggregates */
 	slot = ExecProject(aggstate->evalproj);
@@ -1197,7 +1060,7 @@ advance_combine_function(AggState *aggstate,
 			if (!pertrans->transtypeByVal)
 			{
 				oldContext = MemoryContextSwitchTo(
-							 aggstate->curaggcontext->ecxt_per_tuple_memory);
+												   aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 				pergroupstate->transValue = datumCopy(fcinfo->arg[1],
 													pertrans->transtypeByVal,
 													  pertrans->transtypeLen);
@@ -1242,7 +1105,7 @@ advance_combine_function(AggState *aggstate,
 	{
 		if (!fcinfo->isnull)
 		{
-			MemoryContextSwitchTo(aggstate->curaggcontext->ecxt_per_tuple_memory);
+			MemoryContextSwitchTo(aggstate->aggcontexts[aggstate->current_set]->ecxt_per_tuple_memory);
 			if (DatumIsReadWriteExpandedObject(newVal,
 											   false,
 											   pertrans->transtypeLen) &&
@@ -1696,16 +1559,15 @@ prepare_projection_slot(AggState *aggstate, TupleTableSlot *slot, int currentSet
 /*
  * Compute the final value of all aggregates for one group.
  *
- * This function handles only one grouping set at a time, which the caller must
- * have selected.  It's also the caller's responsibility to adjust the supplied
- * pergroup parameter to point to the current set's transvalues.
+ * This function handles only one grouping set at a time.
  *
  * Results are stored in the output econtext aggvalues/aggnulls.
  */
 static void
 finalize_aggregates(AggState *aggstate,
 					AggStatePerAgg peraggs,
-					AggStatePerGroup pergroup)
+					AggStatePerGroup pergroup,
+					int currentSet)
 {
 	ExprContext *econtext = aggstate->ss.ps.ps_ExprContext;
 	Datum	   *aggvalues = econtext->ecxt_aggvalues;
@@ -1713,6 +1575,11 @@ finalize_aggregates(AggState *aggstate,
 	int			aggno;
 	int			transno;
 
+	Assert(currentSet == 0 ||
+		   ((Agg *) aggstate->ss.ps.plan)->aggstrategy != AGG_HASHED);
+
+	aggstate->current_set = currentSet;
+
 	/*
 	 * If there were any DISTINCT and/or ORDER BY aggregates, sort their
 	 * inputs and run the transition functions.
@@ -1722,12 +1589,11 @@ finalize_aggregates(AggState *aggstate,
 		AggStatePerTrans pertrans = &aggstate->pertrans[transno];
 		AggStatePerGroup pergroupstate;
 
-		pergroupstate = &pergroup[transno];
+		pergroupstate = &pergroup[transno + (currentSet * (aggstate->numtrans))];
 
 		if (pertrans->numSortCols > 0)
 		{
-			Assert(aggstate->aggstrategy != AGG_HASHED &&
-				   aggstate->aggstrategy != AGG_MIXED);
+			Assert(((Agg *) aggstate->ss.ps.plan)->aggstrategy != AGG_HASHED);
 
 			if (pertrans->numInputs == 1)
 				process_ordered_aggregate_single(aggstate,
@@ -1749,7 +1615,7 @@ finalize_aggregates(AggState *aggstate,
 		int			transno = peragg->transno;
 		AggStatePerGroup pergroupstate;
 
-		pergroupstate = &pergroup[transno];
+		pergroupstate = &pergroup[transno + (currentSet * (aggstate->numtrans))];
 
 		if (DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit))
 			finalize_partialaggregate(aggstate, peragg, pergroupstate,
@@ -1831,7 +1697,7 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
 }
 
 /*
- * Initialize the hash table(s) to empty.
+ * Initialize the hash table to empty.
  *
  * To implement hashed aggregation, we need a hashtable that stores a
  * representative tuple and an array of AggStatePerGroup structs for each
@@ -1839,40 +1705,29 @@ find_unaggregated_cols_walker(Node *node, Bitmapset **colnos)
  * GROUP BY columns.  The per-group data is allocated in lookup_hash_entry(),
  * for each entry.
  *
- * We have a separate hashtable and associated perhash data structure for each
- * grouping set for which we're doing hashing.
- *
- * The hash tables always live in the hashcontext's per-tuple memory context
- * (there is only one of these for all tables together, since they are all
- * reset at the same time).
+ * The hash table always lives in the aggcontext memory context.
  */
 static void
 build_hash_table(AggState *aggstate)
 {
+	Agg		   *node = (Agg *) aggstate->ss.ps.plan;
 	MemoryContext tmpmem = aggstate->tmpcontext->ecxt_per_tuple_memory;
 	Size		additionalsize;
-	int			i;
 
-	Assert(aggstate->aggstrategy == AGG_HASHED || aggstate->aggstrategy == AGG_MIXED);
+	Assert(node->aggstrategy == AGG_HASHED);
+	Assert(node->numGroups > 0);
 
-	additionalsize = aggstate->numtrans * sizeof(AggStatePerGroupData);
+	additionalsize = aggstate->numaggs * sizeof(AggStatePerGroupData);
 
-	for (i = 0; i < aggstate->num_hashes; ++i)
-	{
-		AggStatePerHash perhash = &aggstate->perhash[i];
-
-		Assert(perhash->aggnode->numGroups > 0);
-
-		perhash->hashtable = BuildTupleHashTable(perhash->numCols,
-												 perhash->hashGrpColIdxHash,
-												 perhash->eqfunctions,
-												 perhash->hashfunctions,
-												 perhash->aggnode->numGroups,
-												 additionalsize,
-								aggstate->hashcontext->ecxt_per_tuple_memory,
-												 tmpmem,
+	aggstate->hashtable = BuildTupleHashTable(node->numCols,
+											  aggstate->hashGrpColIdxHash,
+											  aggstate->phase->eqfunctions,
+											  aggstate->hashfunctions,
+											  node->numGroups,
+											  additionalsize,
+							 aggstate->aggcontexts[0]->ecxt_per_tuple_memory,
+											  tmpmem,
 								  DO_AGGSPLIT_SKIPFINAL(aggstate->aggsplit));
-	}
 }
 
 /*
@@ -1895,98 +1750,72 @@ build_hash_table(AggState *aggstate)
  * the array is preserved over ExecReScanAgg, so we allocate it in the
  * per-query context (unlike the hash table itself).
  */
-static void
+static List *
 find_hash_columns(AggState *aggstate)
 {
-	Bitmapset  *base_colnos;
+	Agg		   *node = (Agg *) aggstate->ss.ps.plan;
+	Bitmapset  *colnos;
+	List	   *collist;
+	TupleDesc	hashDesc;
 	List	   *outerTlist = outerPlanState(aggstate)->plan->targetlist;
-	int			numHashes = aggstate->num_hashes;
-	int			j;
-
-	/* Find Vars that will be needed in tlist and qual */
-	base_colnos = find_unaggregated_cols(aggstate);
-
-	for (j = 0; j < numHashes; ++j)
-	{
-		AggStatePerHash perhash = &aggstate->perhash[j];
-		Bitmapset  *colnos = bms_copy(base_colnos);
-		AttrNumber *grpColIdx = perhash->aggnode->grpColIdx;
-		List	   *hashTlist = NIL;
-		TupleDesc	hashDesc;
-		int			i;
-
-		perhash->largestGrpColIdx = 0;
-
-		/*
-		 * If we're doing grouping sets, then some Vars might be referenced in
-		 * tlist/qual for the benefit of other grouping sets, but not needed
-		 * when hashing; i.e. prepare_projection_slot will null them out, so
-		 * there'd be no point storing them.  Use prepare_projection_slot's
-		 * logic to determine which.
-		 */
-		if (aggstate->phases[0].grouped_cols)
-		{
-			Bitmapset  *grouped_cols = aggstate->phases[0].grouped_cols[j];
-			ListCell   *lc;
+	List		*hashTlist = NIL;
+	int			i;
 
-			foreach(lc, aggstate->all_grouped_cols)
-			{
-				int			attnum = lfirst_int(lc);
+	aggstate->largestGrpColIdx = 0;
 
-				if (!bms_is_member(attnum, grouped_cols))
-					colnos = bms_del_member(colnos, attnum);
-			}
-		}
-		/* Add in all the grouping columns */
-		for (i = 0; i < perhash->numCols; i++)
-			colnos = bms_add_member(colnos, grpColIdx[i]);
+	/* Find Vars that will be needed in tlist and qual */
+	colnos = find_unaggregated_cols(aggstate);
+	/* Add in all the grouping columns */
+	for (i = 0; i < node->numCols; i++)
+		colnos = bms_add_member(colnos, node->grpColIdx[i]);
+	/* Convert to list, using lcons so largest element ends up first */
+	collist = NIL;
 
-		perhash->hashGrpColIdxInput =
-			palloc(bms_num_members(colnos) * sizeof(AttrNumber));
-		perhash->hashGrpColIdxHash =
-			palloc(perhash->numCols * sizeof(AttrNumber));
+	aggstate->hashGrpColIdxInput =
+		palloc(bms_num_members(colnos) * sizeof(AttrNumber));
+	aggstate->hashGrpColIdxHash =
+		palloc(node->numCols * sizeof(AttrNumber));
 
-		/*
-		 * First build mapping for columns directly hashed. These are the
-		 * first, because they'll be accessed when computing hash values and
-		 * comparing tuples for exact matches. We also build simple mapping
-		 * for execGrouping, so it knows where to find the to-be-hashed /
-		 * compared columns in the input.
-		 */
-		for (i = 0; i < perhash->numCols; i++)
-		{
-			perhash->hashGrpColIdxInput[i] = grpColIdx[i];
-			perhash->hashGrpColIdxHash[i] = i + 1;
-			perhash->numhashGrpCols++;
-			/* delete already mapped columns */
-			bms_del_member(colnos, grpColIdx[i]);
-		}
+	/*
+	 * First build mapping for columns directly hashed. These are the first,
+	 * because they'll be accessed when computing hash values and comparing
+	 * tuples for exact matches. We also build simple mapping for
+	 * execGrouping, so it knows where to find the to-be-hashed / compared
+	 * columns in the input.
+	 */
+	for (i = 0; i < node->numCols; i++)
+	{
+		aggstate->hashGrpColIdxInput[i] = node->grpColIdx[i];
+		aggstate->hashGrpColIdxHash[i] = i + 1;
+		aggstate->numhashGrpCols++;
+		/* delete already mapped columns */
+		bms_del_member(colnos, node->grpColIdx[i]);
+	}
 
-		/* and add the remaining columns */
-		while ((i = bms_first_member(colnos)) >= 0)
-		{
-			perhash->hashGrpColIdxInput[perhash->numhashGrpCols] = i;
-			perhash->numhashGrpCols++;
-		}
+	/* and add the remaining columns */
+	while ((i = bms_first_member(colnos)) >= 0)
+	{
+		aggstate->hashGrpColIdxInput[aggstate->numhashGrpCols] = i;
+		aggstate->numhashGrpCols++;
+	}
 
-		/* and build a tuple descriptor for the hashtable */
-		for (i = 0; i < perhash->numhashGrpCols; i++)
-		{
-			int			varNumber = perhash->hashGrpColIdxInput[i] - 1;
+	/* and build a tuple descriptor for the hashtable */
+	for (i = 0; i < aggstate->numhashGrpCols; i++)
+	{
+		int			varNumber = aggstate->hashGrpColIdxInput[i] - 1;
 
-			hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
-			perhash->largestGrpColIdx =
-				Max(varNumber + 1, perhash->largestGrpColIdx);
-		}
+		hashTlist = lappend(hashTlist, list_nth(outerTlist, varNumber));
+		aggstate->largestGrpColIdx =
+			Max(varNumber + 1, aggstate->largestGrpColIdx);
+	}
 
-		hashDesc = ExecTypeFromTL(hashTlist, false);
-		ExecSetSlotDescriptor(perhash->hashslot, hashDesc);
+	hashDesc = ExecTypeFromTL(hashTlist, false);
+	ExecSetSlotDescriptor(aggstate->hashslot, hashDesc);
 
-		list_free(hashTlist);
-		bms_free(colnos);
-	}
+	list_free(hashTlist);
+	bms_free(colnos);
 
-	bms_free(base_colnos);
+	return collist;
 }
 
 /*
@@ -2011,30 +1840,26 @@ hash_agg_entry_size(int numAggs)
 }
 
 /*
- * Find or create a hashtable entry for the tuple group containing the current
- * tuple (already set in tmpcontext's outertuple slot), in the current grouping
- * set (which the caller must have selected - note that initialize_aggregate
- * depends on this).
+ * Find or create a hashtable entry for the tuple group containing the
+ * given tuple.
  *
  * When called, CurrentMemoryContext should be the per-query context.
  */
 static TupleHashEntryData *
-lookup_hash_entry(AggState *aggstate)
+lookup_hash_entry(AggState *aggstate, TupleTableSlot *inputslot)
 {
-	TupleTableSlot *inputslot = aggstate->tmpcontext->ecxt_outertuple;
-	AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
-	TupleTableSlot *hashslot = perhash->hashslot;
+	TupleTableSlot *hashslot = aggstate->hashslot;
 	TupleHashEntryData *entry;
 	bool		isnew;
-	int			i;
+	int i;
 
 	/* transfer just the needed columns into hashslot */
-	slot_getsomeattrs(inputslot, perhash->largestGrpColIdx);
+	slot_getsomeattrs(inputslot, aggstate->largestGrpColIdx);
 	ExecClearTuple(hashslot);
 
-	for (i = 0; i < perhash->numhashGrpCols; i++)
+	for (i = 0; i < aggstate->numhashGrpCols; i++)
 	{
-		int			varNumber = perhash->hashGrpColIdxInput[i] - 1;
+		int			varNumber = aggstate->hashGrpColIdxInput[i] - 1;
 
 		hashslot->tts_values[i] = inputslot->tts_values[varNumber];
 		hashslot->tts_isnull[i] = inputslot->tts_isnull[varNumber];
@@ -2042,44 +1867,22 @@ lookup_hash_entry(AggState *aggstate)
 	ExecStoreVirtualTuple(hashslot);
 
 	/* find or create the hashtable entry using the filtered tuple */
-	entry = LookupTupleHashEntry(perhash->hashtable, hashslot, &isnew);
+	entry = LookupTupleHashEntry(aggstate->hashtable, hashslot, &isnew);
 
 	if (isnew)
 	{
 		entry->additional = (AggStatePerGroup)
-			MemoryContextAlloc(perhash->hashtable->tablecxt,
+			MemoryContextAlloc(aggstate->hashtable->tablecxt,
 						  sizeof(AggStatePerGroupData) * aggstate->numtrans);
 		/* initialize aggregates for new tuple group */
 		initialize_aggregates(aggstate, (AggStatePerGroup) entry->additional,
-							  -1);
+							  0);
 	}
 
 	return entry;
 }
 
 /*
- * Look up hash entries for the current tuple in all hashed grouping sets,
- * returning an array of pergroup pointers suitable for advance_aggregates.
- *
- * Be aware that lookup_hash_entry can reset the tmpcontext.
- */
-static AggStatePerGroup *
-lookup_hash_entries(AggState *aggstate)
-{
-	int			numHashes = aggstate->num_hashes;
-	AggStatePerGroup *pergroup = aggstate->hash_pergroup;
-	int			setno;
-
-	for (setno = 0; setno < numHashes; setno++)
-	{
-		select_current_set(aggstate, setno, true);
-		pergroup[setno] = lookup_hash_entry(aggstate)->additional;
-	}
-
-	return pergroup;
-}
-
-/*
  * ExecAgg -
  *
  *	  ExecAgg receives tuples from its outer subplan and aggregates over
@@ -2095,22 +1898,19 @@ lookup_hash_entries(AggState *aggstate)
 TupleTableSlot *
 ExecAgg(AggState *node)
 {
-	TupleTableSlot *result = NULL;
+	TupleTableSlot *result;
 
 	if (!node->agg_done)
 	{
 		/* Dispatch based on strategy */
-		switch (node->phase->aggstrategy)
+		switch (node->phase->aggnode->aggstrategy)
 		{
 			case AGG_HASHED:
 				if (!node->table_filled)
 					agg_fill_hash_table(node);
-				/* FALLTHROUGH */
-			case AGG_MIXED:
 				result = agg_retrieve_hash_table(node);
 				break;
-			case AGG_PLAIN:
-			case AGG_SORTED:
+			default:
 				result = agg_retrieve_direct(node);
 				break;
 		}
@@ -2133,7 +1933,6 @@ agg_retrieve_direct(AggState *aggstate)
 	ExprContext *tmpcontext;
 	AggStatePerAgg peragg;
 	AggStatePerGroup pergroup;
-	AggStatePerGroup *hash_pergroups = NULL;
 	TupleTableSlot *outerslot;
 	TupleTableSlot *firstSlot;
 	TupleTableSlot *result;
@@ -2220,19 +2019,6 @@ agg_retrieve_direct(AggState *aggstate)
 				node = aggstate->phase->aggnode;
 				numReset = numGroupingSets;
 			}
-			else if (aggstate->aggstrategy == AGG_MIXED)
-			{
-				/*
-				 * Mixed mode; we've output all the grouped stuff and have
-				 * full hashtables, so switch to outputting those.
-				 */
-				initialize_phase(aggstate, 0);
-				aggstate->table_filled = true;
-				ResetTupleHashIterator(aggstate->perhash[0].hashtable,
-									   &aggstate->perhash[0].hashiter);
-				select_current_set(aggstate, 0, true);
-				return agg_retrieve_hash_table(aggstate);
-			}
 			else
 			{
 				aggstate->agg_done = true;
@@ -2269,7 +2055,7 @@ agg_retrieve_direct(AggState *aggstate)
 		 *----------
 		 */
 		if (aggstate->input_done ||
-			(node->aggstrategy != AGG_PLAIN &&
+			(node->aggstrategy == AGG_SORTED &&
 			 aggstate->projected_set != -1 &&
 			 aggstate->projected_set < (numGroupingSets - 1) &&
 			 nextSetSize > 0 &&
@@ -2382,22 +2168,10 @@ agg_retrieve_direct(AggState *aggstate)
 				 */
 				for (;;)
 				{
-					/*
-					 * During phase 1 only of a mixed agg, we need to update
-					 * hashtables as well in advance_aggregates.
-					 */
-					if (aggstate->aggstrategy == AGG_MIXED &&
-						aggstate->current_phase == 1)
-					{
-						hash_pergroups = lookup_hash_entries(aggstate);
-					}
-					else
-						hash_pergroups = NULL;
-
 					if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
 						combine_aggregates(aggstate, pergroup);
 					else
-						advance_aggregates(aggstate, pergroup, hash_pergroups);
+						advance_aggregates(aggstate, pergroup);
 
 					/* Reset per-input-tuple context after each tuple */
 					ResetExprContext(tmpcontext);
@@ -2424,7 +2198,7 @@ agg_retrieve_direct(AggState *aggstate)
 					 * If we are grouping, check whether we've crossed a group
 					 * boundary.
 					 */
-					if (node->aggstrategy != AGG_PLAIN)
+					if (node->aggstrategy == AGG_SORTED)
 					{
 						if (!execTuplesMatch(firstSlot,
 											 outerslot,
@@ -2457,11 +2231,7 @@ agg_retrieve_direct(AggState *aggstate)
 
 		prepare_projection_slot(aggstate, econtext->ecxt_outertuple, currentSet);
 
-		select_current_set(aggstate, currentSet, false);
-
-		finalize_aggregates(aggstate,
-							peragg,
-							pergroup + (currentSet * aggstate->numtrans));
+		finalize_aggregates(aggstate, peragg, pergroup, currentSet);
 
 		/*
 		 * If there's no row to project right now, we must continue rather
@@ -2477,13 +2247,21 @@ agg_retrieve_direct(AggState *aggstate)
 }
 
 /*
- * ExecAgg for hashed case: read input and build hash table
+ * ExecAgg for hashed case: phase 1, read input and build hash table
  */
 static void
 agg_fill_hash_table(AggState *aggstate)
 {
+	ExprContext *tmpcontext;
+	TupleHashEntryData *entry;
 	TupleTableSlot *outerslot;
-	ExprContext *tmpcontext = aggstate->tmpcontext;
+
+	/*
+	 * get state info from node
+	 *
+	 * tmpcontext is the per-input-tuple expression context
+	 */
+	tmpcontext = aggstate->tmpcontext;
 
 	/*
 	 * Process each outer-plan tuple, and then fetch the next one, until we
@@ -2491,40 +2269,32 @@ agg_fill_hash_table(AggState *aggstate)
 	 */
 	for (;;)
 	{
-		AggStatePerGroup *pergroups;
-
 		outerslot = fetch_input_tuple(aggstate);
 		if (TupIsNull(outerslot))
 			break;
-
-		/* set up for lookup_hash_entries and advance_aggregates */
+		/* set up for advance_aggregates call */
 		tmpcontext->ecxt_outertuple = outerslot;
 
-		/* Find or build hashtable entries */
-		pergroups = lookup_hash_entries(aggstate);
+		/* Find or build hashtable entry for this tuple's group */
+		entry = lookup_hash_entry(aggstate, outerslot);
 
 		/* Advance the aggregates */
 		if (DO_AGGSPLIT_COMBINE(aggstate->aggsplit))
-			combine_aggregates(aggstate, pergroups[0]);
+			combine_aggregates(aggstate, (AggStatePerGroup) entry->additional);
 		else
-			advance_aggregates(aggstate, NULL, pergroups);
+			advance_aggregates(aggstate, (AggStatePerGroup) entry->additional);
 
-		/*
-		 * Reset per-input-tuple context after each tuple, but note that the
-		 * hash lookups do this too
-		 */
-		ResetExprContext(aggstate->tmpcontext);
+		/* Reset per-input-tuple context after each tuple */
+		ResetExprContext(tmpcontext);
 	}
 
 	aggstate->table_filled = true;
-	/* Initialize to walk the first hash table */
-	select_current_set(aggstate, 0, true);
-	ResetTupleHashIterator(aggstate->perhash[0].hashtable,
-						   &aggstate->perhash[0].hashiter);
+	/* Initialize to walk the hash table */
+	ResetTupleHashIterator(aggstate->hashtable, &aggstate->hashiter);
 }
 
 /*
- * ExecAgg for hashed case: retrieving groups from hash table
+ * ExecAgg for hashed case: phase 2, retrieving groups from hash table
  */
 static TupleTableSlot *
 agg_retrieve_hash_table(AggState *aggstate)
@@ -2535,22 +2305,17 @@ agg_retrieve_hash_table(AggState *aggstate)
 	TupleHashEntryData *entry;
 	TupleTableSlot *firstSlot;
 	TupleTableSlot *result;
-	AggStatePerHash perhash;
+	TupleTableSlot *hashslot;
 
 	/*
-	 * get state info from node.
-	 *
-	 * econtext is the per-output-tuple expression context.
+	 * get state info from node
 	 */
+	/* econtext is the per-output-tuple expression context */
 	econtext = aggstate->ss.ps.ps_ExprContext;
 	peragg = aggstate->peragg;
 	firstSlot = aggstate->ss.ss_ScanTupleSlot;
+	hashslot = aggstate->hashslot;
 
-	/*
-	 * Note that perhash (and therefore anything accessed through it) can
-	 * change inside the loop, as we change between grouping sets.
-	 */
-	perhash = &aggstate->perhash[aggstate->current_set];
 
 	/*
 	 * We loop retrieving groups until we find one satisfying
@@ -2558,37 +2323,17 @@ agg_retrieve_hash_table(AggState *aggstate)
 	 */
 	while (!aggstate->agg_done)
 	{
-		TupleTableSlot *hashslot = perhash->hashslot;
-		int			i;
+		int i;
 
 		/*
 		 * Find the next entry in the hash table
 		 */
-		entry = ScanTupleHashTable(perhash->hashtable, &perhash->hashiter);
+		entry = ScanTupleHashTable(aggstate->hashtable, &aggstate->hashiter);
 		if (entry == NULL)
 		{
-			int			nextset = aggstate->current_set + 1;
-
-			if (nextset < aggstate->num_hashes)
-			{
-				/*
-				 * Switch to next grouping set, reinitialize, and restart the
-				 * loop.
-				 */
-				select_current_set(aggstate, nextset, true);
-
-				perhash = &aggstate->perhash[aggstate->current_set];
-
-				ResetTupleHashIterator(perhash->hashtable, &perhash->hashiter);
-
-				continue;
-			}
-			else
-			{
-				/* No more hashtables, so done */
-				aggstate->agg_done = TRUE;
-				return NULL;
-			}
+			/* No more entries in hashtable, so done */
+			aggstate->agg_done = TRUE;
+			return NULL;
 		}
 
 		/*
@@ -2611,9 +2356,9 @@ agg_retrieve_hash_table(AggState *aggstate)
 		memset(firstSlot->tts_isnull, true,
 			   firstSlot->tts_tupleDescriptor->natts * sizeof(bool));
 
-		for (i = 0; i < perhash->numhashGrpCols; i++)
+		for (i = 0; i < aggstate->numhashGrpCols; i++)
 		{
-			int			varNumber = perhash->hashGrpColIdxInput[i] - 1;
+			int			varNumber = aggstate->hashGrpColIdxInput[i] - 1;
 
 			firstSlot->tts_values[varNumber] = hashslot->tts_values[i];
 			firstSlot->tts_isnull[varNumber] = hashslot->tts_isnull[i];
@@ -2622,18 +2367,14 @@ agg_retrieve_hash_table(AggState *aggstate)
 
 		pergroup = (AggStatePerGroup) entry->additional;
 
+		finalize_aggregates(aggstate, peragg, pergroup, 0);
+
 		/*
 		 * Use the representative input tuple for any references to
 		 * non-aggregated input columns in the qual and tlist.
 		 */
 		econtext->ecxt_outertuple = firstSlot;
 
-		prepare_projection_slot(aggstate,
-								econtext->ecxt_outertuple,
-								aggstate->current_set);
-
-		finalize_aggregates(aggstate, peragg, pergroup);
-
 		result = project_aggregates(aggstate);
 		if (result)
 			return result;
@@ -2647,8 +2388,7 @@ agg_retrieve_hash_table(AggState *aggstate)
  * ExecInitAgg
  *
  *	Creates the run-time information for the agg node produced by the
- *	planner and initializes its outer subtree.
- *
+ *	planner and initializes its outer subtree
  * -----------------
  */
 AggState *
@@ -2663,18 +2403,14 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 				transno,
 				aggno;
 	int			phase;
-	int			phaseidx;
 	List	   *combined_inputeval;
 	ListCell   *l;
 	Bitmapset  *all_grouped_cols = NULL;
 	int			numGroupingSets = 1;
 	int			numPhases;
-	int			numHashes;
 	int			column_offset;
 	int			i = 0;
 	int			j = 0;
-	bool		use_hashing = (node->aggstrategy == AGG_HASHED ||
-							   node->aggstrategy == AGG_MIXED);
 
 	/* check for unsupported flags */
 	Assert(!(eflags & (EXEC_FLAG_BACKWARD | EXEC_FLAG_MARK)));
@@ -2689,9 +2425,9 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	aggstate->aggs = NIL;
 	aggstate->numaggs = 0;
 	aggstate->numtrans = 0;
-	aggstate->aggstrategy = node->aggstrategy;
 	aggstate->aggsplit = node->aggsplit;
 	aggstate->maxsets = 0;
+	aggstate->hashfunctions = NULL;
 	aggstate->projected_set = -1;
 	aggstate->current_set = 0;
 	aggstate->peragg = NULL;
@@ -2701,22 +2437,18 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	aggstate->agg_done = false;
 	aggstate->pergroup = NULL;
 	aggstate->grp_firstTuple = NULL;
+	aggstate->hashtable = NULL;
 	aggstate->sort_in = NULL;
 	aggstate->sort_out = NULL;
 
 	/*
-	 * phases[0] always exists, but is dummy in sorted/plain mode
-	 */
-	numPhases = (use_hashing ? 1 : 2);
-	numHashes = (use_hashing ? 1 : 0);
-
-	/*
 	 * Calculate the maximum number of grouping sets in any phase; this
-	 * determines the size of some allocations.  Also calculate the number of
-	 * phases, since all hashed/mixed nodes contribute to only a single phase.
+	 * determines the size of some allocations.
 	 */
 	if (node->groupingSets)
 	{
+		Assert(node->aggstrategy != AGG_HASHED);
+
 		numGroupingSets = list_length(node->groupingSets);
 
 		foreach(l, node->chain)
@@ -2725,32 +2457,22 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 
 			numGroupingSets = Max(numGroupingSets,
 								  list_length(agg->groupingSets));
-
-			/*
-			 * additional AGG_HASHED aggs become part of phase 0, but all
-			 * others add an extra phase.
-			 */
-			if (agg->aggstrategy != AGG_HASHED)
-				++numPhases;
-			else
-				++numHashes;
 		}
 	}
 
 	aggstate->maxsets = numGroupingSets;
-	aggstate->numphases = numPhases;
+	aggstate->numphases = numPhases = 1 + list_length(node->chain);
 
 	aggstate->aggcontexts = (ExprContext **)
 		palloc0(sizeof(ExprContext *) * numGroupingSets);
 
 	/*
 	 * Create expression contexts.  We need three or more, one for
-	 * per-input-tuple processing, one for per-output-tuple processing, one
-	 * for all the hashtables, and one for each grouping set.  The per-tuple
-	 * memory context of the per-grouping-set ExprContexts (aggcontexts)
-	 * replaces the standalone memory context formerly used to hold transition
-	 * values.  We cheat a little by using ExecAssignExprContext() to build
-	 * all of them.
+	 * per-input-tuple processing, one for per-output-tuple processing, and
+	 * one for each grouping set.  The per-tuple memory context of the
+	 * per-grouping-set ExprContexts (aggcontexts) replaces the standalone
+	 * memory context formerly used to hold transition values.  We cheat a
+	 * little by using ExecAssignExprContext() to build all of them.
 	 *
 	 * NOTE: the details of what is stored in aggcontexts and what is stored
 	 * in the regular per-query memory context are driven by a simple
@@ -2766,21 +2488,14 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		aggstate->aggcontexts[i] = aggstate->ss.ps.ps_ExprContext;
 	}
 
-	if (use_hashing)
-	{
-		ExecAssignExprContext(estate, &aggstate->ss.ps);
-		aggstate->hashcontext = aggstate->ss.ps.ps_ExprContext;
-	}
-
 	ExecAssignExprContext(estate, &aggstate->ss.ps);
 
 	/*
-	 * tuple table initialization.
-	 *
-	 * For hashtables, we create some additional slots below.
+	 * tuple table initialization
 	 */
 	ExecInitScanTupleSlot(estate, &aggstate->ss);
 	ExecInitResultTupleSlot(estate, &aggstate->ss.ps);
+	aggstate->hashslot = ExecInitExtraTupleSlot(estate);
 	aggstate->sort_slot = ExecInitExtraTupleSlot(estate);
 
 	/*
@@ -2844,26 +2559,19 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	 * For each phase, prepare grouping set data and fmgr lookup data for
 	 * compare functions.  Accumulate all_grouped_cols in passing.
 	 */
-	aggstate->phases = palloc0(numPhases * sizeof(AggStatePerPhaseData));
 
-	aggstate->num_hashes = numHashes;
-	if (numHashes)
-	{
-		aggstate->perhash = palloc0(sizeof(AggStatePerHashData) * numHashes);
-		aggstate->phases[0].numsets = 0;
-		aggstate->phases[0].gset_lengths = palloc(numHashes * sizeof(int));
-		aggstate->phases[0].grouped_cols = palloc(numHashes * sizeof(Bitmapset *));
-	}
+	aggstate->phases = palloc0(numPhases * sizeof(AggStatePerPhaseData));
 
-	phase = 0;
-	for (phaseidx = 0; phaseidx <= list_length(node->chain); ++phaseidx)
+	for (phase = 0; phase < numPhases; ++phase)
 	{
+		AggStatePerPhase phasedata = &aggstate->phases[phase];
 		Agg		   *aggnode;
 		Sort	   *sortnode;
+		int			num_sets;
 
-		if (phaseidx > 0)
+		if (phase > 0)
 		{
-			aggnode = castNode(Agg, list_nth(node->chain, phaseidx - 1));
+			aggnode = castNode(Agg, list_nth(node->chain, phase - 1));
 			sortnode = castNode(Sort, aggnode->plan.lefttree);
 		}
 		else
@@ -2872,91 +2580,53 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 			sortnode = NULL;
 		}
 
-		Assert(phase <= 1 || sortnode);
+		phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
 
-		if (aggnode->aggstrategy == AGG_HASHED
-			|| aggnode->aggstrategy == AGG_MIXED)
+		if (num_sets)
 		{
-			AggStatePerPhase phasedata = &aggstate->phases[0];
-			AggStatePerHash perhash;
-			Bitmapset  *cols = NULL;
-
-			Assert(phase == 0);
-			i = phasedata->numsets++;
-			perhash = &aggstate->perhash[i];
+			phasedata->gset_lengths = palloc(num_sets * sizeof(int));
+			phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
 
-			/* phase 0 always points to the "real" Agg in the hash case */
-			phasedata->aggnode = node;
-			phasedata->aggstrategy = node->aggstrategy;
-
-			/* but the actual Agg node representing this hash is saved here */
-			perhash->aggnode = aggnode;
-
-			phasedata->gset_lengths[i] = perhash->numCols = aggnode->numCols;
+			i = 0;
+			foreach(l, aggnode->groupingSets)
+			{
+				int			current_length = list_length(lfirst(l));
+				Bitmapset  *cols = NULL;
 
-			for (j = 0; j < aggnode->numCols; ++j)
-				cols = bms_add_member(cols, aggnode->grpColIdx[j]);
+				/* planner forces this to be correct */
+				for (j = 0; j < current_length; ++j)
+					cols = bms_add_member(cols, aggnode->grpColIdx[j]);
 
-			phasedata->grouped_cols[i] = cols;
+				phasedata->grouped_cols[i] = cols;
+				phasedata->gset_lengths[i] = current_length;
+				++i;
+			}
 
-			all_grouped_cols = bms_add_members(all_grouped_cols, cols);
-			continue;
+			all_grouped_cols = bms_add_members(all_grouped_cols,
+											   phasedata->grouped_cols[0]);
 		}
 		else
 		{
-			AggStatePerPhase phasedata = &aggstate->phases[++phase];
-			int			num_sets;
-
-			phasedata->numsets = num_sets = list_length(aggnode->groupingSets);
-
-			if (num_sets)
-			{
-				phasedata->gset_lengths = palloc(num_sets * sizeof(int));
-				phasedata->grouped_cols = palloc(num_sets * sizeof(Bitmapset *));
-
-				i = 0;
-				foreach(l, aggnode->groupingSets)
-				{
-					int			current_length = list_length(lfirst(l));
-					Bitmapset  *cols = NULL;
-
-					/* planner forces this to be correct */
-					for (j = 0; j < current_length; ++j)
-						cols = bms_add_member(cols, aggnode->grpColIdx[j]);
-
-					phasedata->grouped_cols[i] = cols;
-					phasedata->gset_lengths[i] = current_length;
-
-					++i;
-				}
-
-				all_grouped_cols = bms_add_members(all_grouped_cols,
-												 phasedata->grouped_cols[0]);
-			}
-			else
-			{
-				Assert(phaseidx == 0);
-
-				phasedata->gset_lengths = NULL;
-				phasedata->grouped_cols = NULL;
-			}
+			Assert(phase == 0);
 
-			/*
-			 * If we are grouping, precompute fmgr lookup data for inner loop.
-			 */
-			if (aggnode->aggstrategy == AGG_SORTED)
-			{
-				Assert(aggnode->numCols > 0);
+			phasedata->gset_lengths = NULL;
+			phasedata->grouped_cols = NULL;
+		}
 
-				phasedata->eqfunctions =
-					execTuplesMatchPrepare(aggnode->numCols,
-										   aggnode->grpOperators);
-			}
+		/*
+		 * If we are grouping, precompute fmgr lookup data for inner loop.
+		 */
+		if (aggnode->aggstrategy == AGG_SORTED)
+		{
+			Assert(aggnode->numCols > 0);
 
-			phasedata->aggnode = aggnode;
-			phasedata->aggstrategy = aggnode->aggstrategy;
-			phasedata->sortnode = sortnode;
+			phasedata->eqfunctions =
+				execTuplesMatchPrepare(aggnode->numCols,
+									   aggnode->grpOperators);
 		}
+
+		phasedata->aggnode = aggnode;
+		phasedata->sortnode = sortnode;
 	}
 
 	/*
@@ -2967,6 +2637,13 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		aggstate->all_grouped_cols = lcons_int(i, aggstate->all_grouped_cols);
 
 	/*
+	 * Initialize current phase-dependent values to initial phase
+	 */
+
+	aggstate->current_phase = 0;
+	initialize_phase(aggstate, 0);
+
+	/*
 	 * Set up aggregate-result storage in the output expr context, and also
 	 * allocate my private per-agg working storage
 	 */
@@ -2980,30 +2657,23 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 	aggstate->peragg = peraggs;
 	aggstate->pertrans = pertransstates;
 
+
 	/*
 	 * Hashing can only appear in the initial phase.
 	 */
-	if (use_hashing)
+	if (node->aggstrategy == AGG_HASHED)
 	{
-		for (i = 0; i < numHashes; ++i)
-		{
-			aggstate->perhash[i].hashslot = ExecInitExtraTupleSlot(estate);
-
-			execTuplesHashPrepare(aggstate->perhash[i].numCols,
-								  aggstate->perhash[i].aggnode->grpOperators,
-								  &aggstate->perhash[i].eqfunctions,
-								  &aggstate->perhash[i].hashfunctions);
-		}
+		find_hash_columns(aggstate);
 
-		/* this is an array of pointers, not structures */
-		aggstate->hash_pergroup = palloc0(sizeof(AggStatePerGroup) * numHashes);
+		execTuplesHashPrepare(node->numCols,
+							  node->grpOperators,
+							  &aggstate->phases[0].eqfunctions,
+							  &aggstate->hashfunctions);
 
-		find_hash_columns(aggstate);
 		build_hash_table(aggstate);
 		aggstate->table_filled = false;
 	}
-
-	if (node->aggstrategy != AGG_HASHED)
+	else
 	{
 		AggStatePerGroup pergroup;
 
@@ -3014,25 +2684,6 @@ ExecInitAgg(Agg *node, EState *estate, int eflags)
 		aggstate->pergroup = pergroup;
 	}
 
-	/*
-	 * Initialize current phase-dependent values to initial phase. The initial
-	 * phase is 1 (first sort pass) for all strategies that use sorting (if
-	 * hashing is being done too, then phase 0 is processed last); but if only
-	 * hashing is being done, then phase 0 is all there is.
-	 */
-	if (node->aggstrategy == AGG_HASHED)
-	{
-		aggstate->current_phase = 0;
-		initialize_phase(aggstate, 0);
-		select_current_set(aggstate, 0, true);
-	}
-	else
-	{
-		aggstate->current_phase = 1;
-		initialize_phase(aggstate, 1);
-		select_current_set(aggstate, 0, false);
-	}
-
 	/* -----------------
 	 * Perform lookups of aggregate function info, and initialize the
 	 * unchanging fields of the per-agg and per-trans data.
@@ -3610,7 +3261,7 @@ build_pertrans_for_aggref(AggStatePerTrans pertrans,
 		 * We don't implement DISTINCT or ORDER BY aggs in the HASHED case
 		 * (yet)
 		 */
-		Assert(aggstate->aggstrategy != AGG_HASHED && aggstate->aggstrategy != AGG_MIXED);
+		Assert(((Agg *) aggstate->ss.ps.plan)->aggstrategy != AGG_HASHED);
 
 		/* If we have only one input, we need its len/byval info. */
 		if (numInputs == 1)
@@ -3859,8 +3510,6 @@ ExecEndAgg(AggState *node)
 	/* And ensure any agg shutdown callbacks have been called */
 	for (setno = 0; setno < numGroupingSets; setno++)
 		ReScanExprContext(node->aggcontexts[setno]);
-	if (node->hashcontext)
-		ReScanExprContext(node->hashcontext);
 
 	/*
 	 * We don't actually free any ExprContexts here (see comment in
@@ -3888,7 +3537,7 @@ ExecReScanAgg(AggState *node)
 
 	node->agg_done = false;
 
-	if (node->aggstrategy == AGG_HASHED)
+	if (aggnode->aggstrategy == AGG_HASHED)
 	{
 		/*
 		 * In the hashed case, if we haven't yet built the hash table then we
@@ -3908,9 +3557,7 @@ ExecReScanAgg(AggState *node)
 		if (outerPlan->chgParam == NULL &&
 			!bms_overlap(node->ss.ps.chgParam, aggnode->aggParams))
 		{
-			ResetTupleHashIterator(node->perhash[0].hashtable,
-								   &node->perhash[0].hashiter);
-			select_current_set(node, 0, true);
+			ResetTupleHashIterator(node->hashtable, &node->hashiter);
 			return;
 		}
 	}
@@ -3935,7 +3582,11 @@ ExecReScanAgg(AggState *node)
 	 * ExecReScan already did it. But we do need to reset our per-grouping-set
 	 * contexts, which may have transvalues stored in them. (We use rescan
 	 * rather than just reset because transfns may have registered callbacks
-	 * that need to be run now.) For the AGG_HASHED case, see below.
+	 * that need to be run now.)
+	 *
+	 * Note that with AGG_HASHED, the hash table is allocated in a sub-context
+	 * of the aggcontext. This used to be an issue, but now, resetting a
+	 * context automatically deletes sub-contexts too.
 	 */
 
 	for (setno = 0; setno < numGroupingSets; setno++)
@@ -3955,21 +3606,13 @@ ExecReScanAgg(AggState *node)
 	MemSet(econtext->ecxt_aggvalues, 0, sizeof(Datum) * node->numaggs);
 	MemSet(econtext->ecxt_aggnulls, 0, sizeof(bool) * node->numaggs);
 
-	/*
-	 * With AGG_HASHED/MIXED, the hash table is allocated in a sub-context of
-	 * the hashcontext. This used to be an issue, but now, resetting a context
-	 * automatically deletes sub-contexts too.
-	 */
-	if (node->aggstrategy == AGG_HASHED || node->aggstrategy == AGG_MIXED)
+	if (aggnode->aggstrategy == AGG_HASHED)
 	{
-		ReScanExprContext(node->hashcontext);
 		/* Rebuild an empty hash table */
 		build_hash_table(node);
 		node->table_filled = false;
-		/* iterator will be reset when the table is filled */
 	}
-
-	if (node->aggstrategy != AGG_HASHED)
+	else
 	{
 		/*
 		 * Reset the per-group state (in particular, mark transvalues null)
@@ -3977,8 +3620,8 @@ ExecReScanAgg(AggState *node)
 		MemSet(node->pergroup, 0,
 			 sizeof(AggStatePerGroupData) * node->numaggs * numGroupingSets);
 
-		/* reset to phase 1 */
-		initialize_phase(node, 1);
+		/* reset to phase 0 */
+		initialize_phase(node, 0);
 
 		node->input_done = false;
 		node->projected_set = -1;
@@ -4019,7 +3662,7 @@ AggCheckCallContext(FunctionCallInfo fcinfo, MemoryContext *aggcontext)
 		if (aggcontext)
 		{
 			AggState   *aggstate = ((AggState *) fcinfo->context);
-			ExprContext *cxt = aggstate->curaggcontext;
+			ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
 
 			*aggcontext = cxt->ecxt_per_tuple_memory;
 		}
@@ -4108,7 +3751,7 @@ AggRegisterCallback(FunctionCallInfo fcinfo,
 	if (fcinfo->context && IsA(fcinfo->context, AggState))
 	{
 		AggState   *aggstate = (AggState *) fcinfo->context;
-		ExprContext *cxt = aggstate->curaggcontext;
+		ExprContext *cxt = aggstate->aggcontexts[aggstate->current_set];
 
 		RegisterExprContextCallback(cxt, func, arg);
 
diff --git a/src/backend/lib/Makefile b/src/backend/lib/Makefile
index f222c6c..2d2ba84 100644
--- a/src/backend/lib/Makefile
+++ b/src/backend/lib/Makefile
@@ -12,7 +12,7 @@ subdir = src/backend/lib
 top_builddir = ../../..
 include $(top_builddir)/src/Makefile.global
 
-OBJS = binaryheap.o bipartite_match.o hyperloglog.o ilist.o knapsack.o \
-       pairingheap.o rbtree.o stringinfo.o
+OBJS = binaryheap.o bipartite_match.o hyperloglog.o ilist.o pairingheap.o \
+       rbtree.o stringinfo.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/lib/knapsack.c b/src/backend/lib/knapsack.c
deleted file mode 100644
index ddf2b9a..0000000
--- a/src/backend/lib/knapsack.c
+++ /dev/null
@@ -1,114 +0,0 @@
-/*-------------------------------------------------------------------------
- *
- * knapsack.c
- *	  Knapsack problem solver
- *
- * Given input vectors of integral item weights (must be >= 0) and values
- * (double >= 0), compute the set of items which produces the greatest total
- * value without exceeding a specified total weight; each item is included at
- * most once (this is the 0/1 knapsack problem).  Weight 0 items will always be
- * included.
- *
- * The performance of this algorithm is pseudo-polynomial, O(nW) where W is the
- * weight limit.  To use with non-integral weights or approximate solutions,
- * the caller should pre-scale the input weights to a suitable range.  This
- * allows approximate solutions in polynomial time (the general case of the
- * exact problem is NP-hard).
- *
- * Copyright (c) 2017, PostgreSQL Global Development Group
- *
- * IDENTIFICATION
- *	  src/backend/lib/knapsack.c
- *
- *-------------------------------------------------------------------------
- */
-#include "postgres.h"
-
-#include <math.h>
-#include <limits.h>
-
-#include "lib/knapsack.h"
-#include "miscadmin.h"
-#include "nodes/bitmapset.h"
-#include "utils/builtins.h"
-#include "utils/memutils.h"
-#include "utils/palloc.h"
-
-/*
- * DiscreteKnapsack
- *
- * The item_values input is optional; if omitted, all the items are assumed to
- * have value 1.
- *
- * Returns a Bitmapset of the 0..(n-1) indexes of the items chosen for
- * inclusion in the solution.
- *
- * This uses the usual dynamic-programming algorithm, adapted to reuse the
- * memory on each pass (by working from larger weights to smaller).  At the
- * start of pass number i, the values[w] array contains the largest value
- * computed with total weight <= w, using only items with indices < i; and
- * sets[w] contains the bitmap of items actually used for that value.  (The
- * bitmapsets are all pre-initialized with an unused high bit so that memory
- * allocation is done only once.)
- */
-Bitmapset *
-DiscreteKnapsack(int max_weight, int num_items,
-				 int *item_weights, double *item_values)
-{
-	MemoryContext local_ctx = AllocSetContextCreate(CurrentMemoryContext,
-													"Knapsack",
-													ALLOCSET_SMALL_MINSIZE,
-													ALLOCSET_SMALL_INITSIZE,
-													ALLOCSET_SMALL_MAXSIZE);
-	MemoryContext oldctx = MemoryContextSwitchTo(local_ctx);
-	double	   *values;
-	Bitmapset **sets;
-	Bitmapset  *result;
-	int			i,
-				j;
-
-	Assert(max_weight >= 0);
-	Assert(num_items > 0 && item_weights);
-
-	values = palloc((1 + max_weight) * sizeof(double));
-	sets = palloc((1 + max_weight) * sizeof(Bitmapset *));
-
-	for (i = 0; i <= max_weight; ++i)
-	{
-		values[i] = 0;
-		sets[i] = bms_make_singleton(num_items);
-	}
-
-	for (i = 0; i < num_items; ++i)
-	{
-		int			iw = item_weights[i];
-		double		iv = item_values ? item_values[i] : 1;
-
-		for (j = max_weight; j >= iw; --j)
-		{
-			int			ow = j - iw;
-
-			if (values[j] <= values[ow] + iv)
-			{
-				/* copy sets[ow] to sets[j] without realloc */
-				if (j != ow)
-				{
-					sets[j] = bms_del_members(sets[j], sets[j]);
-					sets[j] = bms_add_members(sets[j], sets[ow]);
-				}
-
-				sets[j] = bms_add_member(sets[j], i);
-
-				values[j] = values[ow] + iv;
-			}
-		}
-	}
-
-	MemoryContextSwitchTo(oldctx);
-
-	result = bms_del_member(bms_copy(sets[max_weight]), num_items);
-
-	MemoryContextDelete(local_ctx);
-
-	return result;
-}
diff --git a/src/backend/libpq/auth.c b/src/backend/libpq/auth.c
index a3c6c6d..5f4f557 100644
--- a/src/backend/libpq/auth.c
+++ b/src/backend/libpq/auth.c
@@ -2570,16 +2570,14 @@ CheckCertAuth(Port *port)
  */
 
 /*
- * RADIUS authentication is described in RFC2865 (and several others).
+ * RADIUS authentication is described in RFC2865 (and several
+ * others).
  */
 
 #define RADIUS_VECTOR_LENGTH 16
 #define RADIUS_HEADER_LENGTH 20
 #define RADIUS_MAX_PASSWORD_LENGTH 128
 
-/* Maximum size of a RADIUS packet we will create or accept */
-#define RADIUS_BUFFER_SIZE 1024
-
 typedef struct
 {
 	uint8		attribute;
@@ -2593,8 +2591,6 @@ typedef struct
 	uint8		id;
 	uint16		length;
 	uint8		vector[RADIUS_VECTOR_LENGTH];
-	/* this is a bit longer than strictly necessary: */
-	char		pad[RADIUS_BUFFER_SIZE - RADIUS_VECTOR_LENGTH];
 } radius_packet;
 
 /* RADIUS packet types */
@@ -2611,6 +2607,9 @@ typedef struct
 /* RADIUS service types */
 #define RADIUS_AUTHENTICATE_ONLY	8
 
+/* Maximum size of a RADIUS packet we will create or accept */
+#define RADIUS_BUFFER_SIZE 1024
+
 /* Seconds to wait - XXX: should be in a config variable! */
 #define RADIUS_TIMEOUT 3
 
@@ -2735,12 +2734,10 @@ CheckRADIUSAuth(Port *port)
 static int
 PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identifier, char *user_name, char *passwd)
 {
-	radius_packet radius_send_pack;
-	radius_packet radius_recv_pack;
-	radius_packet *packet = &radius_send_pack;
-	radius_packet *receivepacket = &radius_recv_pack;
-	char	   *radius_buffer = (char *) &radius_send_pack;
-	char	   *receive_buffer = (char *) &radius_recv_pack;
+	char		radius_buffer[RADIUS_BUFFER_SIZE];
+	char		receive_buffer[RADIUS_BUFFER_SIZE];
+	radius_packet *packet = (radius_packet *) radius_buffer;
+	radius_packet *receivepacket = (radius_packet *) receive_buffer;
 	int32		service = htonl(RADIUS_AUTHENTICATE_ONLY);
 	uint8	   *cryptvector;
 	int			encryptedpasswordlen;
@@ -2796,7 +2793,6 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi
 	{
 		ereport(LOG,
 				(errmsg("could not generate random encryption vector")));
-		pg_freeaddrinfo_all(hint.ai_family, serveraddrs);
 		return STATUS_ERROR;
 	}
 	packet->id = packet->vector[0];
@@ -2831,7 +2827,6 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi
 			ereport(LOG,
 					(errmsg("could not perform MD5 encryption of password")));
 			pfree(cryptvector);
-			pg_freeaddrinfo_all(hint.ai_family, serveraddrs);
 			return STATUS_ERROR;
 		}
 
@@ -2847,7 +2842,7 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi
 
 	radius_add_attribute(packet, RADIUS_PASSWORD, encryptedpassword, encryptedpasswordlen);
 
-	/* Length needs to be in network order on the wire */
+	/* Length need to be in network order on the wire */
 	packetlength = packet->length;
 	packet->length = htons(packet->length);
 
@@ -2873,7 +2868,6 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi
 	localaddr.sin_addr.s_addr = INADDR_ANY;
 	addrsize = sizeof(struct sockaddr_in);
 #endif
-
 	if (bind(sock, (struct sockaddr *) & localaddr, addrsize))
 	{
 		ereport(LOG,
@@ -2970,7 +2964,6 @@ PerformRadiusTransaction(char *server, char *secret, char *portstr, char *identi
 		{
 			ereport(LOG,
 					(errmsg("could not read RADIUS response: %m")));
-			closesocket(sock);
 			return STATUS_ERROR;
 		}
 
diff --git a/src/backend/nodes/bitmapset.c b/src/backend/nodes/bitmapset.c
index bf8545d..252af5c 100644
--- a/src/backend/nodes/bitmapset.c
+++ b/src/backend/nodes/bitmapset.c
@@ -21,7 +21,6 @@
 #include "postgres.h"
 
 #include "access/hash.h"
-#include "nodes/pg_list.h"
 
 
 #define WORDNUM(x)	((x) / BITS_PER_BITMAPWORD)
@@ -459,35 +458,6 @@ bms_overlap(const Bitmapset *a, const Bitmapset *b)
 }
 
 /*
- * bms_overlap_list - does a set overlap an integer list?
- */
-bool
-bms_overlap_list(const Bitmapset *a, const List *b)
-{
-	ListCell   *lc;
-	int			wordnum,
-				bitnum;
-
-	if (a == NULL || b == NIL)
-		return false;
-
-	foreach(lc, b)
-	{
-		int			x = lfirst_int(lc);
-
-		if (x < 0)
-			elog(ERROR, "negative bitmapset member not allowed");
-		wordnum = WORDNUM(x);
-		bitnum = BITNUM(x);
-		if (wordnum < a->nwords)
-			if ((a->words[wordnum] & ((bitmapword) 1 << bitnum)) != 0)
-				return true;
-	}
-
-	return false;
-}
-
-/*
  * bms_nonempty_difference - do sets have a nonempty difference?
  */
 bool
diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c
index c23d5c5..7ac0b40 100644
--- a/src/backend/nodes/copyfuncs.c
+++ b/src/backend/nodes/copyfuncs.c
@@ -1387,17 +1387,19 @@ _copyWindowFunc(const WindowFunc *from)
 }
 
 /*
- * _copyArrayRef
+ * _copySubscriptingRef
  */
-static ArrayRef *
-_copyArrayRef(const ArrayRef *from)
+static SubscriptingRef *
+_copySubscriptingRef(const SubscriptingRef *from)
 {
-	ArrayRef   *newnode = makeNode(ArrayRef);
+	SubscriptingRef   *newnode = makeNode(SubscriptingRef);
 
-	COPY_SCALAR_FIELD(refarraytype);
+	COPY_SCALAR_FIELD(refcontainertype);
 	COPY_SCALAR_FIELD(refelemtype);
 	COPY_SCALAR_FIELD(reftypmod);
 	COPY_SCALAR_FIELD(refcollid);
+	COPY_SCALAR_FIELD(refevalfunc);
+	COPY_SCALAR_FIELD(refnestedfunc);
 	COPY_NODE_FIELD(refupperindexpr);
 	COPY_NODE_FIELD(reflowerindexpr);
 	COPY_NODE_FIELD(refexpr);
@@ -4800,8 +4802,8 @@ copyObject(const void *from)
 		case T_WindowFunc:
 			retval = _copyWindowFunc(from);
 			break;
-		case T_ArrayRef:
-			retval = _copyArrayRef(from);
+		case T_SubscriptingRef:
+			retval = _copySubscriptingRef(from);
 			break;
 		case T_FuncExpr:
 			retval = _copyFuncExpr(from);
diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c
index 5941b7a..2f837e1 100644
--- a/src/backend/nodes/equalfuncs.c
+++ b/src/backend/nodes/equalfuncs.c
@@ -264,12 +264,14 @@ _equalWindowFunc(const WindowFunc *a, const WindowFunc *b)
 }
 
 static bool
-_equalArrayRef(const ArrayRef *a, const ArrayRef *b)
+_equalSubscriptingRef(const SubscriptingRef *a, const SubscriptingRef *b)
 {
-	COMPARE_SCALAR_FIELD(refarraytype);
+	COMPARE_SCALAR_FIELD(refcontainertype);
 	COMPARE_SCALAR_FIELD(refelemtype);
 	COMPARE_SCALAR_FIELD(reftypmod);
 	COMPARE_SCALAR_FIELD(refcollid);
+	COMPARE_SCALAR_FIELD(refevalfunc);
+	COMPARE_SCALAR_FIELD(refnestedfunc);
 	COMPARE_NODE_FIELD(refupperindexpr);
 	COMPARE_NODE_FIELD(reflowerindexpr);
 	COMPARE_NODE_FIELD(refexpr);
@@ -2997,8 +2999,8 @@ equal(const void *a, const void *b)
 		case T_WindowFunc:
 			retval = _equalWindowFunc(a, b);
 			break;
-		case T_ArrayRef:
-			retval = _equalArrayRef(a, b);
+		case T_SubscriptingRef:
+			retval = _equalSubscriptingRef(a, b);
 			break;
 		case T_FuncExpr:
 			retval = _equalFuncExpr(a, b);
diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c
index 6e52eb7..65baca9 100644
--- a/src/backend/nodes/nodeFuncs.c
+++ b/src/backend/nodes/nodeFuncs.c
@@ -66,15 +66,15 @@ exprType(const Node *expr)
 		case T_WindowFunc:
 			type = ((const WindowFunc *) expr)->wintype;
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				const ArrayRef *arrayref = (const ArrayRef *) expr;
+				const SubscriptingRef *sbsref = (const SubscriptingRef *) expr;
 
 				/* slice and/or store operations yield the array type */
-				if (arrayref->reflowerindexpr || arrayref->refassgnexpr)
-					type = arrayref->refarraytype;
+				if (IsAssignment(sbsref) || sbsref->reflowerindexpr)
+					type = sbsref->refcontainertype;
 				else
-					type = arrayref->refelemtype;
+					type = sbsref->refelemtype;
 			}
 			break;
 		case T_FuncExpr:
@@ -283,9 +283,9 @@ exprTypmod(const Node *expr)
 			return ((const Const *) expr)->consttypmod;
 		case T_Param:
 			return ((const Param *) expr)->paramtypmod;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			/* typmod is the same for array or element */
-			return ((const ArrayRef *) expr)->reftypmod;
+			return ((const SubscriptingRef *) expr)->reftypmod;
 		case T_FuncExpr:
 			{
 				int32		coercedTypmod;
@@ -769,8 +769,8 @@ exprCollation(const Node *expr)
 		case T_WindowFunc:
 			coll = ((const WindowFunc *) expr)->wincollid;
 			break;
-		case T_ArrayRef:
-			coll = ((const ArrayRef *) expr)->refcollid;
+		case T_SubscriptingRef:
+			coll = ((const SubscriptingRef *) expr)->refcollid;
 			break;
 		case T_FuncExpr:
 			coll = ((const FuncExpr *) expr)->funccollid;
@@ -1010,8 +1010,8 @@ exprSetCollation(Node *expr, Oid collation)
 		case T_WindowFunc:
 			((WindowFunc *) expr)->wincollid = collation;
 			break;
-		case T_ArrayRef:
-			((ArrayRef *) expr)->refcollid = collation;
+		case T_SubscriptingRef:
+			((SubscriptingRef *) expr)->refcollid = collation;
 			break;
 		case T_FuncExpr:
 			((FuncExpr *) expr)->funccollid = collation;
@@ -1235,9 +1235,9 @@ exprLocation(const Node *expr)
 			/* function name should always be the first thing */
 			loc = ((const WindowFunc *) expr)->location;
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			/* just use array argument's location */
-			loc = exprLocation((Node *) ((const ArrayRef *) expr)->refexpr);
+			loc = exprLocation((Node *) ((const SubscriptingRef *) expr)->refexpr);
 			break;
 		case T_FuncExpr:
 			{
@@ -1681,6 +1681,14 @@ check_functions_in_node(Node *node, check_function_callback checker,
 					return true;
 			}
 			break;
+		case T_SubscriptingRef:
+			{
+				SubscriptingRef   *sbsref = (SubscriptingRef *) node;
+
+				if (checker(sbsref->refevalfunc, context))
+					return true;
+			}
+			break;
 		case T_FuncExpr:
 			{
 				FuncExpr   *expr = (FuncExpr *) node;
@@ -1930,21 +1938,25 @@ expression_tree_walker(Node *node,
 					return true;
 			}
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *aref = (ArrayRef *) node;
+				SubscriptingRef   *sbsref = (SubscriptingRef *) node;
 
 				/* recurse directly for upper/lower array index lists */
-				if (expression_tree_walker((Node *) aref->refupperindexpr,
+				if (expression_tree_walker((Node *) sbsref->refupperindexpr,
 										   walker, context))
 					return true;
-				if (expression_tree_walker((Node *) aref->reflowerindexpr,
+				if (expression_tree_walker((Node *) sbsref->reflowerindexpr,
 										   walker, context))
 					return true;
 				/* walker must see the refexpr and refassgnexpr, however */
-				if (walker(aref->refexpr, context))
+				if (walker(sbsref->refexpr, context))
 					return true;
-				if (walker(aref->refassgnexpr, context))
+
+				if (!IsAssignment(node))
+					break;
+
+				if (walker(sbsref->refassgnexpr, context))
 					return true;
 			}
 			break;
@@ -2538,20 +2550,21 @@ expression_tree_mutator(Node *node,
 				return (Node *) newnode;
 			}
 			break;
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *arrayref = (ArrayRef *) node;
-				ArrayRef   *newnode;
+				SubscriptingRef   *sbsref = (SubscriptingRef *) node;
+				SubscriptingRef   *newnode;
 
-				FLATCOPY(newnode, arrayref, ArrayRef);
-				MUTATE(newnode->refupperindexpr, arrayref->refupperindexpr,
+				FLATCOPY(newnode, sbsref, SubscriptingRef);
+				MUTATE(newnode->refupperindexpr, sbsref->refupperindexpr,
 					   List *);
-				MUTATE(newnode->reflowerindexpr, arrayref->reflowerindexpr,
+				MUTATE(newnode->reflowerindexpr, sbsref->reflowerindexpr,
 					   List *);
-				MUTATE(newnode->refexpr, arrayref->refexpr,
+				MUTATE(newnode->refexpr, sbsref->refexpr,
 					   Expr *);
-				MUTATE(newnode->refassgnexpr, arrayref->refassgnexpr,
+				MUTATE(newnode->refassgnexpr, sbsref->refassgnexpr,
 					   Expr *);
+
 				return (Node *) newnode;
 			}
 			break;
diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c
index bbb63a4..5f667d6 100644
--- a/src/backend/nodes/outfuncs.c
+++ b/src/backend/nodes/outfuncs.c
@@ -1137,14 +1137,16 @@ _outWindowFunc(StringInfo str, const WindowFunc *node)
 }
 
 static void
-_outArrayRef(StringInfo str, const ArrayRef *node)
+_outSubscriptingRef(StringInfo str, const SubscriptingRef *node)
 {
-	WRITE_NODE_TYPE("ARRAYREF");
+	WRITE_NODE_TYPE("SUBSCRIPTINGREF");
 
-	WRITE_OID_FIELD(refarraytype);
+	WRITE_OID_FIELD(refcontainertype);
 	WRITE_OID_FIELD(refelemtype);
 	WRITE_INT_FIELD(reftypmod);
 	WRITE_OID_FIELD(refcollid);
+	WRITE_OID_FIELD(refevalfunc);
+	WRITE_OID_FIELD(refnestedfunc);
 	WRITE_NODE_FIELD(refupperindexpr);
 	WRITE_NODE_FIELD(reflowerindexpr);
 	WRITE_NODE_FIELD(refexpr);
@@ -1942,28 +1944,6 @@ _outAggPath(StringInfo str, const AggPath *node)
 }
 
 static void
-_outRollupData(StringInfo str, const RollupData *node)
-{
-	WRITE_NODE_TYPE("ROLLUP");
-
-	WRITE_NODE_FIELD(groupClause);
-	WRITE_NODE_FIELD(gsets);
-	WRITE_NODE_FIELD(gsets_data);
-	WRITE_FLOAT_FIELD(numGroups, "%.0f");
-	WRITE_BOOL_FIELD(hashable);
-	WRITE_BOOL_FIELD(is_hashed);
-}
-
-static void
-_outGroupingSetData(StringInfo str, const GroupingSetData *node)
-{
-	WRITE_NODE_TYPE("GSDATA");
-
-	WRITE_NODE_FIELD(set);
-	WRITE_FLOAT_FIELD(numGroups, "%.0f");
-}
-
-static void
 _outGroupingSetsPath(StringInfo str, const GroupingSetsPath *node)
 {
 	WRITE_NODE_TYPE("GROUPINGSETSPATH");
@@ -1971,8 +1951,8 @@ _outGroupingSetsPath(StringInfo str, const GroupingSetsPath *node)
 	_outPathInfo(str, (const Path *) node);
 
 	WRITE_NODE_FIELD(subpath);
-	WRITE_ENUM_FIELD(aggstrategy, AggStrategy);
-	WRITE_NODE_FIELD(rollups);
+	WRITE_NODE_FIELD(rollup_groupclauses);
+	WRITE_NODE_FIELD(rollup_lists);
 	WRITE_NODE_FIELD(qual);
 }
 
@@ -3710,8 +3690,8 @@ outNode(StringInfo str, const void *obj)
 			case T_WindowFunc:
 				_outWindowFunc(str, obj);
 				break;
-			case T_ArrayRef:
-				_outArrayRef(str, obj);
+			case T_SubscriptingRef:
+				_outSubscriptingRef(str, obj);
 				break;
 			case T_FuncExpr:
 				_outFuncExpr(str, obj);
@@ -3983,18 +3963,14 @@ outNode(StringInfo str, const void *obj)
 			case T_PlannerParamItem:
 				_outPlannerParamItem(str, obj);
 				break;
-			case T_RollupData:
-				_outRollupData(str, obj);
-				break;
-			case T_GroupingSetData:
-				_outGroupingSetData(str, obj);
-				break;
 			case T_StatisticExtInfo:
 				_outStatisticExtInfo(str, obj);
 				break;
+
 			case T_ExtensibleNode:
 				_outExtensibleNode(str, obj);
 				break;
+
 			case T_CreateStmt:
 				_outCreateStmt(str, obj);
 				break;
diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c
index 474f221..f1c5d54 100644
--- a/src/backend/nodes/readfuncs.c
+++ b/src/backend/nodes/readfuncs.c
@@ -632,17 +632,19 @@ _readWindowFunc(void)
 }
 
 /*
- * _readArrayRef
+ * _readSubscriptingRef
  */
-static ArrayRef *
-_readArrayRef(void)
+static SubscriptingRef *
+_readSubscriptingRef(void)
 {
-	READ_LOCALS(ArrayRef);
+	READ_LOCALS(SubscriptingRef);
 
-	READ_OID_FIELD(refarraytype);
+	READ_OID_FIELD(refcontainertype);
 	READ_OID_FIELD(refelemtype);
 	READ_INT_FIELD(reftypmod);
 	READ_OID_FIELD(refcollid);
+	READ_OID_FIELD(refevalfunc);
+	READ_OID_FIELD(refnestedfunc);
 	READ_NODE_FIELD(refupperindexpr);
 	READ_NODE_FIELD(reflowerindexpr);
 	READ_NODE_FIELD(refexpr);
@@ -2439,8 +2441,8 @@ parseNodeString(void)
 		return_value = _readGroupingFunc();
 	else if (MATCH("WINDOWFUNC", 10))
 		return_value = _readWindowFunc();
-	else if (MATCH("ARRAYREF", 8))
-		return_value = _readArrayRef();
+	else if (MATCH("SUBSCRIPTINGREF", 15))
+		return_value = _readSubscriptingRef();
 	else if (MATCH("FUNCEXPR", 8))
 		return_value = _readFuncExpr();
 	else if (MATCH("NAMEDARGEXPR", 12))
diff --git a/src/backend/nodes/tidbitmap.c b/src/backend/nodes/tidbitmap.c
index eab8f68..ae7a913 100644
--- a/src/backend/nodes/tidbitmap.c
+++ b/src/backend/nodes/tidbitmap.c
@@ -1533,11 +1533,9 @@ pagetable_allocate(pagetable_hash *pagetable, Size size)
 	 * new memory so that pagetable_free can free the old entry.
 	 */
 	tbm->dsapagetableold = tbm->dsapagetable;
-	tbm->dsapagetable = dsa_allocate_extended(tbm->dsa,
-											  sizeof(PTEntryArray) + size,
-											DSA_ALLOC_HUGE | DSA_ALLOC_ZERO);
-	ptbase = dsa_get_address(tbm->dsa, tbm->dsapagetable);
+	tbm->dsapagetable = dsa_allocate0(tbm->dsa, sizeof(PTEntryArray) + size);
 
+	ptbase = dsa_get_address(tbm->dsa, tbm->dsapagetable);
 	return ptbase->ptentry;
 }
 
diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 92de2b7..8f5142b 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -1884,16 +1884,11 @@ cost_agg(Path *path, PlannerInfo *root,
 		total_cost = startup_cost + cpu_tuple_cost;
 		output_tuples = 1;
 	}
-	else if (aggstrategy == AGG_SORTED || aggstrategy == AGG_MIXED)
+	else if (aggstrategy == AGG_SORTED)
 	{
 		/* Here we are able to deliver output on-the-fly */
 		startup_cost = input_startup_cost;
 		total_cost = input_total_cost;
-		if (aggstrategy == AGG_MIXED && !enable_hashagg)
-		{
-			startup_cost += disable_cost;
-			total_cost += disable_cost;
-		}
 		/* calcs phrased this way to match HASHED case, see note above */
 		total_cost += aggcosts->transCost.startup;
 		total_cost += aggcosts->transCost.per_tuple * input_tuples;
@@ -3478,6 +3473,11 @@ cost_qual_eval_walker(Node *node, cost_qual_eval_context *context)
 		context->total.per_tuple +=
 			get_func_cost(((FuncExpr *) node)->funcid) * cpu_operator_cost;
 	}
+	else if (IsA(node, SubscriptingRef))
+	{
+		context->total.per_tuple +=
+			get_func_cost(((SubscriptingRef *) node)->refevalfunc) * cpu_operator_cost;
+	}
 	else if (IsA(node, OpExpr) ||
 			 IsA(node, DistinctExpr) ||
 			 IsA(node, NullIfExpr))
diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index aafec58..c80c999 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -1783,15 +1783,18 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
 {
 	Agg		   *plan;
 	Plan	   *subplan;
-	List	   *rollups = best_path->rollups;
+	List	   *rollup_groupclauses = best_path->rollup_groupclauses;
+	List	   *rollup_lists = best_path->rollup_lists;
 	AttrNumber *grouping_map;
 	int			maxref;
 	List	   *chain;
-	ListCell   *lc;
+	ListCell   *lc,
+			   *lc2;
 
 	/* Shouldn't get here without grouping sets */
 	Assert(root->parse->groupingSets);
-	Assert(rollups != NIL);
+	Assert(rollup_lists != NIL);
+	Assert(list_length(rollup_lists) == list_length(rollup_groupclauses));
 
 	/*
 	 * Agg can project, so no need to be terribly picky about child tlist, but
@@ -1843,86 +1846,72 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
 	 * costs will be shown by EXPLAIN.
 	 */
 	chain = NIL;
-	if (list_length(rollups) > 1)
+	if (list_length(rollup_groupclauses) > 1)
 	{
-		ListCell   *lc2 = lnext(list_head(rollups));
-		bool		is_first_sort = ((RollupData *) linitial(rollups))->is_hashed;
-
-		for_each_cell(lc, lc2)
+		forboth(lc, rollup_groupclauses, lc2, rollup_lists)
 		{
-			RollupData *rollup = lfirst(lc);
+			List	   *groupClause = (List *) lfirst(lc);
+			List	   *gsets = (List *) lfirst(lc2);
 			AttrNumber *new_grpColIdx;
-			Plan	   *sort_plan = NULL;
+			Plan	   *sort_plan;
 			Plan	   *agg_plan;
-			AggStrategy strat;
-
-			new_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
 
-			if (!rollup->is_hashed && !is_first_sort)
-			{
-				sort_plan = (Plan *)
-					make_sort_from_groupcols(rollup->groupClause,
-											 new_grpColIdx,
-											 subplan);
-			}
+			/* We want to iterate over all but the last rollup list elements */
+			if (lnext(lc) == NULL)
+				break;
 
-			if (!rollup->is_hashed)
-				is_first_sort = false;
+			new_grpColIdx = remap_groupColIdx(root, groupClause);
 
-			if (rollup->is_hashed)
-				strat = AGG_HASHED;
-			else if (list_length(linitial(rollup->gsets)) == 0)
-				strat = AGG_PLAIN;
-			else
-				strat = AGG_SORTED;
+			sort_plan = (Plan *)
+				make_sort_from_groupcols(groupClause,
+										 new_grpColIdx,
+										 subplan);
 
 			agg_plan = (Plan *) make_agg(NIL,
 										 NIL,
-										 strat,
+										 AGG_SORTED,
 										 AGGSPLIT_SIMPLE,
-							   list_length((List *) linitial(rollup->gsets)),
+									   list_length((List *) linitial(gsets)),
 										 new_grpColIdx,
-								   extract_grouping_ops(rollup->groupClause),
-										 rollup->gsets,
+										 extract_grouping_ops(groupClause),
+										 gsets,
 										 NIL,
-										 rollup->numGroups,
+										 0,		/* numGroups not needed */
 										 sort_plan);
 
 			/*
-			 * Remove stuff we don't need to avoid bloating debug output.
+			 * Nuke stuff we don't need to avoid bloating debug output.
 			 */
-			if (sort_plan)
-			{
-				sort_plan->targetlist = NIL;
-				sort_plan->lefttree = NULL;
-			}
+			sort_plan->targetlist = NIL;
+			sort_plan->lefttree = NULL;
 
 			chain = lappend(chain, agg_plan);
 		}
 	}
 
 	/*
-	 * Now make the real Agg node
+	 * Now make the final Agg node
 	 */
 	{
-		RollupData *rollup = linitial(rollups);
+		List	   *groupClause = (List *) llast(rollup_groupclauses);
+		List	   *gsets = (List *) llast(rollup_lists);
 		AttrNumber *top_grpColIdx;
 		int			numGroupCols;
 
-		top_grpColIdx = remap_groupColIdx(root, rollup->groupClause);
+		top_grpColIdx = remap_groupColIdx(root, groupClause);
 
-		numGroupCols = list_length((List *) linitial(rollup->gsets));
+		numGroupCols = list_length((List *) linitial(gsets));
 
 		plan = make_agg(build_path_tlist(root, &best_path->path),
 						best_path->qual,
-						best_path->aggstrategy,
+						(numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
 						AGGSPLIT_SIMPLE,
 						numGroupCols,
 						top_grpColIdx,
-						extract_grouping_ops(rollup->groupClause),
-						rollup->gsets,
+						extract_grouping_ops(groupClause),
+						gsets,
 						chain,
-						rollup->numGroups,
+						0,		/* numGroups not needed */
 						subplan);
 
 		/* Copy cost data from Path to Plan */
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index fa7a5f8..9061950 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -30,7 +30,6 @@
 #include "foreign/fdwapi.h"
 #include "miscadmin.h"
 #include "lib/bipartite_match.h"
-#include "lib/knapsack.h"
 #include "nodes/makefuncs.h"
 #include "nodes/nodeFuncs.h"
 #ifdef OPTIMIZER_DEBUG
@@ -92,31 +91,12 @@ typedef struct
 	List	   *groupClause;	/* overrides parse->groupClause */
 } standard_qp_extra;
 
-/*
- * Data specific to grouping sets
- */
-
-typedef struct
-{
-	List	   *rollups;
-	List	   *hash_sets_idx;
-	double		dNumHashGroups;
-	bool		any_hashable;
-	Bitmapset  *unsortable_refs;
-	Bitmapset  *unhashable_refs;
-	List	   *unsortable_sets;
-	int		   *tleref_to_colnum_map;
-} grouping_sets_data;
-
 /* Local functions */
 static Node *preprocess_expression(PlannerInfo *root, Node *expr, int kind);
 static void preprocess_qual_conditions(PlannerInfo *root, Node *jtnode);
 static void inheritance_planner(PlannerInfo *root);
 static void grouping_planner(PlannerInfo *root, bool inheritance_update,
 				 double tuple_fraction);
-static grouping_sets_data *preprocess_grouping_sets(PlannerInfo *root);
-static List *remap_to_groupclause_idx(List *groupClause, List *gsets,
-						 int *tleref_to_colnum_map);
 static void preprocess_rowmarks(PlannerInfo *root);
 static double preprocess_limit(PlannerInfo *root,
 				 double tuple_fraction,
@@ -129,7 +109,8 @@ static List *reorder_grouping_sets(List *groupingSets, List *sortclause);
 static void standard_qp_callback(PlannerInfo *root, void *extra);
 static double get_number_of_groups(PlannerInfo *root,
 					 double path_rows,
-					 grouping_sets_data *gd);
+					 List *rollup_lists,
+					 List *rollup_groupclauses);
 static Size estimate_hashagg_tablesize(Path *path,
 						   const AggClauseCosts *agg_costs,
 						   double dNumGroups);
@@ -137,16 +118,8 @@ static RelOptInfo *create_grouping_paths(PlannerInfo *root,
 					  RelOptInfo *input_rel,
 					  PathTarget *target,
 					  const AggClauseCosts *agg_costs,
-					  grouping_sets_data *gd);
-static void consider_groupingsets_paths(PlannerInfo *root,
-							RelOptInfo *grouped_rel,
-							Path *path,
-							bool is_sorted,
-							bool can_hash,
-							PathTarget *target,
-							grouping_sets_data *gd,
-							const AggClauseCosts *agg_costs,
-							double dNumGroups);
+					  List *rollup_lists,
+					  List *rollup_groupclauses);
 static RelOptInfo *create_window_paths(PlannerInfo *root,
 					RelOptInfo *input_rel,
 					PathTarget *input_target,
@@ -1567,7 +1540,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		AggClauseCosts agg_costs;
 		WindowFuncLists *wflists = NULL;
 		List	   *activeWindows = NIL;
-		grouping_sets_data *gset_data = NULL;
+		List	   *rollup_lists = NIL;
+		List	   *rollup_groupclauses = NIL;
 		standard_qp_extra qp_extra;
 
 		/* A recursive query should always have setOperations */
@@ -1576,7 +1550,84 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		/* Preprocess grouping sets and GROUP BY clause, if any */
 		if (parse->groupingSets)
 		{
-			gset_data = preprocess_grouping_sets(root);
+			int		   *tleref_to_colnum_map;
+			List	   *sets;
+			int			maxref;
+			ListCell   *lc;
+			ListCell   *lc2;
+			ListCell   *lc_set;
+
+			parse->groupingSets = expand_grouping_sets(parse->groupingSets, -1);
+
+			/* Identify max SortGroupRef in groupClause, for array sizing */
+			maxref = 0;
+			foreach(lc, parse->groupClause)
+			{
+				SortGroupClause *gc = lfirst(lc);
+
+				if (gc->tleSortGroupRef > maxref)
+					maxref = gc->tleSortGroupRef;
+			}
+
+			/* Allocate workspace array for remapping */
+			tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
+
+			/* Examine the rollup sets */
+			sets = extract_rollup_sets(parse->groupingSets);
+
+			foreach(lc_set, sets)
+			{
+				List	   *current_sets = (List *) lfirst(lc_set);
+				List	   *groupclause;
+				int			ref;
+
+				/*
+				 * Reorder the current list of grouping sets into correct
+				 * prefix order.  If only one aggregation pass is needed, try
+				 * to make the list match the ORDER BY clause; if more than
+				 * one pass is needed, we don't bother with that.
+				 */
+				current_sets = reorder_grouping_sets(current_sets,
+													 (list_length(sets) == 1
+													  ? parse->sortClause
+													  : NIL));
+
+				/*
+				 * Order the groupClause appropriately.  If the first grouping
+				 * set is empty, this can match regular GROUP BY
+				 * preprocessing, otherwise we have to force the groupClause
+				 * to match that grouping set's order.
+				 */
+				groupclause = preprocess_groupclause(root,
+													 linitial(current_sets));
+
+				/*
+				 * Now that we've pinned down an order for the groupClause for
+				 * this list of grouping sets, we need to remap the entries in
+				 * the grouping sets from sortgrouprefs to plain indices
+				 * (0-based) into the groupClause for this collection of
+				 * grouping sets.
+				 */
+				ref = 0;
+				foreach(lc, groupclause)
+				{
+					SortGroupClause *gc = lfirst(lc);
+
+					tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
+				}
+
+				foreach(lc, current_sets)
+				{
+					foreach(lc2, (List *) lfirst(lc))
+					{
+						lfirst_int(lc2) = tleref_to_colnum_map[lfirst_int(lc2)];
+					}
+				}
+
+				/* Save the reordered sets and corresponding groupclauses */
+				rollup_lists = lcons(current_sets, rollup_lists);
+				rollup_groupclauses = lcons(groupclause, rollup_groupclauses);
+			}
 		}
 		else
 		{
@@ -1670,9 +1721,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 		/* Set up data needed by standard_qp_callback */
 		qp_extra.tlist = tlist;
 		qp_extra.activeWindows = activeWindows;
-		qp_extra.groupClause = (gset_data
-								? (gset_data->rollups ? ((RollupData *) linitial(gset_data->rollups))->groupClause : NIL)
-								: parse->groupClause);
+		qp_extra.groupClause =
+			parse->groupingSets ? llast(rollup_groupclauses) : parse->groupClause;
 
 		/*
 		 * Generate the best unsorted and presorted paths for the scan/join
@@ -1872,7 +1922,8 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 												current_rel,
 												grouping_target,
 												&agg_costs,
-												gset_data);
+												rollup_lists,
+												rollup_groupclauses);
 			/* Fix things up if grouping_target contains SRFs */
 			if (parse->hasTargetSRFs)
 				adjust_paths_for_srfs(root, current_rel,
@@ -1909,6 +1960,7 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 			current_rel = create_distinct_paths(root,
 												current_rel);
 		}
+
 	}							/* end of if (setOperations) */
 
 	/*
@@ -2061,221 +2113,6 @@ grouping_planner(PlannerInfo *root, bool inheritance_update,
 	/* Note: currently, we leave it to callers to do set_cheapest() */
 }
 
-/*
- * Do preprocessing for groupingSets clause and related data.  This handles the
- * preliminary steps of expanding the grouping sets, organizing them into lists
- * of rollups, and preparing annotations which will later be filled in with
- * size estimates.
- */
-static grouping_sets_data *
-preprocess_grouping_sets(PlannerInfo *root)
-{
-	Query	   *parse = root->parse;
-	List	   *sets;
-	int			maxref = 0;
-	ListCell   *lc;
-	ListCell   *lc_set;
-	grouping_sets_data *gd = palloc0(sizeof(grouping_sets_data));
-
-	parse->groupingSets = expand_grouping_sets(parse->groupingSets, -1);
-
-	gd->any_hashable = false;
-	gd->unhashable_refs = NULL;
-	gd->unsortable_refs = NULL;
-	gd->unsortable_sets = NIL;
-
-	if (parse->groupClause)
-	{
-		ListCell   *lc;
-
-		foreach(lc, parse->groupClause)
-		{
-			SortGroupClause *gc = lfirst(lc);
-			Index		ref = gc->tleSortGroupRef;
-
-			if (ref > maxref)
-				maxref = ref;
-
-			if (!gc->hashable)
-				gd->unhashable_refs = bms_add_member(gd->unhashable_refs, ref);
-
-			if (!OidIsValid(gc->sortop))
-				gd->unsortable_refs = bms_add_member(gd->unsortable_refs, ref);
-		}
-	}
-
-	/* Allocate workspace array for remapping */
-	gd->tleref_to_colnum_map = (int *) palloc((maxref + 1) * sizeof(int));
-
-	/*
-	 * If we have any unsortable sets, we must extract them before trying to
-	 * prepare rollups. Unsortable sets don't go through
-	 * reorder_grouping_sets, so we must apply the GroupingSetData annotation
-	 * here.
-	 */
-	if (!bms_is_empty(gd->unsortable_refs))
-	{
-		List	   *sortable_sets = NIL;
-
-		foreach(lc, parse->groupingSets)
-		{
-			List	   *gset = lfirst(lc);
-
-			if (bms_overlap_list(gd->unsortable_refs, gset))
-			{
-				GroupingSetData *gs = makeNode(GroupingSetData);
-
-				gs->set = gset;
-				gd->unsortable_sets = lappend(gd->unsortable_sets, gs);
-
-				/*
-				 * We must enforce here that an unsortable set is hashable;
-				 * later code assumes this.  Parse analysis only checks that
-				 * every individual column is either hashable or sortable.
-				 *
-				 * Note that passing this test doesn't guarantee we can
-				 * generate a plan; there might be other showstoppers.
-				 */
-				if (bms_overlap_list(gd->unhashable_refs, gset))
-					ereport(ERROR,
-							(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-							 errmsg("could not implement GROUP BY"),
-							 errdetail("Some of the datatypes only support hashing, while others only support sorting.")));
-			}
-			else
-				sortable_sets = lappend(sortable_sets, gset);
-		}
-
-		if (sortable_sets)
-			sets = extract_rollup_sets(sortable_sets);
-		else
-			sets = NIL;
-	}
-	else
-		sets = extract_rollup_sets(parse->groupingSets);
-
-	foreach(lc_set, sets)
-	{
-		List	   *current_sets = (List *) lfirst(lc_set);
-		RollupData *rollup = makeNode(RollupData);
-		GroupingSetData *gs;
-
-		/*
-		 * Reorder the current list of grouping sets into correct prefix
-		 * order.  If only one aggregation pass is needed, try to make the
-		 * list match the ORDER BY clause; if more than one pass is needed, we
-		 * don't bother with that.
-		 *
-		 * Note that this reorders the sets from smallest-member-first to
-		 * largest-member-first, and applies the GroupingSetData annotations,
-		 * though the data will be filled in later.
-		 */
-		current_sets = reorder_grouping_sets(current_sets,
-											 (list_length(sets) == 1
-											  ? parse->sortClause
-											  : NIL));
-
-		/*
-		 * Get the initial (and therefore largest) grouping set.
-		 */
-		gs = linitial(current_sets);
-
-		/*
-		 * Order the groupClause appropriately.  If the first grouping set is
-		 * empty, then the groupClause must also be empty; otherwise we have
-		 * to force the groupClause to match that grouping set's order.
-		 *
-		 * (The first grouping set can be empty even though parse->groupClause
-		 * is not empty only if all non-empty grouping sets are unsortable.
-		 * The groupClauses for hashed grouping sets are built later on.)
-		 */
-		if (gs->set)
-			rollup->groupClause = preprocess_groupclause(root, gs->set);
-		else
-			rollup->groupClause = NIL;
-
-		/*
-		 * Is it hashable? We pretend empty sets are hashable even though we
-		 * actually force them not to be hashed later. But don't bother if
-		 * there's nothing but empty sets (since in that case we can't hash
-		 * anything).
-		 */
-		if (gs->set &&
-			!bms_overlap_list(gd->unhashable_refs, gs->set))
-		{
-			rollup->hashable = true;
-			gd->any_hashable = true;
-		}
-
-		/*
-		 * Now that we've pinned down an order for the groupClause for this
-		 * list of grouping sets, we need to remap the entries in the grouping
-		 * sets from sortgrouprefs to plain indices (0-based) into the
-		 * groupClause for this collection of grouping sets. We keep the
-		 * original form for later use, though.
-		 */
-		rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
-												 current_sets,
-												 gd->tleref_to_colnum_map);
-		rollup->gsets_data = current_sets;
-
-		gd->rollups = lappend(gd->rollups, rollup);
-	}
-
-	if (gd->unsortable_sets)
-	{
-		/*
-		 * We have not yet pinned down a groupclause for this, but we will
-		 * need index-based lists for estimation purposes. Construct
-		 * hash_sets_idx based on the entire original groupclause for now.
-		 */
-		gd->hash_sets_idx = remap_to_groupclause_idx(parse->groupClause,
-													 gd->unsortable_sets,
-												   gd->tleref_to_colnum_map);
-		gd->any_hashable = true;
-	}
-
-	return gd;
-}
-
-/*
- * Given a groupclause and a list of GroupingSetData, return equivalent sets
- * (without annotation) mapped to indexes into the given groupclause.
- */
-static List *
-remap_to_groupclause_idx(List *groupClause,
-						 List *gsets,
-						 int *tleref_to_colnum_map)
-{
-	int			ref = 0;
-	List	   *result = NIL;
-	ListCell   *lc;
-
-	foreach(lc, groupClause)
-	{
-		SortGroupClause *gc = lfirst(lc);
-
-		tleref_to_colnum_map[gc->tleSortGroupRef] = ref++;
-	}
-
-	foreach(lc, gsets)
-	{
-		List	   *set = NIL;
-		ListCell   *lc2;
-		GroupingSetData *gs = lfirst(lc);
-
-		foreach(lc2, gs->set)
-		{
-			set = lappend_int(set, tleref_to_colnum_map[lfirst_int(lc2)]);
-		}
-
-		result = lappend(result, set);
-	}
-
-	return result;
-}
-
-
 
 /*
  * Detect whether a plan node is a "dummy" plan created when a relation
@@ -3191,7 +3028,7 @@ extract_rollup_sets(List *groupingSets)
 
 /*
  * Reorder the elements of a list of grouping sets such that they have correct
- * prefix relationships. Also inserts the GroupingSetData annotations.
+ * prefix relationships.
  *
  * The input must be ordered with smallest sets first; the result is returned
  * with largest sets first.  Note that the result shares no list substructure
@@ -3214,7 +3051,6 @@ reorder_grouping_sets(List *groupingsets, List *sortclause)
 	{
 		List	   *candidate = lfirst(lc);
 		List	   *new_elems = list_difference_int(candidate, previous);
-		GroupingSetData *gs = makeNode(GroupingSetData);
 
 		if (list_length(new_elems) > 0)
 		{
@@ -3242,8 +3078,7 @@ reorder_grouping_sets(List *groupingsets, List *sortclause)
 			}
 		}
 
-		gs->set = list_copy(previous);
-		result = lcons(gs, result);
+		result = lcons(list_copy(previous), result);
 		list_free(new_elems);
 	}
 
@@ -3338,16 +3173,15 @@ standard_qp_callback(PlannerInfo *root, void *extra)
  * Estimate number of groups produced by grouping clauses (1 if not grouping)
  *
  * path_rows: number of output rows from scan/join step
- * gsets: grouping set data, or NULL if not doing grouping sets
- *
- * If doing grouping sets, we also annotate the gsets data with the estimates
- * for each set and each individual rollup list, with a view to later
- * determining whether some combination of them could be hashed instead.
+ * rollup_lists: list of grouping sets, or NIL if not doing grouping sets
+ * rollup_groupclauses: list of grouping clauses for grouping sets,
+ *		or NIL if not doing grouping sets
  */
 static double
 get_number_of_groups(PlannerInfo *root,
 					 double path_rows,
-					 grouping_sets_data *gd)
+					 List *rollup_lists,
+					 List *rollup_groupclauses)
 {
 	Query	   *parse = root->parse;
 	double		dNumGroups;
@@ -3359,60 +3193,28 @@ get_number_of_groups(PlannerInfo *root,
 		if (parse->groupingSets)
 		{
 			/* Add up the estimates for each grouping set */
-			ListCell   *lc;
-			ListCell   *lc2;
+			ListCell   *lc,
+					   *lc2;
 
 			dNumGroups = 0;
-
-			foreach(lc, gd->rollups)
+			forboth(lc, rollup_groupclauses, lc2, rollup_lists)
 			{
-				RollupData *rollup = lfirst(lc);
-				ListCell   *lc;
+				List	   *groupClause = (List *) lfirst(lc);
+				List	   *gsets = (List *) lfirst(lc2);
+				ListCell   *lc3;
 
-				groupExprs = get_sortgrouplist_exprs(rollup->groupClause,
+				groupExprs = get_sortgrouplist_exprs(groupClause,
 													 parse->targetList);
 
-				rollup->numGroups = 0.0;
-
-				forboth(lc, rollup->gsets, lc2, rollup->gsets_data)
+				foreach(lc3, gsets)
 				{
-					List	   *gset = (List *) lfirst(lc);
-					GroupingSetData *gs = lfirst(lc2);
-					double		numGroups = estimate_num_groups(root,
-																groupExprs,
-																path_rows,
-																&gset);
-
-					gs->numGroups = numGroups;
-					rollup->numGroups += numGroups;
-				}
-
-				dNumGroups += rollup->numGroups;
-			}
-
-			if (gd->hash_sets_idx)
-			{
-				ListCell   *lc;
-
-				gd->dNumHashGroups = 0;
+					List	   *gset = (List *) lfirst(lc3);
 
-				groupExprs = get_sortgrouplist_exprs(parse->groupClause,
-													 parse->targetList);
-
-				forboth(lc, gd->hash_sets_idx, lc2, gd->unsortable_sets)
-				{
-					List	   *gset = (List *) lfirst(lc);
-					GroupingSetData *gs = lfirst(lc2);
-					double		numGroups = estimate_num_groups(root,
-																groupExprs,
-																path_rows,
-																&gset);
-
-					gs->numGroups = numGroups;
-					gd->dNumHashGroups += numGroups;
+					dNumGroups += estimate_num_groups(root,
+													  groupExprs,
+													  path_rows,
+													  &gset);
 				}
-
-				dNumGroups += gd->dNumHashGroups;
 			}
 		}
 		else
@@ -3448,11 +3250,6 @@ get_number_of_groups(PlannerInfo *root,
  * estimate_hashagg_tablesize
  *	  estimate the number of bytes that a hash aggregate hashtable will
  *	  require based on the agg_costs, path width and dNumGroups.
- *
- * XXX this may be over-estimating the size now that hashagg knows to omit
- * unneeded columns from the hashtable. Also for mixed-mode grouping sets,
- * grouping columns not in the hashed set are counted here even though hashagg
- * won't store them. Is this a problem?
  */
 static Size
 estimate_hashagg_tablesize(Path *path, const AggClauseCosts *agg_costs,
@@ -3503,7 +3300,8 @@ create_grouping_paths(PlannerInfo *root,
 					  RelOptInfo *input_rel,
 					  PathTarget *target,
 					  const AggClauseCosts *agg_costs,
-					  grouping_sets_data *gd)
+					  List *rollup_lists,
+					  List *rollup_groupclauses)
 {
 	Query	   *parse = root->parse;
 	Path	   *cheapest_path = input_rel->cheapest_total_path;
@@ -3612,7 +3410,8 @@ create_grouping_paths(PlannerInfo *root,
 	 */
 	dNumGroups = get_number_of_groups(root,
 									  cheapest_path->rows,
-									  gd);
+									  rollup_lists,
+									  rollup_groupclauses);
 
 	/*
 	 * Determine whether it's possible to perform sort-based implementations
@@ -3620,22 +3419,15 @@ create_grouping_paths(PlannerInfo *root,
 	 * grouping_is_sortable() is trivially true, and all the
 	 * pathkeys_contained_in() tests will succeed too, so that we'll consider
 	 * every surviving input path.)
-	 *
-	 * If we have grouping sets, we might be able to sort some but not all of
-	 * them; in this case, we need can_sort to be true as long as we must
-	 * consider any sorted-input plan.
 	 */
-	can_sort = (gd && gd->rollups != NIL)
-		|| grouping_is_sortable(parse->groupClause);
+	can_sort = grouping_is_sortable(parse->groupClause);
 
 	/*
 	 * Determine whether we should consider hash-based implementations of
 	 * grouping.
 	 *
-	 * Hashed aggregation only applies if we're grouping. If we have grouping
-	 * sets, some groups might be hashable but others not; in this case we set
-	 * can_hash true as long as there is nothing globally preventing us from
-	 * hashing (and we should therefore consider plans with hashes).
+	 * Hashed aggregation only applies if we're grouping.  We currently can't
+	 * hash if there are grouping sets, though.
 	 *
 	 * Executor doesn't support hashed aggregation with DISTINCT or ORDER BY
 	 * aggregates.  (Doing so would imply storing *all* the input values in
@@ -3648,8 +3440,9 @@ create_grouping_paths(PlannerInfo *root,
 	 * other gating conditions, so we want to do it last.
 	 */
 	can_hash = (parse->groupClause != NIL &&
+				parse->groupingSets == NIL &&
 				agg_costs->numOrderedAggs == 0 &&
-		 (gd ? gd->any_hashable : grouping_is_hashable(parse->groupClause)));
+				grouping_is_hashable(parse->groupClause));
 
 	/*
 	 * If grouped_rel->consider_parallel is true, then paths that we generate
@@ -3715,7 +3508,8 @@ create_grouping_paths(PlannerInfo *root,
 		/* Estimate number of partial groups. */
 		dNumPartialGroups = get_number_of_groups(root,
 												 cheapest_partial_path->rows,
-												 gd);
+												 NIL,
+												 NIL);
 
 		/*
 		 * Collect statistics about aggregates for estimating costs of
@@ -3848,9 +3642,20 @@ create_grouping_paths(PlannerInfo *root,
 				/* Now decide what to stick atop it */
 				if (parse->groupingSets)
 				{
-					consider_groupingsets_paths(root, grouped_rel,
-												path, true, can_hash, target,
-												gd, agg_costs, dNumGroups);
+					/*
+					 * We have grouping sets, possibly with aggregation.  Make
+					 * a GroupingSetsPath.
+					 */
+					add_path(grouped_rel, (Path *)
+							 create_groupingsets_path(root,
+													  grouped_rel,
+													  path,
+													  target,
+												  (List *) parse->havingQual,
+													  rollup_lists,
+													  rollup_groupclauses,
+													  agg_costs,
+													  dNumGroups));
 				}
 				else if (parse->hasAggs)
 				{
@@ -4011,45 +3816,33 @@ create_grouping_paths(PlannerInfo *root,
 
 	if (can_hash)
 	{
-		if (parse->groupingSets)
-		{
-			/*
-			 * Try for a hash-only groupingsets path over unsorted input.
-			 */
-			consider_groupingsets_paths(root, grouped_rel,
-										cheapest_path, false, true, target,
-										gd, agg_costs, dNumGroups);
-		}
-		else
-		{
-			hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
-														  agg_costs,
-														  dNumGroups);
+		hashaggtablesize = estimate_hashagg_tablesize(cheapest_path,
+													  agg_costs,
+													  dNumGroups);
 
+		/*
+		 * Provided that the estimated size of the hashtable does not exceed
+		 * work_mem, we'll generate a HashAgg Path, although if we were unable
+		 * to sort above, then we'd better generate a Path, so that we at
+		 * least have one.
+		 */
+		if (hashaggtablesize < work_mem * 1024L ||
+			grouped_rel->pathlist == NIL)
+		{
 			/*
-			 * Provided that the estimated size of the hashtable does not
-			 * exceed work_mem, we'll generate a HashAgg Path, although if we
-			 * were unable to sort above, then we'd better generate a Path, so
-			 * that we at least have one.
+			 * We just need an Agg over the cheapest-total input path, since
+			 * input order won't matter.
 			 */
-			if (hashaggtablesize < work_mem * 1024L ||
-				grouped_rel->pathlist == NIL)
-			{
-				/*
-				 * We just need an Agg over the cheapest-total input path,
-				 * since input order won't matter.
-				 */
-				add_path(grouped_rel, (Path *)
-						 create_agg_path(root, grouped_rel,
-										 cheapest_path,
-										 target,
-										 AGG_HASHED,
-										 AGGSPLIT_SIMPLE,
-										 parse->groupClause,
-										 (List *) parse->havingQual,
-										 agg_costs,
-										 dNumGroups));
-			}
+			add_path(grouped_rel, (Path *)
+					 create_agg_path(root, grouped_rel,
+									 cheapest_path,
+									 target,
+									 AGG_HASHED,
+									 AGGSPLIT_SIMPLE,
+									 parse->groupClause,
+									 (List *) parse->havingQual,
+									 agg_costs,
+									 dNumGroups));
 		}
 
 		/*
@@ -4128,344 +3921,6 @@ create_grouping_paths(PlannerInfo *root,
 	return grouped_rel;
 }
 
-
-/*
- * For a given input path, consider the possible ways of doing grouping sets on
- * it, by combinations of hashing and sorting.  This can be called multiple
- * times, so it's important that it not scribble on input.  No result is
- * returned, but any generated paths are added to grouped_rel.
- */
-static void
-consider_groupingsets_paths(PlannerInfo *root,
-							RelOptInfo *grouped_rel,
-							Path *path,
-							bool is_sorted,
-							bool can_hash,
-							PathTarget *target,
-							grouping_sets_data *gd,
-							const AggClauseCosts *agg_costs,
-							double dNumGroups)
-{
-	Query	   *parse = root->parse;
-
-	/*
-	 * If we're not being offered sorted input, then only consider plans that
-	 * can be done entirely by hashing.
-	 *
-	 * We can hash everything if it looks like it'll fit in work_mem. But if
-	 * the input is actually sorted despite not being advertised as such, we
-	 * prefer to make use of that in order to use less memory.
-	 *
-	 * If none of the grouping sets are sortable, then ignore the work_mem
-	 * limit and generate a path anyway, since otherwise we'll just fail.
-	 */
-	if (!is_sorted)
-	{
-		List	   *new_rollups = NIL;
-		RollupData *unhashed_rollup = NULL;
-		List	   *sets_data;
-		List	   *empty_sets_data = NIL;
-		List	   *empty_sets = NIL;
-		ListCell   *lc;
-		ListCell   *l_start = list_head(gd->rollups);
-		AggStrategy strat = AGG_HASHED;
-		Size		hashsize;
-		double		exclude_groups = 0.0;
-
-		Assert(can_hash);
-
-		if (pathkeys_contained_in(root->group_pathkeys, path->pathkeys))
-		{
-			unhashed_rollup = lfirst(l_start);
-			exclude_groups = unhashed_rollup->numGroups;
-			l_start = lnext(l_start);
-		}
-
-		hashsize = estimate_hashagg_tablesize(path,
-											  agg_costs,
-											  dNumGroups - exclude_groups);
-
-		/*
-		 * gd->rollups is empty if we have only unsortable columns to work
-		 * with.  Override work_mem in that case; otherwise, we'll rely on the
-		 * sorted-input case to generate usable mixed paths.
-		 */
-		if (hashsize > work_mem * 1024L && gd->rollups)
-			return;				/* nope, won't fit */
-
-		/*
-		 * We need to burst the existing rollups list into individual grouping
-		 * sets and recompute a groupClause for each set.
-		 */
-		sets_data = list_copy(gd->unsortable_sets);
-
-		for_each_cell(lc, l_start)
-		{
-			RollupData *rollup = lfirst(lc);
-
-			/*
-			 * If we find an unhashable rollup that's not been skipped by the
-			 * "actually sorted" check above, we can't cope; we'd need sorted
-			 * input (with a different sort order) but we can't get that here.
-			 * So bail out; we'll get a valid path from the is_sorted case
-			 * instead.
-			 *
-			 * The mere presence of empty grouping sets doesn't make a rollup
-			 * unhashable (see preprocess_grouping_sets), we handle those
-			 * specially below.
-			 */
-			if (!rollup->hashable)
-				return;
-			else
-				sets_data = list_concat(sets_data, list_copy(rollup->gsets_data));
-		}
-		foreach(lc, sets_data)
-		{
-			GroupingSetData *gs = lfirst(lc);
-			List	   *gset = gs->set;
-			RollupData *rollup;
-
-			if (gset == NIL)
-			{
-				/* Empty grouping sets can't be hashed. */
-				empty_sets_data = lappend(empty_sets_data, gs);
-				empty_sets = lappend(empty_sets, NIL);
-			}
-			else
-			{
-				rollup = makeNode(RollupData);
-
-				rollup->groupClause = preprocess_groupclause(root, gset);
-				rollup->gsets_data = list_make1(gs);
-				rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
-														 rollup->gsets_data,
-												   gd->tleref_to_colnum_map);
-				rollup->numGroups = gs->numGroups;
-				rollup->hashable = true;
-				rollup->is_hashed = true;
-				new_rollups = lappend(new_rollups, rollup);
-			}
-		}
-
-		/*
-		 * If we didn't find anything nonempty to hash, then bail.  We'll
-		 * generate a path from the is_sorted case.
-		 */
-		if (new_rollups == NIL)
-			return;
-
-		/*
-		 * If there were empty grouping sets they should have been in the
-		 * first rollup.
-		 */
-		Assert(!unhashed_rollup || !empty_sets);
-
-		if (unhashed_rollup)
-		{
-			new_rollups = lappend(new_rollups, unhashed_rollup);
-			strat = AGG_MIXED;
-		}
-		else if (empty_sets)
-		{
-			RollupData *rollup = makeNode(RollupData);
-
-			rollup->groupClause = NIL;
-			rollup->gsets_data = empty_sets_data;
-			rollup->gsets = empty_sets;
-			rollup->numGroups = list_length(empty_sets);
-			rollup->hashable = false;
-			rollup->is_hashed = false;
-			new_rollups = lappend(new_rollups, rollup);
-			strat = AGG_MIXED;
-		}
-
-		add_path(grouped_rel, (Path *)
-				 create_groupingsets_path(root,
-										  grouped_rel,
-										  path,
-										  target,
-										  (List *) parse->havingQual,
-										  strat,
-										  new_rollups,
-										  agg_costs,
-										  dNumGroups));
-		return;
-	}
-
-	/*
-	 * If we have sorted input but nothing we can do with it, bail.
-	 */
-	if (list_length(gd->rollups) == 0)
-		return;
-
-	/*
-	 * Given sorted input, we try and make two paths: one sorted and one mixed
-	 * sort/hash. (We need to try both because hashagg might be disabled, or
-	 * some columns might not be sortable.)
-	 *
-	 * can_hash is passed in as false if some obstacle elsewhere (such as
-	 * ordered aggs) means that we shouldn't consider hashing at all.
-	 */
-	if (can_hash && gd->any_hashable)
-	{
-		List	   *rollups = NIL;
-		List	   *hash_sets = list_copy(gd->unsortable_sets);
-		double		availspace = (work_mem * 1024.0);
-		ListCell   *lc;
-
-		/*
-		 * Account first for space needed for groups we can't sort at all.
-		 */
-		availspace -= (double) estimate_hashagg_tablesize(path,
-														  agg_costs,
-														  gd->dNumHashGroups);
-
-		if (availspace > 0 && list_length(gd->rollups) > 1)
-		{
-			double		scale;
-			int			num_rollups = list_length(gd->rollups);
-			int			k_capacity;
-			int		   *k_weights = palloc(num_rollups * sizeof(int));
-			Bitmapset  *hash_items = NULL;
-			int			i;
-
-			/*
-			 * We treat this as a knapsack problem: the knapsack capacity
-			 * represents work_mem, the item weights are the estimated memory
-			 * usage of the hashtables needed to implement a single rollup, and
-			 * we really ought to use the cost saving as the item value;
-			 * however, currently the costs assigned to sort nodes don't
-			 * reflect the comparison costs well, and so we treat all items as
-			 * of equal value (each rollup we hash instead saves us one sort).
-			 *
-			 * To use the discrete knapsack, we need to scale the values to a
-			 * reasonably small bounded range.  We choose to allow a 5% error
-			 * margin; we have no more than 4096 rollups in the worst possible
-			 * case, which with a 5% error margin will require a bit over 42MB
-			 * of workspace. (Anyone wanting to plan queries that complex had
-			 * better have the memory for it.  In more reasonable cases, with
-			 * no more than a couple of dozen rollups, the memory usage will
-			 * be negligible.)
-			 *
-			 * k_capacity is naturally bounded, but we clamp the values for
-			 * scale and weight (below) to avoid overflows or underflows (or
-			 * uselessly trying to use a scale factor less than 1 byte).
-			 */
-			scale = Max(availspace / (20.0 * num_rollups), 1.0);
-			k_capacity = (int) floor(availspace / scale);
-
-			/*
-			 * We leave the first rollup out of consideration since it's the
-			 * one that matches the input sort order.  We assign indexes "i"
-			 * to only those entries considered for hashing; the second loop,
-			 * below, must use the same condition.
-			 */
-			i = 0;
-			for_each_cell(lc, lnext(list_head(gd->rollups)))
-			{
-				RollupData *rollup = lfirst(lc);
-
-				if (rollup->hashable)
-				{
-					double		sz = estimate_hashagg_tablesize(path,
-																agg_costs,
-														  rollup->numGroups);
-
-					/*
-					 * If sz is enormous, but work_mem (and hence scale) is
-					 * small, avoid integer overflow here.
-					 */
-					k_weights[i] = (int) Min(floor(sz / scale),
-											 k_capacity + 1.0);
-					++i;
-				}
-			}
-
-			/*
-			 * Apply knapsack algorithm; compute the set of items which
-			 * maximizes the value stored (in this case the number of sorts
-			 * saved) while keeping the total size (approximately) within
-			 * capacity.
-			 */
-			if (i > 0)
-				hash_items = DiscreteKnapsack(k_capacity, i, k_weights, NULL);
-
-			if (!bms_is_empty(hash_items))
-			{
-				rollups = list_make1(linitial(gd->rollups));
-
-				i = 0;
-				for_each_cell(lc, lnext(list_head(gd->rollups)))
-				{
-					RollupData *rollup = lfirst(lc);
-
-					if (rollup->hashable)
-					{
-						if (bms_is_member(i, hash_items))
-							hash_sets = list_concat(hash_sets,
-											  list_copy(rollup->gsets_data));
-						else
-							rollups = lappend(rollups, rollup);
-						++i;
-					}
-					else
-						rollups = lappend(rollups, rollup);
-				}
-			}
-		}
-
-		if (!rollups && hash_sets)
-			rollups = list_copy(gd->rollups);
-
-		foreach(lc, hash_sets)
-		{
-			GroupingSetData *gs = lfirst(lc);
-			RollupData *rollup = makeNode(RollupData);
-
-			Assert(gs->set != NIL);
-
-			rollup->groupClause = preprocess_groupclause(root, gs->set);
-			rollup->gsets_data = list_make1(gs);
-			rollup->gsets = remap_to_groupclause_idx(rollup->groupClause,
-													 rollup->gsets_data,
-												   gd->tleref_to_colnum_map);
-			rollup->numGroups = gs->numGroups;
-			rollup->hashable = true;
-			rollup->is_hashed = true;
-			rollups = lcons(rollup, rollups);
-		}
-
-		if (rollups)
-		{
-			add_path(grouped_rel, (Path *)
-					 create_groupingsets_path(root,
-											  grouped_rel,
-											  path,
-											  target,
-											  (List *) parse->havingQual,
-											  AGG_MIXED,
-											  rollups,
-											  agg_costs,
-											  dNumGroups));
-		}
-	}
-
-	/*
-	 * Now try the simple sorted case.
-	 */
-	if (!gd->unsortable_sets)
-		add_path(grouped_rel, (Path *)
-				 create_groupingsets_path(root,
-										  grouped_rel,
-										  path,
-										  target,
-										  (List *) parse->havingQual,
-										  AGG_SORTED,
-										  gd->rollups,
-										  agg_costs,
-										  dNumGroups));
-}
-
 /*
  * create_window_paths
  *
diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c
index 5930747..910e969 100644
--- a/src/backend/optimizer/plan/setrefs.c
+++ b/src/backend/optimizer/plan/setrefs.c
@@ -1348,6 +1348,11 @@ fix_expr_common(PlannerInfo *root, Node *node)
 		record_plan_function_dependency(root,
 										((FuncExpr *) node)->funcid);
 	}
+	else if (IsA(node, SubscriptingRef))
+	{
+		record_plan_function_dependency(root,
+										((SubscriptingRef *) node)->refevalfunc);
+	}
 	else if (IsA(node, OpExpr))
 	{
 		set_opfuncid((OpExpr *) node);
diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c
index a578867..268f89d 100644
--- a/src/backend/optimizer/util/clauses.c
+++ b/src/backend/optimizer/util/clauses.c
@@ -354,8 +354,8 @@ make_and_qual(Node *qual1, Node *qual2)
 }
 
 /*
- * The planner frequently prefers to represent qualification expressions
- * as lists of boolean expressions with implicit AND semantics.
+ * Sometimes (such as in the input of ExecQual), we use lists of expression
+ * nodes with implicit AND semantics.
  *
  * These functions convert between an AND-semantics expression list and the
  * ordinary representation of a boolean expression.
@@ -1265,12 +1265,10 @@ contain_nonstrict_functions_walker(Node *node, void *context)
 		/* a window function could return non-null with null input */
 		return true;
 	}
-	if (IsA(node, ArrayRef))
+	if (IsA(node, SubscriptingRef))
 	{
 		/* array assignment is nonstrict, but subscripting is strict */
-		if (((ArrayRef *) node)->refassgnexpr != NULL)
-			return true;
-		/* else fall through to check args */
+		return true;
 	}
 	if (IsA(node, DistinctExpr))
 	{
@@ -1446,7 +1444,6 @@ contain_leaked_vars_walker(Node *node, void *context)
 		case T_Var:
 		case T_Const:
 		case T_Param:
-		case T_ArrayRef:
 		case T_ArrayExpr:
 		case T_FieldSelect:
 		case T_FieldStore:
@@ -1476,6 +1473,7 @@ contain_leaked_vars_walker(Node *node, void *context)
 		case T_ScalarArrayOpExpr:
 		case T_CoerceViaIO:
 		case T_ArrayCoerceExpr:
+		case T_SubscriptingRef:
 
 			/*
 			 * If node contains a leaky function call, and there's any Var
@@ -3521,7 +3519,7 @@ eval_const_expressions_mutator(Node *node,
 	 * For any node type not handled above, we recurse using
 	 * expression_tree_mutator, which will copy the node unchanged but try to
 	 * simplify its arguments (if any) using this routine. For example: we
-	 * cannot eliminate an ArrayRef node, but we might be able to simplify
+	 * cannot eliminate a SubscriptingRef node, but we might be able to simplify
 	 * constant expressions in its subscripts.
 	 */
 	return expression_tree_mutator(node, eval_const_expressions_mutator,
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 999ebce..fca96eb 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2697,9 +2697,10 @@ create_agg_path(PlannerInfo *root,
  * 'subpath' is the path representing the source of data
  * 'target' is the PathTarget to be computed
  * 'having_qual' is the HAVING quals if any
- * 'rollups' is a list of RollupData nodes
+ * 'rollup_lists' is a list of grouping sets
+ * 'rollup_groupclauses' is a list of grouping clauses for grouping sets
  * 'agg_costs' contains cost info about the aggregate functions to be computed
- * 'numGroups' is the estimated total number of groups
+ * 'numGroups' is the estimated number of groups
  */
 GroupingSetsPath *
 create_groupingsets_path(PlannerInfo *root,
@@ -2707,15 +2708,13 @@ create_groupingsets_path(PlannerInfo *root,
 						 Path *subpath,
 						 PathTarget *target,
 						 List *having_qual,
-						 AggStrategy aggstrategy,
-						 List *rollups,
+						 List *rollup_lists,
+						 List *rollup_groupclauses,
 						 const AggClauseCosts *agg_costs,
 						 double numGroups)
 {
 	GroupingSetsPath *pathnode = makeNode(GroupingSetsPath);
-	ListCell   *lc;
-	bool		is_first = true;
-	bool		is_first_sort = true;
+	int			numGroupCols;
 
 	/* The topmost generated Plan node will be an Agg */
 	pathnode->path.pathtype = T_Agg;
@@ -2729,109 +2728,74 @@ create_groupingsets_path(PlannerInfo *root,
 	pathnode->subpath = subpath;
 
 	/*
-	 * Simplify callers by downgrading AGG_SORTED to AGG_PLAIN, and AGG_MIXED
-	 * to AGG_HASHED, here if possible.
-	 */
-	if (aggstrategy == AGG_SORTED &&
-		list_length(rollups) == 1 &&
-		((RollupData *) linitial(rollups))->groupClause == NIL)
-		aggstrategy = AGG_PLAIN;
-
-	if (aggstrategy == AGG_MIXED &&
-		list_length(rollups) == 1)
-		aggstrategy = AGG_HASHED;
-
-	/*
 	 * Output will be in sorted order by group_pathkeys if, and only if, there
 	 * is a single rollup operation on a non-empty list of grouping
 	 * expressions.
 	 */
-	if (aggstrategy == AGG_SORTED && list_length(rollups) == 1)
+	if (list_length(rollup_groupclauses) == 1 &&
+		((List *) linitial(rollup_groupclauses)) != NIL)
 		pathnode->path.pathkeys = root->group_pathkeys;
 	else
 		pathnode->path.pathkeys = NIL;
 
-	pathnode->aggstrategy = aggstrategy;
-	pathnode->rollups = rollups;
+	pathnode->rollup_groupclauses = rollup_groupclauses;
+	pathnode->rollup_lists = rollup_lists;
 	pathnode->qual = having_qual;
 
-	Assert(rollups != NIL);
-	Assert(aggstrategy != AGG_PLAIN || list_length(rollups) == 1);
-	Assert(aggstrategy != AGG_MIXED || list_length(rollups) > 1);
+	Assert(rollup_lists != NIL);
+	Assert(list_length(rollup_lists) == list_length(rollup_groupclauses));
+
+	/* Account for cost of the topmost Agg node */
+	numGroupCols = list_length((List *) linitial((List *) llast(rollup_lists)));
+
+	cost_agg(&pathnode->path, root,
+			 (numGroupCols > 0) ? AGG_SORTED : AGG_PLAIN,
+			 agg_costs,
+			 numGroupCols,
+			 numGroups,
+			 subpath->startup_cost,
+			 subpath->total_cost,
+			 subpath->rows);
 
-	foreach(lc, rollups)
+	/*
+	 * Add in the costs and output rows of the additional sorting/aggregation
+	 * steps, if any.  Only total costs count, since the extra sorts aren't
+	 * run on startup.
+	 */
+	if (list_length(rollup_lists) > 1)
 	{
-		RollupData *rollup = lfirst(lc);
-		List	   *gsets = rollup->gsets;
-		int			numGroupCols = list_length(linitial(gsets));
+		ListCell   *lc;
 
-		/*
-		 * In AGG_SORTED or AGG_PLAIN mode, the first rollup takes the
-		 * (already-sorted) input, and following ones do their own sort.
-		 *
-		 * In AGG_HASHED mode, there is one rollup for each grouping set.
-		 *
-		 * In AGG_MIXED mode, the first rollups are hashed, the first
-		 * non-hashed one takes the (already-sorted) input, and following ones
-		 * do their own sort.
-		 */
-		if (is_first)
-		{
-			cost_agg(&pathnode->path, root,
-					 aggstrategy,
-					 agg_costs,
-					 numGroupCols,
-					 rollup->numGroups,
-					 subpath->startup_cost,
-					 subpath->total_cost,
-					 subpath->rows);
-			is_first = false;
-			if (!rollup->is_hashed)
-				is_first_sort = false;
-		}
-		else
+		foreach(lc, rollup_lists)
 		{
+			List	   *gsets = (List *) lfirst(lc);
 			Path		sort_path;		/* dummy for result of cost_sort */
 			Path		agg_path;		/* dummy for result of cost_agg */
 
-			if (rollup->is_hashed || is_first_sort)
-			{
-				/*
-				 * Account for cost of aggregation, but don't charge input
-				 * cost again
-				 */
-				cost_agg(&agg_path, root,
-						 rollup->is_hashed ? AGG_HASHED : AGG_SORTED,
-						 agg_costs,
-						 numGroupCols,
-						 rollup->numGroups,
-						 0.0, 0.0,
-						 subpath->rows);
-				if (!rollup->is_hashed)
-					is_first_sort = false;
-			}
-			else
-			{
-				/* Account for cost of sort, but don't charge input cost again */
-				cost_sort(&sort_path, root, NIL,
-						  0.0,
-						  subpath->rows,
-						  subpath->pathtarget->width,
-						  0.0,
-						  work_mem,
-						  -1.0);
-
-				/* Account for cost of aggregation */
-
-				cost_agg(&agg_path, root,
-						 AGG_SORTED,
-						 agg_costs,
-						 numGroupCols,
-						 rollup->numGroups,
-						 sort_path.startup_cost,
-						 sort_path.total_cost,
-						 sort_path.rows);
-			}
+			/* We must iterate over all but the last rollup_lists element */
+			if (lnext(lc) == NULL)
+				break;
+
+			/* Account for cost of sort, but don't charge input cost again */
+			cost_sort(&sort_path, root, NIL,
+					  0.0,
+					  subpath->rows,
+					  subpath->pathtarget->width,
+					  0.0,
+					  work_mem,
+					  -1.0);
+
+			/* Account for cost of aggregation */
+			numGroupCols = list_length((List *) linitial(gsets));
+
+			cost_agg(&agg_path, root,
+					 AGG_SORTED,
+					 agg_costs,
+					 numGroupCols,
+					 numGroups, /* XXX surely not right for all steps? */
+					 sort_path.startup_cost,
+					 sort_path.total_cost,
+					 sort_path.rows);
 
 			pathnode->path.total_cost += agg_path.total_cost;
 			pathnode->path.rows += agg_path.rows;
diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c
index 25699fb..1e4b2bd 100644
--- a/src/backend/parser/analyze.c
+++ b/src/backend/parser/analyze.c
@@ -842,8 +842,16 @@ transformInsertStmt(ParseState *pstate, InsertStmt *stmt)
 
 	/* Process ON CONFLICT, if any. */
 	if (stmt->onConflictClause)
+	{
+		/* Bail out if target relation is partitioned table */
+		if (pstate->p_target_rangetblentry->relkind == RELKIND_PARTITIONED_TABLE)
+			ereport(ERROR,
+					(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+					 errmsg("ON CONFLICT clause is not supported with partitioned tables")));
+
 		qry->onConflict = transformOnConflictClause(pstate,
 													stmt->onConflictClause);
+	}
 
 	/*
 	 * If we have a RETURNING clause, we need to add the target relation to
@@ -960,13 +968,10 @@ transformInsertRow(ParseState *pstate, List *exprlist,
 
 					expr = (Expr *) linitial(fstore->newvals);
 				}
-				else if (IsA(expr, ArrayRef))
+				else if (IsA(expr, SubscriptingRef) && IsAssignment(expr))
 				{
-					ArrayRef   *aref = (ArrayRef *) expr;
-
-					if (aref->refassgnexpr == NULL)
-						break;
-					expr = aref->refassgnexpr;
+					SubscriptingRef   *sbsref = (SubscriptingRef *) expr;
+					expr = sbsref->refassgnexpr;
 				}
 				else
 					break;
diff --git a/src/backend/parser/check_keywords.pl b/src/backend/parser/check_keywords.pl
index 84fef1d..45862ce 100644
--- a/src/backend/parser/check_keywords.pl
+++ b/src/backend/parser/check_keywords.pl
@@ -14,7 +14,7 @@ my $kwlist_filename = $ARGV[1];
 
 my $errors = 0;
 
-sub error
+sub error(@)
 {
 	print STDERR @_;
 	$errors = 1;
@@ -29,18 +29,18 @@ $keyword_categories{'col_name_keyword'}       = 'COL_NAME_KEYWORD';
 $keyword_categories{'type_func_name_keyword'} = 'TYPE_FUNC_NAME_KEYWORD';
 $keyword_categories{'reserved_keyword'}       = 'RESERVED_KEYWORD';
 
-open(my $gram, '<', $gram_filename) || die("Could not open : $gram_filename");
+open(GRAM, $gram_filename) || die("Could not open : $gram_filename");
 
-my $kcat;
+my ($S, $s, $k, $n, $kcat);
 my $comment;
 my @arr;
 my %keywords;
 
-line: while (my $S = <$gram>)
+line: while (<GRAM>)
 {
-	chomp $S;    # strip record separator
+	chomp;    # strip record separator
 
-	my $s;
+	$S = $_;
 
 	# Make sure any braces are split
 	$s = '{', $S =~ s/$s/ { /g;
@@ -54,7 +54,7 @@ line: while (my $S = <$gram>)
 	{
 
 		# Is this the beginning of a keyword list?
-		foreach my $k (keys %keyword_categories)
+		foreach $k (keys %keyword_categories)
 		{
 			if ($S =~ m/^($k):/)
 			{
@@ -66,7 +66,7 @@ line: while (my $S = <$gram>)
 	}
 
 	# Now split the line into individual fields
-	my $n = (@arr = split(' ', $S));
+	$n = (@arr = split(' ', $S));
 
 	# Ok, we're in a keyword list. Go through each field in turn
 	for (my $fieldIndexer = 0; $fieldIndexer < $n; $fieldIndexer++)
@@ -109,15 +109,15 @@ line: while (my $S = <$gram>)
 		push @{ $keywords{$kcat} }, $arr[$fieldIndexer];
 	}
 }
-close $gram;
+close GRAM;
 
 # Check that each keyword list is in alphabetical order (just for neatnik-ism)
-my ($prevkword, $bare_kword);
-foreach my $kcat (keys %keyword_categories)
+my ($prevkword, $kword, $bare_kword);
+foreach $kcat (keys %keyword_categories)
 {
 	$prevkword = '';
 
-	foreach my $kword (@{ $keywords{$kcat} })
+	foreach $kword (@{ $keywords{$kcat} })
 	{
 
 		# Some keyword have a _P suffix. Remove it for the comparison.
@@ -149,12 +149,12 @@ while (my ($kcat, $kcat_id) = each(%keyword_categories))
 
 # Now read in kwlist.h
 
-open(my $kwlist, '<', $kwlist_filename) || die("Could not open : $kwlist_filename");
+open(KWLIST, $kwlist_filename) || die("Could not open : $kwlist_filename");
 
 my $prevkwstring = '';
 my $bare_kwname;
 my %kwhash;
-kwlist_line: while (<$kwlist>)
+kwlist_line: while (<KWLIST>)
 {
 	my ($line) = $_;
 
@@ -219,7 +219,7 @@ kwlist_line: while (<$kwlist>)
 		}
 	}
 }
-close $kwlist;
+close KWLIST;
 
 # Check that we've paired up all keywords from gram.y with lines in kwlist.h
 while (my ($kwcat, $kwcat_id) = each(%keyword_categories))
diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c
index d3ed073..926031e 100644
--- a/src/backend/parser/parse_expr.c
+++ b/src/backend/parser/parse_expr.c
@@ -472,13 +472,13 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
 
 			/* process subscripts before this field selection */
 			if (subscripts)
-				result = (Node *) transformArraySubscripts(pstate,
-														   result,
-														   exprType(result),
-														   InvalidOid,
-														   exprTypmod(result),
-														   subscripts,
-														   NULL);
+				result = (Node *) transformContainerSubscripts(pstate,
+															   result,
+															   exprType(result),
+															   InvalidOid,
+															   exprTypmod(result),
+															   subscripts,
+															   NULL);
 			subscripts = NIL;
 
 			newresult = ParseFuncOrColumn(pstate,
@@ -493,13 +493,13 @@ transformIndirection(ParseState *pstate, Node *basenode, List *indirection)
 	}
 	/* process trailing subscripts, if any */
 	if (subscripts)
-		result = (Node *) transformArraySubscripts(pstate,
-												   result,
-												   exprType(result),
-												   InvalidOid,
-												   exprTypmod(result),
-												   subscripts,
-												   NULL);
+		result = (Node *) transformContainerSubscripts(pstate,
+													   result,
+													   exprType(result),
+													   InvalidOid,
+													   exprTypmod(result),
+													   subscripts,
+													   NULL);
 
 	return result;
 }
diff --git a/src/backend/parser/parse_node.c b/src/backend/parser/parse_node.c
index 30cc7da..75c17ba 100644
--- a/src/backend/parser/parse_node.c
+++ b/src/backend/parser/parse_node.c
@@ -202,18 +202,22 @@ make_var(ParseState *pstate, RangeTblEntry *rte, int attrno, int location)
 
 /*
  * transformArrayType()
- *		Identify the types involved in a subscripting operation
+ *		Identify the types involved in a subscripting operation for arrays
  *
  * On entry, arrayType/arrayTypmod identify the type of the input value
  * to be subscripted (which could be a domain type).  These are modified
  * if necessary to identify the actual array type and typmod, and the
  * array's element type is returned.  An error is thrown if the input isn't
  * an array type.
+ *
+ * NOTE: This type-specific code is not yet separated into a type-specific
+ * subscripting procedure, but that does not affect the overall logic,
+ * since InvalidOid is returned (rather than an error) for other types.
+ * An error appears only if a subscripting procedure is not defined.
  */
 Oid
-transformArrayType(Oid *arrayType, int32 *arrayTypmod)
+transformArrayType(Oid *containerType, int32 *containerTypmod)
 {
-	Oid			origArrayType = *arrayType;
 	Oid			elementType;
 	HeapTuple	type_tuple_array;
 	Form_pg_type type_struct_array;
@@ -225,7 +229,7 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
 	 * itself.  (Note that we provide no method whereby the creator of a
 	 * domain over an array type could hide its ability to be subscripted.)
 	 */
-	*arrayType = getBaseTypeAndTypmod(*arrayType, arrayTypmod);
+	*containerType = getBaseTypeAndTypmod(*containerType, containerTypmod);
 
 	/*
 	 * We treat int2vector and oidvector as though they were domains over
@@ -234,25 +238,20 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
 	 * xxxvector type; so we want the result of a slice operation to be
 	 * considered to be of the more general type.
 	 */
-	if (*arrayType == INT2VECTOROID)
-		*arrayType = INT2ARRAYOID;
-	else if (*arrayType == OIDVECTOROID)
-		*arrayType = OIDARRAYOID;
+	if (*containerType == INT2VECTOROID)
+		*containerType = INT2ARRAYOID;
+	else if (*containerType == OIDVECTOROID)
+		*containerType = OIDARRAYOID;
 
 	/* Get the type tuple for the array */
-	type_tuple_array = SearchSysCache1(TYPEOID, ObjectIdGetDatum(*arrayType));
+	type_tuple_array = SearchSysCache1(TYPEOID, ObjectIdGetDatum(*containerType));
 	if (!HeapTupleIsValid(type_tuple_array))
-		elog(ERROR, "cache lookup failed for type %u", *arrayType);
+		elog(ERROR, "cache lookup failed for type %u", *containerType);
 	type_struct_array = (Form_pg_type) GETSTRUCT(type_tuple_array);
 
 	/* needn't check typisdefined since this will fail anyway */
 
 	elementType = type_struct_array->typelem;
-	if (elementType == InvalidOid)
-		ereport(ERROR,
-				(errcode(ERRCODE_DATATYPE_MISMATCH),
-				 errmsg("cannot subscript type %s because it is not an array",
-						format_type_be(origArrayType))));
 
 	ReleaseSysCache(type_tuple_array);
 
@@ -260,61 +259,79 @@ transformArrayType(Oid *arrayType, int32 *arrayTypmod)
 }
 
 /*
- * transformArraySubscripts()
- *		Transform array subscripting.  This is used for both
- *		array fetch and array assignment.
+ * transformContainerSubscripts()
+ *		Transform container subscripting.  This is used for both
+ *		container fetch and container assignment.
  *
- * In an array fetch, we are given a source array value and we produce an
- * expression that represents the result of extracting a single array element
- * or an array slice.
+ * In a container fetch, we are given a source container value and we produce
+ * an expression that represents the result of extracting a single container
+ * element or a container slice.
  *
- * In an array assignment, we are given a destination array value plus a
- * source value that is to be assigned to a single element or a slice of
- * that array.  We produce an expression that represents the new array value
- * with the source data inserted into the right part of the array.
+ * In a container assignment, we are given a destination container value plus a
+ * source value that is to be assigned to a single element or a slice of that
+ * container.  We produce an expression that represents the new container value
+ * with the source data inserted into the right part of the container.
  *
- * For both cases, if the source array is of a domain-over-array type,
- * the result is of the base array type or its element type; essentially,
- * we must fold a domain to its base type before applying subscripting.
- * (Note that int2vector and oidvector are treated as domains here.)
+ * For both cases, this function contains only general subscripting logic while
+ * type-specific logic (e.g. type verification and coercion) is placed in a
+ * separate procedure indicated by typsbsparse.  There is only one exception
+ * for now, concerning domain-over-container: if the source container is of a
+ * domain-over-container type, the result is of the base container type or its
+ * element type; essentially, we must fold a domain to its base type before
+ * applying subscripting.  (Note that int2vector and oidvector are treated as
+ * domains here.)  If domain verification fails, we assume that the element
+ * type must be the same as the container type (e.g. in the case of jsonb).
+ * An error will appear if the current container type doesn't have a
+ * subscripting procedure.
  *
- * pstate		Parse state
- * arrayBase	Already-transformed expression for the array as a whole
- * arrayType	OID of array's datatype (should match type of arrayBase,
- *				or be the base type of arrayBase's domain type)
- * elementType	OID of array's element type (fetch with transformArrayType,
- *				or pass InvalidOid to do it here)
- * arrayTypMod	typmod for the array (which is also typmod for the elements)
- * indirection	Untransformed list of subscripts (must not be NIL)
- * assignFrom	NULL for array fetch, else transformed expression for source.
+ * pstate			Parse state
+ * containerBase	Already-transformed expression for the container as a whole
+ * containerType	OID of container's datatype (should match type of containerBase,
+ *					or be the base type of containerBase's domain type)
+ * elementType		OID of container's element type (fetch with transformArrayType,
+ *					or pass InvalidOid to do it here)
+ * containerTypMod	typmod for the container (which is also typmod for the elements)
+ * indirection		Untransformed list of subscripts (must not be NIL)
+ * assignFrom		NULL for container fetch, else transformed expression for source.
  */
-ArrayRef *
-transformArraySubscripts(ParseState *pstate,
-						 Node *arrayBase,
-						 Oid arrayType,
-						 Oid elementType,
-						 int32 arrayTypMod,
-						 List *indirection,
-						 Node *assignFrom)
+
+Node *
+transformContainerSubscripts(ParseState *pstate,
+							 Node *containerBase,
+							 Oid containerType,
+							 Oid elementType,
+							 int32 containerTypMod,
+							 List *indirection,
+							 Node *assignFrom)
 {
-	bool		isSlice = false;
-	List	   *upperIndexpr = NIL;
-	List	   *lowerIndexpr = NIL;
-	ListCell   *idx;
-	ArrayRef   *aref;
+	bool				isSlice = false;
+	List			   *upperIndexpr = NIL;
+	List			   *lowerIndexpr = NIL;
+	ListCell		   *idx;
+	SubscriptingRef	   *sbsref;
+	RegProcedure		typsbsparse = get_typsbsparse(containerType);
+
+	if (!OidIsValid(typsbsparse))
+		ereport(ERROR,
+				(errcode(ERRCODE_DATATYPE_MISMATCH),
+				 errmsg("cannot subscript type %s because it does not support subscripting",
+						format_type_be(containerType))));
 
 	/*
 	 * Caller may or may not have bothered to determine elementType.  Note
-	 * that if the caller did do so, arrayType/arrayTypMod must be as modified
+	 * that if the caller did do so, containerType/containerTypMod must be as modified
 	 * by transformArrayType, ie, smash domain to base type.
 	 */
 	if (!OidIsValid(elementType))
-		elementType = transformArrayType(&arrayType, &arrayTypMod);
+		elementType = transformArrayType(&containerType, &containerTypMod);
+
+	if (!OidIsValid(elementType))
+		elementType = containerType;
 
 	/*
-	 * A list containing only simple subscripts refers to a single array
+	 * A list containing only simple subscripts refers to a single container
 	 * element.  If any of the items are slice specifiers (lower:upper), then
-	 * the subscript expression means an array slice operation.  In this case,
+	 * the subscript expression means a container slice operation.  In this case,
 	 * we convert any non-slice items to slices by treating the single
 	 * subscript as the upper bound and supplying an assumed lower bound of 1.
 	 * We have to prescan the list to see if there are any slice items.
@@ -343,107 +360,37 @@ transformArraySubscripts(ParseState *pstate,
 			if (ai->lidx)
 			{
 				subexpr = transformExpr(pstate, ai->lidx, pstate->p_expr_kind);
-				/* If it's not int4 already, try to coerce */
-				subexpr = coerce_to_target_type(pstate,
-												subexpr, exprType(subexpr),
-												INT4OID, -1,
-												COERCION_ASSIGNMENT,
-												COERCE_IMPLICIT_CAST,
-												-1);
-				if (subexpr == NULL)
-					ereport(ERROR,
-							(errcode(ERRCODE_DATATYPE_MISMATCH),
-							 errmsg("array subscript must have type integer"),
-						parser_errposition(pstate, exprLocation(ai->lidx))));
-			}
-			else if (!ai->is_slice)
-			{
-				/* Make a constant 1 */
-				subexpr = (Node *) makeConst(INT4OID,
-											 -1,
-											 InvalidOid,
-											 sizeof(int32),
-											 Int32GetDatum(1),
-											 false,
-											 true);		/* pass by value */
 			}
 			else
 			{
 				/* Slice with omitted lower bound, put NULL into the list */
 				subexpr = NULL;
 			}
-			lowerIndexpr = lappend(lowerIndexpr, subexpr);
-		}
-		else
-			Assert(ai->lidx == NULL && !ai->is_slice);
-
-		if (ai->uidx)
-		{
-			subexpr = transformExpr(pstate, ai->uidx, pstate->p_expr_kind);
-			/* If it's not int4 already, try to coerce */
-			subexpr = coerce_to_target_type(pstate,
-											subexpr, exprType(subexpr),
-											INT4OID, -1,
-											COERCION_ASSIGNMENT,
-											COERCE_IMPLICIT_CAST,
-											-1);
-			if (subexpr == NULL)
-				ereport(ERROR,
-						(errcode(ERRCODE_DATATYPE_MISMATCH),
-						 errmsg("array subscript must have type integer"),
-						 parser_errposition(pstate, exprLocation(ai->uidx))));
-		}
-		else
-		{
-			/* Slice with omitted upper bound, put NULL into the list */
-			Assert(isSlice && ai->is_slice);
-			subexpr = NULL;
+			lowerIndexpr = lappend(lowerIndexpr, list_make2(subexpr, ai));
 		}
+		subexpr = transformExpr(pstate, ai->uidx, pstate->p_expr_kind);
 		upperIndexpr = lappend(upperIndexpr, subexpr);
 	}
 
 	/*
-	 * If doing an array store, coerce the source value to the right type.
-	 * (This should agree with the coercion done by transformAssignedExpr.)
+	 * Ready to build the SubscriptingRef node.
 	 */
+	sbsref = (SubscriptingRef *) makeNode(SubscriptingRef);
 	if (assignFrom != NULL)
-	{
-		Oid			typesource = exprType(assignFrom);
-		Oid			typeneeded = isSlice ? arrayType : elementType;
-		Node	   *newFrom;
-
-		newFrom = coerce_to_target_type(pstate,
-										assignFrom, typesource,
-										typeneeded, arrayTypMod,
-										COERCION_ASSIGNMENT,
-										COERCE_IMPLICIT_CAST,
-										-1);
-		if (newFrom == NULL)
-			ereport(ERROR,
-					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("array assignment requires type %s"
-							" but expression is of type %s",
-							format_type_be(typeneeded),
-							format_type_be(typesource)),
-				 errhint("You will need to rewrite or cast the expression."),
-					 parser_errposition(pstate, exprLocation(assignFrom))));
-		assignFrom = newFrom;
-	}
+		sbsref->refassgnexpr = (Expr *) assignFrom;
 
-	/*
-	 * Ready to build the ArrayRef node.
-	 */
-	aref = makeNode(ArrayRef);
-	aref->refarraytype = arrayType;
-	aref->refelemtype = elementType;
-	aref->reftypmod = arrayTypMod;
+	sbsref->refcontainertype = containerType;
+	sbsref->refelemtype = elementType;
+	sbsref->reftypmod = containerTypMod;
 	/* refcollid will be set by parse_collate.c */
-	aref->refupperindexpr = upperIndexpr;
-	aref->reflowerindexpr = lowerIndexpr;
-	aref->refexpr = (Expr *) arrayBase;
-	aref->refassgnexpr = (Expr *) assignFrom;
-
-	return aref;
+	sbsref->refupperindexpr = upperIndexpr;
+	sbsref->reflowerindexpr = lowerIndexpr;
+	sbsref->refexpr = (Expr *) containerBase;
+
+	return (Node *) OidFunctionCall3(typsbsparse,
+									 BoolGetDatum(assignFrom != NULL),
+									 PointerGetDatum(sbsref),
+									 PointerGetDatum(pstate));
 }
 
 /*
diff --git a/src/backend/parser/parse_target.c b/src/backend/parser/parse_target.c
index 3b84140..6d80912 100644
--- a/src/backend/parser/parse_target.c
+++ b/src/backend/parser/parse_target.c
@@ -820,41 +820,24 @@ transformAssignmentIndirection(ParseState *pstate,
 
 	/* base case: just coerce RHS to match target type ID */
 
-	result = coerce_to_target_type(pstate,
-								   rhs, exprType(rhs),
-								   targetTypeId, targetTypMod,
-								   COERCION_ASSIGNMENT,
-								   COERCE_IMPLICIT_CAST,
-								   -1);
+	if (targetTypeId != InvalidOid)
+		result = coerce_to_target_type(pstate,
+									   rhs, exprType(rhs),
+									   targetTypeId, targetTypMod,
+									   COERCION_ASSIGNMENT,
+									   COERCE_IMPLICIT_CAST,
+									   -1);
+	else
+		result = rhs;
+
 	if (result == NULL)
-	{
-		if (targetIsArray)
-			ereport(ERROR,
-					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("array assignment to \"%s\" requires type %s"
-							" but expression is of type %s",
-							targetName,
-							format_type_be(targetTypeId),
-							format_type_be(exprType(rhs))),
-				 errhint("You will need to rewrite or cast the expression."),
-					 parser_errposition(pstate, location)));
-		else
-			ereport(ERROR,
-					(errcode(ERRCODE_DATATYPE_MISMATCH),
-					 errmsg("subfield \"%s\" is of type %s"
-							" but expression is of type %s",
-							targetName,
-							format_type_be(targetTypeId),
-							format_type_be(exprType(rhs))),
-				 errhint("You will need to rewrite or cast the expression."),
-					 parser_errposition(pstate, location)));
-	}
+		result = rhs;
 
 	return result;
 }
 
 /*
- * helper for transformAssignmentIndirection: process array assignment
+ * helper for transformAssignmentIndirection: process container assignment
  */
 static Node *
 transformAssignmentSubscripts(ParseState *pstate,
@@ -870,55 +853,55 @@ transformAssignmentSubscripts(ParseState *pstate,
 							  int location)
 {
 	Node	   *result;
-	Oid			arrayType;
-	int32		arrayTypMod;
+	Oid			containerType;
+	int32		containerTypMod;
 	Oid			elementTypeId;
 	Oid			typeNeeded;
 	Oid			collationNeeded;
 
 	Assert(subscripts != NIL);
 
-	/* Identify the actual array type and element type involved */
-	arrayType = targetTypeId;
-	arrayTypMod = targetTypMod;
-	elementTypeId = transformArrayType(&arrayType, &arrayTypMod);
+	/* Identify the actual container type and element type involved */
+	containerType = targetTypeId;
+	containerTypMod = targetTypMod;
+	elementTypeId = transformArrayType(&containerType, &containerTypMod);
 
 	/* Identify type that RHS must provide */
-	typeNeeded = isSlice ? arrayType : elementTypeId;
+	typeNeeded = isSlice ? containerType : elementTypeId;
 
 	/*
-	 * Array normally has same collation as elements, but there's an
-	 * exception: we might be subscripting a domain over an array type. In
+	 * A container normally has the same collation as its elements, but there's
+	 * an exception: we might be subscripting a domain over a container type. In
 	 * that case use collation of the base type.
 	 */
-	if (arrayType == targetTypeId)
+	if (containerType == targetTypeId)
 		collationNeeded = targetCollation;
 	else
-		collationNeeded = get_typcollation(arrayType);
+		collationNeeded = get_typcollation(containerType);
 
-	/* recurse to create appropriate RHS for array assign */
+	/* recurse to create appropriate RHS for container assign */
 	rhs = transformAssignmentIndirection(pstate,
 										 NULL,
 										 targetName,
 										 true,
 										 typeNeeded,
-										 arrayTypMod,
+										 containerTypMod,
 										 collationNeeded,
 										 next_indirection,
 										 rhs,
 										 location);
 
 	/* process subscripts */
-	result = (Node *) transformArraySubscripts(pstate,
-											   basenode,
-											   arrayType,
-											   elementTypeId,
-											   arrayTypMod,
-											   subscripts,
-											   rhs);
-
-	/* If target was a domain over array, need to coerce up to the domain */
-	if (arrayType != targetTypeId)
+	result = (Node *) transformContainerSubscripts(pstate,
+												   basenode,
+												   containerType,
+												   exprType(rhs),
+												   containerTypMod,
+												   subscripts,
+												   rhs);
+
+	/* If target was a domain over container, need to coerce up to the domain */
+	if (containerType != targetTypeId)
 	{
 		Oid			resulttype = exprType(result);
 
diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c
index 56a8bf2..b704788 100644
--- a/src/backend/postmaster/pgstat.c
+++ b/src/backend/postmaster/pgstat.c
@@ -50,7 +50,6 @@
 #include "postmaster/autovacuum.h"
 #include "postmaster/fork_process.h"
 #include "postmaster/postmaster.h"
-#include "replication/walsender.h"
 #include "storage/backendid.h"
 #include "storage/dsm.h"
 #include "storage/fd.h"
@@ -104,18 +103,6 @@
 
 
 /* ----------
- * Total number of backends including auxiliary
- *
- * We reserve a slot for each possible BackendId, plus one for each
- * possible auxiliary process type.  (This scheme assumes there is not
- * more than one of any auxiliary process type at a time.) MaxBackends
- * includes autovacuum workers and background workers as well.
- * ----------
- */
-#define NumBackendStatSlots (MaxBackends + NUM_AUXPROCTYPES)
-
-
-/* ----------
  * GUC parameters
  * ----------
  */
@@ -174,20 +161,6 @@ typedef struct TabStatusArray
 static TabStatusArray *pgStatTabList = NULL;
 
 /*
- * pgStatTabHash entry
- */
-typedef struct TabStatHashEntry
-{
-	Oid t_id;
-	PgStat_TableStatus* tsa_entry;
-} TabStatHashEntry;
-
-/*
- * Hash table for O(1) t_id -> tsa_entry lookup
- */
-static HTAB *pgStatTabHash = NULL;
-
-/*
  * Backends store per-function info that's waiting to be sent to the collector
  * in this hash table (indexed by function OID).
  */
@@ -239,11 +212,7 @@ typedef struct TwoPhasePgStatRecord
  */
 static MemoryContext pgStatLocalContext = NULL;
 static HTAB *pgStatDBHash = NULL;
-
-/* Status for backends including auxiliary */
 static LocalPgBackendStatus *localBackendStatusTable = NULL;
-
-/* Total number of backends including auxiliary */
 static int	localNumBackends = 0;
 
 /*
@@ -855,14 +824,6 @@ pgstat_report_stat(bool force)
 	}
 
 	/*
-	 * pgStatTabHash is outdated on this point so we have to clean it,
-	 * hash_destroy() will remove hash memory context, allocated in
-	 * make_sure_stat_tab_initialized()
-	 */
-	hash_destroy(pgStatTabHash);
-	pgStatTabHash = NULL;
-
-	/*
 	 * Send partial messages.  Make sure that any pending xact commit/abort
 	 * gets counted, even if there are no table stats to send.
 	 */
@@ -1707,87 +1668,59 @@ pgstat_initstats(Relation rel)
 }
 
 /*
- * Make sure pgStatTabList and pgStatTabHash are initialized.
- */
-static void
-make_sure_stat_tab_initialized()
-{
-	HASHCTL			ctl;
-	MemoryContext	new_ctx;
-
-	if(!pgStatTabList)
-	{
-		/* This is first time procedure is called */
-		pgStatTabList = (TabStatusArray *) MemoryContextAllocZero(TopMemoryContext,
-												sizeof(TabStatusArray));
-	}
-
-	if(pgStatTabHash)
-		return;
-
-	/* Hash table was freed or never existed.  */
-
-	new_ctx = AllocSetContextCreate(
-		TopMemoryContext,
-		"PGStatLookupHashTableContext",
-		ALLOCSET_DEFAULT_SIZES);
-
-	memset(&ctl, 0, sizeof(ctl));
-	ctl.keysize = sizeof(Oid);
-	ctl.entrysize = sizeof(TabStatHashEntry);
-	ctl.hcxt = new_ctx;
-
-	pgStatTabHash = hash_create("pgstat t_id to tsa_entry lookup hash table",
-		TABSTAT_QUANTUM, &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT);
-}
-
-/*
  * get_tabstat_entry - find or create a PgStat_TableStatus entry for rel
  */
 static PgStat_TableStatus *
 get_tabstat_entry(Oid rel_id, bool isshared)
 {
-	TabStatHashEntry* hash_entry;
 	PgStat_TableStatus *entry;
 	TabStatusArray *tsa;
-	bool found;
-
-	make_sure_stat_tab_initialized();
-
-	/*
-	 * Find an entry or create a new one.
-	 */
-	hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_ENTER, &found);
-	if(found)
-		return hash_entry->tsa_entry;
+	TabStatusArray *prev_tsa;
+	int			i;
 
 	/*
-	 * `hash_entry` was just created and now we have to fill it.
-	 * First make sure there is a free space in a last element of pgStatTabList.
+	 * Search the already-used tabstat slots for this relation.
 	 */
-	tsa = pgStatTabList;
-	while(tsa->tsa_used == TABSTAT_QUANTUM)
+	prev_tsa = NULL;
+	for (tsa = pgStatTabList; tsa != NULL; prev_tsa = tsa, tsa = tsa->tsa_next)
 	{
-		if(tsa->tsa_next == NULL)
+		for (i = 0; i < tsa->tsa_used; i++)
 		{
-			tsa->tsa_next = (TabStatusArray *) MemoryContextAllocZero(TopMemoryContext,
-														sizeof(TabStatusArray));
+			entry = &tsa->tsa_entries[i];
+			if (entry->t_id == rel_id)
+				return entry;
 		}
 
-		tsa = tsa->tsa_next;
+		if (tsa->tsa_used < TABSTAT_QUANTUM)
+		{
+			/*
+			 * It must not be present, but we found a free slot instead. Fine,
+			 * let's use this one.  We assume the entry was already zeroed,
+			 * either at creation or after last use.
+			 */
+			entry = &tsa->tsa_entries[tsa->tsa_used++];
+			entry->t_id = rel_id;
+			entry->t_shared = isshared;
+			return entry;
+		}
 	}
 
 	/*
-	 * Add an entry.
+	 * We ran out of tabstat slots, so allocate more.  Be sure they're zeroed.
 	 */
-	entry = &tsa->tsa_entries[tsa->tsa_used++];
-	entry->t_id = rel_id;
-	entry->t_shared = isshared;
+	tsa = (TabStatusArray *) MemoryContextAllocZero(TopMemoryContext,
+													sizeof(TabStatusArray));
+	if (prev_tsa)
+		prev_tsa->tsa_next = tsa;
+	else
+		pgStatTabList = tsa;
 
 	/*
-	 * Add a corresponding entry to pgStatTabHash.
+	 * Use the first entry of the new TabStatusArray.
 	 */
-	hash_entry->tsa_entry = entry;
+	entry = &tsa->tsa_entries[tsa->tsa_used++];
+	entry->t_id = rel_id;
+	entry->t_shared = isshared;
 	return entry;
 }
 
@@ -1799,19 +1732,22 @@ get_tabstat_entry(Oid rel_id, bool isshared)
 PgStat_TableStatus *
 find_tabstat_entry(Oid rel_id)
 {
-	TabStatHashEntry* hash_entry;
-
-	/*
-	 * There are no entries at all.
-	 */
-	if(!pgStatTabHash)
-		return NULL;
+	PgStat_TableStatus *entry;
+	TabStatusArray *tsa;
+	int			i;
 
-	hash_entry = hash_search(pgStatTabHash, &rel_id, HASH_FIND, NULL);
-	if(!hash_entry)
-		return NULL;
+	for (tsa = pgStatTabList; tsa != NULL; tsa = tsa->tsa_next)
+	{
+		for (i = 0; i < tsa->tsa_used; i++)
+		{
+			entry = &tsa->tsa_entries[i];
+			if (entry->t_id == rel_id)
+				return entry;
+		}
+	}
 
-	return hash_entry->tsa_entry;
+	/* Not present */
+	return NULL;
 }
 
 /*
@@ -2569,20 +2505,20 @@ BackendStatusShmemSize(void)
 	Size		size;
 
 	/* BackendStatusArray: */
-	size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
+	size = mul_size(sizeof(PgBackendStatus), MaxBackends);
 	/* BackendAppnameBuffer: */
 	size = add_size(size,
-					mul_size(NAMEDATALEN, NumBackendStatSlots));
+					mul_size(NAMEDATALEN, MaxBackends));
 	/* BackendClientHostnameBuffer: */
 	size = add_size(size,
-					mul_size(NAMEDATALEN, NumBackendStatSlots));
+					mul_size(NAMEDATALEN, MaxBackends));
 	/* BackendActivityBuffer: */
 	size = add_size(size,
-			mul_size(pgstat_track_activity_query_size, NumBackendStatSlots));
+					mul_size(pgstat_track_activity_query_size, MaxBackends));
 #ifdef USE_SSL
 	/* BackendSslStatusBuffer: */
 	size = add_size(size,
-				  mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots));
+					mul_size(sizeof(PgBackendSSLStatus), MaxBackends));
 #endif
 	return size;
 }
@@ -2600,7 +2536,7 @@ CreateSharedBackendStatus(void)
 	char	   *buffer;
 
 	/* Create or attach to the shared array */
-	size = mul_size(sizeof(PgBackendStatus), NumBackendStatSlots);
+	size = mul_size(sizeof(PgBackendStatus), MaxBackends);
 	BackendStatusArray = (PgBackendStatus *)
 		ShmemInitStruct("Backend Status Array", size, &found);
 
@@ -2623,7 +2559,7 @@ CreateSharedBackendStatus(void)
 
 		/* Initialize st_appname pointers. */
 		buffer = BackendAppnameBuffer;
-		for (i = 0; i < NumBackendStatSlots; i++)
+		for (i = 0; i < MaxBackends; i++)
 		{
 			BackendStatusArray[i].st_appname = buffer;
 			buffer += NAMEDATALEN;
@@ -2641,7 +2577,7 @@ CreateSharedBackendStatus(void)
 
 		/* Initialize st_clienthostname pointers. */
 		buffer = BackendClientHostnameBuffer;
-		for (i = 0; i < NumBackendStatSlots; i++)
+		for (i = 0; i < MaxBackends; i++)
 		{
 			BackendStatusArray[i].st_clienthostname = buffer;
 			buffer += NAMEDATALEN;
@@ -2650,7 +2586,7 @@ CreateSharedBackendStatus(void)
 
 	/* Create or attach to the shared activity buffer */
 	BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size,
-										 NumBackendStatSlots);
+										 MaxBackends);
 	BackendActivityBuffer = (char *)
 		ShmemInitStruct("Backend Activity Buffer",
 						BackendActivityBufferSize,
@@ -2662,7 +2598,7 @@ CreateSharedBackendStatus(void)
 
 		/* Initialize st_activity pointers. */
 		buffer = BackendActivityBuffer;
-		for (i = 0; i < NumBackendStatSlots; i++)
+		for (i = 0; i < MaxBackends; i++)
 		{
 			BackendStatusArray[i].st_activity = buffer;
 			buffer += pgstat_track_activity_query_size;
@@ -2671,7 +2607,7 @@ CreateSharedBackendStatus(void)
 
 #ifdef USE_SSL
 	/* Create or attach to the shared SSL status buffer */
-	size = mul_size(sizeof(PgBackendSSLStatus), NumBackendStatSlots);
+	size = mul_size(sizeof(PgBackendSSLStatus), MaxBackends);
 	BackendSslStatusBuffer = (PgBackendSSLStatus *)
 		ShmemInitStruct("Backend SSL Status Buffer", size, &found);
 
@@ -2683,7 +2619,7 @@ CreateSharedBackendStatus(void)
 
 		/* Initialize st_sslstatus pointers. */
 		ptr = BackendSslStatusBuffer;
-		for (i = 0; i < NumBackendStatSlots; i++)
+		for (i = 0; i < MaxBackends; i++)
 		{
 			BackendStatusArray[i].st_sslstatus = ptr;
 			ptr++;
@@ -2697,8 +2633,7 @@ CreateSharedBackendStatus(void)
  * pgstat_initialize() -
  *
  *	Initialize pgstats state, and set up our on-proc-exit hook.
- *	Called from InitPostgres and AuxiliaryProcessMain. For auxiliary process,
- *	MyBackendId is invalid. Otherwise, MyBackendId must be set,
+ *	Called from InitPostgres.  MyBackendId must be set,
  *	but we must not have started any transaction yet (since the
  *	exit hook must run after the last transaction exit).
  *	NOTE: MyDatabaseId isn't set yet; so the shutdown hook has to be careful.
@@ -2708,26 +2643,8 @@ void
 pgstat_initialize(void)
 {
 	/* Initialize MyBEEntry */
-	if (MyBackendId != InvalidBackendId)
-	{
-		Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
-		MyBEEntry = &BackendStatusArray[MyBackendId - 1];
-	}
-	else
-	{
-		/* Must be an auxiliary process */
-		Assert(MyAuxProcType != NotAnAuxProcess);
-
-		/*
-		 * Assign the MyBEEntry for an auxiliary process.  Since it doesn't
-		 * have a BackendId, the slot is statically allocated based on the
-		 * auxiliary process type (MyAuxProcType).  Backends use slots indexed
-		 * in the range from 1 to MaxBackends (inclusive), so we use
-		 * MaxBackends + AuxBackendType + 1 as the index of the slot for an
-		 * auxiliary process.
-		 */
-		MyBEEntry = &BackendStatusArray[MaxBackends + MyAuxProcType];
-	}
+	Assert(MyBackendId >= 1 && MyBackendId <= MaxBackends);
+	MyBEEntry = &BackendStatusArray[MyBackendId - 1];
 
 	/* Set up a process-exit hook to clean up */
 	on_shmem_exit(pgstat_beshutdown_hook, 0);
@@ -2738,16 +2655,15 @@ pgstat_initialize(void)
  *
  *	Initialize this backend's entry in the PgBackendStatus array.
  *	Called from InitPostgres.
- *
- *	Apart from auxiliary processes, MyBackendId, MyDatabaseId,
- *	session userid, and application_name must be set for a
- *	backend (hence, this cannot be combined with pgstat_initialize).
+ *	MyDatabaseId, session userid, and application_name must be set
+ *	(hence, this cannot be combined with pgstat_initialize).
  * ----------
  */
 void
 pgstat_bestart(void)
 {
 	TimestampTz proc_start_timestamp;
+	Oid			userid;
 	SockAddr	clientaddr;
 	volatile PgBackendStatus *beentry;
 
@@ -2762,6 +2678,7 @@ pgstat_bestart(void)
 		proc_start_timestamp = MyProcPort->SessionStartTime;
 	else
 		proc_start_timestamp = GetCurrentTimestamp();
+	userid = GetSessionUserId();
 
 	/*
 	 * We may not have a MyProcPort (eg, if this is the autovacuum process).
@@ -2780,66 +2697,6 @@ pgstat_bestart(void)
 	 * cute.
 	 */
 	beentry = MyBEEntry;
-
-	/* pgstats state must be initialized from pgstat_initialize() */
-	Assert(beentry != NULL);
-
-	if (MyBackendId != InvalidBackendId)
-	{
-		if (IsAutoVacuumLauncherProcess())
-		{
-			/* Autovacuum Launcher */
-			beentry->st_backendType = B_AUTOVAC_LAUNCHER;
-		}
-		else if (IsAutoVacuumWorkerProcess())
-		{
-			/* Autovacuum Worker */
-			beentry->st_backendType = B_AUTOVAC_WORKER;
-		}
-		else if (am_walsender)
-		{
-			/* Wal sender */
-			beentry->st_backendType = B_WAL_SENDER;
-		}
-		else if (IsBackgroundWorker)
-		{
-			/* bgworker */
-			beentry->st_backendType = B_BG_WORKER;
-		}
-		else
-		{
-			/* client-backend */
-			beentry->st_backendType = B_BACKEND;
-		}
-	}
-	else
-	{
-		/* Must be an auxiliary process */
-		Assert(MyAuxProcType != NotAnAuxProcess);
-		switch (MyAuxProcType)
-		{
-			case StartupProcess:
-				beentry->st_backendType = B_STARTUP;
-				break;
-			case BgWriterProcess:
-				beentry->st_backendType = B_BG_WRITER;
-				break;
-			case CheckpointerProcess:
-				beentry->st_backendType = B_CHECKPOINTER;
-				break;
-			case WalWriterProcess:
-				beentry->st_backendType = B_WAL_WRITER;
-				break;
-			case WalReceiverProcess:
-				beentry->st_backendType = B_WAL_RECEIVER;
-				break;
-			default:
-				elog(FATAL, "unrecognized process type: %d",
-					(int) MyAuxProcType);
-				proc_exit(1);
-		}
-	}
-
 	do
 	{
 		pgstat_increment_changecount_before(beentry);
@@ -2851,15 +2708,7 @@ pgstat_bestart(void)
 	beentry->st_state_start_timestamp = 0;
 	beentry->st_xact_start_timestamp = 0;
 	beentry->st_databaseid = MyDatabaseId;
-
-	/* We have userid for client-backends, wal-sender and bgworker processes */
-	if (beentry->st_backendType == B_BACKEND
-			|| beentry->st_backendType == B_WAL_SENDER
-			|| beentry->st_backendType == B_BG_WORKER)
-		beentry->st_userid = GetSessionUserId();
-	else
-		beentry->st_userid = InvalidOid;
-
+	beentry->st_userid = userid;
 	beentry->st_clientaddr = clientaddr;
 	if (MyProcPort && MyProcPort->remote_hostname)
 		strlcpy(beentry->st_clienthostname, MyProcPort->remote_hostname,
@@ -3197,24 +3046,24 @@ pgstat_read_current_status(void)
 
 	localtable = (LocalPgBackendStatus *)
 		MemoryContextAlloc(pgStatLocalContext,
-						 sizeof(LocalPgBackendStatus) * NumBackendStatSlots);
+						   sizeof(LocalPgBackendStatus) * MaxBackends);
 	localappname = (char *)
 		MemoryContextAlloc(pgStatLocalContext,
-						   NAMEDATALEN * NumBackendStatSlots);
+						   NAMEDATALEN * MaxBackends);
 	localactivity = (char *)
 		MemoryContextAlloc(pgStatLocalContext,
-					 pgstat_track_activity_query_size * NumBackendStatSlots);
+						   pgstat_track_activity_query_size * MaxBackends);
 #ifdef USE_SSL
 	localsslstatus = (PgBackendSSLStatus *)
 		MemoryContextAlloc(pgStatLocalContext,
-						   sizeof(PgBackendSSLStatus) * NumBackendStatSlots);
+						   sizeof(PgBackendSSLStatus) * MaxBackends);
 #endif
 
 	localNumBackends = 0;
 
 	beentry = BackendStatusArray;
 	localentry = localtable;
-	for (i = 1; i <= NumBackendStatSlots; i++)
+	for (i = 1; i <= MaxBackends; i++)
 	{
 		/*
 		 * Follow the protocol of retrying if st_changecount changes while we
@@ -3980,47 +3829,7 @@ pgstat_get_crashed_backend_activity(int pid, char *buffer, int buflen)
 	return NULL;
 }
 
-const char *
-pgstat_get_backend_desc(BackendType backendType)
-{
-	const char *backendDesc = "unknown process type";
-
-	switch (backendType)
-	{
-		case B_AUTOVAC_LAUNCHER:
-			backendDesc = "autovacuum launcher";
-			break;
-		case B_AUTOVAC_WORKER:
-			backendDesc = "autovacuum worker";
-			break;
-		case B_BACKEND:
-			backendDesc = "client backend";
-			break;
-		case B_BG_WORKER:
-			backendDesc = "background worker";
-			break;
-		case B_BG_WRITER:
-			backendDesc = "background writer";
-			break;
-		case B_CHECKPOINTER:
-			backendDesc = "checkpointer";
-			break;
-		case B_STARTUP:
-			backendDesc = "startup";
-			break;
-		case B_WAL_RECEIVER:
-			backendDesc = "walreceiver";
-			break;
-		case B_WAL_SENDER:
-			backendDesc = "walsender";
-			break;
-		case B_WAL_WRITER:
-			backendDesc = "walwriter";
-			break;
-	}
 
-	return backendDesc;
-}
 /* ------------------------------------------------------------
  * Local support functions follow
  * ------------------------------------------------------------
diff --git a/src/backend/postmaster/startup.c b/src/backend/postmaster/startup.c
index b623252..b172b5e 100644
--- a/src/backend/postmaster/startup.c
+++ b/src/backend/postmaster/startup.c
@@ -25,7 +25,6 @@
 #include "access/xlog.h"
 #include "libpq/pqsignal.h"
 #include "miscadmin.h"
-#include "pgstat.h"
 #include "postmaster/startup.h"
 #include "storage/ipc.h"
 #include "storage/latch.h"
diff --git a/src/backend/replication/logical/snapbuild.c b/src/backend/replication/logical/snapbuild.c
index 2279604..a73a7b9 100644
--- a/src/backend/replication/logical/snapbuild.c
+++ b/src/backend/replication/logical/snapbuild.c
@@ -499,14 +499,14 @@ SnapBuildBuildSnapshot(SnapBuild *builder, TransactionId xid)
 }
 
 /*
- * Build the initial slot snapshot and convert it to a normal snapshot that
+ * Build the initial slot snapshot and convert it to a normal snapshot that
  * is understood by HeapTupleSatisfiesMVCC.
  *
  * The snapshot will be usable directly in current transaction or exported
  * for loading in different transaction.
  */
 Snapshot
-SnapBuildInitialSnapshot(SnapBuild *builder)
+SnapBuildInitialSnapshot(SnapBuild *builder)
 {
 	Snapshot	snap;
 	TransactionId xid;
@@ -514,7 +514,7 @@ SnapBuildInitialSnapshot(SnapBuild *builder)
 	int			newxcnt = 0;
 
 	Assert(!FirstSnapshotSet);
-	Assert(XactIsoLevel == XACT_REPEATABLE_READ);
+	Assert(XactIsoLevel == XACT_REPEATABLE_READ);
 
 	if (builder->state != SNAPBUILD_CONSISTENT)
 		elog(ERROR, "cannot build an initial slot snapshot before reaching a consistent state");
@@ -604,7 +604,7 @@ SnapBuildExportSnapshot(SnapBuild *builder)
 	XactIsoLevel = XACT_REPEATABLE_READ;
 	XactReadOnly = true;
 
-	snap = SnapBuildInitialSnapshot(builder);
+	snap = SnapBuildInitialSnapshot(builder);
 
 	/*
 	 * now that we've built a plain snapshot, make it active and use the
diff --git a/src/backend/replication/walsender.c b/src/backend/replication/walsender.c
index cfc3fba..59ae22d 100644
--- a/src/backend/replication/walsender.c
+++ b/src/backend/replication/walsender.c
@@ -938,7 +938,7 @@ CreateReplicationSlot(CreateReplicationSlotCmd *cmd)
 		{
 			Snapshot	snap;
 
-			snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
+			snap = SnapBuildInitialSnapshot(ctx->snapshot_builder);
 			RestoreTransactionSnapshot(snap, MyProc);
 		}
 
@@ -2011,8 +2011,8 @@ WalSndLoop(WalSndSendDataCallback send_data)
 	last_reply_timestamp = GetCurrentTimestamp();
 	waiting_for_ping_response = false;
 
-	/* Report to pgstat that this process is running */
-	pgstat_report_activity(STATE_RUNNING, NULL);
+	/* Report to pgstat that this process is a WAL sender */
+	pgstat_report_activity(STATE_RUNNING, "walsender");
 
 	/*
 	 * Loop until we reach the end of this timeline or the client requests to
diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c
index 354e5d0..bd492e9 100644
--- a/src/backend/rewrite/rewriteHandler.c
+++ b/src/backend/rewrite/rewriteHandler.c
@@ -908,7 +908,7 @@ process_matched_tle(TargetEntry *src_tle,
 
 	/*----------
 	 * Multiple assignments to same attribute.  Allow only if all are
-	 * FieldStore or ArrayRef assignment operations.  This is a bit
+	 * FieldStore or SubscriptingRef assignment operations.  This is a bit
 	 * tricky because what we may actually be looking at is a nest of
 	 * such nodes; consider
 	 *		UPDATE tab SET col.fld1.subfld1 = x, col.fld2.subfld2 = y
@@ -916,7 +916,7 @@ process_matched_tle(TargetEntry *src_tle,
 	 *		FieldStore(col, fld1, FieldStore(placeholder, subfld1, x))
 	 *		FieldStore(col, fld2, FieldStore(placeholder, subfld2, y))
 	 * However, we can ignore the substructure and just consider the top
-	 * FieldStore or ArrayRef from each assignment, because it works to
+	 * FieldStore or SubscriptingRef from each assignment, because it works to
 	 * combine these as
 	 *		FieldStore(FieldStore(col, fld1,
 	 *							  FieldStore(placeholder, subfld1, x)),
@@ -926,7 +926,7 @@ process_matched_tle(TargetEntry *src_tle,
 	 *
 	 * For FieldStore, instead of nesting we can generate a single
 	 * FieldStore with multiple target fields.  We must nest when
-	 * ArrayRefs are involved though.
+	 * SubscriptingRefs are involved though.
 	 *----------
 	 */
 	src_expr = (Node *) src_tle->expr;
@@ -985,13 +985,15 @@ process_matched_tle(TargetEntry *src_tle,
 		}
 		newexpr = (Node *) fstore;
 	}
-	else if (IsA(src_expr, ArrayRef))
+	else if (IsA(src_expr, SubscriptingRef))
 	{
-		ArrayRef   *aref = makeNode(ArrayRef);
+		NodeTag sbstag = nodeTag(src_expr);
+		Size nodeSize = sizeof(SubscriptingRef);
+		SubscriptingRef *sbsref = (SubscriptingRef *) newNode(nodeSize, sbstag);
 
-		memcpy(aref, src_expr, sizeof(ArrayRef));
-		aref->refexpr = (Expr *) prior_expr;
-		newexpr = (Node *) aref;
+		memcpy(sbsref, src_expr, nodeSize);
+		sbsref->refexpr = (Expr *) prior_expr;
+		newexpr = (Node *) sbsref;
 	}
 	else
 	{
@@ -1018,14 +1020,12 @@ get_assignment_input(Node *node)
 
 		return (Node *) fstore->arg;
 	}
-	else if (IsA(node, ArrayRef))
+	else if (IsA(node, SubscriptingRef) && IsAssignment(node))
 	{
-		ArrayRef   *aref = (ArrayRef *) node;
-
-		if (aref->refassgnexpr == NULL)
-			return NULL;
-		return (Node *) aref->refexpr;
+		SubscriptingRef   *sbsref = (SubscriptingRef *) node;
+		return (Node *) sbsref->refexpr;
 	}
+
 	return NULL;
 }
 
diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c
index 6082ff0..5df4e29 100644
--- a/src/backend/statistics/mvdistinct.c
+++ b/src/backend/statistics/mvdistinct.c
@@ -161,10 +161,10 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct)
 	Assert(ndistinct->type == STATS_NDISTINCT_TYPE_BASIC);
 
 	/*
-	 * Base size is size of scalar fields in the struct, plus one base struct
-	 * for each item, including number of items for each.
+	 * Base size is base struct size, plus one base struct for each items,
+	 * including number of items for each.
 	 */
-	len = VARHDRSZ + SizeOfMVNDistinct +
+	len = VARHDRSZ + offsetof(MVNDistinct, items) +
 		ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) + sizeof(int));
 
 	/* and also include space for the actual attribute numbers */
@@ -182,13 +182,9 @@ statext_ndistinct_serialize(MVNDistinct *ndistinct)
 
 	tmp = VARDATA(output);
 
-	/* Store the base struct values (magic, type, nitems) */
-	memcpy(tmp, &ndistinct->magic, sizeof(uint32));
-	tmp += sizeof(uint32);
-	memcpy(tmp, &ndistinct->type, sizeof(uint32));
-	tmp += sizeof(uint32);
-	memcpy(tmp, &ndistinct->nitems, sizeof(uint32));
-	tmp += sizeof(uint32);
+	/* Store the base struct values */
+	memcpy(tmp, ndistinct, offsetof(MVNDistinct, items));
+	tmp += offsetof(MVNDistinct, items);
 
 	/*
 	 * store number of attributes and attribute numbers for each ndistinct
@@ -228,64 +224,49 @@ MVNDistinct *
 statext_ndistinct_deserialize(bytea *data)
 {
 	int			i;
-	Size		minimum_size;
-	MVNDistinct	ndist;
+	Size		expected_size;
 	MVNDistinct *ndistinct;
 	char	   *tmp;
 
 	if (data == NULL)
 		return NULL;
 
-	/* we expect at least the basic fields of MVNDistinct struct */
-	if (VARSIZE_ANY_EXHDR(data) < SizeOfMVNDistinct)
+	if (VARSIZE_ANY_EXHDR(data) < offsetof(MVNDistinct, items))
 		elog(ERROR, "invalid MVNDistinct size %ld (expected at least %ld)",
-			 VARSIZE_ANY_EXHDR(data), SizeOfMVNDistinct);
+			 VARSIZE_ANY_EXHDR(data), offsetof(MVNDistinct, items));
+
+	/* read the MVNDistinct header */
+	ndistinct = (MVNDistinct *) palloc(sizeof(MVNDistinct));
 
 	/* initialize pointer to the data part (skip the varlena header) */
 	tmp = VARDATA_ANY(data);
 
-	/* read the header fields and perform basic sanity checks */
-	memcpy(&ndist.magic, tmp, sizeof(uint32));
-	tmp += sizeof(uint32);
-	memcpy(&ndist.type, tmp, sizeof(uint32));
-	tmp += sizeof(uint32);
-	memcpy(&ndist.nitems, tmp, sizeof(uint32));
-	tmp += sizeof(uint32);
-
-	if (ndist.magic != STATS_NDISTINCT_MAGIC)
-		ereport(ERROR,
-				(errcode(ERRCODE_DATA_CORRUPTED),
-				 errmsg("invalid ndistinct magic %08x (expected %08x)",
-						ndist.magic, STATS_NDISTINCT_MAGIC)));
-	if (ndist.type != STATS_NDISTINCT_TYPE_BASIC)
-		ereport(ERROR,
-				(errcode(ERRCODE_DATA_CORRUPTED),
-				 errmsg("invalid ndistinct type %d (expected %d)",
-						ndist.type, STATS_NDISTINCT_TYPE_BASIC)));
-	if (ndist.nitems == 0)
-		ereport(ERROR,
-				(errcode(ERRCODE_DATA_CORRUPTED),
-				 errmsg("invalid zero-length item array in MVNDistinct")));
+	/* get the header and perform basic sanity checks */
+	memcpy(ndistinct, tmp, offsetof(MVNDistinct, items));
+	tmp += offsetof(MVNDistinct, items);
+
+	if (ndistinct->magic != STATS_NDISTINCT_MAGIC)
+		elog(ERROR, "invalid ndistinct magic %d (expected %d)",
+			 ndistinct->magic, STATS_NDISTINCT_MAGIC);
+
+	if (ndistinct->type != STATS_NDISTINCT_TYPE_BASIC)
+		elog(ERROR, "invalid ndistinct type %d (expected %d)",
+			 ndistinct->type, STATS_NDISTINCT_TYPE_BASIC);
+
+	Assert(ndistinct->nitems > 0);
 
 	/* what minimum bytea size do we expect for those parameters */
-	minimum_size = (SizeOfMVNDistinct +
-					ndist.nitems * (SizeOfMVNDistinctItem +
-									sizeof(AttrNumber) * 2));
-	if (VARSIZE_ANY_EXHDR(data) < minimum_size)
-		ereport(ERROR,
-				(errcode(ERRCODE_DATA_CORRUPTED),
-				 errmsg("invalid MVNDistinct size %ld (expected at least %ld)",
-						VARSIZE_ANY_EXHDR(data), minimum_size)));
+	expected_size = offsetof(MVNDistinct, items) +
+		ndistinct->nitems * (offsetof(MVNDistinctItem, attrs) +
+							 sizeof(AttrNumber) * 2);
 
-	/*
-	 * Allocate space for the ndistinct items (no space for each item's attnos:
-	 * those live in bitmapsets allocated separately)
-	 */
-	ndistinct = palloc0(MAXALIGN(SizeOfMVNDistinct) +
-						(ndist.nitems * sizeof(MVNDistinctItem)));
-	ndistinct->magic = ndist.magic;
-	ndistinct->type = ndist.type;
-	ndistinct->nitems = ndist.nitems;
+	if (VARSIZE_ANY_EXHDR(data) < expected_size)
+		elog(ERROR, "invalid MVNDistinct size %ld (expected at least %ld)",
+			 VARSIZE_ANY_EXHDR(data), expected_size);
+
+	/* allocate space for the ndistinct items */
+	ndistinct = repalloc(ndistinct, offsetof(MVNDistinct, items) +
+						 (ndistinct->nitems * sizeof(MVNDistinctItem)));
 
 	for (i = 0; i < ndistinct->nitems; i++)
 	{
diff --git a/src/backend/storage/file/fd.c b/src/backend/storage/file/fd.c
index b149794..f0ed2e9 100644
--- a/src/backend/storage/file/fd.c
+++ b/src/backend/storage/file/fd.c
@@ -658,43 +658,6 @@ durable_rename(const char *oldfile, const char *newfile, int elevel)
 }
 
 /*
- * durable_unlink -- remove a file in a durable manner
- *
- * This routine ensures that, after returning, the effect of removing file
- * persists in case of a crash. A crash while this routine is running will
- * leave the system in no mixed state.
- *
- * It does so by using fsync on the parent directory of the file after the
- * actual removal is done.
- *
- * Log errors with the severity specified by caller.
- *
- * Returns 0 if the operation succeeded, -1 otherwise. Note that errno is not
- * valid upon return.
- */
-int
-durable_unlink(const char *fname, int elevel)
-{
-	if (unlink(fname) < 0)
-	{
-		ereport(elevel,
-				(errcode_for_file_access(),
-				 errmsg("could not remove file \"%s\": %m",
-						fname)));
-		return -1;
-	}
-
-	/*
-	 * To guarantee that the removal of the file is persistent, fsync
-	 * its parent directory.
-	 */
-	if (fsync_parent_path(fname, elevel) != 0)
-		return -1;
-
-	return 0;
-}
-
-/*
  * durable_link_or_rename -- rename a file in a durable manner.
  *
  * Similar to durable_rename(), except that this routine tries (but does not
diff --git a/src/backend/storage/lmgr/generate-lwlocknames.pl b/src/backend/storage/lmgr/generate-lwlocknames.pl
index 10d0698..f80d2c8 100644
--- a/src/backend/storage/lmgr/generate-lwlocknames.pl
+++ b/src/backend/storage/lmgr/generate-lwlocknames.pl
@@ -9,21 +9,21 @@ use strict;
 my $lastlockidx = -1;
 my $continue    = "\n";
 
-open my $lwlocknames, '<', $ARGV[0] or die;
+open my $lwlocknames, '<', $ARGV[0] or die;
 
 # Include PID in suffix in case parallel make runs this multiple times.
 my $htmp = "lwlocknames.h.tmp$$";
 my $ctmp = "lwlocknames.c.tmp$$";
-open my $h, '>', $htmp or die "Could not open $htmp: $!";
-open my $c, '>', $ctmp or die "Could not open $ctmp: $!";
+open H, '>', $htmp or die "Could not open $htmp: $!";
+open C, '>', $ctmp or die "Could not open $ctmp: $!";
 
 my $autogen =
 "/* autogenerated from src/backend/storage/lmgr/lwlocknames.txt, do not edit */\n";
-print $h $autogen;
-print $h "/* there is deliberately not an #ifndef LWLOCKNAMES_H here */\n\n";
-print $c $autogen, "\n";
+print H $autogen;
+print H "/* there is deliberately not an #ifndef LWLOCKNAMES_H here */\n\n";
+print C $autogen, "\n";
 
-print $c "char *MainLWLockNames[] = {";
+print C "char *MainLWLockNames[] = {";
 
 while (<$lwlocknames>)
 {
@@ -44,22 +44,22 @@ while (<$lwlocknames>)
 	while ($lastlockidx < $lockidx - 1)
 	{
 		++$lastlockidx;
-		printf $c "%s	\"<unassigned:%d>\"", $continue, $lastlockidx;
+		printf C "%s	\"<unassigned:%d>\"", $continue, $lastlockidx;
 		$continue = ",\n";
 	}
-	printf $c "%s	\"%s\"", $continue, $lockname;
+	printf C "%s	\"%s\"", $continue, $lockname;
 	$lastlockidx = $lockidx;
 	$continue    = ",\n";
 
-	print $h "#define $lockname (&MainLWLockArray[$lockidx].lock)\n";
+	print H "#define $lockname (&MainLWLockArray[$lockidx].lock)\n";
 }
 
-printf $c "\n};\n";
-print $h "\n";
-printf $h "#define NUM_INDIVIDUAL_LWLOCKS		%s\n", $lastlockidx + 1;
+printf C "\n};\n";
+print H "\n";
+printf H "#define NUM_INDIVIDUAL_LWLOCKS		%s\n", $lastlockidx + 1;
 
-close $h;
-close $c;
+close H;
+close C;
 
 rename($htmp, 'lwlocknames.h') || die "rename: $htmp: $!";
 rename($ctmp, 'lwlocknames.c') || die "rename: $ctmp: $!";
diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c
index 3e716b1..8f467be 100644
--- a/src/backend/storage/lmgr/proc.c
+++ b/src/backend/storage/lmgr/proc.c
@@ -941,33 +941,6 @@ AuxiliaryProcKill(int code, Datum arg)
 	SpinLockRelease(ProcStructLock);
 }
 
-/*
- * AuxiliaryPidGetProc -- get PGPROC for an auxiliary process
- * given its PID
- *
- * Returns NULL if not found.
- */
-PGPROC *
-AuxiliaryPidGetProc(int pid)
-{
-	PGPROC	   *result = NULL;
-	int			index;
-
-	if (pid == 0)				/* never match dummy PGPROCs */
-		return NULL;
-
-	for (index = 0; index < NUM_AUXILIARY_PROCS; index++)
-	{
-		PGPROC	   *proc = &AuxiliaryProcs[index];
-
-		if (proc->pid == pid)
-		{
-			result = proc;
-			break;
-		}
-	}
-	return result;
-}
 
 /*
  * ProcQueue package: routines for putting processes to sleep
diff --git a/src/backend/utils/Gen_fmgrtab.pl b/src/backend/utils/Gen_fmgrtab.pl
index 2af9b35..cdd603a 100644
--- a/src/backend/utils/Gen_fmgrtab.pl
+++ b/src/backend/utils/Gen_fmgrtab.pl
@@ -90,11 +90,11 @@ my $oidsfile = $output_path . 'fmgroids.h';
 my $protosfile = $output_path . 'fmgrprotos.h';
 my $tabfile  = $output_path . 'fmgrtab.c';
 
-open my $ofh, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
-open my $pfh, '>', $protosfile . $tmpext or die "Could not open $protosfile$tmpext: $!";
-open my $tfh, '>', $tabfile . $tmpext  or die "Could not open $tabfile$tmpext: $!";
+open H, '>', $oidsfile . $tmpext or die "Could not open $oidsfile$tmpext: $!";
+open P, '>', $protosfile . $tmpext or die "Could not open $protosfile$tmpext: $!";
+open T, '>', $tabfile . $tmpext  or die "Could not open $tabfile$tmpext: $!";
 
-print $ofh
+print H
 qq|/*-------------------------------------------------------------------------
  *
  * fmgroids.h
@@ -132,7 +132,7 @@ qq|/*-------------------------------------------------------------------------
  */
 |;
 
-print $pfh
+print P
 qq|/*-------------------------------------------------------------------------
  *
  * fmgrprotos.h
@@ -159,7 +159,7 @@ qq|/*-------------------------------------------------------------------------
 
 |;
 
-print $tfh
+print T
 qq|/*-------------------------------------------------------------------------
  *
  * fmgrtab.c
@@ -193,26 +193,26 @@ foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
 {
 	next if $seenit{ $s->{prosrc} };
 	$seenit{ $s->{prosrc} } = 1;
-	print $ofh "#define F_" . uc $s->{prosrc} . " $s->{oid}\n";
-	print $pfh "extern Datum $s->{prosrc}(PG_FUNCTION_ARGS);\n";
+	print H "#define F_" . uc $s->{prosrc} . " $s->{oid}\n";
+	print P "extern Datum $s->{prosrc}(PG_FUNCTION_ARGS);\n";
 }
 
 # Create the fmgr_builtins table
-print $tfh "\nconst FmgrBuiltin fmgr_builtins[] = {\n";
+print T "\nconst FmgrBuiltin fmgr_builtins[] = {\n";
 my %bmap;
 $bmap{'t'} = 'true';
 $bmap{'f'} = 'false';
 foreach my $s (sort { $a->{oid} <=> $b->{oid} } @fmgr)
 {
-	print $tfh
+	print T
 "  { $s->{oid}, \"$s->{prosrc}\", $s->{nargs}, $bmap{$s->{strict}}, $bmap{$s->{retset}}, $s->{prosrc} },\n";
 }
 
 # And add the file footers.
-print $ofh "\n#endif /* FMGROIDS_H */\n";
-print $pfh "\n#endif /* FMGRPROTOS_H */\n";
+print H "\n#endif /* FMGROIDS_H */\n";
+print P "\n#endif /* FMGRPROTOS_H */\n";
 
-print $tfh
+print T
 qq|  /* dummy entry is easier than getting rid of comma after last real one */
   /* (not that there has ever been anything wrong with *having* a
      comma after the last field in an array initializer) */
@@ -223,9 +223,9 @@ qq|  /* dummy entry is easier than getting rid of comma after last real one */
 const int fmgr_nbuiltins = (sizeof(fmgr_builtins) / sizeof(FmgrBuiltin)) - 1;
 |;
 
-close($ofh);
-close($pfh);
-close($tfh);
+close(H);
+close(P);
+close(T);
 
 # Finally, rename the completed files into place.
 Catalog::RenameTempFile($oidsfile, $tmpext);
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index d9c8aa5..fc9eeb8 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -24,13 +24,19 @@
 #include "catalog/pg_type.h"
 #include "funcapi.h"
 #include "libpq/pqformat.h"
+#include "nodes/makefuncs.h"
+#include "nodes/nodeFuncs.h"
+#include "executor/execExpr.h"
 #include "utils/array.h"
 #include "utils/arrayaccess.h"
 #include "utils/builtins.h"
 #include "utils/datum.h"
+#include "utils/fmgroids.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/typcache.h"
+#include "parser/parse_node.h"
+#include "parser/parse_coerce.h"
 
 
 /*
@@ -88,6 +94,7 @@ typedef struct ArrayIteratorData
 
 static bool array_isspace(char ch);
 static int	ArrayCount(const char *str, int *dim, char typdelim);
+bool isAssignmentIndirectionExpr(ExprState *exprstate);
 static void ReadArrayStr(char *arrayStr, const char *origStr,
 			 int nitems, int ndim, int *dim,
 			 FmgrInfo *inputproc, Oid typioparam, int32 typmod,
@@ -158,7 +165,6 @@ static int width_bucket_array_variable(Datum operand,
 							Oid collation,
 							TypeCacheEntry *typentry);
 
-
 /*
  * array_in :
  *		  converts an array from the external format in "string" to
@@ -6508,3 +6514,272 @@ width_bucket_array_variable(Datum operand,
 
 	return left;
 }
+
+/*
+ * Perform an actual data extraction or modification for the array
+ * subscripting. As a result the extracted Datum or the modified containers
+ * value will be returned.
+ */
+Datum
+array_subscripting_assign(PG_FUNCTION_ARGS)
+{
+	Datum						containerSource = PG_GETARG_DATUM(0);
+	ExprEvalStep				*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+	SubscriptingRefState		*sbstate = step->d.sbsref.state;
+
+	bool						is_slice = (sbstate->numlower != 0);
+	IntArray					u_index, l_index;
+	bool						eisnull = *(step->resnull);
+	int							i = 0;
+
+	if (sbstate->refelemlength == 0)
+	{
+		/* do one-time catalog lookups for type info */
+		get_typlenbyvalalign(sbstate->refelemtype,
+							 &sbstate->refelemlength,
+							 &sbstate->refelembyval,
+							 &sbstate->refelemalign);
+	}
+
+	for(i = 0; i < sbstate->numupper; i++)
+		u_index.indx[i] = DatumGetInt32(sbstate->upper[i]);
+
+	if (is_slice)
+	{
+		for(i = 0; i < sbstate->numlower; i++)
+			l_index.indx[i] = DatumGetInt32(sbstate->lower[i]);
+	}
+
+	/*
+	 * For assignment to varlena arrays, we handle a NULL original array
+	 * by substituting an empty (zero-dimensional) array; insertion of the
+	 * new element will result in a singleton array value.  It does not
+	 * matter whether the new element is NULL.
+	 */
+	if (eisnull)
+	{
+		containerSource = PointerGetDatum(construct_empty_array(sbstate->refelemtype));
+		*step->resnull = false;
+		eisnull = false;
+	}
+
+	if (!is_slice)
+		return array_set_element(containerSource, sbstate->numupper,
+								 u_index.indx,
+								 sbstate->replacevalue,
+								 sbstate->replacenull,
+								 sbstate->refattrlength,
+								 sbstate->refelemlength,
+								 sbstate->refelembyval,
+								 sbstate->refelemalign);
+	else
+		return array_set_slice(containerSource, sbstate->numupper,
+							   u_index.indx, l_index.indx,
+							   sbstate->upperprovided,
+							   sbstate->lowerprovided,
+							   sbstate->replacevalue,
+							   sbstate->replacenull,
+							   sbstate->refattrlength,
+							   sbstate->refelemlength,
+							   sbstate->refelembyval,
+							   sbstate->refelemalign);
+}
+
+Datum
+array_subscripting_fetch(PG_FUNCTION_ARGS)
+{
+	Datum							containerSource = PG_GETARG_DATUM(0);
+	ExprEvalStep					*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+	SubscriptingRefState			*sbstate = step->d.sbsref.state;
+	bool							is_slice = (sbstate->numlower != 0);
+	IntArray						u_index, l_index;
+	int								i = 0;
+
+	if (sbstate->refelemlength == 0)
+	{
+		/* do one-time catalog lookups for type info */
+		get_typlenbyvalalign(sbstate->refelemtype,
+							 &sbstate->refelemlength,
+							 &sbstate->refelembyval,
+							 &sbstate->refelemalign);
+	}
+
+	for(i = 0; i < sbstate->numupper; i++)
+		u_index.indx[i] = DatumGetInt32(sbstate->upper[i]);
+
+	if (is_slice)
+	{
+		for(i = 0; i < sbstate->numlower; i++)
+			l_index.indx[i] = DatumGetInt32(sbstate->lower[i]);
+	}
+
+	if (!is_slice)
+		return array_get_element(containerSource, sbstate->numupper,
+								 u_index.indx,
+								 sbstate->refattrlength,
+								 sbstate->refelemlength,
+								 sbstate->refelembyval,
+								 sbstate->refelemalign,
+								 step->resnull);
+	else
+		return array_get_slice(containerSource, sbstate->numupper,
+							   u_index.indx, l_index.indx,
+							   sbstate->upperprovided,
+							   sbstate->lowerprovided,
+							   sbstate->refattrlength,
+							   sbstate->refelemlength,
+							   sbstate->refelembyval,
+							   sbstate->refelemalign);
+}
+
+/*
+ * Handle array-type subscripting logic.
+ */
+Datum
+array_subscript_parse(PG_FUNCTION_ARGS)
+{
+	bool				isAssignment = PG_GETARG_BOOL(0);
+	SubscriptingRef		*sbsref = (SubscriptingRef *) PG_GETARG_POINTER(1);
+	ParseState			*pstate = (ParseState *) PG_GETARG_POINTER(2);
+	Node				*node = (Node *) sbsref;
+	Oid					array_type = sbsref->refcontainertype;
+	int32				array_typ_mode = (int32) sbsref->reftypmod;
+	bool				is_slice = sbsref->reflowerindexpr != NIL;
+	Oid					typeneeded = InvalidOid,
+						typesource = InvalidOid;
+	Node				*new_from;
+	Oid					element_type_id;
+	Node				*subexpr;
+	List				*upperIndexpr = NIL;
+	List				*lowerIndexpr = NIL;
+	ListCell			*l;
+
+	element_type_id = transformArrayType(&array_type, &array_typ_mode);
+	sbsref->refelemtype = element_type_id;
+
+	foreach(l, sbsref->refupperindexpr)
+	{
+		subexpr = (Node *) lfirst(l);
+
+		if (subexpr == NULL)
+		{
+			upperIndexpr = lappend(upperIndexpr, subexpr);
+			continue;
+		}
+
+		subexpr = coerce_to_target_type(pstate,
+										subexpr, exprType(subexpr),
+										INT4OID, -1,
+										COERCION_ASSIGNMENT,
+										COERCE_IMPLICIT_CAST,
+										-1);
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("array subscript must have type integer"),
+					 parser_errposition(pstate, exprLocation(subexpr))));
+
+		upperIndexpr = lappend(upperIndexpr, subexpr);
+	}
+
+	sbsref->refupperindexpr = upperIndexpr;
+
+	foreach(l, sbsref->reflowerindexpr)
+	{
+		List *expr_ai = (List *) lfirst(l);
+		A_Indices *ai = (A_Indices *) lfirst(list_tail(expr_ai));
+
+		subexpr = (Node *) lfirst(list_head(expr_ai));
+		if (subexpr == NULL && !ai->is_slice)
+		{
+			/* Make a constant 1 */
+			subexpr = (Node *) makeConst(INT4OID,
+										 -1,
+										 InvalidOid,
+										 sizeof(int32),
+										 Int32GetDatum(1),
+										 false,
+										 true);		/* pass by value */
+		}
+
+		if (subexpr == NULL)
+		{
+			lowerIndexpr = lappend(lowerIndexpr, subexpr);
+			continue;
+		}
+
+
+		subexpr = coerce_to_target_type(pstate,
+										subexpr, exprType(subexpr),
+										INT4OID, -1,
+										COERCION_ASSIGNMENT,
+										COERCE_IMPLICIT_CAST,
+										-1);
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("array subscript must have type integer"),
+					 parser_errposition(pstate, exprLocation(subexpr))));
+
+		lowerIndexpr = lappend(lowerIndexpr, subexpr);
+	}
+
+	sbsref->reflowerindexpr = lowerIndexpr;
+
+	if (isAssignment)
+	{
+		SubscriptingRef *assignRef = (SubscriptingRef *) sbsref;
+		Node *assignExpr = (Node *) assignRef->refassgnexpr;
+
+		typesource = exprType(assignExpr);
+		typeneeded = is_slice ? sbsref->refcontainertype : sbsref->refelemtype;
+		new_from = coerce_to_target_type(pstate,
+										assignExpr, typesource,
+										typeneeded, sbsref->reftypmod,
+										COERCION_ASSIGNMENT,
+										COERCE_IMPLICIT_CAST,
+										-1);
+		if (new_from == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("array assignment requires type %s"
+							" but expression is of type %s",
+							format_type_be(typeneeded),
+							format_type_be(typesource)),
+				 errhint("You will need to rewrite or cast the expression."),
+					 parser_errposition(pstate, exprLocation(assignExpr))));
+		assignRef->refassgnexpr = (Expr *) new_from;
+
+		if (array_type != sbsref->refcontainertype)
+		{
+
+			node = coerce_to_target_type(pstate,
+										 node, array_type,
+										 sbsref->refcontainertype, sbsref->reftypmod,
+										 COERCION_ASSIGNMENT,
+										 COERCE_IMPLICIT_CAST,
+										 -1);
+
+			/* can fail if we had int2vector/oidvector, but not for true domains */
+			if (node == NULL)
+				ereport(ERROR,
+						(errcode(ERRCODE_CANNOT_COERCE),
+						 errmsg("cannot cast type %s to %s",
+								format_type_be(array_type),
+								format_type_be(sbsref->refcontainertype)),
+						 parser_errposition(pstate, 0)));
+
+			PG_RETURN_POINTER(node);
+		}
+
+	}
+
+	sbsref->refnestedfunc = F_ARRAY_SUBSCRIPTING_FETCH;
+
+	if (isAssignment)
+		sbsref->refevalfunc = F_ARRAY_SUBSCRIPTING_ASSIGN;
+	else
+		sbsref->refevalfunc = F_ARRAY_SUBSCRIPTING_FETCH;
+
+	PG_RETURN_POINTER(sbsref);
+}
diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c
index 164f57e..1dc44d0 100644
--- a/src/backend/utils/adt/jsonb.c
+++ b/src/backend/utils/adt/jsonb.c
@@ -1146,23 +1146,34 @@ to_jsonb(PG_FUNCTION_ARGS)
 {
 	Datum		val = PG_GETARG_DATUM(0);
 	Oid			val_type = get_fn_expr_argtype(fcinfo->flinfo, 0);
-	JsonbInState result;
-	JsonbTypeCategory tcategory;
-	Oid			outfuncoid;
+	JsonbValue *res = to_jsonb_worker(val, val_type);
+	PG_RETURN_POINTER(JsonbValueToJsonb(res));
+}
 
-	if (val_type == InvalidOid)
+/*
+ * Do the actual conversion to jsonb for to_jsonb function. This logic is
+ * separated because it can be useful not only in here (e.g. we use it in
+ * jsonb subscripting)
+ */
+JsonbValue *
+to_jsonb_worker(Datum source, Oid source_type)
+{
+	JsonbInState		result;
+	JsonbTypeCategory	tcategory;
+	Oid					outfuncoid;
+
+	if (source_type == InvalidOid)
 		ereport(ERROR,
 				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
 				 errmsg("could not determine input data type")));
 
-	jsonb_categorize_type(val_type,
+	jsonb_categorize_type(source_type,
 						  &tcategory, &outfuncoid);
 
 	memset(&result, 0, sizeof(JsonbInState));
 
-	datum_to_jsonb(val, false, &result, tcategory, outfuncoid, false);
-
-	PG_RETURN_POINTER(JsonbValueToJsonb(result.res));
+	datum_to_jsonb(source, false, &result, tcategory, outfuncoid, false);
+	return result.res;
 }
 
 /*
diff --git a/src/backend/utils/adt/jsonb_util.c b/src/backend/utils/adt/jsonb_util.c
index 0d2abb3..26453ab 100644
--- a/src/backend/utils/adt/jsonb_util.c
+++ b/src/backend/utils/adt/jsonb_util.c
@@ -62,18 +62,29 @@ static JsonbValue *pushJsonbValueScalar(JsonbParseState **pstate,
 					 JsonbIteratorToken seq,
 					 JsonbValue *scalarVal);
 
+JsonbValue *
+JsonbToJsonbValue(Jsonb *jsonb)
+{
+	JsonbValue *val = (JsonbValue *) palloc(sizeof(JsonbValue));
+
+	val->type = jbvBinary;
+	val->val.binary.data = &jsonb->root;
+	val->val.binary.len = VARSIZE(jsonb) - VARHDRSZ;
+
+	return val;
+}
+
 /*
  * Turn an in-memory JsonbValue into a Jsonb for on-disk storage.
  *
- * There isn't a JsonbToJsonbValue(), because generally we find it more
- * convenient to directly iterate through the Jsonb representation and only
- * really convert nested scalar values.  JsonbIteratorNext() does this, so that
- * clients of the iteration code don't have to directly deal with the binary
- * representation (JsonbDeepContains() is a notable exception, although all
- * exceptions are internal to this module).  In general, functions that accept
- * a JsonbValue argument are concerned with the manipulation of scalar values,
- * or simple containers of scalar values, where it would be inconvenient to
- * deal with a great amount of other state.
+ * Generally we find it more convenient to directly iterate through the Jsonb
+ * representation and only really convert nested scalar values.
+ * JsonbIteratorNext() does this, so that clients of the iteration code don't
+ * have to directly deal with the binary representation (JsonbDeepContains() is
+ * a notable exception, although all exceptions are internal to this module).
+ * In general, functions that accept a JsonbValue argument are concerned with
+ * the manipulation of scalar values, or simple containers of scalar values,
+ * where it would be inconvenient to deal with a great amount of other state.
  */
 Jsonb *
 JsonbValueToJsonb(JsonbValue *val)
@@ -521,6 +532,30 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
 	JsonbValue *res = NULL;
 	JsonbValue	v;
 	JsonbIteratorToken tok;
+	int	i;
+
+	if (jbval && (seq == WJB_ELEM || seq == WJB_VALUE) && jbval->type == jbvObject)
+	{
+		pushJsonbValue(pstate, WJB_BEGIN_OBJECT, NULL);
+		for (i = 0; i < jbval->val.object.nPairs; i++)
+		{
+			pushJsonbValue(pstate, WJB_KEY, &jbval->val.object.pairs[i].key);
+			pushJsonbValue(pstate, WJB_VALUE, &jbval->val.object.pairs[i].value);
+		}
+
+		return pushJsonbValue(pstate, WJB_END_OBJECT, NULL);
+	}
+
+	if (jbval && (seq == WJB_ELEM || seq == WJB_VALUE) && jbval->type == jbvArray)
+	{
+		pushJsonbValue(pstate, WJB_BEGIN_ARRAY, NULL);
+		for (i = 0; i < jbval->val.array.nElems; i++)
+		{
+			pushJsonbValue(pstate, WJB_ELEM, &jbval->val.array.elems[i]);
+		}
+
+		return pushJsonbValue(pstate, WJB_END_ARRAY, NULL);
+	}
 
 	if (!jbval || (seq != WJB_ELEM && seq != WJB_VALUE) ||
 		jbval->type != jbvBinary)
@@ -531,9 +566,30 @@ pushJsonbValue(JsonbParseState **pstate, JsonbIteratorToken seq,
 
 	/* unpack the binary and add each piece to the pstate */
 	it = JsonbIteratorInit(jbval->val.binary.data);
+
+	if ((jbval->val.binary.data->header & JB_FSCALAR) && *pstate)
+	{
+		tok = JsonbIteratorNext(&it, &v, true);
+		Assert(tok == WJB_BEGIN_ARRAY);
+		Assert(v.type == jbvArray && v.val.array.rawScalar);
+
+		tok = JsonbIteratorNext(&it, &v, true);
+		Assert(tok == WJB_ELEM);
+
+		res = pushJsonbValueScalar(pstate, seq, &v);
+
+		tok = JsonbIteratorNext(&it, &v, true);
+		Assert(tok == WJB_END_ARRAY);
+		Assert(it == NULL);
+
+		return res;
+	}
+
 	while ((tok = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
 		res = pushJsonbValueScalar(pstate, tok,
-								   tok < WJB_BEGIN_ARRAY ? &v : NULL);
+								   tok < WJB_BEGIN_ARRAY ||
+								   (tok == WJB_BEGIN_ARRAY &&
+									v.val.array.rawScalar) ? &v : NULL);
 
 	return res;
 }
diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c
index bf2c91f..6f87562 100644
--- a/src/backend/utils/adt/jsonfuncs.c
+++ b/src/backend/utils/adt/jsonfuncs.c
@@ -20,18 +20,23 @@
 #include "catalog/pg_type.h"
 #include "fmgr.h"
 #include "funcapi.h"
+#include "executor/execExpr.h"
 #include "lib/stringinfo.h"
 #include "mb/pg_wchar.h"
 #include "miscadmin.h"
+#include "nodes/nodeFuncs.h"
+#include "parser/parse_coerce.h"
 #include "utils/array.h"
 #include "utils/builtins.h"
 #include "utils/hsearch.h"
+#include "utils/fmgroids.h"
 #include "utils/json.h"
 #include "utils/jsonapi.h"
 #include "utils/jsonb.h"
 #include "utils/lsyscache.h"
 #include "utils/memutils.h"
 #include "utils/typcache.h"
+#include "parser/parse_node.h"
 
 /* Operations available for setPath */
 #define JB_PATH_CREATE					0x0001
@@ -258,18 +263,19 @@ static JsonbValue *findJsonbValueFromContainerLen(JsonbContainer *container,
 /* functions supporting jsonb_delete, jsonb_set and jsonb_concat */
 static JsonbValue *IteratorConcat(JsonbIterator **it1, JsonbIterator **it2,
 			   JsonbParseState **state);
+static Datum jsonb_set_element(Datum datum, Datum *path, int path_len,
+							   Datum sourceData, Oid source_type);
+static Datum jsonb_get_element(Jsonb *jb, Datum *path, int npath,
+							   bool *isnull, bool as_text);
 static JsonbValue *setPath(JsonbIterator **it, Datum *path_elems,
 		bool *path_nulls, int path_len,
-		JsonbParseState **st, int level, Jsonb *newval,
-		int op_type);
+		JsonbParseState **st, int level, JsonbValue *newval, int op_type);
 static void setPathObject(JsonbIterator **it, Datum *path_elems,
 			  bool *path_nulls, int path_len, JsonbParseState **st,
-			  int level,
-			  Jsonb *newval, uint32 npairs, int op_type);
+			  int level, JsonbValue *newval, uint32 npairs, int op_type);
 static void setPathArray(JsonbIterator **it, Datum *path_elems,
 			 bool *path_nulls, int path_len, JsonbParseState **st,
-			 int level, Jsonb *newval, uint32 nelems, int op_type);
-static void addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb);
+			 int level, JsonbValue *newval, uint32 nelems, int op_type);
 
 
 /*
@@ -1172,16 +1178,11 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 {
 	Jsonb	   *jb = PG_GETARG_JSONB(0);
 	ArrayType  *path = PG_GETARG_ARRAYTYPE_P(1);
-	Jsonb	   *res;
 	Datum	   *pathtext;
 	bool	   *pathnulls;
+	bool		isnull;
 	int			npath;
-	int			i;
-	bool		have_object = false,
-				have_array = false;
-	JsonbValue *jbvp = NULL;
-	JsonbValue	tv;
-	JsonbContainer *container;
+	Datum		res;
 
 	/*
 	 * If the array contains any null elements, return NULL, on the grounds
@@ -1196,9 +1197,28 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 	deconstruct_array(path, TEXTOID, -1, false, 'i',
 					  &pathtext, &pathnulls, &npath);
 
-	/* Identify whether we have object, array, or scalar at top-level */
-	container = &jb->root;
+	res = jsonb_get_element(jb, pathtext, npath, &isnull, as_text);
+
+	if (isnull)
+		PG_RETURN_NULL();
+	else
+		PG_RETURN_DATUM(res);
+}
 
+static Datum
+jsonb_get_element(Jsonb *jb, Datum *path, int npath, bool *isnull, bool as_text)
+{
+	Jsonb		   *res;
+	JsonbContainer *container = &jb->root;
+	JsonbValue	   *jbvp = NULL;
+	JsonbValue		tv;
+	int				i;
+	bool			have_object = false,
+					have_array = false;
+
+	*isnull = false;
+
+	/* Identify whether we have object, array, or scalar at top-level */
 	if (JB_ROOT_IS_OBJECT(jb))
 		have_object = true;
 	else if (JB_ROOT_IS_ARRAY(jb) && !JB_ROOT_IS_SCALAR(jb))
@@ -1223,14 +1243,14 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 	{
 		if (as_text)
 		{
-			PG_RETURN_TEXT_P(cstring_to_text(JsonbToCString(NULL,
+			return PointerGetDatum(cstring_to_text(JsonbToCString(NULL,
 															container,
 															VARSIZE(jb))));
 		}
 		else
 		{
 			/* not text mode - just hand back the jsonb */
-			PG_RETURN_JSONB(jb);
+			return JsonbGetDatum(jb);
 		}
 	}
 
@@ -1240,21 +1260,24 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 		{
 			jbvp = findJsonbValueFromContainerLen(container,
 												  JB_FOBJECT,
-												  VARDATA(pathtext[i]),
-											VARSIZE(pathtext[i]) - VARHDRSZ);
+												  VARDATA(path[i]),
+												  VARSIZE(path[i]) - VARHDRSZ);
 		}
 		else if (have_array)
 		{
 			long		lindex;
 			uint32		index;
-			char	   *indextext = TextDatumGetCString(pathtext[i]);
+			char	   *indextext = TextDatumGetCString(path[i]);
 			char	   *endptr;
 
 			errno = 0;
 			lindex = strtol(indextext, &endptr, 10);
 			if (endptr == indextext || *endptr != '\0' || errno != 0 ||
 				lindex > INT_MAX || lindex < INT_MIN)
-				PG_RETURN_NULL();
+			{
+				*isnull = true;
+				return PointerGetDatum(NULL);
+			}
 
 			if (lindex >= 0)
 			{
@@ -1272,7 +1295,10 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 				nelements = JsonContainerSize(container);
 
 				if (-lindex > nelements)
-					PG_RETURN_NULL();
+				{
+					*isnull = true;
+					return PointerGetDatum(NULL);
+				}
 				else
 					index = nelements + lindex;
 			}
@@ -1282,11 +1308,15 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 		else
 		{
 			/* scalar, extraction yields a null */
-			PG_RETURN_NULL();
+			*isnull = true;
+			return PointerGetDatum(NULL);
 		}
 
 		if (jbvp == NULL)
-			PG_RETURN_NULL();
+		{
+			*isnull = true;
+			return PointerGetDatum(NULL);
+		}
 		else if (i == npath - 1)
 			break;
 
@@ -1311,27 +1341,57 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text)
 	{
 		/* special-case outputs for string and null values */
 		if (jbvp->type == jbvString)
-			PG_RETURN_TEXT_P(cstring_to_text_with_len(jbvp->val.string.val,
-													  jbvp->val.string.len));
+			return PointerGetDatum(
+							cstring_to_text_with_len(jbvp->val.string.val,
+													 jbvp->val.string.len));
 		if (jbvp->type == jbvNull)
-			PG_RETURN_NULL();
+		{
+			*isnull = true;
+			return PointerGetDatum(NULL);
+		}
 	}
 
 	res = JsonbValueToJsonb(jbvp);
 
 	if (as_text)
 	{
-		PG_RETURN_TEXT_P(cstring_to_text(JsonbToCString(NULL,
+		return PointerGetDatum(cstring_to_text(JsonbToCString(NULL,
 														&res->root,
 														VARSIZE(res))));
 	}
 	else
 	{
 		/* not text mode - just hand back the jsonb */
-		PG_RETURN_JSONB(res);
+		return JsonbGetDatum(res);
 	}
 }
 
+Datum
+jsonb_set_element(Datum jsonbdatum, Datum *path, int path_len,
+				  Datum sourceData, Oid source_type)
+{
+	Jsonb			   *jb = DatumGetJsonb(jsonbdatum);
+	JsonbValue		   *newval,
+					   *res;
+	JsonbParseState    *state = NULL;
+	JsonbIterator 	   *it;
+	bool			   *path_nulls = palloc0(path_len * sizeof(bool));
+
+	newval = to_jsonb_worker(sourceData, source_type);
+
+	if (newval->type == jbvArray && newval->val.array.rawScalar)
+		*newval = newval->val.array.elems[0];
+
+	it = JsonbIteratorInit(&jb->root);
+
+	res = setPath(&it, path, path_nulls, path_len, &state, 0,
+				  newval, JB_PATH_CREATE);
+
+	pfree(path_nulls);
+
+	PG_RETURN_JSONB(JsonbValueToJsonb(res));
+}
+
 /*
  * SQL function json_array_length(json) -> int
  */
@@ -3279,57 +3339,6 @@ jsonb_strip_nulls(PG_FUNCTION_ARGS)
 }
 
 /*
- * Add values from the jsonb to the parse state.
- *
- * If the parse state container is an object, the jsonb is pushed as
- * a value, not a key.
- *
- * This needs to be done using an iterator because pushJsonbValue doesn't
- * like getting jbvBinary values, so we can't just push jb as a whole.
- */
-static void
-addJsonbToParseState(JsonbParseState **jbps, Jsonb *jb)
-{
-	JsonbIterator *it;
-	JsonbValue *o = &(*jbps)->contVal;
-	JsonbValue	v;
-	JsonbIteratorToken type;
-
-	it = JsonbIteratorInit(&jb->root);
-
-	Assert(o->type == jbvArray || o->type == jbvObject);
-
-	if (JB_ROOT_IS_SCALAR(jb))
-	{
-		(void) JsonbIteratorNext(&it, &v, false);		/* skip array header */
-		(void) JsonbIteratorNext(&it, &v, false);		/* fetch scalar value */
-
-		switch (o->type)
-		{
-			case jbvArray:
-				(void) pushJsonbValue(jbps, WJB_ELEM, &v);
-				break;
-			case jbvObject:
-				(void) pushJsonbValue(jbps, WJB_VALUE, &v);
-				break;
-			default:
-				elog(ERROR, "unexpected parent of nested structure");
-		}
-	}
-	else
-	{
-		while ((type = JsonbIteratorNext(&it, &v, false)) != WJB_DONE)
-		{
-			if (type == WJB_KEY || type == WJB_VALUE || type == WJB_ELEM)
-				(void) pushJsonbValue(jbps, type, &v);
-			else
-				(void) pushJsonbValue(jbps, type, NULL);
-		}
-	}
-
-}
-
-/*
  * SQL function jsonb_pretty (jsonb)
  *
  * Pretty-printed text for the jsonb
@@ -3601,7 +3610,8 @@ jsonb_set(PG_FUNCTION_ARGS)
 {
 	Jsonb	   *in = PG_GETARG_JSONB(0);
 	ArrayType  *path = PG_GETARG_ARRAYTYPE_P(1);
-	Jsonb	   *newval = PG_GETARG_JSONB(2);
+	Jsonb	   *newjsonb = PG_GETARG_JSONB(2);
+	JsonbValue *newval = JsonbToJsonbValue(newjsonb);
 	bool		create = PG_GETARG_BOOL(3);
 	JsonbValue *res = NULL;
 	Datum	   *path_elems;
@@ -3693,7 +3703,8 @@ jsonb_insert(PG_FUNCTION_ARGS)
 {
 	Jsonb	   *in = PG_GETARG_JSONB(0);
 	ArrayType  *path = PG_GETARG_ARRAYTYPE_P(1);
-	Jsonb	   *newval = PG_GETARG_JSONB(2);
+	Jsonb	   *newjsonb = PG_GETARG_JSONB(2);
+	JsonbValue *newval = JsonbToJsonbValue(newjsonb);
 	bool		after = PG_GETARG_BOOL(3);
 	JsonbValue *res = NULL;
 	Datum	   *path_elems;
@@ -3856,7 +3867,7 @@ IteratorConcat(JsonbIterator **it1, JsonbIterator **it2,
 static JsonbValue *
 setPath(JsonbIterator **it, Datum *path_elems,
 		bool *path_nulls, int path_len,
-		JsonbParseState **st, int level, Jsonb *newval, int op_type)
+		JsonbParseState **st, int level, JsonbValue *newval, int op_type)
 {
 	JsonbValue	v;
 	JsonbIteratorToken r;
@@ -3909,11 +3920,11 @@ setPath(JsonbIterator **it, Datum *path_elems,
 static void
 setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 			  int path_len, JsonbParseState **st, int level,
-			  Jsonb *newval, uint32 npairs, int op_type)
+			  JsonbValue *newval, uint32 npairs, int op_type)
 {
-	JsonbValue	v;
 	int			i;
-	JsonbValue	k;
+	JsonbValue	k,
+				v;
 	bool		done = false;
 
 	if (level >= path_len || path_nulls[level])
@@ -3930,7 +3941,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 		newkey.val.string.val = VARDATA_ANY(path_elems[level]);
 
 		(void) pushJsonbValue(st, WJB_KEY, &newkey);
-		addJsonbToParseState(st, newval);
+		(void) pushJsonbValue(st, WJB_VALUE, newval);
 	}
 
 	for (i = 0; i < npairs; i++)
@@ -3961,7 +3972,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 				if (!(op_type & JB_PATH_DELETE))
 				{
 					(void) pushJsonbValue(st, WJB_KEY, &k);
-					addJsonbToParseState(st, newval);
+					(void) pushJsonbValue(st, WJB_VALUE, newval);
 				}
 				done = true;
 			}
@@ -3984,7 +3995,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 				newkey.val.string.val = VARDATA_ANY(path_elems[level]);
 
 				(void) pushJsonbValue(st, WJB_KEY, &newkey);
-				addJsonbToParseState(st, newval);
+				(void) pushJsonbValue(st, WJB_VALUE, newval);
 			}
 
 			(void) pushJsonbValue(st, r, &k);
@@ -4016,7 +4027,7 @@ setPathObject(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 static void
 setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 			 int path_len, JsonbParseState **st, int level,
-			 Jsonb *newval, uint32 nelems, int op_type)
+			 JsonbValue *newval, uint32 nelems, int op_type)
 {
 	JsonbValue	v;
 	int			idx,
@@ -4064,7 +4075,7 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 		(op_type & JB_PATH_CREATE_OR_INSERT))
 	{
 		Assert(newval != NULL);
-		addJsonbToParseState(st, newval);
+		(void) pushJsonbValue(st, WJB_ELEM, newval);
 		done = true;
 	}
 
@@ -4080,7 +4091,7 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 				r = JsonbIteratorNext(it, &v, true);	/* skip */
 
 				if (op_type & (JB_PATH_INSERT_BEFORE | JB_PATH_CREATE))
-					addJsonbToParseState(st, newval);
+					(void) pushJsonbValue(st, WJB_ELEM, newval);
 
 				/*
 				 * We should keep current value only in case of
@@ -4091,7 +4102,7 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 					(void) pushJsonbValue(st, r, &v);
 
 				if (op_type & (JB_PATH_INSERT_AFTER | JB_PATH_REPLACE))
-					addJsonbToParseState(st, newval);
+					(void) pushJsonbValue(st, WJB_ELEM, newval);
 
 				done = true;
 			}
@@ -4125,8 +4136,138 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls,
 			if ((op_type & JB_PATH_CREATE_OR_INSERT) && !done &&
 				level == path_len - 1 && i == nelems - 1)
 			{
-				addJsonbToParseState(st, newval);
+				(void) pushJsonbValue(st, WJB_ELEM, newval);
 			}
 		}
 	}
 }
+
+/*
+ * Perform an actual data extraction or modification for the jsonb
+ * subscripting. As a result the extracted Datum or the modified containers
+ * value will be returned.
+ */
+Datum
+jsonb_subscripting_fetch(PG_FUNCTION_ARGS)
+{
+	Datum					containerSource = PG_GETARG_DATUM(0);
+	ExprEvalStep			*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+	SubscriptingRefState	*sbstate = step->d.sbsref.state;
+
+	return jsonb_get_element(DatumGetJsonb(containerSource),
+							 sbstate->upper,
+							 sbstate->numupper,
+							 step->resnull,
+							 false);
+}
+
+
+
+/*
+ * Perform an actual data extraction or modification for the jsonb
+ * subscripting. As a result the extracted Datum or the modified containers
+ * value will be returned.
+ */
+Datum
+jsonb_subscripting_assign(PG_FUNCTION_ARGS)
+{
+	Datum						containerSource = PG_GETARG_DATUM(0);
+	ExprEvalStep				*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+
+	SubscriptingRefState		*sbstate = step->d.sbsref.state;
+	bool						*is_null = step->resnull;
+	bool						eisnull = sbstate->replacenull;
+
+	/*
+	 * For an assignment to a fixed-length array type, both the original
+	 * array and the value to be assigned into it must be non-NULL, else
+	 * we punt and return the original array.
+	 */
+	if (sbstate->refattrlength > 0)	/* fixed-length array? */
+		if (eisnull || *is_null)
+			return containerSource;
+
+	/*
+	 * For assignment to varlena arrays, we handle a NULL original array
+	 * by substituting an empty (zero-dimensional) array; insertion of the
+	 * new element will result in a singleton array value.  It does not
+	 * matter whether the new element is NULL.
+	 */
+	if (*is_null)
+	{
+		containerSource =
+			PointerGetDatum(construct_empty_array(sbstate->refelemtype));
+		*is_null = false;
+	}
+
+	return jsonb_set_element(containerSource,
+							 sbstate->upper,
+							 sbstate->numupper,
+							 sbstate->replacevalue,
+							 sbstate->refelemtype);
+}
+
+/*
+ * Perform preparation for the jsonb subscripting. Since there are not any
+ * particular restrictions for this kind of subscripting, we will verify that
+ * it is not a slice operation. This function produces an expression that
+ * represents the result of extracting a single container element or the new
+ * container value with the source data inserted into the right part of the
+ * container.
+ */
+
+/*
+ * Handle jsonb-type subscripting logic.
+ */
+Datum
+jsonb_subscript_parse(PG_FUNCTION_ARGS)
+{
+	bool				isAssignment = PG_GETARG_BOOL(0);
+	SubscriptingRef	   *sbsref = (SubscriptingRef *) PG_GETARG_POINTER(1);
+	ParseState		   *pstate = (ParseState *) PG_GETARG_POINTER(2);
+	List			   *upperIndexpr = NIL;
+	ListCell		   *l;
+
+	if (sbsref->reflowerindexpr != NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_DATATYPE_MISMATCH),
+				 errmsg("jsonb subscript does not support slices"),
+				 parser_errposition(pstate, exprLocation(
+						 ((Node *)lfirst(sbsref->reflowerindexpr->head))))));
+
+	foreach(l, sbsref->refupperindexpr)
+	{
+		Node *subexpr = (Node *) lfirst(l);
+
+		/* subexpr is NULL only when slice syntax omitted a bound */
+
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("jsonb subscript does not support slices"),
+					 parser_errposition(pstate, exprLocation(
+						((Node *) lfirst(sbsref->refupperindexpr->head))))));
+
+		subexpr = coerce_to_target_type(pstate,
+										subexpr, exprType(subexpr),
+										TEXTOID, -1,
+										COERCION_ASSIGNMENT,
+										COERCE_IMPLICIT_CAST,
+										-1);
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("jsonb subscript must have text type"),
+					 parser_errposition(pstate, exprLocation(subexpr))));
+
+		upperIndexpr = lappend(upperIndexpr, subexpr);
+	}
+
+	sbsref->refupperindexpr = upperIndexpr;
+	if (isAssignment)
+		sbsref->refevalfunc = F_JSONB_SUBSCRIPTING_ASSIGN;
+	else
+		sbsref->refevalfunc = F_JSONB_SUBSCRIPTING_FETCH;
+
+	PG_RETURN_POINTER(sbsref);
+}
diff --git a/src/backend/utils/adt/pgstatfuncs.c b/src/backend/utils/adt/pgstatfuncs.c
index dd2b924..a987d0d 100644
--- a/src/backend/utils/adt/pgstatfuncs.c
+++ b/src/backend/utils/adt/pgstatfuncs.c
@@ -20,7 +20,6 @@
 #include "funcapi.h"
 #include "miscadmin.h"
 #include "pgstat.h"
-#include "postmaster/postmaster.h"
 #include "storage/proc.h"
 #include "storage/procarray.h"
 #include "utils/acl.h"
@@ -539,7 +538,7 @@ pg_stat_get_progress_info(PG_FUNCTION_ARGS)
 Datum
 pg_stat_get_activity(PG_FUNCTION_ARGS)
 {
-#define PG_STAT_GET_ACTIVITY_COLS	24
+#define PG_STAT_GET_ACTIVITY_COLS	23
 	int			num_backends = pgstat_fetch_stat_numbackends();
 	int			curr_backend;
 	int			pid = PG_ARGISNULL(0) ? -1 : PG_GETARG_INT32(0);
@@ -583,8 +582,8 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 		LocalPgBackendStatus *local_beentry;
 		PgBackendStatus *beentry;
 		PGPROC	   *proc;
-		const char *wait_event_type = NULL;
-		const char *wait_event = NULL;
+		const char *wait_event_type;
+		const char *wait_event;
 
 		MemSet(values, 0, sizeof(values));
 		MemSet(nulls, 0, sizeof(nulls));
@@ -616,18 +615,9 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 			continue;
 
 		/* Values available to all callers */
-		if (beentry->st_databaseid != InvalidOid)
-			values[0] = ObjectIdGetDatum(beentry->st_databaseid);
-		else
-			nulls[0] = true;
-
+		values[0] = ObjectIdGetDatum(beentry->st_databaseid);
 		values[1] = Int32GetDatum(beentry->st_procpid);
-
-		if (beentry->st_userid != InvalidOid)
-			values[2] = ObjectIdGetDatum(beentry->st_userid);
-		else
-			nulls[2] = true;
-
+		values[2] = ObjectIdGetDatum(beentry->st_userid);
 		if (beentry->st_appname)
 			values[3] = CStringGetTextDatum(beentry->st_appname);
 		else
@@ -645,17 +635,17 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 
 		if (beentry->st_ssl)
 		{
-			values[18] = BoolGetDatum(true);	/* ssl */
-			values[19] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
-			values[20] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
-			values[21] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
-			values[22] = BoolGetDatum(beentry->st_sslstatus->ssl_compression);
-			values[23] = CStringGetTextDatum(beentry->st_sslstatus->ssl_clientdn);
+			values[17] = BoolGetDatum(true);	/* ssl */
+			values[18] = CStringGetTextDatum(beentry->st_sslstatus->ssl_version);
+			values[19] = CStringGetTextDatum(beentry->st_sslstatus->ssl_cipher);
+			values[20] = Int32GetDatum(beentry->st_sslstatus->ssl_bits);
+			values[21] = BoolGetDatum(beentry->st_sslstatus->ssl_compression);
+			values[22] = CStringGetTextDatum(beentry->st_sslstatus->ssl_clientdn);
 		}
 		else
 		{
-			values[18] = BoolGetDatum(false);	/* ssl */
-			nulls[19] = nulls[20] = nulls[21] = nulls[22] = nulls[23] = true;
+			values[17] = BoolGetDatum(false);	/* ssl */
+			nulls[18] = nulls[19] = nulls[20] = nulls[21] = nulls[22] = true;
 		}
 
 		/* Values only available to role member */
@@ -700,24 +690,10 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 				wait_event = pgstat_get_wait_event(raw_wait_event);
 
 			}
-			else if (beentry->st_backendType != B_BACKEND)
+			else
 			{
-				/*
-				 * For an auxiliary process, retrieve process info from
-				 * AuxiliaryProcs stored in shared-memory.
-				 */
-				proc = AuxiliaryPidGetProc(beentry->st_procpid);
-
-				if (proc != NULL)
-				{
-					uint32		raw_wait_event;
-
-					raw_wait_event =
-						UINT32_ACCESS_ONCE(proc->wait_event_info);
-					wait_event_type =
-						pgstat_get_wait_event_type(raw_wait_event);
-					wait_event = pgstat_get_wait_event(raw_wait_event);
-				}
+				wait_event_type = NULL;
+				wait_event = NULL;
 			}
 
 			if (wait_event_type)
@@ -817,9 +793,6 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 					nulls[14] = true;
 				}
 			}
-			/* Add backend type */
-			values[17] =
-				CStringGetTextDatum(pgstat_get_backend_desc(beentry->st_backendType));
 		}
 		else
 		{
@@ -835,7 +808,6 @@ pg_stat_get_activity(PG_FUNCTION_ARGS)
 			nulls[12] = true;
 			nulls[13] = true;
 			nulls[14] = true;
-			nulls[17] = true;
 		}
 
 		tuplestore_putvalues(tupstore, tupdesc, values, nulls);
diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c
index c2681ce..f692f2f 100644
--- a/src/backend/utils/adt/ruleutils.c
+++ b/src/backend/utils/adt/ruleutils.c
@@ -447,7 +447,7 @@ static void get_tablesample_def(TableSampleClause *tablesample,
 static void get_opclass_name(Oid opclass, Oid actual_datatype,
 				 StringInfo buf);
 static Node *processIndirection(Node *node, deparse_context *context);
-static void printSubscripts(ArrayRef *aref, deparse_context *context);
+static void printSubscripts(SubscriptingRef *aref, deparse_context *context);
 static char *get_relation_name(Oid relid);
 static char *generate_relation_name(Oid relid, List *namespaces);
 static char *generate_qualified_relation_name(Oid relid);
@@ -1448,10 +1448,11 @@ static char *
 pg_get_statisticsext_worker(Oid statextid, bool missing_ok)
 {
 	Form_pg_statistic_ext	statextrec;
+	Form_pg_class			pgclassrec;
 	HeapTuple	statexttup;
+	HeapTuple	pgclasstup;
 	StringInfoData buf;
 	int			colno;
-	char	   *nsp;
 
 	statexttup = SearchSysCache1(STATEXTOID, ObjectIdGetDatum(statextid));
 
@@ -1464,12 +1465,20 @@ pg_get_statisticsext_worker(Oid statextid, bool missing_ok)
 
 	statextrec = (Form_pg_statistic_ext) GETSTRUCT(statexttup);
 
+	pgclasstup = SearchSysCache1(RELOID, ObjectIdGetDatum(statextrec->starelid));
+
+	if (!HeapTupleIsValid(pgclasstup))
+	{
+		/* elog(ERROR) does not return; error cleanup releases statexttup */
+		elog(ERROR, "cache lookup failed for relation %u", statextrec->starelid);
+	}
+
+	pgclassrec = (Form_pg_class) GETSTRUCT(pgclasstup);
+
 	initStringInfo(&buf);
 
-	nsp = get_namespace_name(statextrec->stanamespace);
 	appendStringInfo(&buf, "CREATE STATISTICS %s ON (",
-					 quote_qualified_identifier(nsp,
-												NameStr(statextrec->staname)));
+							quote_identifier(NameStr(statextrec->staname)));
 
 	for (colno = 0; colno < statextrec->stakeys.dim1; colno++)
 	{
@@ -1485,9 +1494,10 @@ pg_get_statisticsext_worker(Oid statextid, bool missing_ok)
 	}
 
 	appendStringInfo(&buf, ") FROM %s",
-					 generate_relation_name(statextrec->starelid, NIL));
+							quote_identifier(NameStr(pgclassrec->relname)));
 
 	ReleaseSysCache(statexttup);
+	ReleaseSysCache(pgclasstup);
 
 	return buf.data;
 }
@@ -6107,7 +6117,7 @@ get_update_query_targetlist_def(Query *query, List *targetList,
 		{
 			/*
 			 * We must dig down into the expr to see if it's a PARAM_MULTIEXPR
-			 * Param.  That could be buried under FieldStores and ArrayRefs
+			 * Param.  That could be buried under FieldStores and SubscriptingRefs
 			 * (cf processIndirection()), and underneath those there could be
 			 * an implicit type coercion.
 			 */
@@ -6120,13 +6130,10 @@ get_update_query_targetlist_def(Query *query, List *targetList,
 
 					expr = (Node *) linitial(fstore->newvals);
 				}
-				else if (IsA(expr, ArrayRef))
+				else if (IsA(expr, SubscriptingRef) && IsAssignment(expr))
 				{
-					ArrayRef   *aref = (ArrayRef *) expr;
-
-					if (aref->refassgnexpr == NULL)
-						break;
-					expr = (Node *) aref->refassgnexpr;
+					SubscriptingRef   *sbsref = (SubscriptingRef *) expr;
+					expr = (Node *) sbsref->refassgnexpr;
 				}
 				else
 					break;
@@ -7159,7 +7166,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
 			/* single words: always simple */
 			return true;
 
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 		case T_ArrayExpr:
 		case T_RowExpr:
 		case T_CoalesceExpr:
@@ -7276,7 +7283,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
 						return true;	/* own parentheses */
 					}
 				case T_BoolExpr:		/* lower precedence */
-				case T_ArrayRef:		/* other separators */
+				case T_SubscriptingRef:		/* other separators */
 				case T_ArrayExpr:		/* other separators */
 				case T_RowExpr:	/* other separators */
 				case T_CoalesceExpr:	/* own parentheses */
@@ -7326,7 +7333,7 @@ isSimpleNode(Node *node, Node *parentNode, int prettyFlags)
 							return false;
 						return true;	/* own parentheses */
 					}
-				case T_ArrayRef:		/* other separators */
+				case T_SubscriptingRef:		/* other separators */
 				case T_ArrayExpr:		/* other separators */
 				case T_RowExpr:	/* other separators */
 				case T_CoalesceExpr:	/* own parentheses */
@@ -7512,9 +7519,9 @@ get_rule_expr(Node *node, deparse_context *context,
 			get_windowfunc_expr((WindowFunc *) node, context);
 			break;
 
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *aref = (ArrayRef *) node;
+				SubscriptingRef   *sbsref = (SubscriptingRef *) node;
 				bool		need_parens;
 
 				/*
@@ -7525,38 +7532,38 @@ get_rule_expr(Node *node, deparse_context *context,
 				 * here too, and display only the assignment source
 				 * expression.
 				 */
-				if (IsA(aref->refexpr, CaseTestExpr))
+				if (IsA(sbsref->refexpr, CaseTestExpr))
 				{
-					Assert(aref->refassgnexpr);
-					get_rule_expr((Node *) aref->refassgnexpr,
+					Assert(sbsref->refassgnexpr);
+					get_rule_expr((Node *) sbsref->refassgnexpr,
 								  context, showimplicit);
 					break;
 				}
 
 				/*
 				 * Parenthesize the argument unless it's a simple Var or a
-				 * FieldSelect.  (In particular, if it's another ArrayRef, we
+				 * FieldSelect.  (In particular, if it's another SubscriptingRef, we
 				 * *must* parenthesize to avoid confusion.)
 				 */
-				need_parens = !IsA(aref->refexpr, Var) &&
-					!IsA(aref->refexpr, FieldSelect);
+				need_parens = !IsA(sbsref->refexpr, Var) &&
+					!IsA(sbsref->refexpr, FieldSelect);
 				if (need_parens)
 					appendStringInfoChar(buf, '(');
-				get_rule_expr((Node *) aref->refexpr, context, showimplicit);
+				get_rule_expr((Node *) sbsref->refexpr, context, showimplicit);
 				if (need_parens)
 					appendStringInfoChar(buf, ')');
 
-				/*
-				 * If there's a refassgnexpr, we want to print the node in the
-				 * format "array[subscripts] := refassgnexpr".  This is not
-				 * legal SQL, so decompilation of INSERT or UPDATE statements
-				 * should always use processIndirection as part of the
-				 * statement-level syntax.  We should only see this when
-				 * EXPLAIN tries to print the targetlist of a plan resulting
-				 * from such a statement.
-				 */
-				if (aref->refassgnexpr)
+				if (IsAssignment(sbsref))
 				{
+					/*
+					 * If there's a refassgnexpr, we want to print the node in the
+					 * format "array[subscripts] := refassgnexpr".  This is not
+					 * legal SQL, so decompilation of INSERT or UPDATE statements
+					 * should always use processIndirection as part of the
+					 * statement-level syntax.  We should only see this when
+					 * EXPLAIN tries to print the targetlist of a plan resulting
+					 * from such a statement.
+					 */
 					Node	   *refassgnexpr;
 
 					/*
@@ -7571,8 +7578,8 @@ get_rule_expr(Node *node, deparse_context *context,
 				}
 				else
 				{
-					/* Just an ordinary array fetch, so print subscripts */
-					printSubscripts(aref, context);
+					/* Just an ordinary container fetch, so print subscripts */
+					printSubscripts(sbsref, context);
 				}
 			}
 			break;
@@ -7770,12 +7777,13 @@ get_rule_expr(Node *node, deparse_context *context,
 				bool		need_parens;
 
 				/*
-				 * Parenthesize the argument unless it's an ArrayRef or
+				 * Parenthesize the argument unless it's a SubscriptingRef or
 				 * another FieldSelect.  Note in particular that it would be
 				 * WRONG to not parenthesize a Var argument; simplicity is not
 				 * the issue here, having the right number of names is.
 				 */
-				need_parens = !IsA(arg, ArrayRef) &&!IsA(arg, FieldSelect);
+				need_parens = !IsA(arg, SubscriptingRef) &&
+							  !IsA(arg, FieldSelect);
 				if (need_parens)
 					appendStringInfoChar(buf, '(');
 				get_rule_expr(arg, context, true);
@@ -10088,7 +10096,7 @@ get_opclass_name(Oid opclass, Oid actual_datatype,
 /*
  * processIndirection - take care of array and subfield assignment
  *
- * We strip any top-level FieldStore or assignment ArrayRef nodes that
+ * We strip any top-level FieldStore or assignment SubscriptingRef nodes that
  * appear in the input, printing them as decoration for the base column
  * name (which we assume the caller just printed).  Return the subexpression
  * that's to be assigned.
@@ -10130,19 +10138,17 @@ processIndirection(Node *node, deparse_context *context)
 			 */
 			node = (Node *) linitial(fstore->newvals);
 		}
-		else if (IsA(node, ArrayRef))
+		else if (IsA(node, SubscriptingRef) && IsAssignment(node))
 		{
-			ArrayRef   *aref = (ArrayRef *) node;
+			SubscriptingRef   *sbsref = (SubscriptingRef *) node;
 
-			if (aref->refassgnexpr == NULL)
-				break;
-			printSubscripts(aref, context);
+			printSubscripts(sbsref, context);
 
 			/*
 			 * We ignore refexpr since it should be an uninteresting reference
 			 * to the target column or subcolumn.
 			 */
-			node = (Node *) aref->refassgnexpr;
+			node = (Node *) sbsref->refassgnexpr;
 		}
 		else
 			break;
@@ -10152,14 +10158,14 @@ processIndirection(Node *node, deparse_context *context)
 }
 
 static void
-printSubscripts(ArrayRef *aref, deparse_context *context)
+printSubscripts(SubscriptingRef *sbsref, deparse_context *context)
 {
 	StringInfo	buf = context->buf;
 	ListCell   *lowlist_item;
 	ListCell   *uplist_item;
 
-	lowlist_item = list_head(aref->reflowerindexpr);	/* could be NULL */
-	foreach(uplist_item, aref->refupperindexpr)
+	lowlist_item = list_head(sbsref->reflowerindexpr);	/* could be NULL */
+	foreach(uplist_item, sbsref->refupperindexpr)
 	{
 		appendStringInfoChar(buf, '[');
 		if (lowlist_item)
diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c
index 5c382a2..cc24c8a 100644
--- a/src/backend/utils/adt/selfuncs.c
+++ b/src/backend/utils/adt/selfuncs.c
@@ -3404,7 +3404,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 		RelOptInfo *rel = varinfo1->rel;
 		double		reldistinct = 1;
 		double		relmaxndistinct = reldistinct;
-		int			relvarcount = 0;
+		int			relvarcount = 1;
 		List	   *newvarinfos = NIL;
 		List	   *relvarinfos = NIL;
 
@@ -3436,10 +3436,6 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 		 * we multiply them together.  Any remaining relvarinfos after
 		 * no more multivariate matches are found are assumed independent too,
 		 * so their individual ndistinct estimates are multiplied also.
-		 *
-		 * While iterating, count how many separate numdistinct values we
-		 * apply.  We apply a fudge factor below, but only if we multiplied
-		 * more than one such values.
 		 */
 		while (relvarinfos)
 		{
@@ -3451,7 +3447,7 @@ estimate_num_groups(PlannerInfo *root, List *groupExprs, double input_rows,
 				reldistinct *= mvndistinct;
 				if (relmaxndistinct < mvndistinct)
 					relmaxndistinct = mvndistinct;
-				relvarcount++;
+				relvarcount++;	/* inaccurate, but doesn't matter */
 			}
 			else
 			{
diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c
index b891f38..fc37e1f 100644
--- a/src/backend/utils/cache/lsyscache.c
+++ b/src/backend/utils/cache/lsyscache.c
@@ -3061,3 +3061,23 @@ get_range_subtype(Oid rangeOid)
 	else
 		return InvalidOid;
 }
+
+/*
+ * get_typsbsparse
+ *
+ *		Given the type OID, return the type's typsbsparse procedure, if any.
+ */
+RegProcedure
+get_typsbsparse(Oid typid)
+{
+	HeapTuple		tp;
+	RegProcedure	result = InvalidOid;
+
+	tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typid));
+	if (HeapTupleIsValid(tp))
+	{
+		result = ((Form_pg_type) GETSTRUCT(tp))->typsbsparse;
+		ReleaseSysCache(tp);
+	}
+	return result;
+}
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index bc52183..a6b60c6 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -4743,7 +4743,7 @@ RelationGetIndexExpressions(Relation relation)
  * RelationGetIndexPredicate -- get the index predicate for an index
  *
  * We cache the result of transforming pg_index.indpred into an implicit-AND
- * node tree (suitable for use in planning).
+ * node tree (suitable for ExecQual).
  * If the rel is not an index or has no predicate, we return NIL.
  * Otherwise, the returned tree is copied into the caller's memory context.
  * (We don't want to return a pointer to the relcache copy, since it could
diff --git a/src/backend/utils/generate-errcodes.pl b/src/backend/utils/generate-errcodes.pl
index 6a577f6..b84c6b0 100644
--- a/src/backend/utils/generate-errcodes.pl
+++ b/src/backend/utils/generate-errcodes.pl
@@ -10,7 +10,7 @@ print
   "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n";
 print "/* there is deliberately not an #ifndef ERRCODES_H here */\n";
 
-open my $errcodes, '<', $ARGV[0] or die;
+open my $errcodes, $ARGV[0] or die;
 
 while (<$errcodes>)
 {
diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c
index b8b4a06..9f938f2 100644
--- a/src/backend/utils/init/postinit.c
+++ b/src/backend/utils/init/postinit.c
@@ -665,12 +665,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 
 	/* The autovacuum launcher is done here */
 	if (IsAutoVacuumLauncherProcess())
-	{
-		/* report this backend in the PgBackendStatus array */
-		pgstat_bestart();
-
 		return;
-	}
 
 	/*
 	 * Start a new transaction here before first access to db, and get a
@@ -879,10 +874,7 @@ InitPostgres(const char *in_dbname, Oid dboid, const char *username,
 		 * transaction we started before returning.
 		 */
 		if (!bootstrap)
-		{
-			pgstat_bestart();
 			CommitTransactionCommand();
-		}
 		return;
 	}
 
diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c
index e9d561b..291bf76 100644
--- a/src/backend/utils/misc/guc.c
+++ b/src/backend/utils/misc/guc.c
@@ -3320,7 +3320,7 @@ static struct config_string ConfigureNamesString[] =
 			GUC_SUPERUSER_ONLY
 		},
 		&Log_directory,
-		"log",
+		"pg_log",
 		check_canonical_path, NULL, NULL
 	},
 	{
diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample
index 8a93bdc..a02b154 100644
--- a/src/backend/utils/misc/postgresql.conf.sample
+++ b/src/backend/utils/misc/postgresql.conf.sample
@@ -344,7 +344,7 @@
 					# (change requires restart)
 
 # These are only used if logging_collector is on:
-#log_directory = 'log'			# directory where log files are written,
+#log_directory = 'pg_log'		# directory where log files are written,
 					# can be absolute or relative to PGDATA
 #log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'	# log file name pattern,
 					# can include strftime() escapes
diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
index 1d3c498..14bd813 100644
--- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl
+++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl
@@ -24,10 +24,10 @@ $node->command_fails(['pg_basebackup'],
 
 # Some Windows ANSI code pages may reject this filename, in which case we
 # quietly proceed without this bit of test coverage.
-if (open my $badchars, '>>', "$tempdir/pgdata/FOO\xe0\xe0\xe0BAR")
+if (open BADCHARS, ">>$tempdir/pgdata/FOO\xe0\xe0\xe0BAR")
 {
-	print $badchars "test backup of file with non-UTF8 name\n";
-	close $badchars;
+	print BADCHARS "test backup of file with non-UTF8 name\n";
+	close BADCHARS;
 }
 
 $node->set_replication_conf();
@@ -45,19 +45,19 @@ $node->command_fails(
 
 ok(-d "$tempdir/backup", 'backup directory was created and left behind');
 
-open my $conf, '>>', "$pgdata/postgresql.conf";
-print $conf "max_replication_slots = 10\n";
-print $conf "max_wal_senders = 10\n";
-print $conf "wal_level = replica\n";
-close $conf;
+open CONF, ">>$pgdata/postgresql.conf";
+print CONF "max_replication_slots = 10\n";
+print CONF "max_wal_senders = 10\n";
+print CONF "wal_level = replica\n";
+close CONF;
 $node->restart;
 
 # Write some files to test that they are not copied.
 foreach my $filename (qw(backup_label tablespace_map postgresql.auto.conf.tmp current_logfiles.tmp))
 {
-	open my $file, '>>', "$pgdata/$filename";
-	print $file "DONOTCOPY";
-	close $file;
+	open FILE, ">>$pgdata/$filename";
+	print FILE "DONOTCOPY";
+	close FILE;
 }
 
 $node->command_ok([ 'pg_basebackup', '-D', "$tempdir/backup", '-X', 'none' ],
@@ -124,8 +124,8 @@ $node->command_fails(
 my $superlongname = "superlongname_" . ("x" x 100);
 my $superlongpath = "$pgdata/$superlongname";
 
-open my $file, '>', "$superlongpath" or die "unable to create file $superlongpath";
-close $file;
+open FILE, ">$superlongpath" or die "unable to create file $superlongpath";
+close FILE;
 $node->command_fails(
 	[ 'pg_basebackup', '-D', "$tempdir/tarbackup_l1", '-Ft' ],
 	'pg_basebackup tar with long name fails');
diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl
index 9182574..8f16bf9 100644
--- a/src/bin/pg_ctl/t/001_start_stop.pl
+++ b/src/bin/pg_ctl/t/001_start_stop.pl
@@ -20,18 +20,18 @@ command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data", '-o', '-N' ],
 	'pg_ctl initdb');
 command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ],
 	'configure authentication');
-open my $conf, '>>', "$tempdir/data/postgresql.conf";
-print $conf "fsync = off\n";
-if (! $windows_os)
+open CONF, ">>$tempdir/data/postgresql.conf";
+print CONF "fsync = off\n";
+if (!$windows_os)
 {
-	print $conf "listen_addresses = ''\n";
-	print $conf "unix_socket_directories = '$tempdir_short'\n";
+	print CONF "listen_addresses = ''\n";
+	print CONF "unix_socket_directories = '$tempdir_short'\n";
 }
 else
 {
-	print $conf "listen_addresses = '127.0.0.1'\n";
+	print CONF "listen_addresses = '127.0.0.1'\n";
 }
-close $conf;
+close CONF;
 command_ok([ 'pg_ctl', 'start', '-D', "$tempdir/data" ],
 	'pg_ctl start');
 
diff --git a/src/bin/psql/create_help.pl b/src/bin/psql/create_help.pl
index cedb767..359670b 100644
--- a/src/bin/psql/create_help.pl
+++ b/src/bin/psql/create_help.pl
@@ -42,12 +42,12 @@ $define =~ s/\W/_/g;
 
 opendir(DIR, $docdir)
   or die "$0: could not open documentation source dir '$docdir': $!\n";
-open(my $hfile_handle, '>', $hfile)
+open(HFILE, ">$hfile")
   or die "$0: could not open output file '$hfile': $!\n";
-open(my $cfile_handle, '>', $cfile)
+open(CFILE, ">$cfile")
   or die "$0: could not open output file '$cfile': $!\n";
 
-print $hfile_handle "/*
+print HFILE "/*
  * *** Do not change this file by hand. It is automatically
  * *** generated from the DocBook documentation.
  *
@@ -72,7 +72,7 @@ struct _helpStruct
 extern const struct _helpStruct QL_HELP[];
 ";
 
-print $cfile_handle "/*
+print CFILE "/*
  * *** Do not change this file by hand. It is automatically
  * *** generated from the DocBook documentation.
  *
@@ -97,9 +97,9 @@ foreach my $file (sort readdir DIR)
 	my (@cmdnames, $cmddesc, $cmdsynopsis);
 	$file =~ /\.sgml$/ or next;
 
-	open(my $fh, '<', "$docdir/$file") or next;
-	my $filecontent = join('', <$fh>);
-	close $fh;
+	open(FILE, "$docdir/$file") or next;
+	my $filecontent = join('', <FILE>);
+	close FILE;
 
 	# Ignore files that are not for SQL language statements
 	$filecontent =~
@@ -171,7 +171,7 @@ foreach (sort keys %entries)
 	$synopsis =~ s/\\n/\\n"\n$prefix"/g;
 	my @args =
 	  ("buf", $synopsis, map("_(\"$_\")", @{ $entries{$_}{params} }));
-	print $cfile_handle "static void
+	print CFILE "static void
 sql_help_$id(PQExpBuffer buf)
 {
 \tappendPQExpBuffer(" . join(",\n$prefix", @args) . ");
@@ -180,14 +180,14 @@ sql_help_$id(PQExpBuffer buf)
 ";
 }
 
-print $cfile_handle "
+print CFILE "
 const struct _helpStruct QL_HELP[] = {
 ";
 foreach (sort keys %entries)
 {
 	my $id = $_;
 	$id =~ s/ /_/g;
-	print $cfile_handle "    { \"$_\",
+	print CFILE "    { \"$_\",
       N_(\"$entries{$_}{cmddesc}\"),
       sql_help_$id,
       $entries{$_}{nl_count} },
@@ -195,12 +195,12 @@ foreach (sort keys %entries)
 ";
 }
 
-print $cfile_handle "
+print CFILE "
     { NULL, NULL, NULL }    /* End of list marker */
 };
 ";
 
-print $hfile_handle "
+print HFILE "
 #define QL_HELP_COUNT	"
   . scalar(keys %entries) . "		/* number of help items */
 #define QL_MAX_CMD_LEN	$maxlen		/* largest strlen(cmd) */
@@ -209,6 +209,6 @@ print $hfile_handle "
 #endif /* $define */
 ";
 
-close $cfile_handle;
-close $hfile_handle;
+close CFILE;
+close HFILE;
 closedir DIR;
diff --git a/src/include/access/hash_xlog.h b/src/include/access/hash_xlog.h
index 644da2e..2e64cfa 100644
--- a/src/include/access/hash_xlog.h
+++ b/src/include/access/hash_xlog.h
@@ -265,13 +265,11 @@ typedef struct xl_hash_init_bitmap_page
 typedef struct xl_hash_vacuum_one_page
 {
 	RelFileNode	hnode;
-	int		ntuples;
-
-	/* TARGET OFFSET NUMBERS FOLLOW AT THE END */
+	double		ntuples;
 }	xl_hash_vacuum_one_page;
 
 #define SizeOfHashVacuumOnePage	\
-	(offsetof(xl_hash_vacuum_one_page, ntuples) + sizeof(int))
+	(offsetof(xl_hash_vacuum_one_page, ntuples) + sizeof(double))
 
 extern void hash_redo(XLogReaderState *record);
 extern void hash_desc(StringInfo buf, XLogReaderState *record);
diff --git a/src/include/access/tuptoaster.h b/src/include/access/tuptoaster.h
index c7abeed..a6233c3 100644
--- a/src/include/access/tuptoaster.h
+++ b/src/include/access/tuptoaster.h
@@ -194,17 +194,6 @@ extern Datum toast_flatten_tuple_to_datum(HeapTupleHeader tup,
 							 TupleDesc tupleDesc);
 
 /* ----------
- * toast_build_flattened_tuple -
- *
- *	Build a tuple containing no out-of-line toasted fields.
- *	(This does not eliminate compressed or short-header datums.)
- * ----------
- */
-extern HeapTuple toast_build_flattened_tuple(TupleDesc tupleDesc,
-							Datum *values,
-							bool *isnull);
-
-/* ----------
  * toast_compress_datum -
  *
  *	Create a compressed version of a varlena datum, if possible
diff --git a/src/include/c.h b/src/include/c.h
index fba07c6..08cf94b 100644
--- a/src/include/c.h
+++ b/src/include/c.h
@@ -422,6 +422,8 @@ typedef struct
 	int			indx[MAXDIM];
 } IntArray;
 
+#define MAX_SUBSCRIPT_DEPTH 12
+
 /* ----------------
  *		Variable-length datatypes all share the 'struct varlena' header.
  *
diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h
index fc374d7..b8fa18a 100644
--- a/src/include/catalog/catversion.h
+++ b/src/include/catalog/catversion.h
@@ -53,6 +53,6 @@
  */
 
 /*							yyyymmddN */
-#define CATALOG_VERSION_NO	201703261
+#define CATALOG_VERSION_NO	201703242
 
 #endif
diff --git a/src/include/catalog/pg_class.h b/src/include/catalog/pg_class.h
index d1d493e..79bb1ac 100644
--- a/src/include/catalog/pg_class.h
+++ b/src/include/catalog/pg_class.h
@@ -147,7 +147,7 @@ typedef FormData_pg_class *Form_pg_class;
  * Note: "3" in the relfrozenxid column stands for FirstNormalTransactionId;
  * similarly, "1" in relminmxid stands for FirstMultiXactId
  */
-DATA(insert OID = 1247 (  pg_type		PGNSP 71 0 PGUID 0 0 0 0 0 0 0 f f p r 30 0 t f f f f f f t n f 3 1 _null_ _null_ _null_));
+DATA(insert OID = 1247 (  pg_type		PGNSP 71 0 PGUID 0 0 0 0 0 0 0 f f p r 31 0 t f f f f f f t n f 3 1 _null_ _null_ _null_));
 DESCR("");
 DATA(insert OID = 1249 (  pg_attribute	PGNSP 75 0 PGUID 0 0 0 0 0 0 0 f f p r 21 0 f f f f f f f t n f 3 1 _null_ _null_ _null_));
 DESCR("");
diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h
index 79f9b90..5b6e5cd 100644
--- a/src/include/catalog/pg_proc.h
+++ b/src/include/catalog/pg_proc.h
@@ -2811,7 +2811,7 @@ DATA(insert OID = 3057 ( pg_stat_get_autoanalyze_count PGNSP PGUID 12 1 0 0 0 f
 DESCR("statistics: number of auto analyzes for a table");
 DATA(insert OID = 1936 (  pg_stat_get_backend_idset		PGNSP PGUID 12 1 100 0 0 f f f f t t s r 0 0 23 "" _null_ _null_ _null_ _null_ _null_ pg_stat_get_backend_idset _null_ _null_ _null_ ));
 DESCR("statistics: currently active backend IDs");
-DATA(insert OID = 2022 (  pg_stat_get_activity			PGNSP PGUID 12 1 100 0 0 f f f f f t s r 1 0 2249 "23" "{23,26,23,26,25,25,25,25,25,1184,1184,1184,1184,869,25,23,28,28,25,16,25,25,23,16,25}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,backend_type,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}" _null_ _null_ pg_stat_get_activity _null_ _null_ _null_ ));
+DATA(insert OID = 2022 (  pg_stat_get_activity			PGNSP PGUID 12 1 100 0 0 f f f f f t s r 1 0 2249 "23" "{23,26,23,26,25,25,25,25,25,1184,1184,1184,1184,869,25,23,28,28,16,25,25,23,16,25}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{pid,datid,pid,usesysid,application_name,state,query,wait_event_type,wait_event,xact_start,query_start,backend_start,state_change,client_addr,client_hostname,client_port,backend_xid,backend_xmin,ssl,sslversion,sslcipher,sslbits,sslcompression,sslclientdn}" _null_ _null_ pg_stat_get_activity _null_ _null_ _null_ ));
 DESCR("statistics: information about currently active backends");
 DATA(insert OID = 3318 (  pg_stat_get_progress_info			  PGNSP PGUID 12 1 100 0 0 f f f f t t s r 1 0 2249 "25" "{25,23,26,26,20,20,20,20,20,20,20,20,20,20}" "{i,o,o,o,o,o,o,o,o,o,o,o,o,o}" "{cmdtype,pid,datid,relid,param1,param2,param3,param4,param5,param6,param7,param8,param9,param10}" _null_ _null_ pg_stat_get_progress_info _null_ _null_ _null_ ));
 DESCR("statistics: information about progress of backends running maintenance command");
@@ -5411,6 +5411,21 @@ DESCR("pg_controldata recovery state information as a function");
 DATA(insert OID = 3444 ( pg_control_init PGNSP PGUID 12 1 0 0 0 f f f f t f v s 0 0 2249 "" "{23,23,23,23,23,23,23,23,23,16,16,23}" "{o,o,o,o,o,o,o,o,o,o,o,o}" "{max_data_alignment,database_block_size,blocks_per_segment,wal_block_size,bytes_per_wal_segment,max_identifier_length,max_index_columns,max_toast_chunk_size,large_object_chunk_size,float4_pass_by_value,float8_pass_by_value,data_page_checksum_version}" _null_ _null_ pg_control_init _null_ _null_ _null_ ));
 DESCR("pg_controldata init state information as a function");
 
+/* type subscripting support */
+DATA(insert OID = 4001 (  jsonb_subscript_parse PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 "2281" "16 2281 2281" _null_ _null_ _null_ _null_ _null_ jsonb_subscript_parse _null_ _null_ _null_ ));
+DESCR("Jsonb subscripting logic");
+DATA(insert OID = 4002 (  jsonb_subscripting_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 "3802" "3802 2281" _null_ _null_ _null_ _null_ _null_ jsonb_subscripting_fetch _null_ _null_ _null_ ));
+DESCR("Jsonb subscripting logic");
+DATA(insert OID = 4003 (  jsonb_subscripting_assign PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 "3802" "3802 2281" _null_ _null_ _null_ _null_ _null_ jsonb_subscripting_assign _null_ _null_ _null_ ));
+DESCR("Jsonb subscripting logic");
+
+DATA(insert OID = 4004 (  array_subscript_parse PGNSP PGUID 12 1 0 0 0 f f f f t f s s 3 0 "2281" "16 2281 2281" _null_ _null_ _null_ _null_ _null_ array_subscript_parse _null_ _null_ _null_ ));
+DESCR("Array subscripting logic");
+DATA(insert OID = 4005 (  array_subscripting_fetch PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 "2283" "2277 2281" _null_ _null_ _null_ _null_ _null_ array_subscripting_fetch _null_ _null_ _null_ ));
+DESCR("Array subscripting logic");
+DATA(insert OID = 4006 (  array_subscripting_assign PGNSP PGUID 12 1 0 0 0 f f f f t f s s 2 0 "2277" "2277 2281" _null_ _null_ _null_ _null_ _null_ array_subscripting_assign _null_ _null_ _null_ ));
+DESCR("Array subscripting logic");
+
 DATA(insert OID = 3445 ( pg_import_system_collations PGNSP PGUID 12 100 0 0 0 f f f f t f v r 2 0 2278 "16 4089" _null_ _null_ "{if_not_exists,schema}" _null_ _null_ pg_import_system_collations _null_ _null_ _null_ ));
 DESCR("import collations from operating system");
 
diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h
index 9ad6725..2fb2ed4 100644
--- a/src/include/catalog/pg_type.h
+++ b/src/include/catalog/pg_type.h
@@ -199,6 +199,12 @@ CATALOG(pg_type,1247) BKI_BOOTSTRAP BKI_ROWTYPE_OID(71) BKI_SCHEMA_MACRO
 	 */
 	Oid			typcollation;
 
+	/*
+	 * Type-specific subscripting logic. If typsbsparse is InvalidOid, it means
+	 * that this type doesn't support subscripting.
+	 */
+	regproc		typsbsparse;
+
 #ifdef CATALOG_VARLEN			/* variable-length fields start here */
 
 	/*
@@ -236,7 +242,7 @@ typedef FormData_pg_type *Form_pg_type;
  *		compiler constants for pg_type
  * ----------------
  */
-#define Natts_pg_type					30
+#define Natts_pg_type					31
 #define Anum_pg_type_typname			1
 #define Anum_pg_type_typnamespace		2
 #define Anum_pg_type_typowner			3
@@ -264,10 +270,10 @@ typedef FormData_pg_type *Form_pg_type;
 #define Anum_pg_type_typtypmod			25
 #define Anum_pg_type_typndims			26
 #define Anum_pg_type_typcollation		27
-#define Anum_pg_type_typdefaultbin		28
-#define Anum_pg_type_typdefault			29
-#define Anum_pg_type_typacl				30
-
+#define Anum_pg_type_typsbsparse		28
+#define Anum_pg_type_typdefaultbin		29
+#define Anum_pg_type_typdefault			30
+#define Anum_pg_type_typacl				31
 
 /* ----------------
  *		initial contents of pg_type
@@ -283,98 +289,98 @@ typedef FormData_pg_type *Form_pg_type;
  */
 
 /* OIDS 1 - 99 */
-DATA(insert OID = 16 (	bool	   PGNSP PGUID	1 t b B t t \054 0	 0 1000 boolin boolout boolrecv boolsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 16 (	bool	   PGNSP PGUID	1 t b B t t \054 0	 0 1000 boolin boolout boolrecv boolsend - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("boolean, 'true'/'false'");
 #define BOOLOID			16
 
-DATA(insert OID = 17 (	bytea	   PGNSP PGUID -1 f b U f t \054 0	0 1001 byteain byteaout bytearecv byteasend - - - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 17 (	bytea	   PGNSP PGUID -1 f b U f t \054 0	0 1001 byteain byteaout bytearecv byteasend - - - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("variable-length string, binary values escaped");
 #define BYTEAOID		17
 
-DATA(insert OID = 18 (	char	   PGNSP PGUID	1 t b S f t \054 0	 0 1002 charin charout charrecv charsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 18 (	char	   PGNSP PGUID	1 t b S f t \054 0	 0 1002 charin charout charrecv charsend - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("single character");
 #define CHAROID			18
 
-DATA(insert OID = 19 (	name	   PGNSP PGUID NAMEDATALEN f b S f t \054 0 18 1003 namein nameout namerecv namesend - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 19 (	name	   PGNSP PGUID NAMEDATALEN f b S f t \054 0 18 1003 namein nameout namerecv namesend - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("63-byte type for storing system identifiers");
 #define NAMEOID			19
 
-DATA(insert OID = 20 (	int8	   PGNSP PGUID	8 FLOAT8PASSBYVAL b N f t \054 0	 0 1016 int8in int8out int8recv int8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 20 (	int8	   PGNSP PGUID	8 FLOAT8PASSBYVAL b N f t \054 0	 0 1016 int8in int8out int8recv int8send - - - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("~18 digit integer, 8-byte storage");
 #define INT8OID			20
 
-DATA(insert OID = 21 (	int2	   PGNSP PGUID	2 t b N f t \054 0	 0 1005 int2in int2out int2recv int2send - - - s p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 21 (	int2	   PGNSP PGUID	2 t b N f t \054 0	 0 1005 int2in int2out int2recv int2send - - - s p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("-32 thousand to 32 thousand, 2-byte storage");
 #define INT2OID			21
 
-DATA(insert OID = 22 (	int2vector PGNSP PGUID -1 f b A f t \054 0	21 1006 int2vectorin int2vectorout int2vectorrecv int2vectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 22 (	int2vector PGNSP PGUID -1 f b A f t \054 0	21 1006 int2vectorin int2vectorout int2vectorrecv int2vectorsend - - - i p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("array of int2, used in system tables");
 #define INT2VECTOROID	22
 
-DATA(insert OID = 23 (	int4	   PGNSP PGUID	4 t b N f t \054 0	 0 1007 int4in int4out int4recv int4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 23 (	int4	   PGNSP PGUID	4 t b N f t \054 0	 0 1007 int4in int4out int4recv int4send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("-2 billion to 2 billion integer, 4-byte storage");
 #define INT4OID			23
 
-DATA(insert OID = 24 (	regproc    PGNSP PGUID	4 t b N f t \054 0	 0 1008 regprocin regprocout regprocrecv regprocsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 24 (	regproc    PGNSP PGUID	4 t b N f t \054 0	 0 1008 regprocin regprocout regprocrecv regprocsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered procedure");
 #define REGPROCOID		24
 
-DATA(insert OID = 25 (	text	   PGNSP PGUID -1 f b S t t \054 0	0 1009 textin textout textrecv textsend - - - i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 25 (	text	   PGNSP PGUID -1 f b S t t \054 0	0 1009 textin textout textrecv textsend - - - i x f 0 -1 0 100 - _null_ _null_ _null_ ));
 DESCR("variable-length string, no limit specified");
 #define TEXTOID			25
 
-DATA(insert OID = 26 (	oid		   PGNSP PGUID	4 t b N t t \054 0	 0 1028 oidin oidout oidrecv oidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 26 (	oid		   PGNSP PGUID	4 t b N t t \054 0	 0 1028 oidin oidout oidrecv oidsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("object identifier(oid), maximum 4 billion");
 #define OIDOID			26
 
-DATA(insert OID = 27 (	tid		   PGNSP PGUID	6 f b U f t \054 0	 0 1010 tidin tidout tidrecv tidsend - - - s p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 27 (	tid		   PGNSP PGUID	6 f b U f t \054 0	 0 1010 tidin tidout tidrecv tidsend - - - s p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("(block, offset), physical location of tuple");
 #define TIDOID		27
 
-DATA(insert OID = 28 (	xid		   PGNSP PGUID	4 t b U f t \054 0	 0 1011 xidin xidout xidrecv xidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 28 (	xid		   PGNSP PGUID	4 t b U f t \054 0	 0 1011 xidin xidout xidrecv xidsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("transaction id");
 #define XIDOID 28
 
-DATA(insert OID = 29 (	cid		   PGNSP PGUID	4 t b U f t \054 0	 0 1012 cidin cidout cidrecv cidsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 29 (	cid		   PGNSP PGUID	4 t b U f t \054 0	 0 1012 cidin cidout cidrecv cidsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("command identifier type, sequence in transaction id");
 #define CIDOID 29
 
-DATA(insert OID = 30 (	oidvector  PGNSP PGUID -1 f b A f t \054 0	26 1013 oidvectorin oidvectorout oidvectorrecv oidvectorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 30 (	oidvector  PGNSP PGUID -1 f b A f t \054 0	26 1013 oidvectorin oidvectorout oidvectorrecv oidvectorsend - - - i p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("array of oids, used in system tables");
 #define OIDVECTOROID	30
 
 /* hand-built rowtype entries for bootstrapped catalogs */
 /* NB: OIDs assigned here must match the BKI_ROWTYPE_OID declarations */
 
-DATA(insert OID = 71 (	pg_type			PGNSP PGUID -1 f c C f t \054 1247 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 75 (	pg_attribute	PGNSP PGUID -1 f c C f t \054 1249 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 81 (	pg_proc			PGNSP PGUID -1 f c C f t \054 1255 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 83 (	pg_class		PGNSP PGUID -1 f c C f t \054 1259 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 71 (	pg_type			PGNSP PGUID -1 f c C f t \054 1247 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
+DATA(insert OID = 75 (	pg_attribute	PGNSP PGUID -1 f c C f t \054 1249 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
+DATA(insert OID = 81 (	pg_proc			PGNSP PGUID -1 f c C f t \054 1255 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
+DATA(insert OID = 83 (	pg_class		PGNSP PGUID -1 f c C f t \054 1259 0 0 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 
 /* OIDS 100 - 199 */
-DATA(insert OID = 114 ( json		   PGNSP PGUID -1 f b U f t \054 0 0 199 json_in json_out json_recv json_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 114 ( json		   PGNSP PGUID -1 f b U f t \054 0 0 199 json_in json_out json_recv json_send - - - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define JSONOID 114
-DATA(insert OID = 142 ( xml		   PGNSP PGUID -1 f b U f t \054 0 0 143 xml_in xml_out xml_recv xml_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 142 ( xml		   PGNSP PGUID -1 f b U f t \054 0 0 143 xml_in xml_out xml_recv xml_send - - - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("XML content");
 #define XMLOID 142
-DATA(insert OID = 143 ( _xml	   PGNSP PGUID -1 f b A f t \054 0 142 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 199 ( _json	   PGNSP PGUID -1 f b A f t \054 0 114 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 143 ( _xml	   PGNSP PGUID -1 f b A f t \054 0 142 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 199 ( _json	   PGNSP PGUID -1 f b A f t \054 0 114 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
-DATA(insert OID = 194 ( pg_node_tree	PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node_tree_in pg_node_tree_out pg_node_tree_recv pg_node_tree_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 194 ( pg_node_tree	PGNSP PGUID -1 f b S f t \054 0 0 0 pg_node_tree_in pg_node_tree_out pg_node_tree_recv pg_node_tree_send - - - i x f 0 -1 0 100 - _null_ _null_ _null_ ));
 DESCR("string representing an internal node tree");
 #define PGNODETREEOID	194
 
-DATA(insert OID = 3361 ( pg_ndistinct		PGNSP PGUID -1 f b S f t \054 0 0 0 pg_ndistinct_in pg_ndistinct_out pg_ndistinct_recv pg_ndistinct_send - - - i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 3361 ( pg_ndistinct		PGNSP PGUID -1 f b S f t \054 0 0 0 pg_ndistinct_in pg_ndistinct_out pg_ndistinct_recv pg_ndistinct_send - - - i x f 0 -1 0 100 - _null_ _null_ _null_ ));
 DESCR("multivariate ndistinct coefficients");
 #define PGNDISTINCTOID	3361
 
-DATA(insert OID = 32 ( pg_ddl_command	PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 32 ( pg_ddl_command	PGNSP PGUID SIZEOF_POINTER t p P f t \054 0 0 0 pg_ddl_command_in pg_ddl_command_out pg_ddl_command_recv pg_ddl_command_send - - - ALIGNOF_POINTER p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("internal type for passing CollectedCommand");
 #define PGDDLCOMMANDOID 32
 
 /* OIDS 200 - 299 */
 
-DATA(insert OID = 210 (  smgr	   PGNSP PGUID 2 t b U f t \054 0 0 0 smgrin smgrout - - - - - s p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 210 (  smgr	   PGNSP PGUID 2 t b U f t \054 0 0 0 smgrin smgrout - - - - - s p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("storage manager");
 
 /* OIDS 300 - 399 */
@@ -384,280 +390,280 @@ DESCR("storage manager");
 /* OIDS 500 - 599 */
 
 /* OIDS 600 - 699 */
-DATA(insert OID = 600 (  point	   PGNSP PGUID 16 f b G f t \054 0 701 1017 point_in point_out point_recv point_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 600 (  point	   PGNSP PGUID 16 f b G f t \054 0 701 1017 point_in point_out point_recv point_send - - - d p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric point '(x, y)'");
 #define POINTOID		600
-DATA(insert OID = 601 (  lseg	   PGNSP PGUID 32 f b G f t \054 0 600 1018 lseg_in lseg_out lseg_recv lseg_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 601 (  lseg	   PGNSP PGUID 32 f b G f t \054 0 600 1018 lseg_in lseg_out lseg_recv lseg_send - - - d p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric line segment '(pt1,pt2)'");
 #define LSEGOID			601
-DATA(insert OID = 602 (  path	   PGNSP PGUID -1 f b G f t \054 0 0 1019 path_in path_out path_recv path_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 602 (  path	   PGNSP PGUID -1 f b G f t \054 0 0 1019 path_in path_out path_recv path_send - - - d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric path '(pt1,...)'");
 #define PATHOID			602
-DATA(insert OID = 603 (  box	   PGNSP PGUID 32 f b G f t \073 0 600 1020 box_in box_out box_recv box_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 603 (  box	   PGNSP PGUID 32 f b G f t \073 0 600 1020 box_in box_out box_recv box_send - - - d p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric box '(lower left,upper right)'");
 #define BOXOID			603
-DATA(insert OID = 604 (  polygon   PGNSP PGUID -1 f b G f t \054 0	 0 1027 poly_in poly_out poly_recv poly_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 604 (  polygon   PGNSP PGUID -1 f b G f t \054 0	 0 1027 poly_in poly_out poly_recv poly_send - - - d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric polygon '(pt1,...)'");
 #define POLYGONOID		604
 
-DATA(insert OID = 628 (  line	   PGNSP PGUID 24 f b G f t \054 0 701 629 line_in line_out line_recv line_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 628 (  line	   PGNSP PGUID 24 f b G f t \054 0 701 629 line_in line_out line_recv line_send - - - d p f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 DESCR("geometric line");
 #define LINEOID			628
-DATA(insert OID = 629 (  _line	   PGNSP PGUID	-1 f b A f t \054 0 628 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 629 (  _line	   PGNSP PGUID	-1 f b A f t \054 0 628 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* OIDS 700 - 799 */
 
-DATA(insert OID = 700 (  float4    PGNSP PGUID	4 FLOAT4PASSBYVAL b N f t \054 0	 0 1021 float4in float4out float4recv float4send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 700 (  float4    PGNSP PGUID	4 FLOAT4PASSBYVAL b N f t \054 0	 0 1021 float4in float4out float4recv float4send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("single-precision floating point number, 4-byte storage");
 #define FLOAT4OID 700
-DATA(insert OID = 701 (  float8    PGNSP PGUID	8 FLOAT8PASSBYVAL b N t t \054 0	 0 1022 float8in float8out float8recv float8send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 701 (  float8    PGNSP PGUID	8 FLOAT8PASSBYVAL b N t t \054 0	 0 1022 float8in float8out float8recv float8send - - - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("double-precision floating point number, 8-byte storage");
 #define FLOAT8OID 701
-DATA(insert OID = 702 (  abstime   PGNSP PGUID	4 t b D f t \054 0	 0 1023 abstimein abstimeout abstimerecv abstimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 702 (  abstime   PGNSP PGUID	4 t b D f t \054 0	 0 1023 abstimein abstimeout abstimerecv abstimesend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("absolute, limited-range date and time (Unix system time)");
 #define ABSTIMEOID		702
-DATA(insert OID = 703 (  reltime   PGNSP PGUID	4 t b T f t \054 0	 0 1024 reltimein reltimeout reltimerecv reltimesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 703 (  reltime   PGNSP PGUID	4 t b T f t \054 0	 0 1024 reltimein reltimeout reltimerecv reltimesend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("relative, limited-range time interval (Unix delta time)");
 #define RELTIMEOID		703
-DATA(insert OID = 704 (  tinterval PGNSP PGUID 12 f b T f t \054 0	 0 1025 tintervalin tintervalout tintervalrecv tintervalsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 704 (  tinterval PGNSP PGUID 12 f b T f t \054 0	 0 1025 tintervalin tintervalout tintervalrecv tintervalsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("(abstime,abstime), time interval");
 #define TINTERVALOID	704
-DATA(insert OID = 705 (  unknown   PGNSP PGUID -2 f p X f t \054 0	 0 0 unknownin unknownout unknownrecv unknownsend - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 705 (  unknown   PGNSP PGUID -2 f p X f t \054 0	 0 0 unknownin unknownout unknownrecv unknownsend - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("");
 #define UNKNOWNOID		705
 
-DATA(insert OID = 718 (  circle    PGNSP PGUID	24 f b G f t \054 0 0 719 circle_in circle_out circle_recv circle_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 718 (  circle    PGNSP PGUID	24 f b G f t \054 0 0 719 circle_in circle_out circle_recv circle_send - - - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("geometric circle '(center,radius)'");
 #define CIRCLEOID		718
-DATA(insert OID = 719 (  _circle   PGNSP PGUID	-1 f b A f t \054 0  718 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 790 (  money	   PGNSP PGUID	 8 FLOAT8PASSBYVAL b N f t \054 0 0 791 cash_in cash_out cash_recv cash_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 719 (  _circle   PGNSP PGUID	-1 f b A f t \054 0  718 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 790 (  money	   PGNSP PGUID	 8 FLOAT8PASSBYVAL b N f t \054 0 0 791 cash_in cash_out cash_recv cash_send - - - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("monetary amounts, $d,ddd.cc");
 #define CASHOID 790
-DATA(insert OID = 791 (  _money    PGNSP PGUID	-1 f b A f t \054 0  790 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 791 (  _money    PGNSP PGUID	-1 f b A f t \054 0  790 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* OIDS 800 - 899 */
-DATA(insert OID = 829 ( macaddr    PGNSP PGUID	6 f b U f t \054 0 0 1040 macaddr_in macaddr_out macaddr_recv macaddr_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 829 ( macaddr    PGNSP PGUID	6 f b U f t \054 0 0 1040 macaddr_in macaddr_out macaddr_recv macaddr_send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("XX:XX:XX:XX:XX:XX, MAC address");
 #define MACADDROID 829
-DATA(insert OID = 869 ( inet	   PGNSP PGUID	-1 f b I t t \054 0 0 1041 inet_in inet_out inet_recv inet_send - - - i m f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 869 ( inet	   PGNSP PGUID	-1 f b I t t \054 0 0 1041 inet_in inet_out inet_recv inet_send - - - i m f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("IP address/netmask, host address, netmask optional");
 #define INETOID 869
-DATA(insert OID = 650 ( cidr	   PGNSP PGUID	-1 f b I f t \054 0 0 651 cidr_in cidr_out cidr_recv cidr_send - - - i m f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 650 ( cidr	   PGNSP PGUID	-1 f b I f t \054 0 0 651 cidr_in cidr_out cidr_recv cidr_send - - - i m f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("network IP address/netmask, network address");
 #define CIDROID 650
-DATA(insert OID = 774 ( macaddr8	PGNSP PGUID 8 f b U f t \054 0 0 775 macaddr8_in macaddr8_out macaddr8_recv macaddr8_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 774 ( macaddr8	PGNSP PGUID 8 f b U f t \054 0 0 775 macaddr8_in macaddr8_out macaddr8_recv macaddr8_send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("XX:XX:XX:XX:XX:XX:XX:XX, MAC address");
 #define MACADDR8OID 774
 
 /* OIDS 900 - 999 */
 
 /* OIDS 1000 - 1099 */
-DATA(insert OID = 1000 (  _bool		 PGNSP PGUID -1 f b A f t \054 0	16 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1001 (  _bytea	 PGNSP PGUID -1 f b A f t \054 0	17 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1002 (  _char		 PGNSP PGUID -1 f b A f t \054 0	18 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1003 (  _name		 PGNSP PGUID -1 f b A f t \054 0	19 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1005 (  _int2		 PGNSP PGUID -1 f b A f t \054 0	21 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1000 (  _bool		 PGNSP PGUID -1 f b A f t \054 0	16 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1001 (  _bytea	 PGNSP PGUID -1 f b A f t \054 0	17 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1002 (  _char		 PGNSP PGUID -1 f b A f t \054 0	18 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1003 (  _name		 PGNSP PGUID -1 f b A f t \054 0	19 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1005 (  _int2		 PGNSP PGUID -1 f b A f t \054 0	21 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define INT2ARRAYOID		1005
-DATA(insert OID = 1006 (  _int2vector PGNSP PGUID -1 f b A f t \054 0	22 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1007 (  _int4		 PGNSP PGUID -1 f b A f t \054 0	23 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1006 (  _int2vector PGNSP PGUID -1 f b A f t \054 0	22 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1007 (  _int4		 PGNSP PGUID -1 f b A f t \054 0	23 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define INT4ARRAYOID		1007
-DATA(insert OID = 1008 (  _regproc	 PGNSP PGUID -1 f b A f t \054 0	24 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1009 (  _text		 PGNSP PGUID -1 f b A f t \054 0	25 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 1008 (  _regproc	 PGNSP PGUID -1 f b A f t \054 0	24 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1009 (  _text		 PGNSP PGUID -1 f b A f t \054 0	25 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 100 array_subscript_parse _null_ _null_ _null_ ));
 #define TEXTARRAYOID		1009
-DATA(insert OID = 1028 (  _oid		 PGNSP PGUID -1 f b A f t \054 0	26 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1028 (  _oid		 PGNSP PGUID -1 f b A f t \054 0	26 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define OIDARRAYOID			1028
-DATA(insert OID = 1010 (  _tid		 PGNSP PGUID -1 f b A f t \054 0	27 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1011 (  _xid		 PGNSP PGUID -1 f b A f t \054 0	28 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1012 (  _cid		 PGNSP PGUID -1 f b A f t \054 0	29 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1013 (  _oidvector PGNSP PGUID -1 f b A f t \054 0	30 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1014 (  _bpchar	 PGNSP PGUID -1 f b A f t \054 0 1042 0 array_in array_out array_recv array_send bpchartypmodin bpchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ ));
-DATA(insert OID = 1015 (  _varchar	 PGNSP PGUID -1 f b A f t \054 0 1043 0 array_in array_out array_recv array_send varchartypmodin varchartypmodout array_typanalyze i x f 0 -1 0 100 _null_ _null_ _null_ ));
-DATA(insert OID = 1016 (  _int8		 PGNSP PGUID -1 f b A f t \054 0	20 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1017 (  _point	 PGNSP PGUID -1 f b A f t \054 0 600 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1018 (  _lseg		 PGNSP PGUID -1 f b A f t \054 0 601 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1019 (  _path		 PGNSP PGUID -1 f b A f t \054 0 602 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1020 (  _box		 PGNSP PGUID -1 f b A f t \073 0 603 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1021 (  _float4	 PGNSP PGUID -1 f b A f t \054 0 700 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1010 (  _tid		 PGNSP PGUID -1 f b A f t \054 0	27 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1011 (  _xid		 PGNSP PGUID -1 f b A f t \054 0	28 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1012 (  _cid		 PGNSP PGUID -1 f b A f t \054 0	29 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1013 (  _oidvector PGNSP PGUID -1 f b A f t \054 0	30 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1014 (  _bpchar	 PGNSP PGUID -1 f b A f t \054 0 1042 0 array_in array_out array_recv array_send bpchartypmodin bpchartypmodout array_typanalyze i x f 0 -1 0 100 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1015 (  _varchar	 PGNSP PGUID -1 f b A f t \054 0 1043 0 array_in array_out array_recv array_send varchartypmodin varchartypmodout array_typanalyze i x f 0 -1 0 100 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1016 (  _int8		 PGNSP PGUID -1 f b A f t \054 0	20 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1017 (  _point	 PGNSP PGUID -1 f b A f t \054 0 600 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1018 (  _lseg		 PGNSP PGUID -1 f b A f t \054 0 601 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1019 (  _path		 PGNSP PGUID -1 f b A f t \054 0 602 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1020 (  _box		 PGNSP PGUID -1 f b A f t \073 0 603 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1021 (  _float4	 PGNSP PGUID -1 f b A f t \054 0 700 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define FLOAT4ARRAYOID 1021
-DATA(insert OID = 1022 (  _float8	 PGNSP PGUID -1 f b A f t \054 0 701 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1023 (  _abstime	 PGNSP PGUID -1 f b A f t \054 0 702 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1024 (  _reltime	 PGNSP PGUID -1 f b A f t \054 0 703 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1025 (  _tinterval PGNSP PGUID -1 f b A f t \054 0 704 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1027 (  _polygon	 PGNSP PGUID -1 f b A f t \054 0 604 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1033 (  aclitem	 PGNSP PGUID 12 f b U f t \054 0 0 1034 aclitemin aclitemout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1022 (  _float8	 PGNSP PGUID -1 f b A f t \054 0 701 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1023 (  _abstime	 PGNSP PGUID -1 f b A f t \054 0 702 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1024 (  _reltime	 PGNSP PGUID -1 f b A f t \054 0 703 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1025 (  _tinterval PGNSP PGUID -1 f b A f t \054 0 704 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1027 (  _polygon	 PGNSP PGUID -1 f b A f t \054 0 604 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1033 (  aclitem	 PGNSP PGUID 12 f b U f t \054 0 0 1034 aclitemin aclitemout - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("access control list");
 #define ACLITEMOID		1033
-DATA(insert OID = 1034 (  _aclitem	 PGNSP PGUID -1 f b A f t \054 0 1033 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1040 (  _macaddr	 PGNSP PGUID -1 f b A f t \054 0  829 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 775  (  _macaddr8  PGNSP PGUID -1 f b A f t \054 0  774 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1041 (  _inet		 PGNSP PGUID -1 f b A f t \054 0  869 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 651  (  _cidr		 PGNSP PGUID -1 f b A f t \054 0  650 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1263 (  _cstring	 PGNSP PGUID -1 f b A f t \054 0 2275 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1034 (  _aclitem	 PGNSP PGUID -1 f b A f t \054 0 1033 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1040 (  _macaddr	 PGNSP PGUID -1 f b A f t \054 0  829 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 775  (  _macaddr8  PGNSP PGUID -1 f b A f t \054 0  774 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1041 (  _inet		 PGNSP PGUID -1 f b A f t \054 0  869 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 651  (  _cidr		 PGNSP PGUID -1 f b A f t \054 0  650 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1263 (  _cstring	 PGNSP PGUID -1 f b A f t \054 0 2275 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define CSTRINGARRAYOID		1263
 
-DATA(insert OID = 1042 ( bpchar		 PGNSP PGUID -1 f b S f t \054 0	0 1014 bpcharin bpcharout bpcharrecv bpcharsend bpchartypmodin bpchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 1042 ( bpchar		 PGNSP PGUID -1 f b S f t \054 0	0 1014 bpcharin bpcharout bpcharrecv bpcharsend bpchartypmodin bpchartypmodout - i x f 0 -1 0 100 - _null_ _null_ _null_ ));
 DESCR("char(length), blank-padded string, fixed storage length");
 #define BPCHAROID		1042
-DATA(insert OID = 1043 ( varchar	 PGNSP PGUID -1 f b S f t \054 0	0 1015 varcharin varcharout varcharrecv varcharsend varchartypmodin varchartypmodout - i x f 0 -1 0 100 _null_ _null_ _null_ ));
+DATA(insert OID = 1043 ( varchar	 PGNSP PGUID -1 f b S f t \054 0	0 1015 varcharin varcharout varcharrecv varcharsend varchartypmodin varchartypmodout - i x f 0 -1 0 100 - _null_ _null_ _null_ ));
 DESCR("varchar(length), non-blank-padded string, variable storage length");
 #define VARCHAROID		1043
 
-DATA(insert OID = 1082 ( date		 PGNSP PGUID	4 t b D f t \054 0	0 1182 date_in date_out date_recv date_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1082 ( date		 PGNSP PGUID	4 t b D f t \054 0	0 1182 date_in date_out date_recv date_send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("date");
 #define DATEOID			1082
-DATA(insert OID = 1083 ( time		 PGNSP PGUID	8 FLOAT8PASSBYVAL b D f t \054 0	0 1183 time_in time_out time_recv time_send timetypmodin timetypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1083 ( time		 PGNSP PGUID	8 FLOAT8PASSBYVAL b D f t \054 0	0 1183 time_in time_out time_recv time_send timetypmodin timetypmodout - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("time of day");
 #define TIMEOID			1083
 
 /* OIDS 1100 - 1199 */
-DATA(insert OID = 1114 ( timestamp	 PGNSP PGUID	8 FLOAT8PASSBYVAL b D f t \054 0	0 1115 timestamp_in timestamp_out timestamp_recv timestamp_send timestamptypmodin timestamptypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1114 ( timestamp	 PGNSP PGUID	8 FLOAT8PASSBYVAL b D f t \054 0	0 1115 timestamp_in timestamp_out timestamp_recv timestamp_send timestamptypmodin timestamptypmodout - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("date and time");
 #define TIMESTAMPOID	1114
-DATA(insert OID = 1115 ( _timestamp  PGNSP PGUID	-1 f b A f t \054 0 1114 0 array_in array_out array_recv array_send timestamptypmodin timestamptypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1182 ( _date		 PGNSP PGUID	-1 f b A f t \054 0 1082 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1183 ( _time		 PGNSP PGUID	-1 f b A f t \054 0 1083 0 array_in array_out array_recv array_send timetypmodin timetypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1184 ( timestamptz PGNSP PGUID	8 FLOAT8PASSBYVAL b D t t \054 0	0 1185 timestamptz_in timestamptz_out timestamptz_recv timestamptz_send timestamptztypmodin timestamptztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1115 ( _timestamp  PGNSP PGUID	-1 f b A f t \054 0 1114 0 array_in array_out array_recv array_send timestamptypmodin timestamptypmodout array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1182 ( _date		 PGNSP PGUID	-1 f b A f t \054 0 1082 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1183 ( _time		 PGNSP PGUID	-1 f b A f t \054 0 1083 0 array_in array_out array_recv array_send timetypmodin timetypmodout array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1184 ( timestamptz PGNSP PGUID	8 FLOAT8PASSBYVAL b D t t \054 0	0 1185 timestamptz_in timestamptz_out timestamptz_recv timestamptz_send timestamptztypmodin timestamptztypmodout - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("date and time with time zone");
 #define TIMESTAMPTZOID	1184
-DATA(insert OID = 1185 ( _timestamptz PGNSP PGUID -1 f b A f t \054 0	1184 0 array_in array_out array_recv array_send timestamptztypmodin timestamptztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1186 ( interval	 PGNSP PGUID 16 f b T t t \054 0	0 1187 interval_in interval_out interval_recv interval_send intervaltypmodin intervaltypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1185 ( _timestamptz PGNSP PGUID -1 f b A f t \054 0	1184 0 array_in array_out array_recv array_send timestamptztypmodin timestamptztypmodout array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1186 ( interval	 PGNSP PGUID 16 f b T t t \054 0	0 1187 interval_in interval_out interval_recv interval_send intervaltypmodin intervaltypmodout - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("@ <number> <units>, time interval");
 #define INTERVALOID		1186
-DATA(insert OID = 1187 ( _interval	 PGNSP PGUID	-1 f b A f t \054 0 1186 0 array_in array_out array_recv array_send intervaltypmodin intervaltypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1187 ( _interval	 PGNSP PGUID	-1 f b A f t \054 0 1186 0 array_in array_out array_recv array_send intervaltypmodin intervaltypmodout array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* OIDS 1200 - 1299 */
-DATA(insert OID = 1231 (  _numeric	 PGNSP PGUID -1 f b A f t \054 0	1700 0 array_in array_out array_recv array_send numerictypmodin numerictypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1266 ( timetz		 PGNSP PGUID 12 f b D f t \054 0	0 1270 timetz_in timetz_out timetz_recv timetz_send timetztypmodin timetztypmodout - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1231 (  _numeric	 PGNSP PGUID -1 f b A f t \054 0	1700 0 array_in array_out array_recv array_send numerictypmodin numerictypmodout array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1266 ( timetz		 PGNSP PGUID 12 f b D f t \054 0	0 1270 timetz_in timetz_out timetz_recv timetz_send timetztypmodin timetztypmodout - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("time of day with time zone");
 #define TIMETZOID		1266
-DATA(insert OID = 1270 ( _timetz	 PGNSP PGUID -1 f b A f t \054 0	1266 0 array_in array_out array_recv array_send timetztypmodin timetztypmodout array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1270 ( _timetz	 PGNSP PGUID -1 f b A f t \054 0	1266 0 array_in array_out array_recv array_send timetztypmodin timetztypmodout array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* OIDS 1500 - 1599 */
-DATA(insert OID = 1560 ( bit		 PGNSP PGUID -1 f b V f t \054 0	0 1561 bit_in bit_out bit_recv bit_send bittypmodin bittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1560 ( bit		 PGNSP PGUID -1 f b V f t \054 0	0 1561 bit_in bit_out bit_recv bit_send bittypmodin bittypmodout - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("fixed-length bit string");
 #define BITOID	 1560
-DATA(insert OID = 1561 ( _bit		 PGNSP PGUID -1 f b A f t \054 0	1560 0 array_in array_out array_recv array_send bittypmodin bittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 1562 ( varbit		 PGNSP PGUID -1 f b V t t \054 0	0 1563 varbit_in varbit_out varbit_recv varbit_send varbittypmodin varbittypmodout - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1561 ( _bit		 PGNSP PGUID -1 f b A f t \054 0	1560 0 array_in array_out array_recv array_send bittypmodin bittypmodout array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 1562 ( varbit		 PGNSP PGUID -1 f b V t t \054 0	0 1563 varbit_in varbit_out varbit_recv varbit_send varbittypmodin varbittypmodout - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("variable-length bit string");
 #define VARBITOID	  1562
-DATA(insert OID = 1563 ( _varbit	 PGNSP PGUID -1 f b A f t \054 0	1562 0 array_in array_out array_recv array_send varbittypmodin varbittypmodout array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1563 ( _varbit	 PGNSP PGUID -1 f b A f t \054 0	1562 0 array_in array_out array_recv array_send varbittypmodin varbittypmodout array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* OIDS 1600 - 1699 */
 
 /* OIDS 1700 - 1799 */
-DATA(insert OID = 1700 ( numeric	   PGNSP PGUID -1 f b N f t \054 0	0 1231 numeric_in numeric_out numeric_recv numeric_send numerictypmodin numerictypmodout - i m f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1700 ( numeric	   PGNSP PGUID -1 f b N f t \054 0	0 1231 numeric_in numeric_out numeric_recv numeric_send numerictypmodin numerictypmodout - i m f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("numeric(precision, decimal), arbitrary precision number");
 #define NUMERICOID		1700
 
-DATA(insert OID = 1790 ( refcursor	   PGNSP PGUID -1 f b U f t \054 0	0 2201 textin textout textrecv textsend - - - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 1790 ( refcursor	   PGNSP PGUID -1 f b U f t \054 0	0 2201 textin textout textrecv textsend - - - i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("reference to cursor (portal name)");
 #define REFCURSOROID	1790
 
 /* OIDS 2200 - 2299 */
-DATA(insert OID = 2201 ( _refcursor    PGNSP PGUID -1 f b A f t \054 0 1790 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2201 ( _refcursor    PGNSP PGUID -1 f b A f t \054 0 1790 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
-DATA(insert OID = 2202 ( regprocedure  PGNSP PGUID	4 t b N f t \054 0	 0 2207 regprocedurein regprocedureout regprocedurerecv regproceduresend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2202 ( regprocedure  PGNSP PGUID	4 t b N f t \054 0	 0 2207 regprocedurein regprocedureout regprocedurerecv regproceduresend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered procedure (with args)");
 #define REGPROCEDUREOID 2202
 
-DATA(insert OID = 2203 ( regoper	   PGNSP PGUID	4 t b N f t \054 0	 0 2208 regoperin regoperout regoperrecv regopersend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2203 ( regoper	   PGNSP PGUID	4 t b N f t \054 0	 0 2208 regoperin regoperout regoperrecv regopersend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered operator");
 #define REGOPEROID		2203
 
-DATA(insert OID = 2204 ( regoperator   PGNSP PGUID	4 t b N f t \054 0	 0 2209 regoperatorin regoperatorout regoperatorrecv regoperatorsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2204 ( regoperator   PGNSP PGUID	4 t b N f t \054 0	 0 2209 regoperatorin regoperatorout regoperatorrecv regoperatorsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered operator (with args)");
 #define REGOPERATOROID	2204
 
-DATA(insert OID = 2205 ( regclass	   PGNSP PGUID	4 t b N f t \054 0	 0 2210 regclassin regclassout regclassrecv regclasssend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2205 ( regclass	   PGNSP PGUID	4 t b N f t \054 0	 0 2210 regclassin regclassout regclassrecv regclasssend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered class");
 #define REGCLASSOID		2205
 
-DATA(insert OID = 2206 ( regtype	   PGNSP PGUID	4 t b N f t \054 0	 0 2211 regtypein regtypeout regtyperecv regtypesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2206 ( regtype	   PGNSP PGUID	4 t b N f t \054 0	 0 2211 regtypein regtypeout regtyperecv regtypesend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered type");
 #define REGTYPEOID		2206
 
-DATA(insert OID = 4096 ( regrole	   PGNSP PGUID	4 t b N f t \054 0	 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4096 ( regrole	   PGNSP PGUID	4 t b N f t \054 0	 0 4097 regrolein regroleout regrolerecv regrolesend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered role");
 #define REGROLEOID		4096
 
-DATA(insert OID = 4089 ( regnamespace  PGNSP PGUID	4 t b N f t \054 0	 0 4090 regnamespacein regnamespaceout regnamespacerecv regnamespacesend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4089 ( regnamespace  PGNSP PGUID	4 t b N f t \054 0	 0 4090 regnamespacein regnamespaceout regnamespacerecv regnamespacesend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered namespace");
 #define REGNAMESPACEOID		4089
 
-DATA(insert OID = 2207 ( _regprocedure PGNSP PGUID -1 f b A f t \054 0 2202 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 2208 ( _regoper	   PGNSP PGUID -1 f b A f t \054 0 2203 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 2209 ( _regoperator  PGNSP PGUID -1 f b A f t \054 0 2204 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 2210 ( _regclass	   PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 2211 ( _regtype	   PGNSP PGUID -1 f b A f t \054 0 2206 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2207 ( _regprocedure PGNSP PGUID -1 f b A f t \054 0 2202 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 2208 ( _regoper	   PGNSP PGUID -1 f b A f t \054 0 2203 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 2209 ( _regoperator  PGNSP PGUID -1 f b A f t \054 0 2204 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 2210 ( _regclass	   PGNSP PGUID -1 f b A f t \054 0 2205 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 2211 ( _regtype	   PGNSP PGUID -1 f b A f t \054 0 2206 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define REGTYPEARRAYOID 2211
-DATA(insert OID = 4097 ( _regrole	   PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 4097 ( _regrole	   PGNSP PGUID -1 f b A f t \054 0 4096 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 4090 ( _regnamespace PGNSP PGUID -1 f b A f t \054 0 4089 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* uuid */
-DATA(insert OID = 2950 ( uuid			PGNSP PGUID 16 f b U f t \054 0 0 2951 uuid_in uuid_out uuid_recv uuid_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2950 ( uuid			PGNSP PGUID 16 f b U f t \054 0 0 2951 uuid_in uuid_out uuid_recv uuid_send - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("UUID datatype");
 #define UUIDOID 2950
-DATA(insert OID = 2951 ( _uuid			PGNSP PGUID -1 f b A f t \054 0 2950 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2951 ( _uuid			PGNSP PGUID -1 f b A f t \054 0 2950 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* pg_lsn */
-DATA(insert OID = 3220 ( pg_lsn			PGNSP PGUID 8 FLOAT8PASSBYVAL b U f t \054 0 0 3221 pg_lsn_in pg_lsn_out pg_lsn_recv pg_lsn_send - - - d p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3220 ( pg_lsn			PGNSP PGUID 8 FLOAT8PASSBYVAL b U f t \054 0 0 3221 pg_lsn_in pg_lsn_out pg_lsn_recv pg_lsn_send - - - d p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("PostgreSQL LSN datatype");
 #define LSNOID			3220
-DATA(insert OID = 3221 ( _pg_lsn			PGNSP PGUID -1 f b A f t \054 0 3220 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3221 ( _pg_lsn			PGNSP PGUID -1 f b A f t \054 0 3220 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* text search */
-DATA(insert OID = 3614 ( tsvector		PGNSP PGUID -1 f b U f t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - ts_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3614 ( tsvector		PGNSP PGUID -1 f b U f t \054 0 0 3643 tsvectorin tsvectorout tsvectorrecv tsvectorsend - - ts_typanalyze i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("text representation for text search");
 #define TSVECTOROID		3614
-DATA(insert OID = 3642 ( gtsvector		PGNSP PGUID -1 f b U f t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3642 ( gtsvector		PGNSP PGUID -1 f b U f t \054 0 0 3644 gtsvectorin gtsvectorout - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("GiST index internal text representation for text search");
 #define GTSVECTOROID	3642
-DATA(insert OID = 3615 ( tsquery		PGNSP PGUID -1 f b U f t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3615 ( tsquery		PGNSP PGUID -1 f b U f t \054 0 0 3645 tsqueryin tsqueryout tsqueryrecv tsquerysend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("query representation for text search");
 #define TSQUERYOID		3615
-DATA(insert OID = 3734 ( regconfig		PGNSP PGUID 4 t b N f t \054 0 0 3735 regconfigin regconfigout regconfigrecv regconfigsend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3734 ( regconfig		PGNSP PGUID 4 t b N f t \054 0 0 3735 regconfigin regconfigout regconfigrecv regconfigsend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered text search configuration");
 #define REGCONFIGOID	3734
-DATA(insert OID = 3769 ( regdictionary	PGNSP PGUID 4 t b N f t \054 0 0 3770 regdictionaryin regdictionaryout regdictionaryrecv regdictionarysend - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3769 ( regdictionary	PGNSP PGUID 4 t b N f t \054 0 0 3770 regdictionaryin regdictionaryout regdictionaryrecv regdictionarysend - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("registered text search dictionary");
 #define REGDICTIONARYOID	3769
 
-DATA(insert OID = 3643 ( _tsvector		PGNSP PGUID -1 f b A f t \054 0 3614 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3644 ( _gtsvector		PGNSP PGUID -1 f b A f t \054 0 3642 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3645 ( _tsquery		PGNSP PGUID -1 f b A f t \054 0 3615 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3735 ( _regconfig		PGNSP PGUID -1 f b A f t \054 0 3734 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b A f t \054 0 3769 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3643 ( _tsvector		PGNSP PGUID -1 f b A f t \054 0 3614 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3644 ( _gtsvector		PGNSP PGUID -1 f b A f t \054 0 3642 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3645 ( _tsquery		PGNSP PGUID -1 f b A f t \054 0 3615 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3735 ( _regconfig		PGNSP PGUID -1 f b A f t \054 0 3734 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3770 ( _regdictionary PGNSP PGUID -1 f b A f t \054 0 3769 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* jsonb */
-DATA(insert OID = 3802 ( jsonb			PGNSP PGUID -1 f b U f t \054 0 0 3807 jsonb_in jsonb_out jsonb_recv jsonb_send - - - i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3802 ( jsonb			PGNSP PGUID -1 f b U f t \054 0 0 3807 jsonb_in jsonb_out jsonb_recv jsonb_send - - - i x f 0 -1 0 0 jsonb_subscript_parse _null_ _null_ _null_ ));
 DESCR("Binary JSON");
 #define JSONBOID 3802
-DATA(insert OID = 3807 ( _jsonb			PGNSP PGUID -1 f b A f t \054 0 3802 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3807 ( _jsonb			PGNSP PGUID -1 f b A f t \054 0 3802 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
-DATA(insert OID = 2970 ( txid_snapshot	PGNSP PGUID -1 f b U f t \054 0 0 2949 txid_snapshot_in txid_snapshot_out txid_snapshot_recv txid_snapshot_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2970 ( txid_snapshot	PGNSP PGUID -1 f b U f t \054 0 0 2949 txid_snapshot_in txid_snapshot_out txid_snapshot_recv txid_snapshot_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("txid snapshot");
-DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b A f t \054 0 2970 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2949 ( _txid_snapshot PGNSP PGUID -1 f b A f t \054 0 2970 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /* range types */
-DATA(insert OID = 3904 ( int4range		PGNSP PGUID  -1 f r R f t \054 0 0 3905 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3904 ( int4range		PGNSP PGUID  -1 f r R f t \054 0 0 3905 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of integers");
 #define INT4RANGEOID		3904
-DATA(insert OID = 3905 ( _int4range		PGNSP PGUID  -1 f b A f t \054 0 3904 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3906 ( numrange		PGNSP PGUID  -1 f r R f t \054 0 0 3907 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3905 ( _int4range		PGNSP PGUID  -1 f b A f t \054 0 3904 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3906 ( numrange		PGNSP PGUID  -1 f r R f t \054 0 0 3907 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of numerics");
-DATA(insert OID = 3907 ( _numrange		PGNSP PGUID  -1 f b A f t \054 0 3906 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3908 ( tsrange		PGNSP PGUID  -1 f r R f t \054 0 0 3909 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3907 ( _numrange		PGNSP PGUID  -1 f b A f t \054 0 3906 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3908 ( tsrange		PGNSP PGUID  -1 f r R f t \054 0 0 3909 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of timestamps without time zone");
-DATA(insert OID = 3909 ( _tsrange		PGNSP PGUID  -1 f b A f t \054 0 3908 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3910 ( tstzrange		PGNSP PGUID  -1 f r R f t \054 0 0 3911 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3909 ( _tsrange		PGNSP PGUID  -1 f b A f t \054 0 3908 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3910 ( tstzrange		PGNSP PGUID  -1 f r R f t \054 0 0 3911 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of timestamps with time zone");
-DATA(insert OID = 3911 ( _tstzrange		PGNSP PGUID  -1 f b A f t \054 0 3910 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3912 ( daterange		PGNSP PGUID  -1 f r R f t \054 0 0 3913 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3911 ( _tstzrange		PGNSP PGUID  -1 f b A f t \054 0 3910 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3912 ( daterange		PGNSP PGUID  -1 f r R f t \054 0 0 3913 range_in range_out range_recv range_send - - range_typanalyze i x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of dates");
-DATA(insert OID = 3913 ( _daterange		PGNSP PGUID  -1 f b A f t \054 0 3912 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 _null_ _null_ _null_ ));
-DATA(insert OID = 3926 ( int8range		PGNSP PGUID  -1 f r R f t \054 0 0 3927 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3913 ( _daterange		PGNSP PGUID  -1 f b A f t \054 0 3912 0 array_in array_out array_recv array_send - - array_typanalyze i x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
+DATA(insert OID = 3926 ( int8range		PGNSP PGUID  -1 f r R f t \054 0 0 3927 range_in range_out range_recv range_send - - range_typanalyze d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 DESCR("range of bigints");
-DATA(insert OID = 3927 ( _int8range		PGNSP PGUID  -1 f b A f t \054 0 3926 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3927 ( _int8range		PGNSP PGUID  -1 f b A f t \054 0 3926 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 
 /*
  * pseudo-types
@@ -672,41 +678,41 @@ DATA(insert OID = 3927 ( _int8range		PGNSP PGUID  -1 f b A f t \054 0 3926 0 arr
  * but there is now support for it in records and arrays.  Perhaps we should
  * just treat it as a regular base type?
  */
-DATA(insert OID = 2249 ( record			PGNSP PGUID -1 f p P f t \054 0 0 2287 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2249 ( record			PGNSP PGUID -1 f p P f t \054 0 0 2287 record_in record_out record_recv record_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define RECORDOID		2249
-DATA(insert OID = 2287 ( _record		PGNSP PGUID -1 f p P f t \054 0 2249 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2287 ( _record		PGNSP PGUID -1 f p P f t \054 0 2249 0 array_in array_out array_recv array_send - - array_typanalyze d x f 0 -1 0 0 array_subscript_parse _null_ _null_ _null_ ));
 #define RECORDARRAYOID	2287
-DATA(insert OID = 2275 ( cstring		PGNSP PGUID -2 f p P f t \054 0 0 1263 cstring_in cstring_out cstring_recv cstring_send - - - c p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2275 ( cstring		PGNSP PGUID -2 f p P f t \054 0 0 1263 cstring_in cstring_out cstring_recv cstring_send - - - c p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define CSTRINGOID		2275
-DATA(insert OID = 2276 ( any			PGNSP PGUID  4 t p P f t \054 0 0 0 any_in any_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2276 ( any			PGNSP PGUID  4 t p P f t \054 0 0 0 any_in any_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYOID			2276
-DATA(insert OID = 2277 ( anyarray		PGNSP PGUID -1 f p P f t \054 0 0 0 anyarray_in anyarray_out anyarray_recv anyarray_send - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2277 ( anyarray		PGNSP PGUID -1 f p P f t \054 0 0 0 anyarray_in anyarray_out anyarray_recv anyarray_send - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYARRAYOID		2277
-DATA(insert OID = 2278 ( void			PGNSP PGUID  4 t p P f t \054 0 0 0 void_in void_out void_recv void_send - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2278 ( void			PGNSP PGUID  4 t p P f t \054 0 0 0 void_in void_out void_recv void_send - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define VOIDOID			2278
-DATA(insert OID = 2279 ( trigger		PGNSP PGUID  4 t p P f t \054 0 0 0 trigger_in trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2279 ( trigger		PGNSP PGUID  4 t p P f t \054 0 0 0 trigger_in trigger_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define TRIGGEROID		2279
-DATA(insert OID = 3838 ( event_trigger		PGNSP PGUID  4 t p P f t \054 0 0 0 event_trigger_in event_trigger_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3838 ( event_trigger		PGNSP PGUID  4 t p P f t \054 0 0 0 event_trigger_in event_trigger_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define EVTTRIGGEROID		3838
-DATA(insert OID = 2280 ( language_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 language_handler_in language_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2280 ( language_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 language_handler_in language_handler_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define LANGUAGE_HANDLEROID		2280
-DATA(insert OID = 2281 ( internal		PGNSP PGUID  SIZEOF_POINTER t p P f t \054 0 0 0 internal_in internal_out - - - - - ALIGNOF_POINTER p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2281 ( internal		PGNSP PGUID  SIZEOF_POINTER t p P f t \054 0 0 0 internal_in internal_out - - - - - ALIGNOF_POINTER p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define INTERNALOID		2281
-DATA(insert OID = 2282 ( opaque			PGNSP PGUID  4 t p P f t \054 0 0 0 opaque_in opaque_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2282 ( opaque			PGNSP PGUID  4 t p P f t \054 0 0 0 opaque_in opaque_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define OPAQUEOID		2282
-DATA(insert OID = 2283 ( anyelement		PGNSP PGUID  4 t p P f t \054 0 0 0 anyelement_in anyelement_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2283 ( anyelement		PGNSP PGUID  4 t p P f t \054 0 0 0 anyelement_in anyelement_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYELEMENTOID	2283
-DATA(insert OID = 2776 ( anynonarray	PGNSP PGUID  4 t p P f t \054 0 0 0 anynonarray_in anynonarray_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 2776 ( anynonarray	PGNSP PGUID  4 t p P f t \054 0 0 0 anynonarray_in anynonarray_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYNONARRAYOID	2776
-DATA(insert OID = 3500 ( anyenum		PGNSP PGUID  4 t p P f t \054 0 0 0 anyenum_in anyenum_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3500 ( anyenum		PGNSP PGUID  4 t p P f t \054 0 0 0 anyenum_in anyenum_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYENUMOID		3500
-DATA(insert OID = 3115 ( fdw_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3115 ( fdw_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define FDW_HANDLEROID	3115
-DATA(insert OID = 325 ( index_am_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 325 ( index_am_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 index_am_handler_in index_am_handler_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define INDEX_AM_HANDLEROID 325
-DATA(insert OID = 3310 ( tsm_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3310 ( tsm_handler	PGNSP PGUID  4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define TSM_HANDLEROID	3310
-DATA(insert OID = 3831 ( anyrange		PGNSP PGUID  -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ ));
+DATA(insert OID = 3831 ( anyrange		PGNSP PGUID  -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 - _null_ _null_ _null_ ));
 #define ANYRANGEOID		3831
 
 
diff --git a/src/include/catalog/pg_type_fn.h b/src/include/catalog/pg_type_fn.h
index 01f0956..a9dd116 100644
--- a/src/include/catalog/pg_type_fn.h
+++ b/src/include/catalog/pg_type_fn.h
@@ -52,7 +52,8 @@ extern ObjectAddress TypeCreate(Oid newTypeOid,
 		   int32 typeMod,
 		   int32 typNDims,
 		   bool typeNotNull,
-		   Oid typeCollation);
+		   Oid typeCollation,
+		   Oid subscriptingProcedure);
 
 extern void GenerateTypeDependencies(Oid typeNamespace,
 						 Oid typeObjectId,
@@ -70,6 +71,7 @@ extern void GenerateTypeDependencies(Oid typeNamespace,
 						 bool isImplicitArray,
 						 Oid baseType,
 						 Oid typeCollation,
+						 Oid subscriptingProcedure,
 						 Node *defaultExpr,
 						 bool rebuild);
 
diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h
index a665388..49d3873 100644
--- a/src/include/executor/execExpr.h
+++ b/src/include/executor/execExpr.h
@@ -17,7 +17,7 @@
 #include "nodes/execnodes.h"
 
 /* forward reference to avoid circularity */
-struct ArrayRefState;
+struct SubscriptingRefState;
 
 /* Bits in ExprState->flags (see also execnodes.h for public flag bits): */
 /* expression's interpreter has been initialized */
@@ -177,20 +177,20 @@ typedef enum ExprEvalOp
 	EEOP_FIELDSTORE_FORM,
 
 	/* Process an array subscript; short-circuit expression to NULL if NULL */
-	EEOP_ARRAYREF_SUBSCRIPT,
+	EEOP_SBSREF_SUBSCRIPT,
 
 	/*
-	 * Compute old array element/slice when an ArrayRef assignment expression
-	 * contains ArrayRef/FieldStore subexpressions.  Value is accessed using
+	 * Compute old array element/slice when a SubscriptingRef assignment expression
+	 * contains SubscriptingRef/FieldStore subexpressions.  Value is accessed using
 	 * the CaseTest mechanism.
 	 */
-	EEOP_ARRAYREF_OLD,
+	EEOP_SBSREF_OLD,
 
-	/* compute new value for ArrayRef assignment expression */
-	EEOP_ARRAYREF_ASSIGN,
+	/* compute new value for SubscriptingRef assignment expression */
+	EEOP_SBSREF_ASSIGN,
 
-	/* compute element/slice for ArrayRef fetch expression */
-	EEOP_ARRAYREF_FETCH,
+	/* compute element/slice for SubscriptingRef fetch expression */
+	EEOP_SBSREF_FETCH,
 
 	/* evaluate value for CoerceToDomainValue */
 	EEOP_DOMAIN_TESTVAL,
@@ -451,22 +451,25 @@ typedef struct ExprEvalStep
 			int			ncolumns;
 		}			fieldstore;
 
-		/* for EEOP_ARRAYREF_SUBSCRIPT */
+		/* for EEOP_SBSREF_SUBSCRIPT */
 		struct
 		{
 			/* too big to have inline */
-			struct ArrayRefState *state;
+			struct SubscriptingRefState *state;
 			int			off;	/* 0-based index of this subscript */
 			bool		isupper;	/* is it upper or lower subscript? */
 			int			jumpdone;		/* jump here on null */
-		}			arrayref_subscript;
+		}			sbsref_subscript;
 
-		/* for EEOP_ARRAYREF_OLD / ASSIGN / FETCH */
+		/* for EEOP_SBSREF_OLD / ASSIGN / FETCH */
 		struct
 		{
 			/* too big to have inline */
-			struct ArrayRefState *state;
-		}			arrayref;
+			FmgrInfo   *eval_finfo;	/* function to evaluate subscript */
+			FmgrInfo   *nested_finfo;	/* function to handle nested assignment */
+
+			struct SubscriptingRefState *state;
+		}			sbsref;
 
 		/* for EEOP_DOMAIN_NOTNULL / DOMAIN_CHECK */
 		struct
@@ -557,7 +560,7 @@ typedef struct ExprEvalStep
 
 
 /* Non-inline data for array operations */
-typedef struct ArrayRefState
+typedef struct SubscriptingRefState
 {
 	bool		isassignment;	/* is it assignment, or just fetch? */
 
@@ -570,13 +573,13 @@ typedef struct ArrayRefState
 	/* numupper and upperprovided[] are filled at compile time */
 	/* at runtime, extracted subscript datums get stored in upperindex[] */
 	int			numupper;
-	bool		upperprovided[MAXDIM];
-	int			upperindex[MAXDIM];
+	bool		upperprovided[MAX_SUBSCRIPT_DEPTH];
+	Datum		upper[MAX_SUBSCRIPT_DEPTH];
 
 	/* similarly for lower indexes, if any */
 	int			numlower;
-	bool		lowerprovided[MAXDIM];
-	int			lowerindex[MAXDIM];
+	bool		lowerprovided[MAX_SUBSCRIPT_DEPTH];
+	Datum		lower[MAX_SUBSCRIPT_DEPTH];
 
 	/* subscript expressions get evaluated into here */
 	Datum		subscriptvalue;
@@ -586,11 +589,10 @@ typedef struct ArrayRefState
 	Datum		replacevalue;
 	bool		replacenull;
 
-	/* if we have a nested assignment, ARRAYREF_OLD puts old value here */
+	/* if we have a nested assignment, SBSREF_OLD puts old value here */
 	Datum		prevvalue;
 	bool		prevnull;
-} ArrayRefState;
-
+} SubscriptingRefState;
 
 extern void ExecReadyInterpretedExpr(ExprState *state);
 
@@ -621,10 +623,10 @@ extern void ExecEvalFieldStoreDeForm(ExprState *state, ExprEvalStep *op,
 						 ExprContext *econtext);
 extern void ExecEvalFieldStoreForm(ExprState *state, ExprEvalStep *op,
 					   ExprContext *econtext);
-extern bool ExecEvalArrayRefSubscript(ExprState *state, ExprEvalStep *op);
-extern void ExecEvalArrayRefFetch(ExprState *state, ExprEvalStep *op);
-extern void ExecEvalArrayRefOld(ExprState *state, ExprEvalStep *op);
-extern void ExecEvalArrayRefAssign(ExprState *state, ExprEvalStep *op);
+extern bool ExecEvalSubscriptingRef(ExprState *state, ExprEvalStep *op);
+extern void ExecEvalSubscriptingRefFetch(ExprState *state, ExprEvalStep *op);
+extern void ExecEvalSubscriptingRefOld(ExprState *state, ExprEvalStep *op);
+extern void ExecEvalSubscriptingRefAssign(ExprState *state, ExprEvalStep *op);
 extern void ExecEvalConvertRowtype(ExprState *state, ExprEvalStep *op,
 					   ExprContext *econtext);
 extern void ExecEvalScalarArrayOp(ExprState *state, ExprEvalStep *op);
diff --git a/src/include/lib/knapsack.h b/src/include/lib/knapsack.h
deleted file mode 100644
index 8d1e6d0..0000000
--- a/src/include/lib/knapsack.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * knapsack.h
- *
- * Copyright (c) 2017, PostgreSQL Global Development Group
- *
- * src/include/lib/knapsack.h
- */
-#ifndef KNAPSACK_H
-#define KNAPSACK_H
-
-#include "postgres.h"
-#include "nodes/bitmapset.h"
-
-extern Bitmapset *DiscreteKnapsack(int max_weight, int num_items,
-				 int *item_weights, double *item_values);
-
-#endif   /* KNAPSACK_H */
diff --git a/src/include/nodes/bitmapset.h b/src/include/nodes/bitmapset.h
index 109f7b0..4f1910e 100644
--- a/src/include/nodes/bitmapset.h
+++ b/src/include/nodes/bitmapset.h
@@ -21,11 +21,6 @@
 #define BITMAPSET_H
 
 /*
- * Forward decl to save including pg_list.h
- */
-struct List;
-
-/*
  * Data representation
  */
 
@@ -75,7 +70,6 @@ extern bool bms_is_subset(const Bitmapset *a, const Bitmapset *b);
 extern BMS_Comparison bms_subset_compare(const Bitmapset *a, const Bitmapset *b);
 extern bool bms_is_member(int x, const Bitmapset *a);
 extern bool bms_overlap(const Bitmapset *a, const Bitmapset *b);
-extern bool bms_overlap_list(const Bitmapset *a, const struct List *b);
 extern bool bms_nonempty_difference(const Bitmapset *a, const Bitmapset *b);
 extern int	bms_singleton_member(const Bitmapset *a);
 extern bool bms_get_singleton_member(const Bitmapset *a, int *member);
diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h
index 11a6850..e26c318 100644
--- a/src/include/nodes/execnodes.h
+++ b/src/include/nodes/execnodes.h
@@ -635,7 +635,6 @@ typedef struct WindowFuncExprState
 	int			wfuncno;		/* ID number for wfunc within its plan node */
 } WindowFuncExprState;
 
-
 /* ----------------
  *		SetExprState node
  *
@@ -1699,7 +1698,6 @@ typedef struct AggStatePerAggData *AggStatePerAgg;
 typedef struct AggStatePerTransData *AggStatePerTrans;
 typedef struct AggStatePerGroupData *AggStatePerGroup;
 typedef struct AggStatePerPhaseData *AggStatePerPhase;
-typedef struct AggStatePerHashData *AggStatePerHash;
 
 typedef struct AggState
 {
@@ -1707,17 +1705,15 @@ typedef struct AggState
 	List	   *aggs;			/* all Aggref nodes in targetlist & quals */
 	int			numaggs;		/* length of list (could be zero!) */
 	int			numtrans;		/* number of pertrans items */
-	AggStrategy aggstrategy;	/* strategy mode */
 	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
 	AggStatePerPhase phase;		/* pointer to current phase data */
-	int			numphases;		/* number of phases (including phase 0) */
+	int			numphases;		/* number of phases */
 	int			current_phase;	/* current phase number */
+	FmgrInfo   *hashfunctions;	/* per-grouping-field hash fns */
 	AggStatePerAgg peragg;		/* per-Aggref information */
 	AggStatePerTrans pertrans;	/* per-Trans state information */
-	ExprContext *hashcontext;	/* econtexts for long-lived data (hashtable) */
 	ExprContext **aggcontexts;	/* econtexts for long-lived data (per GS) */
 	ExprContext *tmpcontext;	/* econtext for input expressions */
-	ExprContext *curaggcontext; /* currently active aggcontext */
 	AggStatePerTrans curpertrans;		/* currently active trans state */
 	bool		input_done;		/* indicates end of input */
 	bool		agg_done;		/* indicates completion of Agg scan */
@@ -1729,17 +1725,21 @@ typedef struct AggState
 	/* These fields are for grouping set phase data */
 	int			maxsets;		/* The max number of sets in any phase */
 	AggStatePerPhase phases;	/* array of all phases */
-	Tuplesortstate *sort_in;	/* sorted input to phases > 1 */
+	Tuplesortstate *sort_in;	/* sorted input to phases > 0 */
 	Tuplesortstate *sort_out;	/* input is copied here for next phase */
 	TupleTableSlot *sort_slot;	/* slot for sort results */
 	/* these fields are used in AGG_PLAIN and AGG_SORTED modes: */
 	AggStatePerGroup pergroup;	/* per-Aggref-per-group working state */
 	HeapTuple	grp_firstTuple; /* copy of first tuple of current group */
-	/* these fields are used in AGG_HASHED and AGG_MIXED modes: */
+	/* these fields are used in AGG_HASHED mode: */
+	TupleHashTable hashtable;	/* hash table with one entry per group */
+	TupleTableSlot *hashslot;	/* slot for loading hash table */
+	int			numhashGrpCols;	/* number of columns in hash table */
+	int			largestGrpColIdx; /* largest column required for hashing */
+	AttrNumber *hashGrpColIdxInput;	/* and their indices in input slot */
+	AttrNumber *hashGrpColIdxHash;	/* indices for execGrouping in hashtbl */
 	bool		table_filled;	/* hash table filled yet? */
-	int			num_hashes;
-	AggStatePerHash perhash;
-	AggStatePerGroup *hash_pergroup;	/* array of per-group pointers */
+	TupleHashIterator hashiter; /* for iterating through hash table */
 	/* support for evaluation of agg inputs */
 	TupleTableSlot *evalslot;	/* slot for agg inputs */
 	ProjectionInfo *evalproj;	/* projection machinery */
diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h
index b9369ac..531c9a5 100644
--- a/src/include/nodes/nodes.h
+++ b/src/include/nodes/nodes.h
@@ -147,7 +147,7 @@ typedef enum NodeTag
 	T_Aggref,
 	T_GroupingFunc,
 	T_WindowFunc,
-	T_ArrayRef,
+	T_SubscriptingRef,
 	T_FuncExpr,
 	T_NamedArgExpr,
 	T_OpExpr,
@@ -261,8 +261,6 @@ typedef enum NodeTag
 	T_PlaceHolderInfo,
 	T_MinMaxAggInfo,
 	T_PlannerParamItem,
-	T_RollupData,
-	T_GroupingSetData,
 	T_StatisticExtInfo,
 
 	/*
@@ -555,6 +553,10 @@ extern PGDLLIMPORT Node *newNodeMacroHolder;
 #define NodeSetTag(nodeptr,t)	(((Node*)(nodeptr))->type = (t))
 
 #define IsA(nodeptr,_type_)		(nodeTag(nodeptr) == T_##_type_)
+#define IsOneOf(nodeptr,_type_a_,_type_b_)									\
+(																			\
+	nodeTag(nodeptr) == T_##_type_a_ || nodeTag(nodeptr) == T_##_type_b_	\
+)																			\
 
 /*
  * castNode(type, ptr) casts ptr to "type *", and if assertions are enabled,
@@ -726,8 +728,7 @@ typedef enum AggStrategy
 {
 	AGG_PLAIN,					/* simple agg across all input rows */
 	AGG_SORTED,					/* grouped agg, input must be sorted */
-	AGG_HASHED,					/* grouped agg, use internal hashtable */
-	AGG_MIXED					/* grouped agg, hash and sort both used */
+	AGG_HASHED					/* grouped agg, use internal hashtable */
 } AggStrategy;
 
 /*
diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h
index 6e531b6..4a95e16 100644
--- a/src/include/nodes/plannodes.h
+++ b/src/include/nodes/plannodes.h
@@ -758,7 +758,7 @@ typedef struct Agg
 	Oid		   *grpOperators;	/* equality operators to compare with */
 	long		numGroups;		/* estimated number of groups in input */
 	Bitmapset  *aggParams;		/* IDs of Params used in Aggref inputs */
-	/* Note: planner provides numGroups & aggParams only in HASHED/MIXED case */
+	/* Note: planner provides numGroups & aggParams only in AGG_HASHED case */
 	List	   *groupingSets;	/* grouping sets to use */
 	List	   *chain;			/* chained Agg/Sort nodes */
 } Agg;
diff --git a/src/include/nodes/primnodes.h b/src/include/nodes/primnodes.h
index d57b4fa..09fb569 100644
--- a/src/include/nodes/primnodes.h
+++ b/src/include/nodes/primnodes.h
@@ -365,18 +365,18 @@ typedef struct WindowFunc
 } WindowFunc;
 
 /* ----------------
- *	ArrayRef: describes an array subscripting operation
- *
- * An ArrayRef can describe fetching a single element from an array,
- * fetching a subarray (array slice), storing a single element into
- * an array, or storing a slice.  The "store" cases work with an
- * initial array value and a source value that is inserted into the
- * appropriate part of the array; the result of the operation is an
- * entire new modified array value.
- *
- * If reflowerindexpr = NIL, then we are fetching or storing a single array
- * element at the subscripts given by refupperindexpr.  Otherwise we are
- * fetching or storing an array slice, that is a rectangular subarray
+ *	SubscriptingRef: describes a subscripting operation over a container
+ *
+ * A SubscriptingRef can describe fetching a single element from a container,
+ * fetching a part of a container (e.g. array slice), storing a single element into
+ * a container, or storing a slice.  The "store" cases work with an
+ * initial container value and a source value that is inserted into the
+ * appropriate part of the container; the result of the operation is an
+ * entire new modified container value.
+ *
+ * If reflowerindexpr = NIL, then we are fetching or storing a single container
+ * element at the subscripts given by refupperindexpr. Otherwise we are
+ * fetching or storing a container slice, that is a rectangular subcontainer
  * with lower and upper bounds given by the index expressions.
  * reflowerindexpr must be the same length as refupperindexpr when it
  * is not NIL.
@@ -388,27 +388,33 @@ typedef struct WindowFunc
  * element; but it is the array type when doing subarray fetch or either
  * type of store.
  *
- * Note: for the cases where an array is returned, if refexpr yields a R/W
- * expanded array, then the implementation is allowed to modify that object
+ * Note: for the cases where a container is returned, if refexpr yields a R/W
+ * expanded container, then the implementation is allowed to modify that object
  * in-place and return the same object.)
  * ----------------
  */
-typedef struct ArrayRef
+
+typedef struct SubscriptingRef
 {
 	Expr		xpr;
-	Oid			refarraytype;	/* type of the array proper */
-	Oid			refelemtype;	/* type of the array elements */
-	int32		reftypmod;		/* typmod of the array (and elements too) */
-	Oid			refcollid;		/* OID of collation, or InvalidOid if none */
-	List	   *refupperindexpr;/* expressions that evaluate to upper array
-								 * indexes */
-	List	   *reflowerindexpr;/* expressions that evaluate to lower array
-								 * indexes, or NIL for single array element */
-	Expr	   *refexpr;		/* the expression that evaluates to an array
-								 * value */
-	Expr	   *refassgnexpr;	/* expression for the source value, or NULL if
-								 * fetch */
-} ArrayRef;
+	Oid			refcontainertype;	/* type of the container proper */
+	Oid			refelemtype;		/* type of the container elements */
+	int32		reftypmod;			/* typmod of the container (and elements too) */
+	Oid			refcollid;			/* OID of collation, or InvalidOid if none */
+	Oid			refevalfunc;		/* OID of type-specific subscripting function */
+	Oid			refnestedfunc;		/* OID of type-specific function to handle nested assignment */
+	List	   *refupperindexpr;	/* expressions that evaluate to upper container
+									 * indexes */
+	List	   *reflowerindexpr;	/* expressions that evaluate to lower container
+									 * indexes, or NIL for single container element */
+	Expr	   *refexpr;			/* the expression that evaluates to a container
+									 * value */
+
+	Expr	   *refassgnexpr;		/* expression for the source value, or NULL if
+									 * fetch */
+} SubscriptingRef;
+
+#define IsAssignment(expr) ( ((SubscriptingRef*) expr)->refassgnexpr != NULL )
 
 /*
  * CoercionContext - distinguishes the allowed set of type casts
@@ -750,7 +756,7 @@ typedef struct FieldSelect
  *
  * FieldStore represents the operation of modifying one field in a tuple
  * value, yielding a new tuple value (the input is not touched!).  Like
- * the assign case of ArrayRef, this is used to implement UPDATE of a
+ * the assign case of SubscriptingRef, this is used to implement UPDATE of a
  * portion of a column.
  *
  * A single FieldStore can actually represent updates of several different
diff --git a/src/include/nodes/relation.h b/src/include/nodes/relation.h
index 8930edf..0a5187c 100644
--- a/src/include/nodes/relation.h
+++ b/src/include/nodes/relation.h
@@ -1418,37 +1418,17 @@ typedef struct AggPath
 } AggPath;
 
 /*
- * Various annotations used for grouping sets in the planner.
- */
-
-typedef struct GroupingSetData
-{
-	NodeTag		type;
-	List	   *set;			/* grouping set as list of sortgrouprefs */
-	double		numGroups;		/* est. number of result groups */
-} GroupingSetData;
-
-typedef struct RollupData
-{
-	NodeTag		type;
-	List	   *groupClause;	/* applicable subset of parse->groupClause */
-	List	   *gsets;			/* lists of integer indexes into groupClause */
-	List	   *gsets_data;		/* list of GroupingSetData */
-	double		numGroups;		/* est. number of result groups */
-	bool		hashable;		/* can be hashed */
-	bool		is_hashed;		/* to be implemented as a hashagg */
-} RollupData;
-
-/*
  * GroupingSetsPath represents a GROUPING SETS aggregation
+ *
+ * Currently we only support this in sorted not hashed form, so the input
+ * must always be appropriately presorted.
  */
-
 typedef struct GroupingSetsPath
 {
 	Path		path;
 	Path	   *subpath;		/* path representing input source */
-	AggStrategy aggstrategy;	/* basic strategy */
-	List	   *rollups;		/* list of RollupData */
+	List	   *rollup_groupclauses;	/* list of lists of SortGroupClause's */
+	List	   *rollup_lists;	/* parallel list of lists of grouping sets */
 	List	   *qual;			/* quals (HAVING quals), if any */
 } GroupingSetsPath;
 
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index c72c7e0..81640de 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -195,8 +195,8 @@ extern GroupingSetsPath *create_groupingsets_path(PlannerInfo *root,
 						 Path *subpath,
 						 PathTarget *target,
 						 List *having_qual,
-						 AggStrategy aggstrategy,
-						 List *rollups,
+						 List *rollup_lists,
+						 List *rollup_groupclauses,
 						 const AggClauseCosts *agg_costs,
 						 double numGroups);
 extern MinMaxAggPath *create_minmaxagg_path(PlannerInfo *root,
diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h
index 3a25d95..c1c07c0 100644
--- a/src/include/parser/parse_node.h
+++ b/src/include/parser/parse_node.h
@@ -261,12 +261,13 @@ extern void cancel_parser_errposition_callback(ParseCallbackState *pcbstate);
 
 extern Var *make_var(ParseState *pstate, RangeTblEntry *rte, int attrno,
 		 int location);
-extern Oid	transformArrayType(Oid *arrayType, int32 *arrayTypmod);
-extern ArrayRef *transformArraySubscripts(ParseState *pstate,
-						 Node *arrayBase,
-						 Oid arrayType,
+extern Oid	transformArrayType(Oid *containerType, int32 *containerTypmod);
+
+extern Node *transformContainerSubscripts(ParseState *pstate,
+						 Node *containerBase,
+						 Oid containerType,
 						 Oid elementType,
-						 int32 arrayTypMod,
+						 int32 containerTypMod,
 						 List *indirection,
 						 Node *assignFrom);
 extern Const *make_const(ParseState *pstate, Value *value, int location);
diff --git a/src/include/pgstat.h b/src/include/pgstat.h
index e29397f..2015625 100644
--- a/src/include/pgstat.h
+++ b/src/include/pgstat.h
@@ -696,25 +696,6 @@ typedef struct PgStat_GlobalStats
 
 
 /* ----------
- * Backend types
- * ----------
- */
-typedef enum BackendType
-{
-	B_AUTOVAC_LAUNCHER,
-	B_AUTOVAC_WORKER,
-	B_BACKEND,
-	B_BG_WORKER,
-	B_BG_WRITER,
-	B_CHECKPOINTER,
-	B_STARTUP,
-	B_WAL_RECEIVER,
-	B_WAL_SENDER,
-	B_WAL_WRITER
-} BackendType;
-
-
-/* ----------
  * Backend states
  * ----------
  */
@@ -946,9 +927,6 @@ typedef struct PgBackendSSLStatus
  * showing its current activity.  (The structs are allocated according to
  * BackendId, but that is not critical.)  Note that the collector process
  * has no involvement in, or even access to, these structs.
- *
- * Each auxiliary process also maintains a PgBackendStatus struct in shared
- * memory.
  * ----------
  */
 typedef struct PgBackendStatus
@@ -973,9 +951,6 @@ typedef struct PgBackendStatus
 	/* The entry is valid iff st_procpid > 0, unused if st_procpid == 0 */
 	int			st_procpid;
 
-	/* Type of backends */
-	BackendType st_backendType;
-
 	/* Times when current backend, transaction, and activity started */
 	TimestampTz st_proc_start_timestamp;
 	TimestampTz st_xact_start_timestamp;
@@ -1174,7 +1149,6 @@ extern const char *pgstat_get_wait_event_type(uint32 wait_event_info);
 extern const char *pgstat_get_backend_current_activity(int pid, bool checkUser);
 extern const char *pgstat_get_crashed_backend_activity(int pid, char *buffer,
 									int buflen);
-extern const char *pgstat_get_backend_desc(BackendType backendType);
 
 extern void pgstat_progress_start_command(ProgressCommandType cmdtype,
 							  Oid relid);
diff --git a/src/include/replication/snapbuild.h b/src/include/replication/snapbuild.h
index a8ae631..091a9f9 100644
--- a/src/include/replication/snapbuild.h
+++ b/src/include/replication/snapbuild.h
@@ -59,7 +59,7 @@ extern void FreeSnapshotBuilder(SnapBuild *cache);
 
 extern void SnapBuildSnapDecRefcount(Snapshot snap);
 
-extern Snapshot SnapBuildInitialSnapshot(SnapBuild *builder);
+extern Snapshot SnapBuildInitalSnapshot(SnapBuild *builder);
 extern const char *SnapBuildExportSnapshot(SnapBuild *snapstate);
 extern void SnapBuildClearExportedSnapshot(void);
 
diff --git a/src/include/statistics/statistics.h b/src/include/statistics/statistics.h
index 91645bf..a15e39e 100644
--- a/src/include/statistics/statistics.h
+++ b/src/include/statistics/statistics.h
@@ -27,9 +27,6 @@ typedef struct MVNDistinctItem
 	double		ndistinct;		/* ndistinct value for this combination */
 	Bitmapset  *attrs;			/* attr numbers of items */
 } MVNDistinctItem;
-/* size of the struct, excluding attribute list */
-#define SizeOfMVNDistinctItem \
-	(offsetof(MVNDistinctItem, ndistinct) + sizeof(double))
 
 /* A MVNDistinct object, comprising all possible combinations of columns */
 typedef struct MVNDistinct
@@ -40,10 +37,6 @@ typedef struct MVNDistinct
 	MVNDistinctItem items[FLEXIBLE_ARRAY_MEMBER];
 } MVNDistinct;
 
-/* size of the struct excluding the items array */
-#define SizeOfMVNDistinct	(offsetof(MVNDistinct, nitems) + sizeof(uint32))
-
-
 extern MVNDistinct *statext_ndistinct_load(Oid mvoid);
 
 extern void BuildRelationExtStatistics(Relation onerel, double totalrows,
diff --git a/src/include/storage/fd.h b/src/include/storage/fd.h
index 0568049..ac37502 100644
--- a/src/include/storage/fd.h
+++ b/src/include/storage/fd.h
@@ -119,7 +119,6 @@ extern int	pg_fdatasync(int fd);
 extern void pg_flush_data(int fd, off_t offset, off_t amount);
 extern void fsync_fname(const char *fname, bool isdir);
 extern int	durable_rename(const char *oldfile, const char *newfile, int loglevel);
-extern int	durable_unlink(const char *fname, int loglevel);
 extern int	durable_link_or_rename(const char *oldfile, const char *newfile, int loglevel);
 extern void SyncDataDirectory(void);
 
diff --git a/src/include/storage/proc.h b/src/include/storage/proc.h
index 1a125d8..1b345fa 100644
--- a/src/include/storage/proc.h
+++ b/src/include/storage/proc.h
@@ -272,6 +272,7 @@ extern PGPROC *PreparedXactProcs;
  */
 #define NUM_AUXILIARY_PROCS		4
 
+
 /* configurable options */
 extern int	DeadlockTimeout;
 extern int	StatementTimeout;
@@ -308,8 +309,6 @@ extern void LockErrorCleanup(void);
 extern void ProcWaitForSignal(uint32 wait_event_info);
 extern void ProcSendSignal(int pid);
 
-extern PGPROC *AuxiliaryPidGetProc(int pid);
-
 extern void BecomeLockGroupLeader(void);
 extern bool BecomeLockGroupMember(PGPROC *leader, int pid);
 
diff --git a/src/include/utils/jsonb.h b/src/include/utils/jsonb.h
index 411e158..be4c569 100644
--- a/src/include/utils/jsonb.h
+++ b/src/include/utils/jsonb.h
@@ -366,6 +366,7 @@ extern JsonbValue *pushJsonbValue(JsonbParseState **pstate,
 extern JsonbIterator *JsonbIteratorInit(JsonbContainer *container);
 extern JsonbIteratorToken JsonbIteratorNext(JsonbIterator **it, JsonbValue *val,
 				  bool skipNested);
+extern JsonbValue *JsonbToJsonbValue(Jsonb *jsonb);
 extern Jsonb *JsonbValueToJsonb(JsonbValue *val);
 extern bool JsonbDeepContains(JsonbIterator **val,
 				  JsonbIterator **mContained);
@@ -377,5 +378,9 @@ extern char *JsonbToCString(StringInfo out, JsonbContainer *in,
 extern char *JsonbToCStringIndent(StringInfo out, JsonbContainer *in,
 					 int estimated_len);
 
+extern JsonbValue *to_jsonb_worker(Datum source, Oid source_type);
+
+/* Jsonb subscripting logic */
+/* extern Datum jsonb_subscript_parse(PG_FUNCTION_ARGS); */
 
 #endif   /* __JSONB_H__ */
diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h
index b6d1fca..f14bc42 100644
--- a/src/include/utils/lsyscache.h
+++ b/src/include/utils/lsyscache.h
@@ -159,6 +159,7 @@ extern void free_attstatsslot(Oid atttype,
 extern char *get_namespace_name(Oid nspid);
 extern char *get_namespace_name_or_temp(Oid nspid);
 extern Oid	get_range_subtype(Oid rangeOid);
+extern RegProcedure get_typsbsparse(Oid typid);
 
 #define type_is_array(typid)  (get_element_type(typid) != InvalidOid)
 /* type_is_array_domain accepts both plain arrays and domains over arrays */
diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h
index 439dfbd..85eb9d7 100644
--- a/src/include/utils/pg_locale.h
+++ b/src/include/utils/pg_locale.h
@@ -66,7 +66,7 @@ extern void cache_locale_time(void);
  * fake version of the standard type locale_t in the global namespace.
  * pg_locale_t is occasionally checked for truth, so make it a pointer.
  */
-struct pg_locale_struct
+struct pg_locale_t
 {
 	char	provider;
 	union
@@ -84,7 +84,7 @@ struct pg_locale_struct
 	} info;
 };
 
-typedef struct pg_locale_struct *pg_locale_t;
+typedef struct pg_locale_t *pg_locale_t;
 
 extern pg_locale_t pg_newlocale_from_collation(Oid collid);
 
diff --git a/src/interfaces/ecpg/preproc/check_rules.pl b/src/interfaces/ecpg/preproc/check_rules.pl
index e681943..dce4bc6 100644
--- a/src/interfaces/ecpg/preproc/check_rules.pl
+++ b/src/interfaces/ecpg/preproc/check_rules.pl
@@ -53,8 +53,8 @@ my $comment     = 0;
 my $non_term_id = '';
 my $cc          = 0;
 
-open my $parser_fh, '<', $parser or die $!;
-while (<$parser_fh>)
+open GRAM, $parser or die $!;
+while (<GRAM>)
 {
 	if (/^%%/)
 	{
@@ -145,7 +145,7 @@ while (<$parser_fh>)
 	}
 }
 
-close $parser_fh;
+close GRAM;
 if ($verbose)
 {
 	print "$cc rules loaded\n";
@@ -154,8 +154,8 @@ if ($verbose)
 my $ret = 0;
 $cc = 0;
 
-open my $ecpg_fh, '<', $filename or die $!;
-while (<$ecpg_fh>)
+open ECPG, $filename or die $!;
+while (<ECPG>)
 {
 	if (!/^ECPG:/)
 	{
@@ -170,7 +170,7 @@ while (<$ecpg_fh>)
 		$ret = 1;
 	}
 }
-close $ecpg_fh;
+close ECPG;
 
 if ($verbose)
 {
diff --git a/src/interfaces/libpq/test/regress.pl b/src/interfaces/libpq/test/regress.pl
index c403130..1dab122 100644
--- a/src/interfaces/libpq/test/regress.pl
+++ b/src/interfaces/libpq/test/regress.pl
@@ -14,19 +14,19 @@ my $expected_out = "$srcdir/$subdir/expected.out";
 my $regress_out = "regress.out";
 
 # open input file first, so possible error isn't sent to redirected STDERR
-open(my $regress_in_fh, "<", $regress_in)
+open(REGRESS_IN, "<", $regress_in)
   or die "can't open $regress_in for reading: $!";
 
 # save STDOUT/ERR and redirect both to regress.out
-open(my $oldout_fh, ">&", \*STDOUT) or die "can't dup STDOUT: $!";
-open(my $olderr_fh, ">&", \*STDERR) or die "can't dup STDERR: $!";
+open(OLDOUT, ">&", \*STDOUT) or die "can't dup STDOUT: $!";
+open(OLDERR, ">&", \*STDERR) or die "can't dup STDERR: $!";
 
 open(STDOUT, ">", $regress_out)
   or die "can't open $regress_out for writing: $!";
 open(STDERR, ">&", \*STDOUT) or die "can't dup STDOUT: $!";
 
 # read lines from regress.in and run uri-regress on them
-while (<$regress_in_fh>)
+while (<REGRESS_IN>)
 {
 	chomp;
 	print "trying $_\n";
@@ -35,11 +35,11 @@ while (<$regress_in_fh>)
 }
 
 # restore STDOUT/ERR so we can print the outcome to the user
-open(STDERR, ">&", $olderr_fh) or die; # can't complain as STDERR is still duped
-open(STDOUT, ">&", $oldout_fh) or die "can't restore STDOUT: $!";
+open(STDERR, ">&", \*OLDERR) or die; # can't complain as STDERR is still duped
+open(STDOUT, ">&", \*OLDOUT) or die "can't restore STDOUT: $!";
 
 # just in case
-close $regress_in_fh;
+close REGRESS_IN;
 
 my $diff_status = system(
 	"diff -c \"$srcdir/$subdir/expected.out\" regress.out >regress.diff");
diff --git a/src/pl/plperl/plc_perlboot.pl b/src/pl/plperl/plc_perlboot.pl
index 292c910..bb2d009 100644
--- a/src/pl/plperl/plc_perlboot.pl
+++ b/src/pl/plperl/plc_perlboot.pl
@@ -52,7 +52,7 @@ sub ::encode_array_constructor
 
 {
 
-	package PostgreSQL::InServer;  ## no critic (RequireFilenameMatchesPackage);
+	package PostgreSQL::InServer;
 	use strict;
 	use warnings;
 
@@ -86,13 +86,11 @@ sub ::encode_array_constructor
 
 	sub mkfunc
 	{
-		## no critic (ProhibitNoStrict, ProhibitStringyEval);
 		no strict;      # default to no strict for the eval
 		no warnings;    # default to no warnings for the eval
 		my $ret = eval(mkfuncsrc(@_));
 		$@ =~ s/\(eval \d+\) //g if $@;
 		return $ret;
-		## use critic
 	}
 
 	1;
diff --git a/src/pl/plperl/plc_trusted.pl b/src/pl/plperl/plc_trusted.pl
index 38255b4..cd61882 100644
--- a/src/pl/plperl/plc_trusted.pl
+++ b/src/pl/plperl/plc_trusted.pl
@@ -1,6 +1,6 @@
 #  src/pl/plperl/plc_trusted.pl
 
-package PostgreSQL::InServer::safe;  ## no critic (RequireFilenameMatchesPackage);
+package PostgreSQL::InServer::safe;
 
 # Load widely useful pragmas into plperl to make them available.
 #
diff --git a/src/pl/plperl/text2macro.pl b/src/pl/plperl/text2macro.pl
index e681fca..c88e5ec 100644
--- a/src/pl/plperl/text2macro.pl
+++ b/src/pl/plperl/text2macro.pl
@@ -49,7 +49,7 @@ for my $src_file (@ARGV)
 
 	(my $macro = $src_file) =~ s/ .*? (\w+) (?:\.\w+) $/$1/x;
 
-	open my $src_fh, '<', $src_file
+	open my $src_fh, $src_file    # not 3-arg form
 	  or die "Can't open $src_file: $!";
 
 	printf qq{#define %s%s \\\n},
@@ -80,19 +80,19 @@ sub selftest
 	my $tmp    = "text2macro_tmp";
 	my $string = q{a '' '\\'' "" "\\"" "\\\\" "\\\\n" b};
 
-	open my $fh, '>', "$tmp.pl" or die;
+	open my $fh, ">$tmp.pl" or die;
 	print $fh $string;
 	close $fh;
 
 	system("perl $0 --name=X $tmp.pl > $tmp.c") == 0 or die;
-	open $fh, '>>', "$tmp.c";
+	open $fh, ">>$tmp.c";
 	print $fh "#include <stdio.h>\n";
 	print $fh "int main() { puts(X); return 0; }\n";
 	close $fh;
 	system("cat -n $tmp.c");
 
 	system("make $tmp") == 0 or die;
-	open $fh, '<', "./$tmp |" or die;
+	open $fh, "./$tmp |" or die;
 	my $result = <$fh>;
 	unlink <$tmp.*>;
 
diff --git a/src/pl/plpgsql/src/generate-plerrcodes.pl b/src/pl/plpgsql/src/generate-plerrcodes.pl
index eb135bc..6a676c0 100644
--- a/src/pl/plpgsql/src/generate-plerrcodes.pl
+++ b/src/pl/plpgsql/src/generate-plerrcodes.pl
@@ -10,7 +10,7 @@ print
   "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n";
 print "/* there is deliberately not an #ifndef PLERRCODES_H here */\n";
 
-open my $errcodes, '<', $ARGV[0] or die;
+open my $errcodes, $ARGV[0] or die;
 
 while (<$errcodes>)
 {
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index c27935b..4ffbc35 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -4710,7 +4710,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
 
 				/*
 				 * Evaluate the subscripts, switch into left-to-right order.
-				 * Like the expression built by ExecInitArrayRef(), complain
+				 * Like the expression built by ExecInitSubscriptingRef(), complain
 				 * if any subscript is null.
 				 */
 				for (i = 0; i < nsubscripts; i++)
@@ -4759,7 +4759,7 @@ exec_assign_value(PLpgSQL_execstate *estate,
 				 * fixed-length array types we skip the assignment.  We can't
 				 * support assignment of a null entry into a fixed-length
 				 * array, either, so that's a no-op too.  This is all ugly but
-				 * corresponds to the current behavior of execExpr*.c.
+				 * corresponds to the current behavior of ExecEvalSubscriptingRef().
 				 */
 				if (arrayelem->arraytyplen > 0 &&		/* fixed-length array? */
 					(oldarrayisnull || isNull))
@@ -6467,9 +6467,9 @@ exec_simple_check_node(Node *node)
 		case T_Param:
 			return TRUE;
 
-		case T_ArrayRef:
+		case T_SubscriptingRef:
 			{
-				ArrayRef   *expr = (ArrayRef *) node;
+				SubscriptingRef   *expr = (SubscriptingRef *) node;
 
 				if (!exec_simple_check_node((Node *) expr->refupperindexpr))
 					return FALSE;
diff --git a/src/pl/plpython/expected/plpython_spi.out b/src/pl/plpython/expected/plpython_spi.out
index e54dca9..0d78ca1 100644
--- a/src/pl/plpython/expected/plpython_spi.out
+++ b/src/pl/plpython/expected/plpython_spi.out
@@ -31,19 +31,6 @@ except Exception, ex:
 return None
 '
 	LANGUAGE plpythonu;
-CREATE FUNCTION spi_prepared_plan_test_two(a text) RETURNS text
-	AS
-'if "myplan" not in SD:
-	q = "SELECT count(*) FROM users WHERE lname = $1"
-	SD["myplan"] = plpy.prepare(q, [ "text" ])
-try:
-	rv = SD["myplan"].execute([a])
-	return "there are " + str(rv[0]["count"]) + " " + str(a) + "s"
-except Exception, ex:
-	plpy.error(str(ex))
-return None
-'
-	LANGUAGE plpythonu;
 CREATE FUNCTION spi_prepared_plan_test_nested(a text) RETURNS text
 	AS
 'if "myplan" not in SD:
@@ -93,8 +80,8 @@ select spi_prepared_plan_test_one('doe');
  there are 3 does
 (1 row)
 
-select spi_prepared_plan_test_two('smith');
- spi_prepared_plan_test_two 
+select spi_prepared_plan_test_one('smith');
+ spi_prepared_plan_test_one 
 ----------------------------
  there are 1 smiths
 (1 row)
@@ -385,7 +372,7 @@ plan = plpy.prepare(
     ["text"])
 for row in plpy.cursor(plan, ["w"]):
     yield row['fname']
-for row in plan.cursor(["j"]):
+for row in plpy.cursor(plan, ["j"]):
     yield row['fname']
 $$ LANGUAGE plpythonu;
 CREATE FUNCTION cursor_plan_wrong_args() RETURNS SETOF text AS $$
diff --git a/src/pl/plpython/generate-spiexceptions.pl b/src/pl/plpython/generate-spiexceptions.pl
index a9ee960..ab0fa4a 100644
--- a/src/pl/plpython/generate-spiexceptions.pl
+++ b/src/pl/plpython/generate-spiexceptions.pl
@@ -10,7 +10,7 @@ print
   "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n";
 print "/* there is deliberately not an #ifndef SPIEXCEPTIONS_H here */\n";
 
-open my $errcodes, '<', $ARGV[0] or die;
+open my $errcodes, $ARGV[0] or die;
 
 while (<$errcodes>)
 {
diff --git a/src/pl/plpython/plpy_cursorobject.c b/src/pl/plpython/plpy_cursorobject.c
index 18e689f..7bb8992 100644
--- a/src/pl/plpython/plpy_cursorobject.c
+++ b/src/pl/plpython/plpy_cursorobject.c
@@ -25,6 +25,7 @@
 
 
 static PyObject *PLy_cursor_query(const char *query);
+static PyObject *PLy_cursor_plan(PyObject *ob, PyObject *args);
 static void PLy_cursor_dealloc(PyObject *arg);
 static PyObject *PLy_cursor_iternext(PyObject *self);
 static PyObject *PLy_cursor_fetch(PyObject *self, PyObject *args);
@@ -159,7 +160,7 @@ PLy_cursor_query(const char *query)
 	return (PyObject *) cursor;
 }
 
-PyObject *
+static PyObject *
 PLy_cursor_plan(PyObject *ob, PyObject *args)
 {
 	PLyCursorObject *cursor;
diff --git a/src/pl/plpython/plpy_cursorobject.h b/src/pl/plpython/plpy_cursorobject.h
index ef23865..c73033c 100644
--- a/src/pl/plpython/plpy_cursorobject.h
+++ b/src/pl/plpython/plpy_cursorobject.h
@@ -19,6 +19,5 @@ typedef struct PLyCursorObject
 
 extern void PLy_cursor_init_type(void);
 extern PyObject *PLy_cursor(PyObject *self, PyObject *args);
-extern PyObject *PLy_cursor_plan(PyObject *ob, PyObject *args);
 
 #endif   /* PLPY_CURSOROBJECT_H */
diff --git a/src/pl/plpython/plpy_planobject.c b/src/pl/plpython/plpy_planobject.c
index 390b4e9..16c39a0 100644
--- a/src/pl/plpython/plpy_planobject.c
+++ b/src/pl/plpython/plpy_planobject.c
@@ -10,15 +10,11 @@
 
 #include "plpy_planobject.h"
 
-#include "plpy_cursorobject.h"
 #include "plpy_elog.h"
-#include "plpy_spi.h"
 #include "utils/memutils.h"
 
 
 static void PLy_plan_dealloc(PyObject *arg);
-static PyObject *PLy_plan_cursor(PyObject *self, PyObject *args);
-static PyObject *PLy_plan_execute(PyObject *self, PyObject *args);
 static PyObject *PLy_plan_status(PyObject *self, PyObject *args);
 
 static char PLy_plan_doc[] = {
@@ -26,8 +22,6 @@ static char PLy_plan_doc[] = {
 };
 
 static PyMethodDef PLy_plan_methods[] = {
-	{"cursor", PLy_plan_cursor, METH_VARARGS, NULL},
-	{"execute", PLy_plan_execute, METH_VARARGS, NULL},
 	{"status", PLy_plan_status, METH_VARARGS, NULL},
 	{NULL, NULL, 0, NULL}
 };
@@ -118,31 +112,6 @@ PLy_plan_dealloc(PyObject *arg)
 
 
 static PyObject *
-PLy_plan_cursor(PyObject *self, PyObject *args)
-{
-	PyObject   *planargs = NULL;
-
-	if (!PyArg_ParseTuple(args, "|O", &planargs))
-		return NULL;
-
-	return PLy_cursor_plan(self, planargs);
-}
-
-
-static PyObject *
-PLy_plan_execute(PyObject *self, PyObject *args)
-{
-	PyObject   *list = NULL;
-	long		limit = 0;
-
-	if (!PyArg_ParseTuple(args, "|Ol", &list, &limit))
-		return NULL;
-
-	return PLy_spi_execute_plan(self, list, limit);
-}
-
-
-static PyObject *
 PLy_plan_status(PyObject *self, PyObject *args)
 {
 	if (PyArg_ParseTuple(args, ":status"))
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index c6856cc..07ab6a0 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -30,6 +30,7 @@
 
 
 static PyObject *PLy_spi_execute_query(char *query, long limit);
+static PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
 static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
 							 uint64 rows, int status);
 static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
@@ -192,7 +193,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
 	return NULL;
 }
 
-PyObject *
+static PyObject *
 PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
 {
 	volatile int nargs;
diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h
index 817a758..b042794 100644
--- a/src/pl/plpython/plpy_spi.h
+++ b/src/pl/plpython/plpy_spi.h
@@ -10,7 +10,6 @@
 
 extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args);
 extern PyObject *PLy_spi_execute(PyObject *self, PyObject *args);
-extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
 
 typedef struct PLyExceptionEntry
 {
diff --git a/src/pl/plpython/sql/plpython_spi.sql b/src/pl/plpython/sql/plpython_spi.sql
index fcf049c..7427de8 100644
--- a/src/pl/plpython/sql/plpython_spi.sql
+++ b/src/pl/plpython/sql/plpython_spi.sql
@@ -37,20 +37,6 @@ return None
 '
 	LANGUAGE plpythonu;
 
-CREATE FUNCTION spi_prepared_plan_test_two(a text) RETURNS text
-	AS
-'if "myplan" not in SD:
-	q = "SELECT count(*) FROM users WHERE lname = $1"
-	SD["myplan"] = plpy.prepare(q, [ "text" ])
-try:
-	rv = SD["myplan"].execute([a])
-	return "there are " + str(rv[0]["count"]) + " " + str(a) + "s"
-except Exception, ex:
-	plpy.error(str(ex))
-return None
-'
-	LANGUAGE plpythonu;
-
 CREATE FUNCTION spi_prepared_plan_test_nested(a text) RETURNS text
 	AS
 'if "myplan" not in SD:
@@ -93,7 +79,7 @@ return a + r
 --
 select nested_call_one('pass this along');
 select spi_prepared_plan_test_one('doe');
-select spi_prepared_plan_test_two('smith');
+select spi_prepared_plan_test_one('smith');
 select spi_prepared_plan_test_nested('smith');
 
 SELECT join_sequences(sequences) FROM sequences;
@@ -289,7 +275,7 @@ plan = plpy.prepare(
     ["text"])
 for row in plpy.cursor(plan, ["w"]):
     yield row['fname']
-for row in plan.cursor(["j"]):
+for row in plpy.cursor(plan, ["j"]):
     yield row['fname']
 $$ LANGUAGE plpythonu;
 
diff --git a/src/pl/tcl/generate-pltclerrcodes.pl b/src/pl/tcl/generate-pltclerrcodes.pl
index b4e429a..e20a0af 100644
--- a/src/pl/tcl/generate-pltclerrcodes.pl
+++ b/src/pl/tcl/generate-pltclerrcodes.pl
@@ -10,7 +10,7 @@ print
   "/* autogenerated from src/backend/utils/errcodes.txt, do not edit */\n";
 print "/* there is deliberately not an #ifndef PLTCLERRCODES_H here */\n";
 
-open my $errcodes, '<', $ARGV[0] or die;
+open my $errcodes, $ARGV[0] or die;
 
 while (<$errcodes>)
 {
diff --git a/src/test/locale/sort-test.pl b/src/test/locale/sort-test.pl
index b8fc93a..cb7e493 100755
--- a/src/test/locale/sort-test.pl
+++ b/src/test/locale/sort-test.pl
@@ -3,9 +3,9 @@
 use strict;
 use locale;
 
-open(my $in_fh, '<', $ARGV[0]) || die;
-chop(my (@words) = <$in_fh>);
-close($in_fh);
+open(INFILE, "<$ARGV[0]");
+chop(my (@words) = <INFILE>);
+close(INFILE);
 
 $" = "\n";
 my (@result) = sort @words;
diff --git a/src/test/perl/PostgresNode.pm b/src/test/perl/PostgresNode.pm
index cb84f1f..5ef007f 100644
--- a/src/test/perl/PostgresNode.pm
+++ b/src/test/perl/PostgresNode.pm
@@ -347,7 +347,7 @@ sub set_replication_conf
 	$self->host eq $test_pghost
 	  or die "set_replication_conf only works with the default host";
 
-	open my $hba, '>>', "$pgdata/pg_hba.conf";
+	open my $hba, ">>$pgdata/pg_hba.conf";
 	print $hba "\n# Allow replication (set up by PostgresNode.pm)\n";
 	if ($TestLib::windows_os)
 	{
@@ -399,7 +399,7 @@ sub init
 		@{ $params{extra} });
 	TestLib::system_or_bail($ENV{PG_REGRESS}, '--config-auth', $pgdata);
 
-	open my $conf, '>>', "$pgdata/postgresql.conf";
+	open my $conf, ">>$pgdata/postgresql.conf";
 	print $conf "\n# Added by PostgresNode.pm\n";
 	print $conf "fsync = off\n";
 	print $conf "log_line_prefix = '%m [%p] %q%a '\n";
@@ -551,7 +551,7 @@ sub _backup_fs
 		$backup_path,
 		filterfn => sub {
 			my $src = shift;
-			return ($src ne 'log' and $src ne 'postmaster.pid');
+			return ($src ne 'pg_log' and $src ne 'postmaster.pid');
 		});
 
 	if ($hot)
@@ -820,7 +820,7 @@ sub _update_pid
 	# If we can open the PID file, read its first line and that's the PID we
 	# want.  If the file cannot be opened, presumably the server is not
 	# running; don't be noisy in that case.
-	if (open my $pidfile, '<', $self->data_dir . "/postmaster.pid")
+	if (open my $pidfile, $self->data_dir . "/postmaster.pid")
 	{
 		chomp($self->{_pid} = <$pidfile>);
 		print "# Postmaster PID for node \"$name\" is $self->{_pid}\n";
@@ -1357,7 +1357,7 @@ sub lsn
 	chomp($result);
 	if ($result eq '')
 	{
-		return;
+		return undef;
 	}
 	else
 	{
diff --git a/src/test/perl/RecursiveCopy.pm b/src/test/perl/RecursiveCopy.pm
index 28ecaf6..3e98813 100644
--- a/src/test/perl/RecursiveCopy.pm
+++ b/src/test/perl/RecursiveCopy.pm
@@ -48,9 +48,9 @@ attempted.
 
  RecursiveCopy::copypath('/some/path', '/empty/dir',
     filterfn => sub {
-		# omit log/ and contents
+		# omit pg_log and contents
 		my $src = shift;
-		return $src ne 'log';
+		return $src ne 'pg_log';
 	}
  );
 
diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm
index ae8d178..d22957c 100644
--- a/src/test/perl/TestLib.pm
+++ b/src/test/perl/TestLib.pm
@@ -84,14 +84,14 @@ INIT
 	$test_logfile = basename($0);
 	$test_logfile =~ s/\.[^.]+$//;
 	$test_logfile = "$log_path/regress_log_$test_logfile";
-	open my $testlog, '>', $test_logfile
+	open TESTLOG, '>', $test_logfile
 	  or die "could not open STDOUT to logfile \"$test_logfile\": $!";
 
 	# Hijack STDOUT and STDERR to the log file
-	open(my $orig_stdout, '>&', \*STDOUT);
-	open(my $orig_stderr, '>&', \*STDERR);
-	open(STDOUT, '>&', $testlog);
-	open(STDERR, '>&', $testlog);
+	open(ORIG_STDOUT, ">&STDOUT");
+	open(ORIG_STDERR, ">&STDERR");
+	open(STDOUT,      ">&TESTLOG");
+	open(STDERR,      ">&TESTLOG");
 
 	# The test output (ok ...) needs to be printed to the original STDOUT so
 	# that the 'prove' program can parse it, and display it to the user in
@@ -99,16 +99,16 @@ INIT
 	# in the log.
 	my $builder = Test::More->builder;
 	my $fh      = $builder->output;
-	tie *$fh, "SimpleTee", $orig_stdout, $testlog;
+	tie *$fh, "SimpleTee", *ORIG_STDOUT, *TESTLOG;
 	$fh = $builder->failure_output;
-	tie *$fh, "SimpleTee", $orig_stderr, $testlog;
+	tie *$fh, "SimpleTee", *ORIG_STDERR, *TESTLOG;
 
 	# Enable auto-flushing for all the file handles. Stderr and stdout are
 	# redirected to the same file, and buffering causes the lines to appear
 	# in the log in confusing order.
 	autoflush STDOUT 1;
 	autoflush STDERR 1;
-	autoflush $testlog 1;
+	autoflush TESTLOG 1;
 }
 
 END
diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out
index c730563..ee219ae 100644
--- a/src/test/regress/expected/arrays.out
+++ b/src/test/regress/expected/arrays.out
@@ -190,9 +190,9 @@ select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
 --
 -- check subscription corner cases
 --
--- More subscripts than MAXDIMS(6)
-SELECT ('{}'::int[])[1][2][3][4][5][6][7];
-ERROR:  number of array dimensions (7) exceeds the maximum allowed (6)
+-- More subscripts than MAXDIMS(12)
+SELECT ('{}'::int[])[1][2][3][4][5][6][7][8][9][10][11][12][13];
+ERROR:  number of array dimensions (13) exceeds the maximum allowed (12)
 -- NULL index yields NULL when selecting
 SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1];
  int4 
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out
index fd618af..b0886da 100644
--- a/src/test/regress/expected/groupingsets.out
+++ b/src/test/regress/expected/groupingsets.out
@@ -13,13 +13,6 @@ copy gstest2 from stdin;
 create temp table gstest3 (a integer, b integer, c integer, d integer);
 copy gstest3 from stdin;
 alter table gstest3 add primary key (a);
-create temp table gstest4(id integer, v integer,
-                          unhashable_col bit(4), unsortable_col xid);
-insert into gstest4
-values (1,1,b'0000','1'), (2,2,b'0001','1'),
-       (3,4,b'0010','2'), (4,8,b'0011','2'),
-       (5,16,b'0000','2'), (6,32,b'0001','2'),
-       (7,64,b'0010','1'), (8,128,b'0011','1');
 create temp table gstest_empty (a integer, b integer, v integer);
 create function gstest_data(v integer, out a integer, out b integer)
   returns setof record
@@ -29,7 +22,6 @@ create function gstest_data(v integer, out a integer, out b integer)
     end;
   $f$ language plpgsql;
 -- basic functionality
-set enable_hashagg = false;  -- test hashing explicitly later
 -- simple rollup with multiple plain aggregates, with and without ordering
 -- (and with ordering differing from grouping)
 select a, b, grouping(a,b), sum(v), count(*), max(v)
@@ -470,7 +462,7 @@ select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a);
 
 -- Tests for chained aggregates
 select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
+  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2));
  a | b | grouping | sum | count | max 
 ---+---+----------+-----+-------+-----
  1 | 1 |        0 |  21 |     2 |  11
@@ -481,18 +473,18 @@ select a, b, grouping(a,b), sum(v), count(*), max(v)
  3 | 4 |        0 |  17 |     1 |  17
  4 | 1 |        0 |  37 |     2 |  19
    |   |        3 |  21 |     2 |  11
-   |   |        3 |  21 |     2 |  11
-   |   |        3 |  25 |     2 |  13
    |   |        3 |  25 |     2 |  13
    |   |        3 |  14 |     1 |  14
-   |   |        3 |  14 |     1 |  14
    |   |        3 |  15 |     1 |  15
-   |   |        3 |  15 |     1 |  15
-   |   |        3 |  16 |     1 |  16
    |   |        3 |  16 |     1 |  16
    |   |        3 |  17 |     1 |  17
-   |   |        3 |  17 |     1 |  17
    |   |        3 |  37 |     2 |  19
+   |   |        3 |  21 |     2 |  11
+   |   |        3 |  25 |     2 |  13
+   |   |        3 |  14 |     1 |  14
+   |   |        3 |  15 |     1 |  15
+   |   |        3 |  16 |     1 |  16
+   |   |        3 |  17 |     1 |  17
    |   |        3 |  37 |     2 |  19
 (21 rows)
 
@@ -855,599 +847,4 @@ select sum(ten) from onek group by rollup(four::text), two order by 1;
  2500
 (6 rows)
 
--- hashing support
-set enable_hashagg = true;
--- failure cases
-select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col);
-ERROR:  could not implement GROUP BY
-DETAIL:  Some of the datatypes only support hashing, while others only support sorting.
-select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id));
-ERROR:  could not implement GROUP BY
-DETAIL:  Some of the datatypes only support hashing, while others only support sorting.
--- simple cases
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
- a | b | grouping | sum | count | max 
----+---+----------+-----+-------+-----
- 1 |   |        1 |  60 |     5 |  14
- 2 |   |        1 |  15 |     1 |  15
- 3 |   |        1 |  33 |     2 |  17
- 4 |   |        1 |  37 |     2 |  19
-   | 1 |        2 |  58 |     4 |  19
-   | 2 |        2 |  25 |     2 |  13
-   | 3 |        2 |  45 |     3 |  16
-   | 4 |        2 |  17 |     1 |  17
-(8 rows)
-
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
-                                               QUERY PLAN                                               
---------------------------------------------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  HashAggregate
-         Hash Key: "*VALUES*".column1
-         Hash Key: "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(6 rows)
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by cube(a,b) order by 3,1,2;
- a | b | grouping | sum | count | max 
----+---+----------+-----+-------+-----
- 1 | 1 |        0 |  21 |     2 |  11
- 1 | 2 |        0 |  25 |     2 |  13
- 1 | 3 |        0 |  14 |     1 |  14
- 2 | 3 |        0 |  15 |     1 |  15
- 3 | 3 |        0 |  16 |     1 |  16
- 3 | 4 |        0 |  17 |     1 |  17
- 4 | 1 |        0 |  37 |     2 |  19
- 1 |   |        1 |  60 |     5 |  14
- 2 |   |        1 |  15 |     1 |  15
- 3 |   |        1 |  33 |     2 |  17
- 4 |   |        1 |  37 |     2 |  19
-   | 1 |        2 |  58 |     4 |  19
-   | 2 |        2 |  25 |     2 |  13
-   | 3 |        2 |  45 |     3 |  16
-   | 4 |        2 |  17 |     1 |  17
-   |   |        3 | 145 |    10 |  19
-(16 rows)
-
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by cube(a,b) order by 3,1,2;
-                                               QUERY PLAN                                               
---------------------------------------------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), "*VALUES*".column1, "*VALUES*".column2
-   ->  MixedAggregate
-         Hash Key: "*VALUES*".column1, "*VALUES*".column2
-         Hash Key: "*VALUES*".column1
-         Hash Key: "*VALUES*".column2
-         Group Key: ()
-         ->  Values Scan on "*VALUES*"
-(8 rows)
-
--- shouldn't try and hash
-explain (costs off)
-  select a, b, grouping(a,b), array_agg(v order by v)
-    from gstest1 group by cube(a,b);
-                        QUERY PLAN                        
-----------------------------------------------------------
- GroupAggregate
-   Group Key: "*VALUES*".column1, "*VALUES*".column2
-   Group Key: "*VALUES*".column1
-   Group Key: ()
-   Sort Key: "*VALUES*".column2
-     Group Key: "*VALUES*".column2
-   ->  Sort
-         Sort Key: "*VALUES*".column1, "*VALUES*".column2
-         ->  Values Scan on "*VALUES*"
-(9 rows)
-
--- mixed hashable/sortable cases
-select unhashable_col, unsortable_col,
-       grouping(unhashable_col, unsortable_col),
-       count(*), sum(v)
-  from gstest4 group by grouping sets ((unhashable_col),(unsortable_col))
- order by 3, 5;
- unhashable_col | unsortable_col | grouping | count | sum 
-----------------+----------------+----------+-------+-----
- 0000           |                |        1 |     2 |  17
- 0001           |                |        1 |     2 |  34
- 0010           |                |        1 |     2 |  68
- 0011           |                |        1 |     2 | 136
-                |              2 |        2 |     4 |  60
-                |              1 |        2 |     4 | 195
-(6 rows)
-
-explain (costs off)
-  select unhashable_col, unsortable_col,
-         grouping(unhashable_col, unsortable_col),
-         count(*), sum(v)
-    from gstest4 group by grouping sets ((unhashable_col),(unsortable_col))
-   order by 3,5;
-                            QUERY PLAN                            
-------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
-   ->  MixedAggregate
-         Hash Key: unsortable_col
-         Group Key: unhashable_col
-         ->  Sort
-               Sort Key: unhashable_col
-               ->  Seq Scan on gstest4
-(8 rows)
-
-select unhashable_col, unsortable_col,
-       grouping(unhashable_col, unsortable_col),
-       count(*), sum(v)
-  from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col))
- order by 3,5;
- unhashable_col | unsortable_col | grouping | count | sum 
-----------------+----------------+----------+-------+-----
- 0000           |                |        1 |     1 |   1
- 0001           |                |        1 |     1 |   2
- 0010           |                |        1 |     1 |   4
- 0011           |                |        1 |     1 |   8
- 0000           |                |        1 |     1 |  16
- 0001           |                |        1 |     1 |  32
- 0010           |                |        1 |     1 |  64
- 0011           |                |        1 |     1 | 128
-                |              1 |        2 |     1 |   1
-                |              1 |        2 |     1 |   2
-                |              2 |        2 |     1 |   4
-                |              2 |        2 |     1 |   8
-                |              2 |        2 |     1 |  16
-                |              2 |        2 |     1 |  32
-                |              1 |        2 |     1 |  64
-                |              1 |        2 |     1 | 128
-(16 rows)
-
-explain (costs off)
-  select unhashable_col, unsortable_col,
-         grouping(unhashable_col, unsortable_col),
-         count(*), sum(v)
-    from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col))
-   order by 3,5;
-                            QUERY PLAN                            
-------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING(unhashable_col, unsortable_col)), (sum(v))
-   ->  MixedAggregate
-         Hash Key: v, unsortable_col
-         Group Key: v, unhashable_col
-         ->  Sort
-               Sort Key: v, unhashable_col
-               ->  Seq Scan on gstest4
-(8 rows)
-
--- empty input: first is 0 rows, second 1, third 3 etc.
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
- a | b | sum | count 
----+---+-----+-------
-(0 rows)
-
-explain (costs off)
-  select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
-           QUERY PLAN           
---------------------------------
- HashAggregate
-   Hash Key: a, b
-   Hash Key: a
-   ->  Seq Scan on gstest_empty
-(4 rows)
-
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
- a | b | sum | count 
----+---+-----+-------
-   |   |     |     0
-(1 row)
-
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
- a | b | sum | count 
----+---+-----+-------
-   |   |     |     0
-   |   |     |     0
-   |   |     |     0
-(3 rows)
-
-explain (costs off)
-  select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
-           QUERY PLAN           
---------------------------------
- MixedAggregate
-   Hash Key: a, b
-   Group Key: ()
-   Group Key: ()
-   Group Key: ()
-   ->  Seq Scan on gstest_empty
-(6 rows)
-
-select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
- sum | count 
------+-------
-     |     0
-     |     0
-     |     0
-(3 rows)
-
-explain (costs off)
-  select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
-           QUERY PLAN           
---------------------------------
- Aggregate
-   Group Key: ()
-   Group Key: ()
-   Group Key: ()
-   ->  Seq Scan on gstest_empty
-(5 rows)
-
--- check that functionally dependent cols are not nulled
-select a, d, grouping(a,b,c)
-  from gstest3
- group by grouping sets ((a,b), (a,c));
- a | d | grouping 
----+---+----------
- 1 | 1 |        1
- 2 | 2 |        1
- 1 | 1 |        2
- 2 | 2 |        2
-(4 rows)
-
-explain (costs off)
-  select a, d, grouping(a,b,c)
-    from gstest3
-   group by grouping sets ((a,b), (a,c));
-        QUERY PLAN         
----------------------------
- HashAggregate
-   Hash Key: a, b
-   Hash Key: a, c
-   ->  Seq Scan on gstest3
-(4 rows)
-
--- simple rescan tests
-select a, b, sum(v.x)
-  from (values (1),(2)) v(x), gstest_data(v.x)
- group by grouping sets (a,b);
- a | b | sum 
----+---+-----
- 2 |   |   6
- 1 |   |   3
-   | 2 |   3
-   | 3 |   3
-   | 1 |   3
-(5 rows)
-
-explain (costs off)
-  select a, b, sum(v.x)
-    from (values (1),(2)) v(x), gstest_data(v.x)
-   group by grouping sets (a,b);
-                QUERY PLAN                
-------------------------------------------
- HashAggregate
-   Hash Key: gstest_data.a
-   Hash Key: gstest_data.b
-   ->  Nested Loop
-         ->  Values Scan on "*VALUES*"
-         ->  Function Scan on gstest_data
-(6 rows)
-
-select *
-  from (values (1),(2)) v(x),
-       lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
-ERROR:  aggregate functions are not allowed in FROM clause of their own query level
-LINE 3:        lateral (select a, b, sum(v.x) from gstest_data(v.x) ...
-                                     ^
-explain (costs off)
-  select *
-    from (values (1),(2)) v(x),
-         lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
-ERROR:  aggregate functions are not allowed in FROM clause of their own query level
-LINE 4:          lateral (select a, b, sum(v.x) from gstest_data(v.x...
-                                       ^
--- Tests for chained aggregates
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
- a | b | grouping | sum | count | max 
----+---+----------+-----+-------+-----
- 1 | 1 |        0 |  21 |     2 |  11
- 1 | 2 |        0 |  25 |     2 |  13
- 1 | 3 |        0 |  14 |     1 |  14
- 2 | 3 |        0 |  15 |     1 |  15
- 3 | 3 |        0 |  16 |     1 |  16
- 3 | 4 |        0 |  17 |     1 |  17
- 4 | 1 |        0 |  37 |     2 |  19
-   |   |        3 |  21 |     2 |  11
-   |   |        3 |  21 |     2 |  11
-   |   |        3 |  25 |     2 |  13
-   |   |        3 |  25 |     2 |  13
-   |   |        3 |  14 |     1 |  14
-   |   |        3 |  14 |     1 |  14
-   |   |        3 |  15 |     1 |  15
-   |   |        3 |  15 |     1 |  15
-   |   |        3 |  16 |     1 |  16
-   |   |        3 |  16 |     1 |  16
-   |   |        3 |  17 |     1 |  17
-   |   |        3 |  17 |     1 |  17
-   |   |        3 |  37 |     2 |  19
-   |   |        3 |  37 |     2 |  19
-(21 rows)
-
-explain (costs off)
-  select a, b, grouping(a,b), sum(v), count(*), max(v)
-    from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
-                                        QUERY PLAN                                         
--------------------------------------------------------------------------------------------
- Sort
-   Sort Key: (GROUPING("*VALUES*".column1, "*VALUES*".column2)), (max("*VALUES*".column3))
-   ->  HashAggregate
-         Hash Key: "*VALUES*".column1, "*VALUES*".column2
-         Hash Key: ("*VALUES*".column1 + 1), ("*VALUES*".column2 + 1)
-         Hash Key: ("*VALUES*".column1 + 2), ("*VALUES*".column2 + 2)
-         ->  Values Scan on "*VALUES*"
-(7 rows)
-
-select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
-  from gstest2 group by cube (a,b) order by rsum, a, b;
- a | b | sum | rsum 
----+---+-----+------
- 1 | 1 |   8 |    8
- 1 | 2 |   2 |   10
- 1 |   |  10 |   20
- 2 | 2 |   2 |   22
- 2 |   |   2 |   24
-   | 1 |   8 |   32
-   | 2 |   4 |   36
-   |   |  12 |   48
-(8 rows)
-
-explain (costs off)
-  select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
-    from gstest2 group by cube (a,b) order by rsum, a, b;
-                 QUERY PLAN                  
----------------------------------------------
- Sort
-   Sort Key: (sum((sum(c))) OVER (?)), a, b
-   ->  WindowAgg
-         ->  Sort
-               Sort Key: a, b
-               ->  MixedAggregate
-                     Hash Key: a, b
-                     Hash Key: a
-                     Hash Key: b
-                     Group Key: ()
-                     ->  Seq Scan on gstest2
-(11 rows)
-
-select a, b, sum(v.x)
-  from (values (1),(2)) v(x), gstest_data(v.x)
- group by cube (a,b) order by a,b;
- a | b | sum 
----+---+-----
- 1 | 1 |   1
- 1 | 2 |   1
- 1 | 3 |   1
- 1 |   |   3
- 2 | 1 |   2
- 2 | 2 |   2
- 2 | 3 |   2
- 2 |   |   6
-   | 1 |   3
-   | 2 |   3
-   | 3 |   3
-   |   |   9
-(12 rows)
-
-explain (costs off)
-  select a, b, sum(v.x)
-    from (values (1),(2)) v(x), gstest_data(v.x)
-   group by cube (a,b) order by a,b;
-                   QUERY PLAN                   
-------------------------------------------------
- Sort
-   Sort Key: gstest_data.a, gstest_data.b
-   ->  MixedAggregate
-         Hash Key: gstest_data.a, gstest_data.b
-         Hash Key: gstest_data.a
-         Hash Key: gstest_data.b
-         Group Key: ()
-         ->  Nested Loop
-               ->  Values Scan on "*VALUES*"
-               ->  Function Scan on gstest_data
-(10 rows)
-
--- More rescan tests
-select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten;
- a | a | four | ten | count 
----+---+------+-----+-------
- 1 | 1 |    0 |   0 |    50
- 1 | 1 |    0 |   2 |    50
- 1 | 1 |    0 |   4 |    50
- 1 | 1 |    0 |   6 |    50
- 1 | 1 |    0 |   8 |    50
- 1 | 1 |    0 |     |   250
- 1 | 1 |    1 |   1 |    50
- 1 | 1 |    1 |   3 |    50
- 1 | 1 |    1 |   5 |    50
- 1 | 1 |    1 |   7 |    50
- 1 | 1 |    1 |   9 |    50
- 1 | 1 |    1 |     |   250
- 1 | 1 |    2 |   0 |    50
- 1 | 1 |    2 |   2 |    50
- 1 | 1 |    2 |   4 |    50
- 1 | 1 |    2 |   6 |    50
- 1 | 1 |    2 |   8 |    50
- 1 | 1 |    2 |     |   250
- 1 | 1 |    3 |   1 |    50
- 1 | 1 |    3 |   3 |    50
- 1 | 1 |    3 |   5 |    50
- 1 | 1 |    3 |   7 |    50
- 1 | 1 |    3 |   9 |    50
- 1 | 1 |    3 |     |   250
- 1 | 1 |      |   0 |   100
- 1 | 1 |      |   1 |   100
- 1 | 1 |      |   2 |   100
- 1 | 1 |      |   3 |   100
- 1 | 1 |      |   4 |   100
- 1 | 1 |      |   5 |   100
- 1 | 1 |      |   6 |   100
- 1 | 1 |      |   7 |   100
- 1 | 1 |      |   8 |   100
- 1 | 1 |      |   9 |   100
- 1 | 1 |      |     |  1000
- 2 | 2 |    0 |   0 |    50
- 2 | 2 |    0 |   2 |    50
- 2 | 2 |    0 |   4 |    50
- 2 | 2 |    0 |   6 |    50
- 2 | 2 |    0 |   8 |    50
- 2 | 2 |    0 |     |   250
- 2 | 2 |    1 |   1 |    50
- 2 | 2 |    1 |   3 |    50
- 2 | 2 |    1 |   5 |    50
- 2 | 2 |    1 |   7 |    50
- 2 | 2 |    1 |   9 |    50
- 2 | 2 |    1 |     |   250
- 2 | 2 |    2 |   0 |    50
- 2 | 2 |    2 |   2 |    50
- 2 | 2 |    2 |   4 |    50
- 2 | 2 |    2 |   6 |    50
- 2 | 2 |    2 |   8 |    50
- 2 | 2 |    2 |     |   250
- 2 | 2 |    3 |   1 |    50
- 2 | 2 |    3 |   3 |    50
- 2 | 2 |    3 |   5 |    50
- 2 | 2 |    3 |   7 |    50
- 2 | 2 |    3 |   9 |    50
- 2 | 2 |    3 |     |   250
- 2 | 2 |      |   0 |   100
- 2 | 2 |      |   1 |   100
- 2 | 2 |      |   2 |   100
- 2 | 2 |      |   3 |   100
- 2 | 2 |      |   4 |   100
- 2 | 2 |      |   5 |   100
- 2 | 2 |      |   6 |   100
- 2 | 2 |      |   7 |   100
- 2 | 2 |      |   8 |   100
- 2 | 2 |      |   9 |   100
- 2 | 2 |      |     |  1000
-(70 rows)
-
-select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a);
-                                                                        array                                                                         
-------------------------------------------------------------------------------------------------------------------------------------------------------
- {"(1,0,0,250)","(1,0,2,250)","(1,0,,500)","(1,1,1,250)","(1,1,3,250)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)","(1,,,1000)"}
- {"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"}
-(2 rows)
-
--- Rescan logic changes when there are no empty grouping sets, so test
--- that too:
-select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten;
- a | a | four | ten | count 
----+---+------+-----+-------
- 1 | 1 |    0 |     |   250
- 1 | 1 |    1 |     |   250
- 1 | 1 |    2 |     |   250
- 1 | 1 |    3 |     |   250
- 1 | 1 |      |   0 |   100
- 1 | 1 |      |   1 |   100
- 1 | 1 |      |   2 |   100
- 1 | 1 |      |   3 |   100
- 1 | 1 |      |   4 |   100
- 1 | 1 |      |   5 |   100
- 1 | 1 |      |   6 |   100
- 1 | 1 |      |   7 |   100
- 1 | 1 |      |   8 |   100
- 1 | 1 |      |   9 |   100
- 2 | 2 |    0 |     |   250
- 2 | 2 |    1 |     |   250
- 2 | 2 |    2 |     |   250
- 2 | 2 |    3 |     |   250
- 2 | 2 |      |   0 |   100
- 2 | 2 |      |   1 |   100
- 2 | 2 |      |   2 |   100
- 2 | 2 |      |   3 |   100
- 2 | 2 |      |   4 |   100
- 2 | 2 |      |   5 |   100
- 2 | 2 |      |   6 |   100
- 2 | 2 |      |   7 |   100
- 2 | 2 |      |   8 |   100
- 2 | 2 |      |   9 |   100
-(28 rows)
-
-select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a);
-                                      array                                      
----------------------------------------------------------------------------------
- {"(1,0,,500)","(1,1,,500)","(1,,0,250)","(1,,1,250)","(1,,2,250)","(1,,3,250)"}
- {"(2,0,,500)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)"}
-(2 rows)
-
--- test the knapsack
-set enable_indexscan = false;
-set work_mem = '64kB';
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
-   Hash Key: two
-   Hash Key: four
-   Hash Key: ten
-   Hash Key: hundred
-   Group Key: unique1
-   Sort Key: twothousand
-     Group Key: twothousand
-   Sort Key: thousand
-     Group Key: thousand
-   ->  Sort
-         Sort Key: unique1
-         ->  Seq Scan on tenk1
-(13 rows)
-
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
-   Hash Key: two
-   Hash Key: four
-   Hash Key: ten
-   Hash Key: hundred
-   Group Key: unique1
-   ->  Sort
-         Sort Key: unique1
-         ->  Seq Scan on tenk1
-(9 rows)
-
-set work_mem = '384kB';
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-          QUERY PLAN           
--------------------------------
- MixedAggregate
-   Hash Key: two
-   Hash Key: four
-   Hash Key: ten
-   Hash Key: hundred
-   Hash Key: thousand
-   Group Key: unique1
-   Sort Key: twothousand
-     Group Key: twothousand
-   ->  Sort
-         Sort Key: unique1
-         ->  Seq Scan on tenk1
-(12 rows)
-
 -- end
diff --git a/src/test/regress/expected/insert.out b/src/test/regress/expected/insert.out
index 7fafa98..116854e 100644
--- a/src/test/regress/expected/insert.out
+++ b/src/test/regress/expected/insert.out
@@ -365,13 +365,6 @@ DETAIL:  Failing row contains (1, 2).
 insert into mlparted1 (a, b) values (2, 3);
 ERROR:  new row for relation "mlparted11" violates partition constraint
 DETAIL:  Failing row contains (3, 2).
--- check routing error through a list partitioned table when the key is null
-create table lparted_nonullpart (a int, b char) partition by list (b);
-create table lparted_nonullpart_a partition of lparted_nonullpart for values in ('a');
-insert into lparted_nonullpart values (1);
-ERROR:  no partition of relation "lparted_nonullpart" found for row
-DETAIL:  Partition key of the failing row contains (b) = (null).
-drop table lparted_nonullpart;
 -- check that RETURNING works correctly with tuple-routing
 alter table mlparted drop constraint check_b;
 create table mlparted12 partition of mlparted1 for values from (5) to (10);
diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out
index c90d381b..8d005fd 100644
--- a/src/test/regress/expected/insert_conflict.out
+++ b/src/test/regress/expected/insert_conflict.out
@@ -786,13 +786,3 @@ select * from selfconflict;
 (3 rows)
 
 drop table selfconflict;
--- check that the following works:
--- insert into partitioned_table on conflict do nothing
-create table parted_conflict_test (a int, b char) partition by list (a);
-create table parted_conflict_test_1 partition of parted_conflict_test for values in (1);
-insert into parted_conflict_test values (1, 'a') on conflict do nothing;
-insert into parted_conflict_test values (1, 'a') on conflict do nothing;
--- however, on conflict do update not supported yet
-insert into parted_conflict_test values (1) on conflict (a) do update set b = excluded.b where excluded.a = 1;
-ERROR:  there is no unique or exclusion constraint matching the ON CONFLICT specification
-drop table parted_conflict_test, parted_conflict_test_1;
diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out
index 8ec4150..6925265 100644
--- a/src/test/regress/expected/jsonb.out
+++ b/src/test/regress/expected/jsonb.out
@@ -3474,3 +3474,211 @@ HINT:  Try using the function jsonb_set to replace key value.
 select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true);
 ERROR:  cannot replace existing key
 HINT:  Try using the function jsonb_set to replace key value.
+-- jsonb subscript
+select ('123'::jsonb)['a'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('123'::jsonb)[0];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('{"a": 1}'::jsonb)['a'];
+ jsonb 
+-------
+ 1
+(1 row)
+
+select ('{"a": 1}'::jsonb)[0];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('{"a": 1}'::jsonb)['not_exist'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('[1, "2", null]'::jsonb)['a'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[0];
+ jsonb 
+-------
+ 1
+(1 row)
+
+select ('[1, "2", null]'::jsonb)['1'];
+ jsonb 
+-------
+ "2"
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1.0];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[2];
+ jsonb 
+-------
+ null
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[3];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[-2];
+ jsonb 
+-------
+ "2"
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1]['a'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('[1, "2", null]'::jsonb)[1][0];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b'];
+ jsonb 
+-------
+ "c"
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'];
+   jsonb   
+-----------
+ [1, 2, 3]
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1];
+ jsonb 
+-------
+ 2
+(1 row)
+
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1'];
+     jsonb     
+---------------
+ {"a2": "aaa"}
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2'];
+ jsonb 
+-------
+ "aaa"
+(1 row)
+
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3'];
+ jsonb 
+-------
+ 
+(1 row)
+
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'];
+         jsonb         
+-----------------------
+ ["aaa", "bbb", "ccc"]
+(1 row)
+
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2];
+ jsonb 
+-------
+ "ccc"
+(1 row)
+
+create TEMP TABLE test_jsonb_subscript (
+       id int,
+       test_json jsonb
+);
+insert into test_jsonb_subscript values
+(1, '{}'), -- empty jsonb
+(2, '{"key": "value"}'); -- jsonb with data
+-- update empty jsonb
+update test_jsonb_subscript set test_json['a'] = 1 where id = 1;
+select * from test_jsonb_subscript;
+ id |    test_json     
+----+------------------
+  2 | {"key": "value"}
+  1 | {"a": 1}
+(2 rows)
+
+-- update jsonb with some data
+update test_jsonb_subscript set test_json['a'] = 1 where id = 2;
+select * from test_jsonb_subscript;
+ id |        test_json         
+----+--------------------------
+  1 | {"a": 1}
+  2 | {"a": 1, "key": "value"}
+(2 rows)
+
+-- replace jsonb
+update test_jsonb_subscript set test_json['a'] = 'test';
+select * from test_jsonb_subscript;
+ id |           test_json           
+----+-------------------------------
+  1 | {"a": "test"}
+  2 | {"a": "test", "key": "value"}
+(2 rows)
+
+-- replace by object
+update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
+select * from test_jsonb_subscript;
+ id |            test_json            
+----+---------------------------------
+  1 | {"a": {"b": 1}}
+  2 | {"a": {"b": 1}, "key": "value"}
+(2 rows)
+
+-- replace by array
+update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
+select * from test_jsonb_subscript;
+ id |            test_json             
+----+----------------------------------
+  1 | {"a": [1, 2, 3]}
+  2 | {"a": [1, 2, 3], "key": "value"}
+(2 rows)
+
+-- use jsonb subscription in where clause
+select * from test_jsonb_subscript where test_json['key'] = '"value"';
+ id |            test_json             
+----+----------------------------------
+  2 | {"a": [1, 2, 3], "key": "value"}
+(1 row)
+
+select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"';
+ id | test_json 
+----+-----------
+(0 rows)
+
+select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"';
+ id | test_json 
+----+-----------
+(0 rows)
+
diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out
index d706f42..e8f8726 100644
--- a/src/test/regress/expected/rules.out
+++ b/src/test/regress/expected/rules.out
@@ -1727,9 +1727,8 @@ pg_stat_activity| SELECT s.datid,
     s.state,
     s.backend_xid,
     s.backend_xmin,
-    s.query,
-    s.backend_type
-   FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn)
+    s.query
+   FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn)
      LEFT JOIN pg_database d ON ((s.datid = d.oid)))
      LEFT JOIN pg_authid u ON ((s.usesysid = u.oid)));
 pg_stat_all_indexes| SELECT c.oid AS relid,
@@ -1860,7 +1859,7 @@ pg_stat_replication| SELECT s.pid,
     w.replay_lag,
     w.sync_priority,
     w.sync_state
-   FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn)
+   FROM ((pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn)
      JOIN pg_stat_get_wal_senders() w(pid, state, sent_location, write_location, flush_location, replay_location, write_lag, flush_lag, replay_lag, sync_priority, sync_state) ON ((s.pid = w.pid)))
      LEFT JOIN pg_authid u ON ((s.usesysid = u.oid)));
 pg_stat_ssl| SELECT s.pid,
@@ -1870,7 +1869,7 @@ pg_stat_ssl| SELECT s.pid,
     s.sslbits AS bits,
     s.sslcompression AS compression,
     s.sslclientdn AS clientdn
-   FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, backend_type, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn);
+   FROM pg_stat_get_activity(NULL::integer) s(datid, pid, usesysid, application_name, state, query, wait_event_type, wait_event, xact_start, query_start, backend_start, state_change, client_addr, client_hostname, client_port, backend_xid, backend_xmin, ssl, sslversion, sslcipher, sslbits, sslcompression, sslclientdn);
 pg_stat_subscription| SELECT su.oid AS subid,
     su.subname,
     st.pid,
@@ -3167,12 +3166,6 @@ SELECT pg_get_ruledef(0);
  
 (1 row)
 
-SELECT pg_get_statisticsextdef(0);
- pg_get_statisticsextdef 
--------------------------
- 
-(1 row)
-
 SELECT pg_get_triggerdef(0);
  pg_get_triggerdef 
 -------------------
diff --git a/src/test/regress/expected/stats_ext.out b/src/test/regress/expected/stats_ext.out
index 8fe96d6..83d70bf 100644
--- a/src/test/regress/expected/stats_ext.out
+++ b/src/test/regress/expected/stats_ext.out
@@ -1,23 +1,10 @@
 -- Generic extended statistics support
--- We will be checking execution plans without/with statistics, so
--- let's make sure we get simple non-parallel plans. Also set the
--- work_mem low so that we can use small amounts of data.
-SET max_parallel_workers = 0;
-SET max_parallel_workers_per_gather = 0;
-SET work_mem = '128kB';
 -- Ensure stats are dropped sanely
 CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER);
 CREATE STATISTICS ab1_a_b_stats ON (a, b) FROM ab1;
 DROP STATISTICS ab1_a_b_stats;
 CREATE SCHEMA regress_schema_2;
 CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON (a, b) FROM ab1;
--- Let's also verify the pg_get_statisticsextdef output looks sane.
-SELECT pg_get_statisticsextdef(oid) FROM pg_statistic_ext WHERE staname = 'ab1_a_b_stats';
-                       pg_get_statisticsextdef                       
----------------------------------------------------------------------
- CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON (a, b) FROM ab1
-(1 row)
-
 DROP STATISTICS regress_schema_2.ab1_a_b_stats;
 -- Ensure statistics are dropped when columns are
 CREATE STATISTICS ab1_b_c_stats ON (b, c) FROM ab1;
@@ -55,67 +42,6 @@ CREATE TABLE ndistinct (
     c INT,
     d INT
 );
--- over-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
-     SELECT i/100, i/100, i/100, cash_words((i/100)::money)
-       FROM generate_series(1,30000) s(i);
-ANALYZE ndistinct;
--- Group Aggregate, due to over-estimate of the number of groups
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b
-   ->  Sort
-         Sort Key: a, b
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: b, c
-   ->  Sort
-         Sort Key: b, c
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b, c
-   ->  Sort
-         Sort Key: a, b, c
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b, c, d
-   ->  Sort
-         Sort Key: a, b, c, d
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: b, c, d
-   ->  Sort
-         Sort Key: b, c, d
-         ->  Seq Scan on ndistinct
-(5 rows)
-
 -- unknown column
 CREATE STATISTICS s10 ON (unknown_column) FROM ndistinct;
 ERROR:  column "unknown_column" referenced in statistics does not exist
@@ -130,15 +56,18 @@ CREATE STATISTICS s10 ON (a, a, b) FROM ndistinct;
 ERROR:  duplicate column name in statistics definition
 -- correct command
 CREATE STATISTICS s10 ON (a, b, c) FROM ndistinct;
+-- perfectly correlated groups
+INSERT INTO ndistinct (a, b, c, filler1)
+     SELECT i/100, i/100, i/100, cash_words(i::money)
+       FROM generate_series(1,10000) s(i);
 ANALYZE ndistinct;
 SELECT staenabled, standistinct
   FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
  staenabled |                                          standistinct                                          
 ------------+------------------------------------------------------------------------------------------------
- {d}        | [{(b 3 4), 301.000000}, {(b 3 6), 301.000000}, {(b 4 6), 301.000000}, {(b 3 4 6), 301.000000}]
+ {d}        | [{(b 3 4), 101.000000}, {(b 3 6), 101.000000}, {(b 4 6), 101.000000}, {(b 3 4 6), 101.000000}]
 (1 row)
 
--- Hash Aggregate, thanks to estimates improved by the statistic
 EXPLAIN (COSTS off)
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
          QUERY PLAN          
@@ -149,15 +78,6 @@ EXPLAIN (COSTS off)
 (3 rows)
 
 EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c;
-         QUERY PLAN          
------------------------------
- HashAggregate
-   Group Key: b, c
-   ->  Seq Scan on ndistinct
-(3 rows)
-
-EXPLAIN (COSTS off)
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
          QUERY PLAN          
 -----------------------------
@@ -166,148 +86,70 @@ EXPLAIN (COSTS off)
    ->  Seq Scan on ndistinct
 (3 rows)
 
--- last two plans keep using Group Aggregate, because 'd' is not covered
--- by the statistic and while it's NULL-only we assume 200 values for it
 EXPLAIN (COSTS off)
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b, c, d
-   ->  Sort
-         Sort Key: a, b, c, d
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: b, c, d
-   ->  Sort
-         Sort Key: b, c, d
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-TRUNCATE TABLE ndistinct;
--- under-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
-     SELECT mod(i,50), mod(i,51), mod(i,32),
-            cash_words(mod(i,33)::int::money)
-       FROM generate_series(1,10000) s(i);
-ANALYZE ndistinct;
-SELECT staenabled, standistinct
-  FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
- staenabled |                                            standistinct                                            
-------------+----------------------------------------------------------------------------------------------------
- {d}        | [{(b 3 4), 2550.000000}, {(b 3 6), 800.000000}, {(b 4 6), 1632.000000}, {(b 3 4 6), 10000.000000}]
-(1 row)
-
--- plans using Group Aggregate, thanks to using correct esimates
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b
-   ->  Sort
-         Sort Key: a, b
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b, c
-   ->  Sort
-         Sort Key: a, b, c
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-            QUERY PLAN             
------------------------------------
- GroupAggregate
-   Group Key: a, b, c, d
-   ->  Sort
-         Sort Key: a, b, c, d
-         ->  Seq Scan on ndistinct
-(5 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-         QUERY PLAN          
------------------------------
- HashAggregate
-   Group Key: b, c, d
-   ->  Seq Scan on ndistinct
-(3 rows)
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, d;
          QUERY PLAN          
 -----------------------------
  HashAggregate
-   Group Key: a, d
+   Group Key: a, b, c, d
    ->  Seq Scan on ndistinct
 (3 rows)
 
-DROP STATISTICS s10;
+TRUNCATE TABLE ndistinct;
+-- partially correlated groups
+INSERT INTO ndistinct (a, b, c)
+     SELECT i/50, i/100, i/200 FROM generate_series(1,10000) s(i);
+ANALYZE ndistinct;
 SELECT staenabled, standistinct
   FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
- staenabled | standistinct 
-------------+--------------
-(0 rows)
+ staenabled |                                          standistinct                                          
+------------+------------------------------------------------------------------------------------------------
+ {d}        | [{(b 3 4), 201.000000}, {(b 3 6), 201.000000}, {(b 4 6), 101.000000}, {(b 3 4 6), 201.000000}]
+(1 row)
 
--- dropping the statistics switches the plans to Hash Aggregate,
--- due to under-estimates
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
-         QUERY PLAN          
------------------------------
- HashAggregate
+                             QUERY PLAN                              
+---------------------------------------------------------------------
+ HashAggregate  (cost=230.00..232.01 rows=201 width=16)
    Group Key: a, b
-   ->  Seq Scan on ndistinct
+   ->  Seq Scan on ndistinct  (cost=0.00..155.00 rows=10000 width=8)
 (3 rows)
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
-         QUERY PLAN          
------------------------------
- HashAggregate
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=255.00..257.01 rows=201 width=20)
    Group Key: a, b, c
-   ->  Seq Scan on ndistinct
+   ->  Seq Scan on ndistinct  (cost=0.00..155.00 rows=10000 width=12)
 (3 rows)
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-         QUERY PLAN          
------------------------------
- HashAggregate
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=280.00..290.00 rows=1000 width=24)
    Group Key: a, b, c, d
-   ->  Seq Scan on ndistinct
+   ->  Seq Scan on ndistinct  (cost=0.00..155.00 rows=10000 width=16)
 (3 rows)
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-         QUERY PLAN          
------------------------------
- HashAggregate
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=255.00..265.00 rows=1000 width=20)
    Group Key: b, c, d
-   ->  Seq Scan on ndistinct
+   ->  Seq Scan on ndistinct  (cost=0.00..155.00 rows=10000 width=12)
 (3 rows)
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, d;
-         QUERY PLAN          
------------------------------
- HashAggregate
+                             QUERY PLAN                              
+---------------------------------------------------------------------
+ HashAggregate  (cost=230.00..240.00 rows=1000 width=16)
    Group Key: a, d
-   ->  Seq Scan on ndistinct
+   ->  Seq Scan on ndistinct  (cost=0.00..155.00 rows=10000 width=8)
 (3 rows)
 
 DROP TABLE ndistinct;
diff --git a/src/test/regress/expected/stats_ext_1.out b/src/test/regress/expected/stats_ext_1.out
new file mode 100644
index 0000000..128afba
--- /dev/null
+++ b/src/test/regress/expected/stats_ext_1.out
@@ -0,0 +1,155 @@
+-- Generic extended statistics support
+-- Ensure stats are dropped sanely
+CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER);
+CREATE STATISTICS ab1_a_b_stats ON (a, b) FROM ab1;
+DROP STATISTICS ab1_a_b_stats;
+CREATE SCHEMA regress_schema_2;
+CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON (a, b) FROM ab1;
+DROP STATISTICS regress_schema_2.ab1_a_b_stats;
+-- Ensure statistics are dropped when columns are
+CREATE STATISTICS ab1_b_c_stats ON (b, c) FROM ab1;
+CREATE STATISTICS ab1_a_b_c_stats ON (a, b, c) FROM ab1;
+CREATE STATISTICS ab1_a_b_stats ON (a, b) FROM ab1;
+ALTER TABLE ab1 DROP COLUMN a;
+\d ab1
+                Table "public.ab1"
+ Column |  Type   | Collation | Nullable | Default 
+--------+---------+-----------+----------+---------
+ b      | integer |           |          | 
+ c      | integer |           |          | 
+Statistics:
+    "public.ab1_b_c_stats" WITH (ndistinct) ON (b, c)
+
+DROP TABLE ab1;
+-- Ensure things work sanely with SET STATISTICS 0
+CREATE TABLE ab1 (a INTEGER, b INTEGER);
+ALTER TABLE ab1 ALTER a SET STATISTICS 0;
+INSERT INTO ab1 SELECT a, a%23 FROM generate_series(1, 1000) a;
+CREATE STATISTICS ab1_a_b_stats ON (a, b) FROM ab1;
+ANALYZE ab1;
+ERROR:  extended statistics could not be collected for column "a" of relation public.ab1
+HINT:  Consider ALTER TABLE "public"."ab1" ALTER "a" SET STATISTICS -1
+ALTER TABLE ab1 ALTER a SET STATISTICS -1;
+ANALYZE ab1;
+DROP TABLE ab1;
+-- n-distinct tests
+CREATE TABLE ndistinct (
+    filler1 TEXT,
+    filler2 NUMERIC,
+    a INT,
+    b INT,
+    filler3 DATE,
+    c INT,
+    d INT
+);
+-- unknown column
+CREATE STATISTICS s10 ON (unknown_column) FROM ndistinct;
+ERROR:  column "unknown_column" referenced in statistics does not exist
+-- single column
+CREATE STATISTICS s10 ON (a) FROM ndistinct;
+ERROR:  statistics require at least 2 columns
+-- single column, duplicated
+CREATE STATISTICS s10 ON (a,a) FROM ndistinct;
+ERROR:  duplicate column name in statistics definition
+-- two columns, one duplicated
+CREATE STATISTICS s10 ON (a, a, b) FROM ndistinct;
+ERROR:  duplicate column name in statistics definition
+-- correct command
+CREATE STATISTICS s10 ON (a, b, c) FROM ndistinct;
+-- perfectly correlated groups
+INSERT INTO ndistinct (a, b, c, filler1)
+     SELECT i/100, i/100, i/100, cash_words(i::money)
+       FROM generate_series(1,10000) s(i);
+ANALYZE ndistinct;
+SELECT staenabled, standistinct
+  FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
+ staenabled |                                          standistinct                                          
+------------+------------------------------------------------------------------------------------------------
+ {d}        | [{(b 3 4), 101.000000}, {(b 3 6), 101.000000}, {(b 4 6), 101.000000}, {(b 3 4 6), 101.000000}]
+(1 row)
+
+EXPLAIN (COSTS off)
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
+         QUERY PLAN          
+-----------------------------
+ HashAggregate
+   Group Key: a, b
+   ->  Seq Scan on ndistinct
+(3 rows)
+
+EXPLAIN (COSTS off)
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
+         QUERY PLAN          
+-----------------------------
+ HashAggregate
+   Group Key: a, b, c
+   ->  Seq Scan on ndistinct
+(3 rows)
+
+EXPLAIN (COSTS off)
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
+         QUERY PLAN          
+-----------------------------
+ HashAggregate
+   Group Key: a, b, c, d
+   ->  Seq Scan on ndistinct
+(3 rows)
+
+TRUNCATE TABLE ndistinct;
+-- partially correlated groups
+INSERT INTO ndistinct (a, b, c)
+     SELECT i/50, i/100, i/200 FROM generate_series(1,10000) s(i);
+ANALYZE ndistinct;
+SELECT staenabled, standistinct
+  FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
+ staenabled |                                          standistinct                                          
+------------+------------------------------------------------------------------------------------------------
+ {d}        | [{(b 3 4), 201.000000}, {(b 3 6), 201.000000}, {(b 4 6), 101.000000}, {(b 3 4 6), 201.000000}]
+(1 row)
+
+EXPLAIN
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
+                             QUERY PLAN                              
+---------------------------------------------------------------------
+ HashAggregate  (cost=225.00..227.01 rows=201 width=16)
+   Group Key: a, b
+   ->  Seq Scan on ndistinct  (cost=0.00..150.00 rows=10000 width=8)
+(3 rows)
+
+EXPLAIN
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=250.00..252.01 rows=201 width=20)
+   Group Key: a, b, c
+   ->  Seq Scan on ndistinct  (cost=0.00..150.00 rows=10000 width=12)
+(3 rows)
+
+EXPLAIN
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=275.00..285.00 rows=1000 width=24)
+   Group Key: a, b, c, d
+   ->  Seq Scan on ndistinct  (cost=0.00..150.00 rows=10000 width=16)
+(3 rows)
+
+EXPLAIN
+ SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
+                              QUERY PLAN                              
+----------------------------------------------------------------------
+ HashAggregate  (cost=250.00..260.00 rows=1000 width=20)
+   Group Key: b, c, d
+   ->  Seq Scan on ndistinct  (cost=0.00..150.00 rows=10000 width=12)
+(3 rows)
+
+EXPLAIN
+ SELECT COUNT(*) FROM ndistinct GROUP BY a, d;
+                             QUERY PLAN                              
+---------------------------------------------------------------------
+ HashAggregate  (cost=225.00..235.00 rows=1000 width=16)
+   Group Key: a, d
+   ->  Seq Scan on ndistinct  (cost=0.00..150.00 rows=10000 width=8)
+(3 rows)
+
+DROP TABLE ndistinct;
diff --git a/src/test/regress/expected/tsrf.out b/src/test/regress/expected/tsrf.out
index 33f370b..0eeaf9e 100644
--- a/src/test/regress/expected/tsrf.out
+++ b/src/test/regress/expected/tsrf.out
@@ -233,7 +233,6 @@ SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROU
 (6 rows)
 
 -- grouping sets are a bit special, they produce NULLs in columns not actually NULL
-set enable_hashagg = false;
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab);
  dataa |  b  | g | count 
 -------+-----+---+-------
@@ -312,46 +311,46 @@ SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(d
  b     | bar |   |     2
  b     |     |   |     2
        |     |   |     6
-       | bar | 1 |     2
-       | bar | 2 |     2
-       | bar |   |     4
-       | foo | 1 |     1
-       | foo | 2 |     1
-       | foo |   |     2
  a     |     | 1 |     2
  b     |     | 1 |     1
        |     | 1 |     3
  a     |     | 2 |     2
  b     |     | 2 |     1
        |     | 2 |     3
+       | bar | 1 |     2
+       | bar | 2 |     2
+       | bar |   |     4
+       | foo | 1 |     1
+       | foo | 2 |     1
+       | foo |   |     2
 (24 rows)
 
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa;
  dataa |  b  | g | count 
 -------+-----+---+-------
- a     | foo |   |     2
- a     |     |   |     4
- a     |     | 2 |     2
  a     | bar | 1 |     1
  a     | bar | 2 |     1
  a     | bar |   |     2
  a     | foo | 1 |     1
  a     | foo | 2 |     1
+ a     | foo |   |     2
+ a     |     |   |     4
  a     |     | 1 |     2
- b     | bar | 1 |     1
+ a     |     | 2 |     2
+ b     | bar | 2 |     1
  b     |     |   |     2
  b     |     | 1 |     1
- b     | bar | 2 |     1
- b     | bar |   |     2
  b     |     | 2 |     1
+ b     | bar | 1 |     1
+ b     | bar |   |     2
+       | foo |   |     2
+       | foo | 1 |     1
        |     | 2 |     3
-       |     |   |     6
        | bar | 1 |     2
        | bar | 2 |     2
-       | bar |   |     4
-       | foo | 1 |     1
+       |     |   |     6
        | foo | 2 |     1
-       | foo |   |     2
+       | bar |   |     4
        |     | 1 |     3
 (24 rows)
 
@@ -361,30 +360,29 @@ SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(d
  a     | bar | 1 |     1
  a     | foo | 1 |     1
  b     | bar | 1 |     1
-       | bar | 1 |     2
-       | foo | 1 |     1
  a     |     | 1 |     2
  b     |     | 1 |     1
        |     | 1 |     3
+       | bar | 1 |     2
+       | foo | 1 |     1
+       | foo | 2 |     1
+       | bar | 2 |     2
  a     |     | 2 |     2
  b     |     | 2 |     1
-       | bar | 2 |     2
-       |     | 2 |     3
-       | foo | 2 |     1
  a     | bar | 2 |     1
+       |     | 2 |     3
  a     | foo | 2 |     1
  b     | bar | 2 |     1
- a     |     |   |     4
+ a     | foo |   |     2
  b     | bar |   |     2
  b     |     |   |     2
        |     |   |     6
- a     | foo |   |     2
- a     | bar |   |     2
+ a     |     |   |     4
        | bar |   |     4
        | foo |   |     2
+ a     | bar |   |     2
 (24 rows)
 
-reset enable_hashagg;
 -- data modification
 CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data;
 INSERT INTO fewmore VALUES(generate_series(4,5));
diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql
index 25dd4e2..fb7a319 100644
--- a/src/test/regress/sql/arrays.sql
+++ b/src/test/regress/sql/arrays.sql
@@ -106,8 +106,8 @@ select ('[0:2][0:2]={{1,2,3},{4,5,6},{7,8,9}}'::int[])[1:2][2];
 --
 -- check subscription corner cases
 --
--- More subscripts than MAXDIMS(6)
-SELECT ('{}'::int[])[1][2][3][4][5][6][7];
+-- More subscripts than MAXDIMS(12)
+SELECT ('{}'::int[])[1][2][3][4][5][6][7][8][9][10][11][12][13];
 -- NULL index yields NULL when selecting
 SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL][1];
 SELECT ('{{{1},{2},{3}},{{4},{5},{6}}}'::int[])[1][NULL:1][1];
diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql
index 564ebc9..cc557cc 100644
--- a/src/test/regress/sql/groupingsets.sql
+++ b/src/test/regress/sql/groupingsets.sql
@@ -31,14 +31,6 @@ copy gstest3 from stdin;
 \.
 alter table gstest3 add primary key (a);
 
-create temp table gstest4(id integer, v integer,
-                          unhashable_col bit(4), unsortable_col xid);
-insert into gstest4
-values (1,1,b'0000','1'), (2,2,b'0001','1'),
-       (3,4,b'0010','2'), (4,8,b'0011','2'),
-       (5,16,b'0000','2'), (6,32,b'0001','2'),
-       (7,64,b'0010','1'), (8,128,b'0011','1');
-
 create temp table gstest_empty (a integer, b integer, v integer);
 
 create function gstest_data(v integer, out a integer, out b integer)
@@ -51,11 +43,8 @@ create function gstest_data(v integer, out a integer, out b integer)
 
 -- basic functionality
 
-set enable_hashagg = false;  -- test hashing explicitly later
-
 -- simple rollup with multiple plain aggregates, with and without ordering
 -- (and with ordering differing from grouping)
-
 select a, b, grouping(a,b), sum(v), count(*), max(v)
   from gstest1 group by rollup (a,b);
 select a, b, grouping(a,b), sum(v), count(*), max(v)
@@ -172,7 +161,7 @@ select a, b from (values (1,2),(2,3)) v(a,b) group by a,b, grouping sets(a);
 
 -- Tests for chained aggregates
 select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
+  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2));
 select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY ROLLUP((e+1),(f+1));
 select(select (select grouping(a,b) from (values (1)) v2(c)) from (values (1,2)) v1(a,b) group by (a,b)) from (values(6,7)) v3(e,f) GROUP BY CUBE((e+1),(f+1)) ORDER BY (e+1),(f+1);
 select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
@@ -235,147 +224,4 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou
 select sum(ten) from onek group by two, rollup(four::text) order by 1;
 select sum(ten) from onek group by rollup(four::text), two order by 1;
 
--- hashing support
-
-set enable_hashagg = true;
-
--- failure cases
-
-select count(*) from gstest4 group by rollup(unhashable_col,unsortable_col);
-select array_agg(v order by v) from gstest4 group by grouping sets ((id,unsortable_col),(id));
-
--- simple cases
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a),(b)) order by 3,1,2;
-
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by cube(a,b) order by 3,1,2;
-explain (costs off) select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by cube(a,b) order by 3,1,2;
-
--- shouldn't try and hash
-explain (costs off)
-  select a, b, grouping(a,b), array_agg(v order by v)
-    from gstest1 group by cube(a,b);
-
--- mixed hashable/sortable cases
-select unhashable_col, unsortable_col,
-       grouping(unhashable_col, unsortable_col),
-       count(*), sum(v)
-  from gstest4 group by grouping sets ((unhashable_col),(unsortable_col))
- order by 3, 5;
-explain (costs off)
-  select unhashable_col, unsortable_col,
-         grouping(unhashable_col, unsortable_col),
-         count(*), sum(v)
-    from gstest4 group by grouping sets ((unhashable_col),(unsortable_col))
-   order by 3,5;
-
-select unhashable_col, unsortable_col,
-       grouping(unhashable_col, unsortable_col),
-       count(*), sum(v)
-  from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col))
- order by 3,5;
-explain (costs off)
-  select unhashable_col, unsortable_col,
-         grouping(unhashable_col, unsortable_col),
-         count(*), sum(v)
-    from gstest4 group by grouping sets ((v,unhashable_col),(v,unsortable_col))
-   order by 3,5;
-
--- empty input: first is 0 rows, second 1, third 3 etc.
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
-explain (costs off)
-  select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a);
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),());
-select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
-explain (costs off)
-  select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),(),(),());
-select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
-explain (costs off)
-  select sum(v), count(*) from gstest_empty group by grouping sets ((),(),());
-
--- check that functionally dependent cols are not nulled
-select a, d, grouping(a,b,c)
-  from gstest3
- group by grouping sets ((a,b), (a,c));
-explain (costs off)
-  select a, d, grouping(a,b,c)
-    from gstest3
-   group by grouping sets ((a,b), (a,c));
-
--- simple rescan tests
-
-select a, b, sum(v.x)
-  from (values (1),(2)) v(x), gstest_data(v.x)
- group by grouping sets (a,b);
-explain (costs off)
-  select a, b, sum(v.x)
-    from (values (1),(2)) v(x), gstest_data(v.x)
-   group by grouping sets (a,b);
-
-select *
-  from (values (1),(2)) v(x),
-       lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
-explain (costs off)
-  select *
-    from (values (1),(2)) v(x),
-         lateral (select a, b, sum(v.x) from gstest_data(v.x) group by grouping sets (a,b)) s;
-
--- Tests for chained aggregates
-select a, b, grouping(a,b), sum(v), count(*), max(v)
-  from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
-explain (costs off)
-  select a, b, grouping(a,b), sum(v), count(*), max(v)
-    from gstest1 group by grouping sets ((a,b),(a+1,b+1),(a+2,b+2)) order by 3,6;
-select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
-  from gstest2 group by cube (a,b) order by rsum, a, b;
-explain (costs off)
-  select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum
-    from gstest2 group by cube (a,b) order by rsum, a, b;
-select a, b, sum(v.x)
-  from (values (1),(2)) v(x), gstest_data(v.x)
- group by cube (a,b) order by a,b;
-explain (costs off)
-  select a, b, sum(v.x)
-    from (values (1),(2)) v(x), gstest_data(v.x)
-   group by cube (a,b) order by a,b;
-
--- More rescan tests
-select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten;
-select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a);
-
--- Rescan logic changes when there are no empty grouping sets, so test
--- that too:
-select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by grouping sets(four,ten)) s on true order by v.a,four,ten;
-select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by grouping sets(two,four) order by two,four) s1) from (values (1),(2)) v(a);
-
--- test the knapsack
-
-set enable_indexscan = false;
-set work_mem = '64kB';
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,hundred,ten,four,two);
-
-set work_mem = '384kB';
-explain (costs off)
-  select unique1,
-         count(two), count(four), count(ten),
-         count(hundred), count(thousand), count(twothousand),
-         count(*)
-    from tenk1 group by grouping sets (unique1,twothousand,thousand,hundred,ten,four,two);
-
 -- end
diff --git a/src/test/regress/sql/insert.sql b/src/test/regress/sql/insert.sql
index f9c0070..c56c3c2 100644
--- a/src/test/regress/sql/insert.sql
+++ b/src/test/regress/sql/insert.sql
@@ -226,12 +226,6 @@ insert into mlparted values (1, 2);
 -- selected by tuple-routing
 insert into mlparted1 (a, b) values (2, 3);
 
--- check routing error through a list partitioned table when the key is null
-create table lparted_nonullpart (a int, b char) partition by list (b);
-create table lparted_nonullpart_a partition of lparted_nonullpart for values in ('a');
-insert into lparted_nonullpart values (1);
-drop table lparted_nonullpart;
-
 -- check that RETURNING works correctly with tuple-routing
 alter table mlparted drop constraint check_b;
 create table mlparted12 partition of mlparted1 for values from (5) to (10);
diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql
index 78bffc7..df3a9b5 100644
--- a/src/test/regress/sql/insert_conflict.sql
+++ b/src/test/regress/sql/insert_conflict.sql
@@ -471,13 +471,3 @@ commit;
 select * from selfconflict;
 
 drop table selfconflict;
-
--- check that the following works:
--- insert into partitioned_table on conflict do nothing
-create table parted_conflict_test (a int, b char) partition by list (a);
-create table parted_conflict_test_1 partition of parted_conflict_test for values in (1);
-insert into parted_conflict_test values (1, 'a') on conflict do nothing;
-insert into parted_conflict_test values (1, 'a') on conflict do nothing;
--- however, on conflict do update not supported yet
-insert into parted_conflict_test values (1) on conflict (a) do update set b = excluded.b where excluded.a = 1;
-drop table parted_conflict_test, parted_conflict_test_1;
diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql
index e2eaca0..e8e65ba 100644
--- a/src/test/regress/sql/jsonb.sql
+++ b/src/test/regress/sql/jsonb.sql
@@ -878,3 +878,62 @@ select jsonb_insert('{"a": {"b": "value"}}', '{a, c}', '"new_value"', true);
 
 select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"');
 select jsonb_insert('{"a": {"b": "value"}}', '{a, b}', '"new_value"', true);
+
+-- jsonb subscript
+select ('123'::jsonb)['a'];
+select ('123'::jsonb)[0];
+select ('{"a": 1}'::jsonb)['a'];
+select ('{"a": 1}'::jsonb)[0];
+select ('{"a": 1}'::jsonb)['not_exist'];
+select ('[1, "2", null]'::jsonb)['a'];
+select ('[1, "2", null]'::jsonb)[0];
+select ('[1, "2", null]'::jsonb)['1'];
+select ('[1, "2", null]'::jsonb)[1.0];
+select ('[1, "2", null]'::jsonb)[2];
+select ('[1, "2", null]'::jsonb)[3];
+select ('[1, "2", null]'::jsonb)[-2];
+select ('[1, "2", null]'::jsonb)[1]['a'];
+select ('[1, "2", null]'::jsonb)[1][0];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['b'];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d'][1];
+select ('{"a": 1, "b": "c", "d": [1, 2, 3]}'::jsonb)['d']['a'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2'];
+select ('{"a": {"a1": {"a2": "aaa"}}, "b": "bbb", "c": "ccc"}'::jsonb)['a']['a1']['a2']['a3'];
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'];
+select ('{"a": ["a1", {"b1": ["aaa", "bbb", "ccc"]}], "b": "bb"}'::jsonb)['a'][1]['b1'][2];
+
+create TEMP TABLE test_jsonb_subscript (
+       id int,
+       test_json jsonb
+);
+
+insert into test_jsonb_subscript values
+(1, '{}'), -- empty jsonb
+(2, '{"key": "value"}'); -- jsonb with data
+
+-- update empty jsonb
+update test_jsonb_subscript set test_json['a'] = 1 where id = 1;
+select * from test_jsonb_subscript;
+
+-- update jsonb with some data
+update test_jsonb_subscript set test_json['a'] = 1 where id = 2;
+select * from test_jsonb_subscript;
+
+-- replace jsonb
+update test_jsonb_subscript set test_json['a'] = 'test';
+select * from test_jsonb_subscript;
+
+-- replace by object
+update test_jsonb_subscript set test_json['a'] = '{"b": 1}'::jsonb;
+select * from test_jsonb_subscript;
+
+-- replace by array
+update test_jsonb_subscript set test_json['a'] = '[1, 2, 3]'::jsonb;
+select * from test_jsonb_subscript;
+
+-- use jsonb subscription in where clause
+select * from test_jsonb_subscript where test_json['key'] = '"value"';
+select * from test_jsonb_subscript where test_json['key_doesnt_exists'] = '"value"';
+select * from test_jsonb_subscript where test_json['key'] = '"wrong_value"';
diff --git a/src/test/regress/sql/rules.sql b/src/test/regress/sql/rules.sql
index dcff0de..90dc9ce 100644
--- a/src/test/regress/sql/rules.sql
+++ b/src/test/regress/sql/rules.sql
@@ -1150,7 +1150,6 @@ SELECT pg_get_constraintdef(0);
 SELECT pg_get_functiondef(0);
 SELECT pg_get_indexdef(0);
 SELECT pg_get_ruledef(0);
-SELECT pg_get_statisticsextdef(0);
 SELECT pg_get_triggerdef(0);
 SELECT pg_get_viewdef(0);
 SELECT pg_get_function_arguments(0);
diff --git a/src/test/regress/sql/stats_ext.sql b/src/test/regress/sql/stats_ext.sql
index 4faaf88..946cb84 100644
--- a/src/test/regress/sql/stats_ext.sql
+++ b/src/test/regress/sql/stats_ext.sql
@@ -1,12 +1,5 @@
 -- Generic extended statistics support
 
--- We will be checking execution plans without/with statistics, so
--- let's make sure we get simple non-parallel plans. Also set the
--- work_mem low so that we can use small amounts of data.
-SET max_parallel_workers = 0;
-SET max_parallel_workers_per_gather = 0;
-SET work_mem = '128kB';
-
 -- Ensure stats are dropped sanely
 CREATE TABLE ab1 (a INTEGER, b INTEGER, c INTEGER);
 CREATE STATISTICS ab1_a_b_stats ON (a, b) FROM ab1;
@@ -14,10 +7,6 @@ DROP STATISTICS ab1_a_b_stats;
 
 CREATE SCHEMA regress_schema_2;
 CREATE STATISTICS regress_schema_2.ab1_a_b_stats ON (a, b) FROM ab1;
-
--- Let's also verify the pg_get_statisticsextdef output looks sane.
-SELECT pg_get_statisticsextdef(oid) FROM pg_statistic_ext WHERE staname = 'ab1_a_b_stats';
-
 DROP STATISTICS regress_schema_2.ab1_a_b_stats;
 
 -- Ensure statistics are dropped when columns are
@@ -50,29 +39,6 @@ CREATE TABLE ndistinct (
     d INT
 );
 
--- over-estimates when using only per-column statistics
-INSERT INTO ndistinct (a, b, c, filler1)
-     SELECT i/100, i/100, i/100, cash_words((i/100)::money)
-       FROM generate_series(1,30000) s(i);
-
-ANALYZE ndistinct;
-
--- Group Aggregate, due to over-estimate of the number of groups
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-
 -- unknown column
 CREATE STATISTICS s10 ON (unknown_column) FROM ndistinct;
 
@@ -88,35 +54,9 @@ CREATE STATISTICS s10 ON (a, a, b) FROM ndistinct;
 -- correct command
 CREATE STATISTICS s10 ON (a, b, c) FROM ndistinct;
 
-ANALYZE ndistinct;
-
-SELECT staenabled, standistinct
-  FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
-
--- Hash Aggregate, thanks to estimates improved by the statistic
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
-
--- last two plans keep using Group Aggregate, because 'd' is not covered
--- by the statistic and while it's NULL-only we assume 200 values for it
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
-
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
-
-TRUNCATE TABLE ndistinct;
-
--- under-estimates when using only per-column statistics
+-- perfectly correlated groups
 INSERT INTO ndistinct (a, b, c, filler1)
-     SELECT mod(i,50), mod(i,51), mod(i,32),
-            cash_words(mod(i,33)::int::money)
+     SELECT i/100, i/100, i/100, cash_words(i::money)
        FROM generate_series(1,10000) s(i);
 
 ANALYZE ndistinct;
@@ -124,7 +64,6 @@ ANALYZE ndistinct;
 SELECT staenabled, standistinct
   FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
 
--- plans using Group Aggregate, thanks to using correct esimates
 EXPLAIN (COSTS off)
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
 
@@ -134,32 +73,30 @@ EXPLAIN (COSTS off)
 EXPLAIN (COSTS off)
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
 
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
+TRUNCATE TABLE ndistinct;
 
-EXPLAIN (COSTS off)
- SELECT COUNT(*) FROM ndistinct GROUP BY a, d;
+-- partially correlated groups
+INSERT INTO ndistinct (a, b, c)
+     SELECT i/50, i/100, i/200 FROM generate_series(1,10000) s(i);
 
-DROP STATISTICS s10;
+ANALYZE ndistinct;
 
 SELECT staenabled, standistinct
   FROM pg_statistic_ext WHERE starelid = 'ndistinct'::regclass;
 
--- dropping the statistics switches the plans to Hash Aggregate,
--- due to under-estimates
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b;
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c;
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, b, c, d;
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY b, c, d;
 
-EXPLAIN (COSTS off)
+EXPLAIN
  SELECT COUNT(*) FROM ndistinct GROUP BY a, d;
 
 DROP TABLE ndistinct;
diff --git a/src/test/regress/sql/tsrf.sql b/src/test/regress/sql/tsrf.sql
index 417e78c..e627bb9 100644
--- a/src/test/regress/sql/tsrf.sql
+++ b/src/test/regress/sql/tsrf.sql
@@ -66,14 +66,12 @@ SELECT SUM(count(*)) OVER(PARTITION BY generate_series(1,3) ORDER BY generate_se
 SELECT few.dataa, count(*), min(id), max(id), generate_series(1,3) FROM few GROUP BY few.dataa ORDER BY 5, 1;
 
 -- grouping sets are a bit special, they produce NULLs in columns not actually NULL
-set enable_hashagg = false;
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab);
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY dataa;
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab) ORDER BY g;
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g);
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY dataa;
 SELECT dataa, datab b, generate_series(1,2) g, count(*) FROM few GROUP BY CUBE(dataa, datab, g) ORDER BY g;
-reset enable_hashagg;
 
 -- data modification
 CREATE TABLE fewmore AS SELECT generate_series(1,3) AS data;
diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm
index 6d17d6d..9441249 100644
--- a/src/test/ssl/ServerSetup.pm
+++ b/src/test/ssl/ServerSetup.pm
@@ -58,21 +58,21 @@ sub configure_test_server_for_ssl
 	$node->psql('postgres', "CREATE DATABASE certdb");
 
 	# enable logging etc.
-	open my $conf, '>>', "$pgdata/postgresql.conf";
-	print $conf "fsync=off\n";
-	print $conf "log_connections=on\n";
-	print $conf "log_hostname=on\n";
-	print $conf "listen_addresses='$serverhost'\n";
-	print $conf "log_statement=all\n";
+	open CONF, ">>$pgdata/postgresql.conf";
+	print CONF "fsync=off\n";
+	print CONF "log_connections=on\n";
+	print CONF "log_hostname=on\n";
+	print CONF "listen_addresses='$serverhost'\n";
+	print CONF "log_statement=all\n";
 
 	# enable SSL and set up server key
-	print $conf "include 'sslconfig.conf'";
+	print CONF "include 'sslconfig.conf'";
 
-	close $conf;
+	close CONF;
 
 	# ssl configuration will be placed here
-	open my $sslconf, '>', "$pgdata/sslconfig.conf";
-	close $sslconf;
+	open SSLCONF, ">$pgdata/sslconfig.conf";
+	close SSLCONF;
 
 	# Copy all server certificates and keys, and client root cert, to the data dir
 	copy_files("ssl/server-*.crt", $pgdata);
@@ -100,13 +100,13 @@ sub switch_server_cert
 
 	diag "Reloading server with certfile \"$certfile\" and cafile \"$cafile\"...";
 
-	open my $sslconf, '>', "$pgdata/sslconfig.conf";
-	print $sslconf "ssl=on\n";
-	print $sslconf "ssl_ca_file='root+client_ca.crt'\n";
-	print $sslconf "ssl_cert_file='$certfile.crt'\n";
-	print $sslconf "ssl_key_file='$certfile.key'\n";
-	print $sslconf "ssl_crl_file='root+client.crl'\n";
-	close $sslconf;
+	open SSLCONF, ">$pgdata/sslconfig.conf";
+	print SSLCONF "ssl=on\n";
+	print SSLCONF "ssl_ca_file='$cafile.crt'\n";
+	print SSLCONF "ssl_cert_file='$certfile.crt'\n";
+	print SSLCONF "ssl_key_file='$certfile.key'\n";
+	print SSLCONF "ssl_crl_file='root+client.crl'\n";
+	close SSLCONF;
 
 	$node->reload;
 }
@@ -121,16 +121,16 @@ sub configure_hba_for_ssl
 	# but seems best to keep it as narrow as possible for security reasons.
 	#
 	# When connecting to certdb, also check the client certificate.
-	open my $hba, '>', "$pgdata/pg_hba.conf";
-	print $hba
+	open HBA, ">$pgdata/pg_hba.conf";
+	print HBA
 "# TYPE  DATABASE        USER            ADDRESS                 METHOD\n";
-	print $hba
+	print HBA
 "hostssl trustdb         ssltestuser     $serverhost/32            trust\n";
-	print $hba
+	print HBA
 "hostssl trustdb         ssltestuser     ::1/128                 trust\n";
-	print $hba
+	print HBA
 "hostssl certdb          ssltestuser     $serverhost/32            cert\n";
-	print $hba
+	print HBA
 "hostssl certdb          ssltestuser     ::1/128                 cert\n";
-	close $hba;
+	close HBA;
 }
diff --git a/src/tools/fix-old-flex-code.pl b/src/tools/fix-old-flex-code.pl
index bc868df..8dafcae 100644
--- a/src/tools/fix-old-flex-code.pl
+++ b/src/tools/fix-old-flex-code.pl
@@ -25,7 +25,7 @@ my $filename = shift;
 # Suck in the whole file.
 local $/ = undef;
 my $cfile;
-open($cfile, '<', $filename) || die "opening $filename for reading: $!";
+open($cfile, $filename) || die "opening $filename for reading: $!";
 my $ccode = <$cfile>;
 close($cfile);
 
@@ -45,7 +45,7 @@ $ccode =~ s|(struct yyguts_t \* yyg = \(struct yyguts_t\*\)yyscanner; /\* This v
 |s;
 
 # Write the modified file back out.
-open($cfile, '>', $filename) || die "opening $filename for writing: $!";
+open($cfile, ">$filename") || die "opening $filename for writing: $!";
 print $cfile $ccode;
 close($cfile);
 
diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm
index 35ad5b8..b81f4dd 100644
--- a/src/tools/msvc/Install.pm
+++ b/src/tools/msvc/Install.pm
@@ -58,8 +58,8 @@ sub Install
 
 		# suppress warning about harmless redeclaration of $config
 		no warnings 'misc';
-		do "config_default.pl";
-		do "config.pl" if (-f "config.pl");
+		require "config_default.pl";
+		require "config.pl" if (-f "config.pl");
 	}
 
 	chdir("../../..")    if (-f "../../../configure");
@@ -367,7 +367,7 @@ sub GenerateConversionScript
 		$sql .=
 "COMMENT ON CONVERSION pg_catalog.$name IS 'conversion for $se to $de';\n\n";
 	}
-	open($F, '>', "$target/share/conversion_create.sql")
+	open($F, ">$target/share/conversion_create.sql")
 	  || die "Could not write to conversion_create.sql\n";
 	print $F $sql;
 	close($F);
@@ -409,7 +409,7 @@ sub GenerateTsearchFiles
 	$mf =~ /^LANGUAGES\s*=\s*(.*)$/m
 	  || die "Could not find LANGUAGES line in snowball Makefile\n";
 	my @pieces = split /\s+/, $1;
-	open($F, '>', "$target/share/snowball_create.sql")
+	open($F, ">$target/share/snowball_create.sql")
 	  || die "Could not write snowball_create.sql";
 	print $F read_file('src/backend/snowball/snowball_func.sql.in');
 
@@ -735,7 +735,7 @@ sub read_file
 	my $t = $/;
 
 	undef $/;
-	open($F, '<', $filename) || die "Could not open file $filename\n";
+	open($F, $filename) || die "Could not open file $filename\n";
 	my $txt = <$F>;
 	close($F);
 	$/ = $t;
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index ba1bf6d..12f73f3 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -825,7 +825,7 @@ sub GenerateContribSqlFiles
 				$dn   =~ s/\.sql$//;
 				$cont =~ s/MODULE_PATHNAME/\$libdir\/$dn/g;
 				my $o;
-				open($o, '>', "contrib/$n/$out")
+				open($o, ">contrib/$n/$out")
 				  || croak "Could not write to contrib/$n/$d";
 				print $o $cont;
 				close($o);
diff --git a/src/tools/msvc/Project.pm b/src/tools/msvc/Project.pm
index 9817b94..faf1a68 100644
--- a/src/tools/msvc/Project.pm
+++ b/src/tools/msvc/Project.pm
@@ -310,12 +310,12 @@ sub AddResourceFile
 	if (Solution::IsNewer("$dir/win32ver.rc", 'src/port/win32ver.rc'))
 	{
 		print "Generating win32ver.rc for $dir\n";
-		open(my $i, '<', 'src/port/win32ver.rc')
+		open(I, 'src/port/win32ver.rc')
 		  || confess "Could not open win32ver.rc";
-		open(my $o, '>', "$dir/win32ver.rc")
+		open(O, ">$dir/win32ver.rc")
 		  || confess "Could not write win32ver.rc";
 		my $icostr = $ico ? "IDI_ICON ICON \"src/port/$ico.ico\"" : "";
-		while (<$i>)
+		while (<I>)
 		{
 			s/FILEDESC/"$desc"/gm;
 			s/_ICO_/$icostr/gm;
@@ -324,11 +324,11 @@ sub AddResourceFile
 			{
 				s/VFT_APP/VFT_DLL/gm;
 			}
-			print $o $_;
+			print O;
 		}
-		close($o);
-		close($i);
 	}
+	close(O);
+	close(I);
 	$self->AddFile("$dir/win32ver.rc");
 }
 
@@ -357,13 +357,13 @@ sub Save
 	$self->DisableLinkerWarnings('4197') if ($self->{platform} eq 'x64');
 
 	# Dump the project
-	open(my $f, '>', "$self->{name}$self->{filenameExtension}")
+	open(F, ">$self->{name}$self->{filenameExtension}")
 	  || croak(
 		"Could not write to $self->{name}$self->{filenameExtension}\n");
-	$self->WriteHeader($f);
-	$self->WriteFiles($f);
-	$self->Footer($f);
-	close($f);
+	$self->WriteHeader(*F);
+	$self->WriteFiles(*F);
+	$self->Footer(*F);
+	close(F);
 }
 
 sub GetAdditionalLinkerDependencies
@@ -397,7 +397,7 @@ sub read_file
 	my $t = $/;
 
 	undef $/;
-	open($F, '<', $filename) || croak "Could not open file $filename\n";
+	open($F, $filename) || croak "Could not open file $filename\n";
 	my $txt = <$F>;
 	close($F);
 	$/ = $t;
@@ -412,8 +412,8 @@ sub read_makefile
 	my $t = $/;
 
 	undef $/;
-	open($F, '<', "$reldir/GNUmakefile")
-	  || open($F, '<', "$reldir/Makefile")
+	open($F, "$reldir/GNUmakefile")
+	  || open($F, "$reldir/Makefile")
 	  || confess "Could not open $reldir/Makefile\n";
 	my $txt = <$F>;
 	close($F);
diff --git a/src/tools/msvc/Solution.pm b/src/tools/msvc/Solution.pm
index abac2c7..ff9064f 100644
--- a/src/tools/msvc/Solution.pm
+++ b/src/tools/msvc/Solution.pm
@@ -102,14 +102,14 @@ sub IsNewer
 sub copyFile
 {
 	my ($src, $dest) = @_;
-	open(my $i, '<', $src)  || croak "Could not open $src";
-	open(my $o, '>', $dest) || croak "Could not open $dest";
-	while (<$i>)
+	open(I, $src)     || croak "Could not open $src";
+	open(O, ">$dest") || croak "Could not open $dest";
+	while (<I>)
 	{
-		print $o $_;
+		print O;
 	}
-	close($i);
-	close($o);
+	close(I);
+	close(O);
 }
 
 sub GenerateFiles
@@ -118,9 +118,9 @@ sub GenerateFiles
 	my $bits = $self->{platform} eq 'Win32' ? 32 : 64;
 
 	# Parse configure.in to get version numbers
-	open(my $c, '<', "configure.in")
+	open(C, "configure.in")
 	  || confess("Could not open configure.in for reading\n");
-	while (<$c>)
+	while (<C>)
 	{
 		if (/^AC_INIT\(\[PostgreSQL\], \[([^\]]+)\]/)
 		{
@@ -133,7 +133,7 @@ sub GenerateFiles
 			$self->{majorver} = sprintf("%d", $1);
 		}
 	}
-	close($c);
+	close(C);
 	confess "Unable to parse configure.in for all variables!"
 	  if ($self->{strver} eq '' || $self->{numver} eq '');
 
@@ -146,91 +146,91 @@ sub GenerateFiles
 	if (IsNewer("src/include/pg_config.h", "src/include/pg_config.h.win32"))
 	{
 		print "Generating pg_config.h...\n";
-		open(my $i, '<', "src/include/pg_config.h.win32")
+		open(I, "src/include/pg_config.h.win32")
 		  || confess "Could not open pg_config.h.win32\n";
-		open(my $o, '>', "src/include/pg_config.h")
+		open(O, ">src/include/pg_config.h")
 		  || confess "Could not write to pg_config.h\n";
 		my $extraver = $self->{options}->{extraver};
 		$extraver = '' unless defined $extraver;
-		while (<$i>)
+		while (<I>)
 		{
 			s{PG_VERSION "[^"]+"}{PG_VERSION "$self->{strver}$extraver"};
 			s{PG_VERSION_NUM \d+}{PG_VERSION_NUM $self->{numver}};
 s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY(z)\n#define PG_VERSION_STR "PostgreSQL $self->{strver}$extraver, compiled by Visual C++ build " __STRINGIFY2(_MSC_VER) ", $bits-bit"};
-			print $o $_;
+			print O;
 		}
-		print $o "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
-		print $o "#define LOCALEDIR \"/share/locale\"\n"
+		print O "#define PG_MAJORVERSION \"$self->{majorver}\"\n";
+		print O "#define LOCALEDIR \"/share/locale\"\n"
 		  if ($self->{options}->{nls});
-		print $o "/* defines added by config steps */\n";
-		print $o "#ifndef IGNORE_CONFIGURED_SETTINGS\n";
-		print $o "#define USE_ASSERT_CHECKING 1\n"
+		print O "/* defines added by config steps */\n";
+		print O "#ifndef IGNORE_CONFIGURED_SETTINGS\n";
+		print O "#define USE_ASSERT_CHECKING 1\n"
 		  if ($self->{options}->{asserts});
-		print $o "#define USE_LDAP 1\n"    if ($self->{options}->{ldap});
-		print $o "#define HAVE_LIBZ 1\n"   if ($self->{options}->{zlib});
-		print $o "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
-		print $o "#define ENABLE_NLS 1\n"  if ($self->{options}->{nls});
+		print O "#define USE_LDAP 1\n"    if ($self->{options}->{ldap});
+		print O "#define HAVE_LIBZ 1\n"   if ($self->{options}->{zlib});
+		print O "#define USE_OPENSSL 1\n" if ($self->{options}->{openssl});
+		print O "#define ENABLE_NLS 1\n"  if ($self->{options}->{nls});
 
-		print $o "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
-		print $o "#define RELSEG_SIZE ",
+		print O "#define BLCKSZ ", 1024 * $self->{options}->{blocksize}, "\n";
+		print O "#define RELSEG_SIZE ",
 		  (1024 / $self->{options}->{blocksize}) *
 		  $self->{options}->{segsize} *
 		  1024, "\n";
-		print $o "#define XLOG_BLCKSZ ",
+		print O "#define XLOG_BLCKSZ ",
 		  1024 * $self->{options}->{wal_blocksize}, "\n";
-		print $o "#define XLOG_SEG_SIZE (", $self->{options}->{wal_segsize},
+		print O "#define XLOG_SEG_SIZE (", $self->{options}->{wal_segsize},
 		  " * 1024 * 1024)\n";
 
 		if ($self->{options}->{float4byval})
 		{
-			print $o "#define USE_FLOAT4_BYVAL 1\n";
-			print $o "#define FLOAT4PASSBYVAL true\n";
+			print O "#define USE_FLOAT4_BYVAL 1\n";
+			print O "#define FLOAT4PASSBYVAL true\n";
 		}
 		else
 		{
-			print $o "#define FLOAT4PASSBYVAL false\n";
+			print O "#define FLOAT4PASSBYVAL false\n";
 		}
 		if ($self->{options}->{float8byval})
 		{
-			print $o "#define USE_FLOAT8_BYVAL 1\n";
-			print $o "#define FLOAT8PASSBYVAL true\n";
+			print O "#define USE_FLOAT8_BYVAL 1\n";
+			print O "#define FLOAT8PASSBYVAL true\n";
 		}
 		else
 		{
-			print $o "#define FLOAT8PASSBYVAL false\n";
+			print O "#define FLOAT8PASSBYVAL false\n";
 		}
 
 		if ($self->{options}->{uuid})
 		{
-			print $o "#define HAVE_UUID_OSSP\n";
-			print $o "#define HAVE_UUID_H\n";
+			print O "#define HAVE_UUID_OSSP\n";
+			print O "#define HAVE_UUID_H\n";
 		}
 		if ($self->{options}->{xml})
 		{
-			print $o "#define HAVE_LIBXML2\n";
-			print $o "#define USE_LIBXML\n";
+			print O "#define HAVE_LIBXML2\n";
+			print O "#define USE_LIBXML\n";
 		}
 		if ($self->{options}->{xslt})
 		{
-			print $o "#define HAVE_LIBXSLT\n";
-			print $o "#define USE_LIBXSLT\n";
+			print O "#define HAVE_LIBXSLT\n";
+			print O "#define USE_LIBXSLT\n";
 		}
 		if ($self->{options}->{gss})
 		{
-			print $o "#define ENABLE_GSS 1\n";
+			print O "#define ENABLE_GSS 1\n";
 		}
 		if (my $port = $self->{options}->{"--with-pgport"})
 		{
-			print $o "#undef DEF_PGPORT\n";
-			print $o "#undef DEF_PGPORT_STR\n";
-			print $o "#define DEF_PGPORT $port\n";
-			print $o "#define DEF_PGPORT_STR \"$port\"\n";
+			print O "#undef DEF_PGPORT\n";
+			print O "#undef DEF_PGPORT_STR\n";
+			print O "#define DEF_PGPORT $port\n";
+			print O "#define DEF_PGPORT_STR \"$port\"\n";
 		}
-		print $o "#define VAL_CONFIGURE \""
+		print O "#define VAL_CONFIGURE \""
 		  . $self->GetFakeConfigure() . "\"\n";
-		print $o "#endif /* IGNORE_CONFIGURED_SETTINGS */\n";
-		close($o);
-		close($i);
+		print O "#endif /* IGNORE_CONFIGURED_SETTINGS */\n";
+		close(O);
+		close(I);
 	}
 
 	if (IsNewer(
@@ -379,17 +379,17 @@ s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY
 		my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) =
 		  localtime(time);
 		my $d = ($year - 100) . "$yday";
-		open(my $i, '<', 'src/interfaces/libpq/libpq.rc.in')
+		open(I, '<', 'src/interfaces/libpq/libpq.rc.in')
 		  || confess "Could not open libpq.rc.in";
-		open(my $o, '>', 'src/interfaces/libpq/libpq.rc')
+		open(O, '>', 'src/interfaces/libpq/libpq.rc')
 		  || confess "Could not open libpq.rc";
-		while (<$i>)
+		while (<I>)
 		{
 			s/(VERSION.*),0/$1,$d/;
-			print $o;
+			print O;
 		}
-		close($i);
-		close($o);
+		close(I);
+		close(O);
 	}
 
 	if (IsNewer('src/bin/psql/sql_help.h', 'src/bin/psql/create_help.pl'))
@@ -415,23 +415,23 @@ s{PG_VERSION_STR "[^"]+"}{__STRINGIFY(x) #x\n#define __STRINGIFY2(z) __STRINGIFY
 			'src/interfaces/ecpg/include/ecpg_config.h.in'))
 	{
 		print "Generating ecpg_config.h...\n";
-		open(my $o, '>', 'src/interfaces/ecpg/include/ecpg_config.h')
+		open(O, '>', 'src/interfaces/ecpg/include/ecpg_config.h')
 		  || confess "Could not open ecpg_config.h";
-		print $o <<EOF;
+		print O <<EOF;
 #if (_MSC_VER > 1200)
 #define HAVE_LONG_LONG_INT_64
 #define ENABLE_THREAD_SAFETY 1
 EOF
-		print $o "#endif\n";
-		close($o);
+		print O "#endif\n";
+		close(O);
 	}
 
 	unless (-f "src/port/pg_config_paths.h")
 	{
 		print "Generating pg_config_paths.h...\n";
-		open(my $o, '>', 'src/port/pg_config_paths.h')
+		open(O, '>', 'src/port/pg_config_paths.h')
 		  || confess "Could not open pg_config_paths.h";
-		print $o <<EOF;
+		print O <<EOF;
 #define PGBINDIR "/bin"
 #define PGSHAREDIR "/share"
 #define SYSCONFDIR "/etc"
@@ -445,7 +445,7 @@ EOF
 #define HTMLDIR "/doc"
 #define MANDIR "/man"
 EOF
-		close($o);
+		close(O);
 	}
 
 	my $mf = Project::read_file('src/backend/catalog/Makefile');
@@ -474,13 +474,13 @@ EOF
 		}
 	}
 
-	open(my $o, '>', "doc/src/sgml/version.sgml")
+	open(O, ">doc/src/sgml/version.sgml")
 	  || croak "Could not write to version.sgml\n";
-	print $o <<EOF;
+	print O <<EOF;
 <!ENTITY version "$self->{strver}">
 <!ENTITY majorversion "$self->{majorver}">
 EOF
-	close($o);
+	close(O);
 }
 
 sub GenerateDefFile
@@ -490,18 +490,18 @@ sub GenerateDefFile
 	if (IsNewer($deffile, $txtfile))
 	{
 		print "Generating $deffile...\n";
-		open(my $if, '<', $txtfile) || confess("Could not open $txtfile\n");
-		open(my $of, '>', $deffile) || confess("Could not open $deffile\n");
-		print $of "LIBRARY $libname\nEXPORTS\n";
-		while (<$if>)
+		open(I, $txtfile)    || confess("Could not open $txtfile\n");
+		open(O, ">$deffile") || confess("Could not open $deffile\n");
+		print O "LIBRARY $libname\nEXPORTS\n";
+		while (<I>)
 		{
 			next if (/^#/);
 			next if (/^\s*$/);
 			my ($f, $o) = split;
-			print $of " $f @ $o\n";
+			print O " $f @ $o\n";
 		}
-		close($of);
-		close($if);
+		close(O);
+		close(I);
 	}
 }
 
@@ -575,19 +575,19 @@ sub Save
 		}
 	}
 
-	open(my $sln, '>', "pgsql.sln") || croak "Could not write to pgsql.sln\n";
-	print $sln <<EOF;
+	open(SLN, ">pgsql.sln") || croak "Could not write to pgsql.sln\n";
+	print SLN <<EOF;
 Microsoft Visual Studio Solution File, Format Version $self->{solutionFileVersion}
 # $self->{visualStudioName}
 EOF
 
-	print $sln $self->GetAdditionalHeaders();
+	print SLN $self->GetAdditionalHeaders();
 
 	foreach my $fld (keys %{ $self->{projects} })
 	{
 		foreach my $proj (@{ $self->{projects}->{$fld} })
 		{
-			print $sln <<EOF;
+			print SLN <<EOF;
 Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "$proj->{name}", "$proj->{name}$proj->{filenameExtension}", "$proj->{guid}"
 EndProject
 EOF
@@ -595,14 +595,14 @@ EOF
 		if ($fld ne "")
 		{
 			$flduid{$fld} = Win32::GuidGen();
-			print $sln <<EOF;
+			print SLN <<EOF;
 Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "$fld", "$fld", "$flduid{$fld}"
 EndProject
 EOF
 		}
 	}
 
-	print $sln <<EOF;
+	print SLN <<EOF;
 Global
 	GlobalSection(SolutionConfigurationPlatforms) = preSolution
 		Debug|$self->{platform}= Debug|$self->{platform}
@@ -615,7 +615,7 @@ EOF
 	{
 		foreach my $proj (@{ $self->{projects}->{$fld} })
 		{
-			print $sln <<EOF;
+			print SLN <<EOF;
 		$proj->{guid}.Debug|$self->{platform}.ActiveCfg = Debug|$self->{platform}
 		$proj->{guid}.Debug|$self->{platform}.Build.0  = Debug|$self->{platform}
 		$proj->{guid}.Release|$self->{platform}.ActiveCfg = Release|$self->{platform}
@@ -624,7 +624,7 @@ EOF
 		}
 	}
 
-	print $sln <<EOF;
+	print SLN <<EOF;
 	EndGlobalSection
 	GlobalSection(SolutionProperties) = preSolution
 		HideSolutionNode = FALSE
@@ -637,15 +637,15 @@ EOF
 		next if ($fld eq "");
 		foreach my $proj (@{ $self->{projects}->{$fld} })
 		{
-			print $sln "\t\t$proj->{guid} = $flduid{$fld}\n";
+			print SLN "\t\t$proj->{guid} = $flduid{$fld}\n";
 		}
 	}
 
-	print $sln <<EOF;
+	print SLN <<EOF;
 	EndGlobalSection
 EndGlobal
 EOF
-	close($sln);
+	close(SLN);
 }
 
 sub GetFakeConfigure
diff --git a/src/tools/msvc/build.pl b/src/tools/msvc/build.pl
index 7246064..2e7c548 100644
--- a/src/tools/msvc/build.pl
+++ b/src/tools/msvc/build.pl
@@ -23,17 +23,17 @@ use Mkvcbuild;
 
 if (-e "src/tools/msvc/buildenv.pl")
 {
-	do "src/tools/msvc/buildenv.pl";
+	require "src/tools/msvc/buildenv.pl";
 }
 elsif (-e "./buildenv.pl")
 {
-	do "./buildenv.pl";
+	require "./buildenv.pl";
 }
 
 # set up the project
 our $config;
-do "config_default.pl";
-do "config.pl" if (-f "src/tools/msvc/config.pl");
+require "config_default.pl";
+require "config.pl" if (-f "src/tools/msvc/config.pl");
 
 my $vcver = Mkvcbuild::mkvcbuild($config);
 
diff --git a/src/tools/msvc/builddoc.pl b/src/tools/msvc/builddoc.pl
index e0b5c50..2b56ced 100644
--- a/src/tools/msvc/builddoc.pl
+++ b/src/tools/msvc/builddoc.pl
@@ -18,7 +18,7 @@ chdir '../../..' if (-d '../msvc' && -d '../../../src');
 
 noversion() unless -e 'doc/src/sgml/version.sgml';
 
-do 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
+require 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
 
 my $docroot = $ENV{DOCROOT};
 die "bad DOCROOT '$docroot'" unless ($docroot && -d $docroot);
diff --git a/src/tools/msvc/gendef.pl b/src/tools/msvc/gendef.pl
index 64227c2..3bcff7f 100644
--- a/src/tools/msvc/gendef.pl
+++ b/src/tools/msvc/gendef.pl
@@ -32,8 +32,8 @@ sub dumpsyms
 sub extract_syms
 {
 	my ($symfile, $def) = @_;
-	open(my $f, '<', $symfile) || die "Could not open $symfile for $_\n";
-	while (<$f>)
+	open(F, "<$symfile") || die "Could not open $symfile for $_\n";
+	while (<F>)
 	{
 
 	# Expected symbol lines look like:
@@ -115,14 +115,14 @@ sub extract_syms
 		# whatever came last.
 		$def->{ $pieces[6] } = $pieces[3];
 	}
-	close($f);
+	close(F);
 }
 
 sub writedef
 {
 	my ($deffile, $platform, $def) = @_;
-	open(my $fh, '>', $deffile) || die "Could not write to $deffile\n";
-	print $fh "EXPORTS\n";
+	open(DEF, ">$deffile") || die "Could not write to $deffile\n";
+	print DEF "EXPORTS\n";
 	foreach my $f (sort keys %{$def})
 	{
 		my $isdata = $def->{$f} eq 'data';
@@ -135,14 +135,14 @@ sub writedef
 		# decorated with the DATA option for variables.
 		if ($isdata)
 		{
-			print $fh "  $f DATA\n";
+			print DEF "  $f DATA\n";
 		}
 		else
 		{
-			print $fh "  $f\n";
+			print DEF "  $f\n";
 		}
 	}
-	close($fh);
+	close(DEF);
 }
 
 
@@ -174,7 +174,7 @@ print "Generating $defname.DEF from directory $ARGV[0], platform $platform\n";
 
 my %def = ();
 
-while (<$ARGV[0]/*.obj>)  ## no critic (RequireGlobFunction);
+while (<$ARGV[0]/*.obj>)
 {
 	my $objfile = $_;
 	my $symfile = $objfile;
diff --git a/src/tools/msvc/install.pl b/src/tools/msvc/install.pl
index b2d7f9e..bde5b7c 100755
--- a/src/tools/msvc/install.pl
+++ b/src/tools/msvc/install.pl
@@ -14,11 +14,11 @@ use Install qw(Install);
 
 if (-e "src/tools/msvc/buildenv.pl")
 {
-	do "src/tools/msvc/buildenv.pl";
+	require "src/tools/msvc/buildenv.pl";
 }
 elsif (-e "./buildenv.pl")
 {
-	do "./buildenv.pl";
+	require "./buildenv.pl";
 }
 
 my $target = shift || Usage();
diff --git a/src/tools/msvc/mkvcbuild.pl b/src/tools/msvc/mkvcbuild.pl
index 9255dff..6f1c42e 100644
--- a/src/tools/msvc/mkvcbuild.pl
+++ b/src/tools/msvc/mkvcbuild.pl
@@ -19,7 +19,7 @@ print "Warning: no config.pl found, using default.\n"
   unless (-f 'src/tools/msvc/config.pl');
 
 our $config;
-do 'src/tools/msvc/config_default.pl';
-do 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
+require 'src/tools/msvc/config_default.pl';
+require 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
 
 Mkvcbuild::mkvcbuild($config);
diff --git a/src/tools/msvc/pgbison.pl b/src/tools/msvc/pgbison.pl
index e799d90..31e7540 100644
--- a/src/tools/msvc/pgbison.pl
+++ b/src/tools/msvc/pgbison.pl
@@ -7,7 +7,7 @@ use File::Basename;
 
 # assume we are in the postgres source root
 
-do 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
+require 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
 
 my ($bisonver) = `bison -V`;    # grab first line
 $bisonver = (split(/\s+/, $bisonver))[3];    # grab version number
@@ -38,7 +38,7 @@ $output =~ s/gram\.c$/pl_gram.c/ if $input =~ /src.pl.plpgsql.src.gram\.y$/;
 
 my $makefile = dirname($input) . "/Makefile";
 my ($mf, $make);
-open($mf, '<', $makefile);
+open($mf, $makefile);
 local $/ = undef;
 $make = <$mf>;
 close($mf);
diff --git a/src/tools/msvc/pgflex.pl b/src/tools/msvc/pgflex.pl
index 67397ba..fab0efa 100644
--- a/src/tools/msvc/pgflex.pl
+++ b/src/tools/msvc/pgflex.pl
@@ -10,7 +10,7 @@ $ENV{CYGWIN} = 'nodosfilewarning';
 
 # assume we are in the postgres source root
 
-do 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
+require 'src/tools/msvc/buildenv.pl' if -e 'src/tools/msvc/buildenv.pl';
 
 my ($flexver) = `flex -V`;    # grab first line
 $flexver = (split(/\s+/, $flexver))[1];
@@ -41,7 +41,7 @@ elsif (!-e $input)
 # get flex flags from make file
 my $makefile = dirname($input) . "/Makefile";
 my ($mf, $make);
-open($mf, '<', $makefile);
+open($mf, $makefile);
 local $/ = undef;
 $make = <$mf>;
 close($mf);
@@ -53,7 +53,7 @@ if ($? == 0)
 {
 	# Check for "%option reentrant" in .l file.
 	my $lfile;
-	open($lfile, '<', $input) || die "opening $input for reading: $!";
+	open($lfile, $input) || die "opening $input for reading: $!";
 	my $lcode = <$lfile>;
 	close($lfile);
 	if ($lcode =~ /\%option\sreentrant/)
@@ -69,18 +69,18 @@ if ($? == 0)
 		# For reentrant scanners (like the core scanner) we do not
 		# need to (and must not) change the yywrap definition.
 		my $cfile;
-		open($cfile, '<', $output) || die "opening $output for reading: $!";
+		open($cfile, $output) || die "opening $output for reading: $!";
 		my $ccode = <$cfile>;
 		close($cfile);
 		$ccode =~ s/yywrap\(n\)/yywrap()/;
-		open($cfile, '>', $output) || die "opening $output for writing: $!";
+		open($cfile, ">$output") || die "opening $output for writing: $!";
 		print $cfile $ccode;
 		close($cfile);
 	}
 	if ($flexflags =~ /\s-b\s/)
 	{
 		my $lexback = "lex.backup";
-		open($lfile, '<', $lexback) || die "opening $lexback for reading: $!";
+		open($lfile, $lexback) || die "opening $lexback for reading: $!";
 		my $lexbacklines = <$lfile>;
 		close($lfile);
 		my $linecount = $lexbacklines =~ tr /\n/\n/;
diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl
index d9367f8..f1b9819 100644
--- a/src/tools/msvc/vcregress.pl
+++ b/src/tools/msvc/vcregress.pl
@@ -20,8 +20,8 @@ chdir "../../.." if (-d "../../../src/tools/msvc");
 my $topdir         = getcwd();
 my $tmp_installdir = "$topdir/tmp_install";
 
-do 'src/tools/msvc/config_default.pl';
-do 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
+require 'src/tools/msvc/config_default.pl';
+require 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
 
 # buildenv.pl is for specifying the build environment settings
 # it should contain lines like:
@@ -29,7 +29,7 @@ do 'src/tools/msvc/config.pl' if (-f 'src/tools/msvc/config.pl');
 
 if (-e "src/tools/msvc/buildenv.pl")
 {
-	do "src/tools/msvc/buildenv.pl";
+	require "src/tools/msvc/buildenv.pl";
 }
 
 my $what = shift || "";
@@ -505,8 +505,8 @@ sub upgradecheck
 sub fetchRegressOpts
 {
 	my $handle;
-	open($handle, '<', "GNUmakefile")
-	  || open($handle, '<', "Makefile")
+	open($handle, "<GNUmakefile")
+	  || open($handle, "<Makefile")
 	  || die "Could not open Makefile";
 	local ($/) = undef;
 	my $m = <$handle>;
@@ -521,9 +521,8 @@ sub fetchRegressOpts
 		# an unhandled variable reference.  Ignore anything that isn't an
 		# option starting with "--".
 		@opts = grep {
-			my $x = $_;
-			$x =~ s/\Q$(top_builddir)\E/\"$topdir\"/;
-			$x !~ /\$\(/ && $x =~ /^--/
+			s/\Q$(top_builddir)\E/\"$topdir\"/;
+			$_ !~ /\$\(/ && $_ =~ /^--/
 		} split(/\s+/, $1);
 	}
 	if ($m =~ /^\s*ENCODING\s*=\s*(\S+)/m)
@@ -541,8 +540,8 @@ sub fetchTests
 {
 
 	my $handle;
-	open($handle, '<', "GNUmakefile")
-	  || open($handle, '<', "Makefile")
+	open($handle, "<GNUmakefile")
+	  || open($handle, "<Makefile")
 	  || die "Could not open Makefile";
 	local ($/) = undef;
 	my $m = <$handle>;
diff --git a/src/tools/pginclude/pgcheckdefines b/src/tools/pginclude/pgcheckdefines
index aa7c9c2..e166efa 100755
--- a/src/tools/pginclude/pgcheckdefines
+++ b/src/tools/pginclude/pgcheckdefines
@@ -42,25 +42,25 @@ my $MAKE = "make";
 #
 my (@cfiles, @hfiles);
 
-open my $pipe, '-|', "$FIND * -type f -name '*.c'"
+open PIPE, "$FIND * -type f -name '*.c' |"
   or die "can't fork: $!";
-while (<$pipe>)
+while (<PIPE>)
 {
 	chomp;
 	push @cfiles, $_;
 }
-close $pipe or die "$FIND failed: $!";
+close PIPE or die "$FIND failed: $!";
 
-open $pipe, '-|', "$FIND * -type f -name '*.h'"
+open PIPE, "$FIND * -type f -name '*.h' |"
   or die "can't fork: $!";
-while (<$pipe>)
+while (<PIPE>)
 {
 	chomp;
 	push @hfiles, $_
 	  unless m|^src/include/port/|
 		  || m|^src/backend/port/\w+/|;
 }
-close $pipe or die "$FIND failed: $!";
+close PIPE or die "$FIND failed: $!";
 
 #
 # For each .h file, extract all the symbols it #define's, and add them to
@@ -71,16 +71,16 @@ my %defines;
 
 foreach my $hfile (@hfiles)
 {
-	open my $fh, '<', $hfile
+	open HFILE, $hfile
 	  or die "can't open $hfile: $!";
-	while (<$fh>)
+	while (<HFILE>)
 	{
 		if (m/^\s*#\s*define\s+(\w+)/)
 		{
 			$defines{$1}{$hfile} = 1;
 		}
 	}
-	close $fh;
+	close HFILE;
 }
 
 #
@@ -124,9 +124,9 @@ foreach my $file (@hfiles, @cfiles)
 
 	my ($CPPFLAGS, $CFLAGS, $CFLAGS_SL, $PTHREAD_CFLAGS, $CC);
 
-	open $pipe, '-|', "$MAKECMD"
+	open PIPE, "$MAKECMD |"
 	  or die "can't fork: $!";
-	while (<$pipe>)
+	while (<PIPE>)
 	{
 		if (m/^CPPFLAGS :?= (.*)/)
 		{
@@ -166,9 +166,9 @@ foreach my $file (@hfiles, @cfiles)
 	#
 	my @includes = ();
 	my $COMPILE  = "$CC $CPPFLAGS $CFLAGS -H -E $fname";
-	open $pipe, '-|', "$COMPILE 2>&1 >/dev/null"
+	open PIPE, "$COMPILE 2>&1 >/dev/null |"
 	  or die "can't fork: $!";
-	while (<$pipe>)
+	while (<PIPE>)
 	{
 		if (m/^\.+ (.*)/)
 		{
@@ -211,10 +211,10 @@ foreach my $file (@hfiles, @cfiles)
 	# We assume #ifdef isn't continued across lines, and that defined(foo)
 	# isn't split across lines either
 	#
-	open my $fh, '<', $fname
+	open FILE, $fname
 	  or die "can't open $file: $!";
 	my $inif = 0;
-	while (<$fh>)
+	while (<FILE>)
 	{
 		my $line = $_;
 		if ($line =~ m/^\s*#\s*ifdef\s+(\w+)/)
@@ -241,7 +241,7 @@ foreach my $file (@hfiles, @cfiles)
 			}
 		}
 	}
-	close $fh;
+	close FILE;
 
 	chdir $topdir or die "can't chdir to $topdir: $!";
 }
diff --git a/src/tools/pgindent/pgindent b/src/tools/pgindent/pgindent
index 0f3a1ba..0d3859d 100755
--- a/src/tools/pgindent/pgindent
+++ b/src/tools/pgindent/pgindent
@@ -159,7 +159,8 @@ sub process_exclude
 		while (my $line = <$eh>)
 		{
 			chomp $line;
-			my $rgx = qr!$line!;
+			my $rgx;
+			eval " \$rgx = qr!$line!;";
 			@files = grep { $_ !~ /$rgx/ } @files if $rgx;
 		}
 		close($eh);
@@ -434,7 +435,7 @@ sub diff
 
 sub run_build
 {
-	eval "use LWP::Simple;";  ## no critic (ProhibitStringyEval);
+	eval "use LWP::Simple;";
 
 	my $code_base = shift || '.';
 	my $save_dir = getcwd();
diff --git a/src/tools/version_stamp.pl b/src/tools/version_stamp.pl
index f973dd9..dc9173f 100755
--- a/src/tools/version_stamp.pl
+++ b/src/tools/version_stamp.pl
@@ -80,8 +80,8 @@ my $padnumericversion = sprintf("%d%04d", $majorversion, $numericminor);
 # (this also ensures we're in the right directory)
 
 my $aconfver = "";
-open(my $fh, '<', "configure.in") || die "could not read configure.in: $!\n";
-while (<$fh>)
+open(FILE, "configure.in") || die "could not read configure.in: $!\n";
+while (<FILE>)
 {
 	if (
 m/^m4_if\(m4_defn\(\[m4_PACKAGE_VERSION\]\), \[(.*)\], \[\], \[m4_fatal/)
@@ -90,7 +90,7 @@ m/^m4_if\(m4_defn\(\[m4_PACKAGE_VERSION\]\), \[(.*)\], \[\], \[m4_fatal/)
 		last;
 	}
 }
-close($fh);
+close(FILE);
 $aconfver ne ""
   || die "could not find autoconf version number in configure.in\n";
 
diff --git a/src/tools/win32tzlist.pl b/src/tools/win32tzlist.pl
index 0bdcc36..6345465 100755
--- a/src/tools/win32tzlist.pl
+++ b/src/tools/win32tzlist.pl
@@ -58,11 +58,11 @@ $basekey->Close();
 # Fetch all timezones currently in the file
 #
 my @file_zones;
-open(my $tzfh, '<', $tzfile) or die "Could not open $tzfile!\n";
+open(TZFILE, "<$tzfile") or die "Could not open $tzfile!\n";
 my $t = $/;
 undef $/;
-my $pgtz = <$tzfh>;
-close($tzfh);
+my $pgtz = <TZFILE>;
+close(TZFILE);
 $/ = $t;
 
 # Attempt to locate and extract the complete win32_tzmap struct
diff --git a/src/tutorial/Makefile b/src/tutorial/Makefile
index 16dc390..0ead60c 100644
--- a/src/tutorial/Makefile
+++ b/src/tutorial/Makefile
@@ -13,8 +13,8 @@
 #
 #-------------------------------------------------------------------------
 
-MODULES = complex funcs
-DATA_built = advanced.sql basics.sql complex.sql funcs.sql syscat.sql
+MODULES = complex funcs subscripting
+DATA_built = advanced.sql basics.sql complex.sql funcs.sql syscat.sql subscripting.sql
 
 ifdef NO_PGXS
 subdir = src/tutorial
diff --git a/src/tutorial/subscripting.c b/src/tutorial/subscripting.c
new file mode 100644
index 0000000..bf47d23
--- /dev/null
+++ b/src/tutorial/subscripting.c
@@ -0,0 +1,172 @@
+/*
+ * src/tutorial/subscripting.c
+ *
+ ******************************************************************************
+  This file contains routines that can be bound to a Postgres backend and
+  called by the backend in the process of processing queries.  The calling
+  format for these routines is dictated by Postgres architecture.
+******************************************************************************/
+
+#include "postgres.h"
+
+#include "catalog/pg_type.h"
+#include "executor/executor.h"
+#include "executor/execExpr.h"
+#include "nodes/nodeFuncs.h"
+#include "parser/parse_coerce.h"
+#include "utils/builtins.h"
+#include "utils/fmgrprotos.h"
+
+PG_MODULE_MAGIC;
+
+typedef struct Custom
+{
+	int	first;
+	int	second;
+}	Custom;
+
+PG_FUNCTION_INFO_V1(custom_in);
+PG_FUNCTION_INFO_V1(custom_subscript_parse);
+PG_FUNCTION_INFO_V1(custom_subscripting_assign);
+PG_FUNCTION_INFO_V1(custom_subscripting_extract);
+
+/*****************************************************************************
+ * Input/Output functions
+ *****************************************************************************/
+
+Datum
+custom_in(PG_FUNCTION_ARGS)
+{
+	char	*str = PG_GETARG_CSTRING(0);
+	int		firstValue,
+			secondValue;
+	Custom	*result;
+
+	if (sscanf(str, " ( %d , %d )", &firstValue, &secondValue) != 2)
+		ereport(ERROR,
+				(errcode(ERRCODE_INVALID_TEXT_REPRESENTATION),
+				 errmsg("invalid input syntax for complex: \"%s\"",
+						str)));
+
+
+	result = (Custom *) palloc(sizeof(Custom));
+	result->first = firstValue;
+	result->second = secondValue;
+	PG_RETURN_POINTER(result);
+}
+
+PG_FUNCTION_INFO_V1(custom_out);
+
+Datum
+custom_out(PG_FUNCTION_ARGS)
+{
+	Custom	*custom = (Custom *) PG_GETARG_POINTER(0);
+	char	*result;
+
+	result = psprintf("(%d, %d)", custom->first, custom->second);
+	PG_RETURN_CSTRING(result);
+}
+
+/*****************************************************************************
+ * Custom subscripting logic functions
+ *****************************************************************************/
+
+Datum
+custom_subscripting_assign(PG_FUNCTION_ARGS)
+{
+	Custom						*containerSource = (Custom *) PG_GETARG_DATUM(0);
+	ExprEvalStep				*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+
+	SubscriptingRefState		*sbstate = step->d.sbsref.state;
+	int							index;
+
+	if (sbstate->numupper != 1)
+		ereport(ERROR, (errmsg("custom does not support nested subscripting")));
+
+	index = DatumGetInt32(sbstate->upper[0]);
+
+	if (index == 1)
+		containerSource->first = DatumGetInt32(sbstate->replacevalue);
+	else
+		containerSource->second = DatumGetInt32(sbstate->replacevalue);
+
+	PG_RETURN_POINTER(containerSource);
+}
+
+
+Datum
+custom_subscripting_extract(PG_FUNCTION_ARGS)
+{
+	Custom					*containerSource = (Custom *) PG_GETARG_DATUM(0);
+	ExprEvalStep			*step = (ExprEvalStep *) PG_GETARG_POINTER(1);
+	SubscriptingRefState	*sbstate = step->d.sbsref.state;
+
+	int						index;
+
+	if (sbstate->numupper != 1)
+		ereport(ERROR, (errmsg("custom does not support nested subscripting")));
+
+	index = DatumGetInt32(sbstate->upper[0]);
+
+	if (index == 1)
+		PG_RETURN_INT32(containerSource->first);
+	else
+		PG_RETURN_INT32(containerSource->second);
+}
+
+Datum
+custom_subscript_parse(PG_FUNCTION_ARGS)
+{
+	bool				isAssignment = PG_GETARG_BOOL(0);
+	SubscriptingRef	   *sbsref = (SubscriptingRef *) PG_GETARG_POINTER(1);
+	ParseState		   *pstate = (ParseState *) PG_GETARG_POINTER(2);
+	List			   *upperIndexpr = NIL;
+	ListCell		   *l;
+	Datum				assign_proc = CStringGetTextDatum("custom_subscripting_assign");
+	Datum				extract_proc = CStringGetTextDatum("custom_subscripting_extract");
+
+	if (sbsref->reflowerindexpr != NIL)
+		ereport(ERROR,
+				(errcode(ERRCODE_DATATYPE_MISMATCH),
+				 errmsg("custom subscript does not support slices"),
+				 parser_errposition(pstate, exprLocation(
+						 ((Node *)lfirst(sbsref->reflowerindexpr->head))))));
+
+	foreach(l, sbsref->refupperindexpr)
+	{
+		Node *subexpr = (Node *) lfirst(l);
+
+		Assert(subexpr != NULL);
+
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("custom subscript does not support slices"),
+					 parser_errposition(pstate, exprLocation(
+						((Node *) lfirst(sbsref->refupperindexpr->head))))));
+
+		subexpr = coerce_to_target_type(pstate,
+										subexpr, exprType(subexpr),
+										INT4OID, -1,
+										COERCION_ASSIGNMENT,
+										COERCE_IMPLICIT_CAST,
+										-1);
+		if (subexpr == NULL)
+			ereport(ERROR,
+					(errcode(ERRCODE_DATATYPE_MISMATCH),
+					 errmsg("custom subscript must have int type"),
+					 parser_errposition(pstate, exprLocation(subexpr))));
+
+		upperIndexpr = lappend(upperIndexpr, subexpr);
+	}
+
+	sbsref->refupperindexpr = upperIndexpr;
+	sbsref->refelemtype = INT4OID;
+
+	if (isAssignment)
+		sbsref->refevalfunc = DirectFunctionCall1(to_regproc, assign_proc);
+	else
+		sbsref->refevalfunc = DirectFunctionCall1(to_regproc, extract_proc);
+
+	PG_RETURN_POINTER(sbsref);
+}
diff --git a/src/tutorial/subscripting.source b/src/tutorial/subscripting.source
new file mode 100644
index 0000000..8182229
--- /dev/null
+++ b/src/tutorial/subscripting.source
@@ -0,0 +1,81 @@
+---------------------------------------------------------------------------
+--
+-- subscripting.sql-
+--    This file shows how to create a new subscripting procedure for
+--    a user-defined type.
+--
+--
+-- Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
+-- Portions Copyright (c) 1994, Regents of the University of California
+--
+-- src/tutorial/subscripting.source
+--
+---------------------------------------------------------------------------
+
+-----------------------------
+-- Creating a new type:
+--	We are going to create a new type called 'custom', which stores
+--	a pair of integers and supports custom subscripting.
+--	A user-defined type must have an input and an output function, and
+--	optionally can have binary input and output functions.  All of these
+--	are usually user-defined C functions.
+-----------------------------
+
+-- Assume the user defined functions are in /home/erthalion/programms/postgresql-master/src/tutorial/subscripting$DLSUFFIX
+-- (we do not want to assume this is in the dynamic loader search path).
+-- Look at $PWD/subscripting.c for the source.  Note that we declare all of
+-- them as STRICT, so we do not need to cope with NULL inputs in the
+-- C code.  We also mark them IMMUTABLE, since they always return the
+-- same outputs given the same inputs.
+
+-- the input function 'custom_in' takes a null-terminated string (the
+-- textual representation of the type) and turns it into the internal
+-- (in memory) representation. You will get a message telling you 'custom'
+-- does not exist yet but that's okay.
+
+CREATE FUNCTION custom_in(cstring)
+   RETURNS custom
+   AS '_OBJWD_/subscripting'
+   LANGUAGE C IMMUTABLE STRICT;
+
+-- the output function 'custom_out' takes the internal representation and
+-- converts it into the textual representation.
+
+CREATE FUNCTION custom_out(custom)
+   RETURNS cstring
+   AS '_OBJWD_/subscripting'
+   LANGUAGE C IMMUTABLE STRICT;
+
+CREATE FUNCTION custom_subscript_parse(internal)
+   RETURNS internal
+   AS '_OBJWD_/subscripting'
+   LANGUAGE C IMMUTABLE STRICT;
+
+CREATE FUNCTION custom_subscripting_extract(internal)
+   RETURNS internal
+   AS '_OBJWD_/subscripting'
+   LANGUAGE C IMMUTABLE STRICT;
+
+CREATE FUNCTION custom_subscripting_assign(internal)
+   RETURNS internal
+   AS '_OBJWD_/subscripting'
+   LANGUAGE C IMMUTABLE STRICT;
+
+CREATE TYPE custom (
+   internallength = 8,
+   input = custom_in,
+   output = custom_out,
+   subscripting = custom_subscript_parse
+);
+
+-- we can use it in a table
+
+CREATE TABLE test_subscripting (
+	data	custom
+);
+
+INSERT INTO test_subscripting VALUES ('(1, 2)');
+
+SELECT data[0] from test_subscripting;
+
+UPDATE test_subscripting SET data[1] = 3;
