From 18f406c16e5ebeaaf4a24c5b5a57a8358a91afb4 Mon Sep 17 00:00:00 2001
From: Justin Pryzby <pryzbyj@telsasoft.com>
Date: Fri, 17 Feb 2023 19:51:42 -0600
Subject: [PATCH] wip: pg_upgrade data structure

---
 src/bin/pg_upgrade/check.c      | 929 ++++++++++++++------------------
 src/bin/pg_upgrade/pg_upgrade.h |  10 +-
 src/bin/pg_upgrade/version.c    | 256 ++++-----
 3 files changed, 517 insertions(+), 678 deletions(-)

diff --git a/src/bin/pg_upgrade/check.c b/src/bin/pg_upgrade/check.c
index 189aa51c4f8..5a5f69e789b 100644
--- a/src/bin/pg_upgrade/check.c
+++ b/src/bin/pg_upgrade/check.c
@@ -23,17 +23,17 @@ static bool equivalent_locale(int category, const char *loca, const char *locb);
 static void check_is_install_user(ClusterInfo *cluster);
 static void check_proper_datallowconn(ClusterInfo *cluster);
 static void check_for_prepared_transactions(ClusterInfo *cluster);
-static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster);
-static void check_for_user_defined_postfix_ops(ClusterInfo *cluster);
-static void check_for_incompatible_polymorphics(ClusterInfo *cluster);
-static void check_for_tables_with_oids(ClusterInfo *cluster);
-static void check_for_composite_data_type_usage(ClusterInfo *cluster);
-static void check_for_reg_data_type_usage(ClusterInfo *cluster);
-static void check_for_aclitem_data_type_usage(ClusterInfo *cluster);
-static void check_for_jsonb_9_4_usage(ClusterInfo *cluster);
+static bool check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_user_defined_postfix_ops(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_incompatible_polymorphics(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_tables_with_oids(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_composite_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_reg_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_aclitem_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+static bool check_for_jsonb_9_4_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
 static void check_for_pg_role_prefix(ClusterInfo *cluster);
 static void check_for_new_tablespace_dir(ClusterInfo *new_cluster);
-static void check_for_user_defined_encoding_conversions(ClusterInfo *cluster);
+static bool check_for_user_defined_encoding_conversions(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
 static char *get_canonical_locale_name(int category, const char *locale);
 
 
@@ -88,6 +88,127 @@ check_and_dump_old_cluster(bool live_check)
 {
 	/* -- OLD -- */
 
+	struct checks {
+		int maxmajor; /* Last major version that needs this test */
+		bool (*fn)(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+		char *scriptname;
+		char *fataltext;
+		/* filled in later: */
+		FILE *scriptfp;
+		bool failedtests;
+	} checks[] = {
+		{0, check_for_composite_data_type_usage,
+			 "tables_using_composite.txt",
+			 .fataltext = "Your installation contains system-defined composite type(s) in user tables.\n"
+				 "These type OIDs are not stable across PostgreSQL versions,\n"
+				 "so this cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{0, check_for_reg_data_type_usage,
+			 "tables_using_reg.txt",
+			 .fataltext = "Your installation contains one of the reg* data types in user tables.\n"
+				 "These data types reference system OIDs that are not preserved by\n"
+				 "pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{1500, check_for_aclitem_data_type_usage,
+			"tables_using_aclitem.txt",
+			.fataltext = "Your installation contains the \"aclitem\" data type in user tables.\n"
+				 "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
+				 "so this cluster cannot currently be upgraded.  You can drop the\n"
+				 "problem columns and restart the upgrade.  A list of the problem\n"
+				 "columns is in the file:\n",
+		},
+
+		{1300, check_for_user_defined_encoding_conversions,
+			 "encoding_conversions.txt",
+			 .fataltext = "Your installation contains user-defined encoding conversions.\n"
+				 "The conversion function parameters changed in PostgreSQL version 14\n"
+				 "so this cluster cannot currently be upgraded.  You can remove the\n"
+				 "encoding conversions in the old cluster and restart the upgrade.\n"
+				 "A list of user-defined encoding conversions is in the file:\n"
+		},
+
+		{1300, check_for_user_defined_postfix_ops,
+			"postfix_ops.txt",
+			.fataltext = "Your installation contains user-defined postfix operators, which are not\n"
+				"supported anymore.  Consider dropping the postfix operators and replacing\n"
+				"them with prefix operators or function calls.\n"
+				"A list of user-defined postfix operators is in the file:\n",
+		},
+
+		{1300, check_for_incompatible_polymorphics,
+			 "incompatible_polymorphics.txt",
+			 .fataltext = "Your installation contains user-defined objects that refer to internal\n"
+				 "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
+				 "These user-defined objects must be dropped before upgrading and restored\n"
+				 "afterwards, changing them to refer to the new corresponding functions with\n"
+				 "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
+				 "A list of the problematic objects is in the file:\n",
+		},
+
+		{1100, check_for_tables_with_oids,
+			 "tables_with_oids.txt",
+			 .fataltext = "Your installation contains tables declared WITH OIDS, which is not\n"
+				 "supported anymore.  Consider removing the oid column using\n"
+				 "    ALTER TABLE ... SET WITHOUT OIDS;\n"
+				 "A list of tables with the problem is in the file:\n",
+		},
+
+		{1100, old_11_check_for_sql_identifier_data_type_usage,
+			 "tables_using_sql_identifier.txt",
+			 .fataltext = "Your installation contains the \"sql_identifier\" data type in user tables.\n"
+				 "The on-disk format for this data type has changed, so this\n"
+				 "cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{906, old_9_6_check_for_unknown_data_type_usage,
+			 "tables_using_unknown.txt",
+			 .fataltext = "Your installation contains the \"unknown\" data type in user tables.\n"
+				 "This data type is no longer allowed in tables, so this\n"
+				 "cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{904, check_for_jsonb_9_4_usage,
+			 "tables_using_jsonb.txt",
+			 .fataltext = "Your installation contains the \"jsonb\" data type in user tables.\n"
+				 "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
+				 "cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{903, old_9_3_check_for_line_data_type_usage,
+			 "tables_using_line.txt",
+			 .fataltext = "Your installation contains the \"line\" data type in user tables.\n"
+				 "This data type changed its internal and input/output format\n"
+				 "between your old and new versions so this\n"
+				 "cluster cannot currently be upgraded.  You can\n"
+				 "drop the problem columns and restart the upgrade.\n"
+				 "A list of the problem columns is in the file:\n",
+		},
+
+		{0, check_for_isn_and_int8_passing_mismatch,
+			"contrib_isn_and_int8_pass_by_value.txt",
+			.fataltext = "Your installation contains \"contrib/isn\" functions which rely on the\n"
+				"bigint data type.  Your old and new clusters pass bigint values\n"
+				"differently so this cluster cannot currently be upgraded.  You can\n"
+				"manually dump databases in the old cluster that use \"contrib/isn\"\n"
+				"facilities, drop them, perform the upgrade, and then restore them.  A\n"
+				"list of the problem functions is in the file:\n"
+		},
+
+		{ .fn = NULL, } /* sentinel */
+	};
+
 	if (!live_check)
 		start_postmaster(&old_cluster, true);
 
@@ -98,61 +219,76 @@ check_and_dump_old_cluster(bool live_check)
 
 	get_loadable_libraries();
 
-
 	/*
 	 * Check for various failure cases
 	 */
 	check_is_install_user(&old_cluster);
 	check_proper_datallowconn(&old_cluster);
 	check_for_prepared_transactions(&old_cluster);
-	check_for_composite_data_type_usage(&old_cluster);
-	check_for_reg_data_type_usage(&old_cluster);
-	check_for_isn_and_int8_passing_mismatch(&old_cluster);
 
 	/*
-	 * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
-	 * format for existing data.
+	 * For each DB, run all checks.  This amortizes the cost of opening new
+	 * DB connections in case there are many DBs.
 	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1500)
-		check_for_aclitem_data_type_usage(&old_cluster);
 
-	/*
-	 * PG 14 changed the function signature of encoding conversion functions.
-	 * Conversions from older versions cannot be upgraded automatically
-	 * because the user-defined functions used by the encoding conversions
-	 * need to be changed to match the new signature.
-	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
-		check_for_user_defined_encoding_conversions(&old_cluster);
+	for (int dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
+	{
+		DbInfo		*active_db = &old_cluster.dbarr.dbs[dbnum];
+		PGconn	    *conn = connectToServer(&old_cluster, active_db->db_name);
 
-	/*
-	 * Pre-PG 14 allowed user defined postfix operators, which are not
-	 * supported anymore.  Verify there are none, iff applicable.
-	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
-		check_for_user_defined_postfix_ops(&old_cluster);
+		for (struct checks *check = checks; check->fn != NULL; check++)
+		{
+			bool		ret;
 
-	/*
-	 * PG 14 changed polymorphic functions from anyarray to
-	 * anycompatiblearray.
-	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1300)
-		check_for_incompatible_polymorphics(&old_cluster);
+			if (GET_MAJOR_VERSION(old_cluster.major_version) > check->maxmajor &&
+					check->maxmajor != 0)
+				continue;
 
-	/*
-	 * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
-	 * supported anymore. Verify there are none, iff applicable.
-	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1100)
-		check_for_tables_with_oids(&old_cluster);
+			if (check->scriptfp == NULL)
+			{
+				char		output_path[MAXPGPATH];
+				snprintf(output_path, sizeof(output_path), "%s/%s",
+						log_opts.basedir, check->scriptname);
 
-	/*
-	 * PG 12 changed the 'sql_identifier' type storage to be based on name,
-	 * not varchar, which breaks on-disk format for existing data. So we need
-	 * to prevent upgrade when used in user objects (tables, indexes, ...).
-	 */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 1100)
-		old_11_check_for_sql_identifier_data_type_usage(&old_cluster);
+				if ((check->scriptfp = fopen_priv(output_path, "w")) == NULL)
+					pg_fatal("could not open file \"%s\": %m", output_path);
+			}
+
+			ret = check->fn(&old_cluster, active_db, conn, check->scriptfp);
+			if (!ret)
+			{
+				if (!check->failedtests)
+					pg_log(PG_WARNING, "%s    %s/%s", _(check->fataltext),
+							log_opts.basedir, check->scriptname);
+				check->failedtests = true;
+			}
+		}
+
+		PQfinish(conn);
+	}
+
+	/* Remove empty script files for successful tests */
+	for (struct checks *check = checks; check->fn != NULL; check++)
+	{
+		char		output_path[MAXPGPATH];
+
+		if (check->scriptfp != NULL)
+			fclose(check->scriptfp);
+		if (check->failedtests)
+			continue;
+		snprintf(output_path, sizeof(output_path), "%s/%s",
+				log_opts.basedir, check->scriptname);
+		unlink(output_path);
+	}
+
+	for (struct checks *check = checks; check->fn != NULL; check++)
+	{
+		if (check->failedtests)
+		{
+			pg_log(PG_REPORT, "fatal");
+			exit(1);
+		}
+	}
 
 	/*
 	 * Pre-PG 10 allowed tables with 'unknown' type columns and non WAL logged
@@ -160,7 +296,6 @@ check_and_dump_old_cluster(bool live_check)
 	 */
 	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 906)
 	{
-		old_9_6_check_for_unknown_data_type_usage(&old_cluster);
 		if (user_opts.check)
 			old_9_6_invalidate_hash_indexes(&old_cluster, true);
 	}
@@ -169,14 +304,6 @@ check_and_dump_old_cluster(bool live_check)
 	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 905)
 		check_for_pg_role_prefix(&old_cluster);
 
-	if (GET_MAJOR_VERSION(old_cluster.major_version) == 904 &&
-		old_cluster.controldata.cat_ver < JSONB_FORMAT_CHANGE_CAT_VER)
-		check_for_jsonb_9_4_usage(&old_cluster);
-
-	/* Pre-PG 9.4 had a different 'line' data type internal format */
-	if (GET_MAJOR_VERSION(old_cluster.major_version) <= 903)
-		old_9_3_check_for_line_data_type_usage(&old_cluster);
-
 	/*
 	 * While not a check option, we do this now because this is the only time
 	 * the old server is running.
@@ -833,179 +960,116 @@ check_for_prepared_transactions(ClusterInfo *cluster)
  *	by value.  The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
  *	it must match for the old and new servers.
  */
-static void
-check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
+static bool
+check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	int			dbnum;
-	FILE	   *script = NULL;
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for contrib/isn with bigint-passing mismatch");
+	PGresult   *res;
+	bool		db_used = false;
+	int			ntups;
+	int			rowno;
+	int			i_nspname,
+				i_proname;
 
 	if (old_cluster.controldata.float8_pass_by_value ==
 		new_cluster.controldata.float8_pass_by_value)
 	{
 		/* no mismatch */
-		check_ok();
-		return;
+		return true;
 	}
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "contrib_isn_and_int8_pass_by_value.txt");
+	/* Find any functions coming from contrib/isn */
+	res = executeQueryOrDie(conn,
+							"SELECT n.nspname, p.proname "
+							"FROM	pg_catalog.pg_proc p, "
+							"		pg_catalog.pg_namespace n "
+							"WHERE	p.pronamespace = n.oid AND "
+							"		p.probin = '$libdir/isn'");
 
-	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	ntups = PQntuples(res);
+	i_nspname = PQfnumber(res, "nspname");
+	i_proname = PQfnumber(res, "proname");
+	for (rowno = 0; rowno < ntups; rowno++)
 	{
-		PGresult   *res;
-		bool		db_used = false;
-		int			ntups;
-		int			rowno;
-		int			i_nspname,
-					i_proname;
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-
-		/* Find any functions coming from contrib/isn */
-		res = executeQueryOrDie(conn,
-								"SELECT n.nspname, p.proname "
-								"FROM	pg_catalog.pg_proc p, "
-								"		pg_catalog.pg_namespace n "
-								"WHERE	p.pronamespace = n.oid AND "
-								"		p.probin = '$libdir/isn'");
-
-		ntups = PQntuples(res);
-		i_nspname = PQfnumber(res, "nspname");
-		i_proname = PQfnumber(res, "proname");
-		for (rowno = 0; rowno < ntups; rowno++)
+		if (!db_used)
 		{
-			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s",
-						 output_path, strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
-			fprintf(script, "  %s.%s\n",
-					PQgetvalue(res, rowno, i_nspname),
-					PQgetvalue(res, rowno, i_proname));
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
 		}
 
-		PQclear(res);
-
-		PQfinish(conn);
+		fprintf(script, "  %s.%s\n",
+				PQgetvalue(res, rowno, i_nspname),
+				PQgetvalue(res, rowno, i_proname));
 	}
 
-	if (script)
-	{
-		fclose(script);
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
-				 "bigint data type.  Your old and new clusters pass bigint values\n"
-				 "differently so this cluster cannot currently be upgraded.  You can\n"
-				 "manually dump databases in the old cluster that use \"contrib/isn\"\n"
-				 "facilities, drop them, perform the upgrade, and then restore them.  A\n"
-				 "list of the problem functions is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	PQclear(res);
+	return ntups == 0;
 }
 
 /*
  * Verify that no user defined postfix operators exist.
- */
-static void
-check_for_user_defined_postfix_ops(ClusterInfo *cluster)
-{
-	int			dbnum;
-	FILE	   *script = NULL;
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for user-defined postfix operators");
+ *
+ * Pre-PG 14 allowed user defined postfix operators, which are not
+ * supported anymore.
+ */
+static bool
+check_for_user_defined_postfix_ops(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "postfix_ops.txt");
+{
+	PGresult   *res;
+	bool		db_used = false;
+	int			ntups;
+	int			rowno;
+	int			i_oproid,
+				i_oprnsp,
+				i_oprname,
+				i_typnsp,
+				i_typname;
 
-	/* Find any user defined postfix operators */
-	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	/*
+	 * The query below hardcodes FirstNormalObjectId as 16384 rather than
+	 * interpolating that C #define into the query because, if that
+	 * #define is ever changed, the cutoff we want to use is the value
+	 * used by pre-version 14 servers, not that of some future version.
+	 */
+	res = executeQueryOrDie(conn,
+							"SELECT o.oid AS oproid, "
+							"       n.nspname AS oprnsp, "
+							"       o.oprname, "
+							"       tn.nspname AS typnsp, "
+							"       t.typname "
+							"FROM pg_catalog.pg_operator o, "
+							"     pg_catalog.pg_namespace n, "
+							"     pg_catalog.pg_type t, "
+							"     pg_catalog.pg_namespace tn "
+							"WHERE o.oprnamespace = n.oid AND "
+							"      o.oprleft = t.oid AND "
+							"      t.typnamespace = tn.oid AND "
+							"      o.oprright = 0 AND "
+							"      o.oid >= 16384");
+	ntups = PQntuples(res);
+	i_oproid = PQfnumber(res, "oproid");
+	i_oprnsp = PQfnumber(res, "oprnsp");
+	i_oprname = PQfnumber(res, "oprname");
+	i_typnsp = PQfnumber(res, "typnsp");
+	i_typname = PQfnumber(res, "typname");
+	for (rowno = 0; rowno < ntups; rowno++)
 	{
-		PGresult   *res;
-		bool		db_used = false;
-		int			ntups;
-		int			rowno;
-		int			i_oproid,
-					i_oprnsp,
-					i_oprname,
-					i_typnsp,
-					i_typname;
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-
-		/*
-		 * The query below hardcodes FirstNormalObjectId as 16384 rather than
-		 * interpolating that C #define into the query because, if that
-		 * #define is ever changed, the cutoff we want to use is the value
-		 * used by pre-version 14 servers, not that of some future version.
-		 */
-		res = executeQueryOrDie(conn,
-								"SELECT o.oid AS oproid, "
-								"       n.nspname AS oprnsp, "
-								"       o.oprname, "
-								"       tn.nspname AS typnsp, "
-								"       t.typname "
-								"FROM pg_catalog.pg_operator o, "
-								"     pg_catalog.pg_namespace n, "
-								"     pg_catalog.pg_type t, "
-								"     pg_catalog.pg_namespace tn "
-								"WHERE o.oprnamespace = n.oid AND "
-								"      o.oprleft = t.oid AND "
-								"      t.typnamespace = tn.oid AND "
-								"      o.oprright = 0 AND "
-								"      o.oid >= 16384");
-		ntups = PQntuples(res);
-		i_oproid = PQfnumber(res, "oproid");
-		i_oprnsp = PQfnumber(res, "oprnsp");
-		i_oprname = PQfnumber(res, "oprname");
-		i_typnsp = PQfnumber(res, "typnsp");
-		i_typname = PQfnumber(res, "typname");
-		for (rowno = 0; rowno < ntups; rowno++)
+		if (!db_used)
 		{
-			if (script == NULL &&
-				(script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s",
-						 output_path, strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
-			fprintf(script, "  (oid=%s) %s.%s (%s.%s, NONE)\n",
-					PQgetvalue(res, rowno, i_oproid),
-					PQgetvalue(res, rowno, i_oprnsp),
-					PQgetvalue(res, rowno, i_oprname),
-					PQgetvalue(res, rowno, i_typnsp),
-					PQgetvalue(res, rowno, i_typname));
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
 		}
-
-		PQclear(res);
-
-		PQfinish(conn);
+		fprintf(script, "  (oid=%s) %s.%s (%s.%s, NONE)\n",
+				PQgetvalue(res, rowno, i_oproid),
+				PQgetvalue(res, rowno, i_oprnsp),
+				PQgetvalue(res, rowno, i_oprname),
+				PQgetvalue(res, rowno, i_typnsp),
+				PQgetvalue(res, rowno, i_typname));
 	}
 
-	if (script)
-	{
-		fclose(script);
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains user-defined postfix operators, which are not\n"
-				 "supported anymore.  Consider dropping the postfix operators and replacing\n"
-				 "them with prefix operators or function calls.\n"
-				 "A list of user-defined postfix operators is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	PQclear(res);
+
+	return ntups == 0;
 }
 
 /*
@@ -1014,19 +1078,16 @@ check_for_user_defined_postfix_ops(ClusterInfo *cluster)
  *	Make sure nothing is using old polymorphic functions with
  *	anyarray/anyelement rather than the new anycompatible variants.
  */
-static void
-check_for_incompatible_polymorphics(ClusterInfo *cluster)
+static bool
+check_for_incompatible_polymorphics(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
 	PGresult   *res;
-	FILE	   *script = NULL;
-	char		output_path[MAXPGPATH];
 	PQExpBufferData old_polymorphics;
 
-	prep_status("Checking for incompatible polymorphic functions");
-
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "incompatible_polymorphics.txt");
+	bool		db_used = false;
+	int			ntups;
+	int			i_objkind,
+				i_objname;
 
 	/* The set of problematic functions varies a bit in different versions */
 	initPQExpBuffer(&old_polymorphics);
@@ -1048,167 +1109,109 @@ check_for_incompatible_polymorphics(ClusterInfo *cluster)
 							 ", 'array_positions(anyarray,anyelement)'"
 							 ", 'width_bucket(anyelement,anyarray)'");
 
-	for (int dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
-	{
-		bool		db_used = false;
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-		int			ntups;
-		int			i_objkind,
-					i_objname;
-
-		/*
-		 * The query below hardcodes FirstNormalObjectId as 16384 rather than
-		 * interpolating that C #define into the query because, if that
-		 * #define is ever changed, the cutoff we want to use is the value
-		 * used by pre-version 14 servers, not that of some future version.
-		 */
-		res = executeQueryOrDie(conn,
-		/* Aggregate transition functions */
-								"SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
-								"FROM pg_proc AS p "
-								"JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
-								"JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
-								"WHERE p.oid >= 16384 "
-								"AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
-								"AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
-
-		/* Aggregate final functions */
-								"UNION ALL "
-								"SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
-								"FROM pg_proc AS p "
-								"JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
-								"JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
-								"WHERE p.oid >= 16384 "
-								"AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
-								"AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
-
-		/* Operators */
-								"UNION ALL "
-								"SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
-								"FROM pg_operator AS op "
-								"WHERE op.oid >= 16384 "
-								"AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
-								"AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[]);",
-								old_polymorphics.data,
-								old_polymorphics.data,
-								old_polymorphics.data);
-
-		ntups = PQntuples(res);
-
-		i_objkind = PQfnumber(res, "objkind");
-		i_objname = PQfnumber(res, "objname");
-
-		for (int rowno = 0; rowno < ntups; rowno++)
-		{
-			if (script == NULL &&
-				(script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s",
-						 output_path, strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
+	/*
+	 * The query below hardcodes FirstNormalObjectId as 16384 rather than
+	 * interpolating that C #define into the query because, if that
+	 * #define is ever changed, the cutoff we want to use is the value
+	 * used by pre-version 14 servers, not that of some future version.
+	 */
+	res = executeQueryOrDie(conn,
+	/* Aggregate transition functions */
+							"SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
+							"FROM pg_proc AS p "
+							"JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
+							"JOIN pg_proc AS transfn ON transfn.oid=a.aggtransfn "
+							"WHERE p.oid >= 16384 "
+							"AND a.aggtransfn = ANY(ARRAY[%s]::regprocedure[]) "
+							"AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
+
+	/* Aggregate final functions */
+							"UNION ALL "
+							"SELECT 'aggregate' AS objkind, p.oid::regprocedure::text AS objname "
+							"FROM pg_proc AS p "
+							"JOIN pg_aggregate AS a ON a.aggfnoid=p.oid "
+							"JOIN pg_proc AS finalfn ON finalfn.oid=a.aggfinalfn "
+							"WHERE p.oid >= 16384 "
+							"AND a.aggfinalfn = ANY(ARRAY[%s]::regprocedure[]) "
+							"AND a.aggtranstype = ANY(ARRAY['anyarray', 'anyelement']::regtype[]) "
+
+	/* Operators */
+							"UNION ALL "
+							"SELECT 'operator' AS objkind, op.oid::regoperator::text AS objname "
+							"FROM pg_operator AS op "
+							"WHERE op.oid >= 16384 "
+							"AND oprcode = ANY(ARRAY[%s]::regprocedure[]) "
+							"AND oprleft = ANY(ARRAY['anyarray', 'anyelement']::regtype[]);",
+							old_polymorphics.data,
+							old_polymorphics.data,
+							old_polymorphics.data);
 
-			fprintf(script, "  %s: %s\n",
-					PQgetvalue(res, rowno, i_objkind),
-					PQgetvalue(res, rowno, i_objname));
-		}
+	ntups = PQntuples(res);
 
-		PQclear(res);
-		PQfinish(conn);
-	}
+	i_objkind = PQfnumber(res, "objkind");
+	i_objname = PQfnumber(res, "objname");
 
-	if (script)
+	for (int rowno = 0; rowno < ntups; rowno++)
 	{
-		fclose(script);
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains user-defined objects that refer to internal\n"
-				 "polymorphic functions with arguments of type \"anyarray\" or \"anyelement\".\n"
-				 "These user-defined objects must be dropped before upgrading and restored\n"
-				 "afterwards, changing them to refer to the new corresponding functions with\n"
-				 "arguments of type \"anycompatiblearray\" and \"anycompatible\".\n"
-				 "A list of the problematic objects is in the file:\n"
-				 "    %s", output_path);
+		if (!db_used)
+		{
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
+		}
+
+		fprintf(script, "  %s: %s\n",
+				PQgetvalue(res, rowno, i_objkind),
+				PQgetvalue(res, rowno, i_objname));
 	}
-	else
-		check_ok();
 
+	PQclear(res);
 	termPQExpBuffer(&old_polymorphics);
+
+	return ntups == 0;
 }
 
 /*
  * Verify that no tables are declared WITH OIDS.
+ *
+ * Pre-PG 12 allowed tables to be declared WITH OIDS, which is not
+ * supported anymore.
  */
-static void
-check_for_tables_with_oids(ClusterInfo *cluster)
+static bool
+check_for_tables_with_oids(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	int			dbnum;
-	FILE	   *script = NULL;
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for tables WITH OIDS");
+	PGresult   *res;
+	bool		db_used = false;
+	int			ntups;
+	int			rowno;
+	int			i_nspname,
+				i_relname;
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_with_oids.txt");
+	res = executeQueryOrDie(conn,
+							"SELECT n.nspname, c.relname "
+							"FROM	pg_catalog.pg_class c, "
+							"		pg_catalog.pg_namespace n "
+							"WHERE	c.relnamespace = n.oid AND "
+							"		c.relhasoids AND"
+							"       n.nspname NOT IN ('pg_catalog')");
 
-	/* Find any tables declared WITH OIDS */
-	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	ntups = PQntuples(res);
+	i_nspname = PQfnumber(res, "nspname");
+	i_relname = PQfnumber(res, "relname");
+	for (rowno = 0; rowno < ntups; rowno++)
 	{
-		PGresult   *res;
-		bool		db_used = false;
-		int			ntups;
-		int			rowno;
-		int			i_nspname,
-					i_relname;
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-
-		res = executeQueryOrDie(conn,
-								"SELECT n.nspname, c.relname "
-								"FROM	pg_catalog.pg_class c, "
-								"		pg_catalog.pg_namespace n "
-								"WHERE	c.relnamespace = n.oid AND "
-								"		c.relhasoids AND"
-								"       n.nspname NOT IN ('pg_catalog')");
-
-		ntups = PQntuples(res);
-		i_nspname = PQfnumber(res, "nspname");
-		i_relname = PQfnumber(res, "relname");
-		for (rowno = 0; rowno < ntups; rowno++)
+		if (!db_used)
 		{
-			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s",
-						 output_path, strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
-			fprintf(script, "  %s.%s\n",
-					PQgetvalue(res, rowno, i_nspname),
-					PQgetvalue(res, rowno, i_relname));
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
 		}
-
-		PQclear(res);
-
-		PQfinish(conn);
+		fprintf(script, "  %s.%s\n",
+				PQgetvalue(res, rowno, i_nspname),
+				PQgetvalue(res, rowno, i_relname));
 	}
 
-	if (script)
-	{
-		fclose(script);
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains tables declared WITH OIDS, which is not\n"
-				 "supported anymore.  Consider removing the oid column using\n"
-				 "    ALTER TABLE ... SET WITHOUT OIDS;\n"
-				 "A list of tables with the problem is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	PQclear(res);
+
+	return ntups == 0;
 }
 
 
@@ -1221,20 +1224,13 @@ check_for_tables_with_oids(ClusterInfo *cluster)
  *	no mechanism for forcing them to be the same in the new cluster.
  *	Hence, if any user table uses one, that's problematic for pg_upgrade.
  */
-static void
-check_for_composite_data_type_usage(ClusterInfo *cluster)
+static bool
+check_for_composite_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
 	bool		found;
 	Oid			firstUserOid;
-	char		output_path[MAXPGPATH];
 	char	   *base_query;
 
-	prep_status("Checking for system-defined composite types in user tables");
-
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_composite.txt");
-
 	/*
 	 * Look for composite types that were made during initdb *or* belong to
 	 * information_schema; that's important in case information_schema was
@@ -1253,22 +1249,11 @@ check_for_composite_data_type_usage(ClusterInfo *cluster)
 						  " WHERE typtype = 'c' AND (t.oid < %u OR nspname = 'information_schema')",
 						  firstUserOid);
 
-	found = check_for_data_types_usage(cluster, base_query, output_path);
+	found = check_for_data_types_usage(cluster, base_query, active_db, conn, script);
 
 	free(base_query);
 
-	if (found)
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains system-defined composite type(s) in user tables.\n"
-				 "These type OIDs are not stable across PostgreSQL versions,\n"
-				 "so this cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !found;
 }
 
 /*
@@ -1282,17 +1267,10 @@ check_for_composite_data_type_usage(ClusterInfo *cluster)
  *	not preserved, and hence these data types cannot be used in user
  *	tables upgraded by pg_upgrade.
  */
-static void
-check_for_reg_data_type_usage(ClusterInfo *cluster)
+static bool
+check_for_reg_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
 	bool		found;
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for reg* data types in user tables");
-
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_reg.txt");
 
 	/*
 	 * Note: older servers will not have all of these reg* types, so we have
@@ -1316,48 +1294,22 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
 	/* pg_authid.oid is preserved, so 'regrole' is OK */
 	/* pg_type.oid is (mostly) preserved, so 'regtype' is OK */
 									   "         )",
-									   output_path);
+									   active_db, conn, script);
 
-	if (found)
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
-				 "These data types reference system OIDs that are not preserved by\n"
-				 "pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !found;
 }
 
 /*
  * check_for_aclitem_data_type_usage
  *
- *	aclitem changed its storage format in 16, so check for it.
+ * PG 16 increased the size of the 'aclitem' type, which breaks the on-disk
+ * format for existing data.
  */
-static void
-check_for_aclitem_data_type_usage(ClusterInfo *cluster)
+static bool
+check_for_aclitem_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	char		output_path[MAXPGPATH];
-
 	prep_status("Checking for incompatible aclitem data type in user tables");
-
-	snprintf(output_path, sizeof(output_path), "tables_using_aclitem.txt");
-
-	if (check_for_data_type_usage(cluster, "pg_catalog.aclitem", output_path))
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains the \"aclitem\" data type in user tables.\n"
-				 "The internal format of \"aclitem\" changed in PostgreSQL version 16\n"
-				 "so this cluster cannot currently be upgraded.  You can drop the\n"
-				 "problem columns and restart the upgrade.  A list of the problem\n"
-				 "columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !check_for_data_type_usage(cluster, "pg_catalog.aclitem", active_db, conn, script);
 }
 
 /*
@@ -1365,29 +1317,13 @@ check_for_aclitem_data_type_usage(ClusterInfo *cluster)
  *
  *	JSONB changed its storage format during 9.4 beta, so check for it.
  */
-static void
-check_for_jsonb_9_4_usage(ClusterInfo *cluster)
+static bool
+check_for_jsonb_9_4_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for incompatible \"jsonb\" data type");
-
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_jsonb.txt");
+	if (old_cluster.controldata.cat_ver >= JSONB_FORMAT_CHANGE_CAT_VER)
+		return true;
 
-	if (check_for_data_type_usage(cluster, "pg_catalog.jsonb", output_path))
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains the \"jsonb\" data type in user tables.\n"
-				 "The internal format of \"jsonb\" changed during 9.4 beta so this\n"
-				 "cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !check_for_data_type_usage(cluster, "pg_catalog.jsonb", active_db, conn, script);
 }
 
 /*
@@ -1450,84 +1386,55 @@ check_for_pg_role_prefix(ClusterInfo *cluster)
 
 /*
  * Verify that no user-defined encoding conversions exist.
+ *
+ * PG 14 changed the function signature of encoding conversion functions.
+ * Conversions from older versions cannot be upgraded automatically
+ * because the user-defined functions used by the encoding conversions
+ * need to be changed to match the new signature.
  */
-static void
-check_for_user_defined_encoding_conversions(ClusterInfo *cluster)
+static bool
+check_for_user_defined_encoding_conversions(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	int			dbnum;
-	FILE	   *script = NULL;
-	char		output_path[MAXPGPATH];
-
-	prep_status("Checking for user-defined encoding conversions");
-
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "encoding_conversions.txt");
+	PGresult   *res;
+	bool		db_used = false;
+	int			ntups;
+	int			rowno;
+	int			i_conoid,
+				i_conname,
+				i_nspname;
 
-	/* Find any user defined encoding conversions */
-	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	/*
+	 * The query below hardcodes FirstNormalObjectId as 16384 rather than
+	 * interpolating that C #define into the query because, if that
+	 * #define is ever changed, the cutoff we want to use is the value
+	 * used by pre-version 14 servers, not that of some future version.
+	 */
+	res = executeQueryOrDie(conn,
+							"SELECT c.oid as conoid, c.conname, n.nspname "
+							"FROM pg_catalog.pg_conversion c, "
+							"     pg_catalog.pg_namespace n "
+							"WHERE c.connamespace = n.oid AND "
+							"      c.oid >= 16384");
+	ntups = PQntuples(res);
+	i_conoid = PQfnumber(res, "conoid");
+	i_conname = PQfnumber(res, "conname");
+	i_nspname = PQfnumber(res, "nspname");
+	for (rowno = 0; rowno < ntups; rowno++)
 	{
-		PGresult   *res;
-		bool		db_used = false;
-		int			ntups;
-		int			rowno;
-		int			i_conoid,
-					i_conname,
-					i_nspname;
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-
-		/*
-		 * The query below hardcodes FirstNormalObjectId as 16384 rather than
-		 * interpolating that C #define into the query because, if that
-		 * #define is ever changed, the cutoff we want to use is the value
-		 * used by pre-version 14 servers, not that of some future version.
-		 */
-		res = executeQueryOrDie(conn,
-								"SELECT c.oid as conoid, c.conname, n.nspname "
-								"FROM pg_catalog.pg_conversion c, "
-								"     pg_catalog.pg_namespace n "
-								"WHERE c.connamespace = n.oid AND "
-								"      c.oid >= 16384");
-		ntups = PQntuples(res);
-		i_conoid = PQfnumber(res, "conoid");
-		i_conname = PQfnumber(res, "conname");
-		i_nspname = PQfnumber(res, "nspname");
-		for (rowno = 0; rowno < ntups; rowno++)
+		if (!db_used)
 		{
-			if (script == NULL &&
-				(script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s",
-						 output_path, strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
-			fprintf(script, "  (oid=%s) %s.%s\n",
-					PQgetvalue(res, rowno, i_conoid),
-					PQgetvalue(res, rowno, i_nspname),
-					PQgetvalue(res, rowno, i_conname));
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
 		}
-
-		PQclear(res);
-
-		PQfinish(conn);
+		fprintf(script, "  (oid=%s) %s.%s\n",
+				PQgetvalue(res, rowno, i_conoid),
+				PQgetvalue(res, rowno, i_nspname),
+				PQgetvalue(res, rowno, i_conname));
 	}
 
-	if (script)
-	{
-		fclose(script);
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains user-defined encoding conversions.\n"
-				 "The conversion function parameters changed in PostgreSQL version 14\n"
-				 "so this cluster cannot currently be upgraded.  You can remove the\n"
-				 "encoding conversions in the old cluster and restart the upgrade.\n"
-				 "A list of user-defined encoding conversions is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	PQclear(res);
+
+	return ntups == 0;
 }
 
 
diff --git a/src/bin/pg_upgrade/pg_upgrade.h b/src/bin/pg_upgrade/pg_upgrade.h
index 5f2a116f23e..39e8fd2c8d5 100644
--- a/src/bin/pg_upgrade/pg_upgrade.h
+++ b/src/bin/pg_upgrade/pg_upgrade.h
@@ -444,16 +444,16 @@ unsigned int str2uint(const char *str);
 
 bool		check_for_data_types_usage(ClusterInfo *cluster,
 									   const char *base_query,
-									   const char *output_path);
+									   DbInfo *active_db, PGconn *conn, FILE *script);
 bool		check_for_data_type_usage(ClusterInfo *cluster,
 									  const char *type_name,
-									  const char *output_path);
-void		old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster);
-void		old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster);
+									  DbInfo *active_db, PGconn *conn, FILE *script);
+bool		old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
+bool		old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
 void		old_9_6_invalidate_hash_indexes(ClusterInfo *cluster,
 											bool check_mode);
 
-void		old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster);
+bool		old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script);
 void		report_extension_updates(ClusterInfo *cluster);
 
 /* parallel.c */
diff --git a/src/bin/pg_upgrade/version.c b/src/bin/pg_upgrade/version.c
index 403a6d7cfaa..f2b885347a8 100644
--- a/src/bin/pg_upgrade/version.c
+++ b/src/bin/pg_upgrade/version.c
@@ -30,111 +30,96 @@
 bool
 check_for_data_types_usage(ClusterInfo *cluster,
 						   const char *base_query,
-						   const char *output_path)
+						   DbInfo *active_db, PGconn *conn, FILE *script)
 {
 	bool		found = false;
-	FILE	   *script = NULL;
-	int			dbnum;
 
-	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
+	PQExpBufferData querybuf;
+	PGresult   *res;
+	bool		db_used = false;
+	int			ntups;
+	int			rowno;
+	int			i_nspname,
+				i_relname,
+				i_attname;
+
+	/*
+	 * The type(s) of interest might be wrapped in a domain, array,
+	 * composite, or range, and these container types can be nested (to
+	 * varying extents depending on server version, but that's not of
+	 * concern here).  To handle all these cases we need a recursive CTE.
+	 */
+	initPQExpBuffer(&querybuf);
+	appendPQExpBuffer(&querybuf,
+					  "WITH RECURSIVE oids AS ( "
+	/* start with the type(s) returned by base_query */
+					  "	%s "
+					  "	UNION ALL "
+					  "	SELECT * FROM ( "
+	/* inner WITH because we can only reference the CTE once */
+					  "		WITH x AS (SELECT oid FROM oids) "
+	/* domains on any type selected so far */
+					  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
+					  "			UNION ALL "
+	/* arrays over any type selected so far */
+					  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
+					  "			UNION ALL "
+	/* composite types containing any type selected so far */
+					  "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
+					  "			WHERE t.typtype = 'c' AND "
+					  "				  t.oid = c.reltype AND "
+					  "				  c.oid = a.attrelid AND "
+					  "				  NOT a.attisdropped AND "
+					  "				  a.atttypid = x.oid "
+					  "			UNION ALL "
+	/* ranges containing any type selected so far */
+					  "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
+					  "			WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
+					  "	) foo "
+					  ") "
+	/* now look for stored columns of any such type */
+					  "SELECT n.nspname, c.relname, a.attname "
+					  "FROM	pg_catalog.pg_class c, "
+					  "		pg_catalog.pg_namespace n, "
+					  "		pg_catalog.pg_attribute a "
+					  "WHERE	c.oid = a.attrelid AND "
+					  "		NOT a.attisdropped AND "
+					  "		a.atttypid IN (SELECT oid FROM oids) AND "
+					  "		c.relkind IN ("
+					  CppAsString2(RELKIND_RELATION) ", "
+					  CppAsString2(RELKIND_MATVIEW) ", "
+					  CppAsString2(RELKIND_INDEX) ") AND "
+					  "		c.relnamespace = n.oid AND "
+	/* exclude possible orphaned temp tables */
+					  "		n.nspname !~ '^pg_temp_' AND "
+					  "		n.nspname !~ '^pg_toast_temp_' AND "
+	/* exclude system catalogs, too */
+					  "		n.nspname NOT IN ('pg_catalog', 'information_schema')",
+					  base_query);
+
+	res = executeQueryOrDie(conn, "%s", querybuf.data);
+
+	ntups = PQntuples(res);
+	i_nspname = PQfnumber(res, "nspname");
+	i_relname = PQfnumber(res, "relname");
+	i_attname = PQfnumber(res, "attname");
+	for (rowno = 0; rowno < ntups; rowno++)
 	{
-		DbInfo	   *active_db = &cluster->dbarr.dbs[dbnum];
-		PGconn	   *conn = connectToServer(cluster, active_db->db_name);
-		PQExpBufferData querybuf;
-		PGresult   *res;
-		bool		db_used = false;
-		int			ntups;
-		int			rowno;
-		int			i_nspname,
-					i_relname,
-					i_attname;
-
-		/*
-		 * The type(s) of interest might be wrapped in a domain, array,
-		 * composite, or range, and these container types can be nested (to
-		 * varying extents depending on server version, but that's not of
-		 * concern here).  To handle all these cases we need a recursive CTE.
-		 */
-		initPQExpBuffer(&querybuf);
-		appendPQExpBuffer(&querybuf,
-						  "WITH RECURSIVE oids AS ( "
-		/* start with the type(s) returned by base_query */
-						  "	%s "
-						  "	UNION ALL "
-						  "	SELECT * FROM ( "
-		/* inner WITH because we can only reference the CTE once */
-						  "		WITH x AS (SELECT oid FROM oids) "
-		/* domains on any type selected so far */
-						  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typbasetype = x.oid AND typtype = 'd' "
-						  "			UNION ALL "
-		/* arrays over any type selected so far */
-						  "			SELECT t.oid FROM pg_catalog.pg_type t, x WHERE typelem = x.oid AND typtype = 'b' "
-						  "			UNION ALL "
-		/* composite types containing any type selected so far */
-						  "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_class c, pg_catalog.pg_attribute a, x "
-						  "			WHERE t.typtype = 'c' AND "
-						  "				  t.oid = c.reltype AND "
-						  "				  c.oid = a.attrelid AND "
-						  "				  NOT a.attisdropped AND "
-						  "				  a.atttypid = x.oid "
-						  "			UNION ALL "
-		/* ranges containing any type selected so far */
-						  "			SELECT t.oid FROM pg_catalog.pg_type t, pg_catalog.pg_range r, x "
-						  "			WHERE t.typtype = 'r' AND r.rngtypid = t.oid AND r.rngsubtype = x.oid"
-						  "	) foo "
-						  ") "
-		/* now look for stored columns of any such type */
-						  "SELECT n.nspname, c.relname, a.attname "
-						  "FROM	pg_catalog.pg_class c, "
-						  "		pg_catalog.pg_namespace n, "
-						  "		pg_catalog.pg_attribute a "
-						  "WHERE	c.oid = a.attrelid AND "
-						  "		NOT a.attisdropped AND "
-						  "		a.atttypid IN (SELECT oid FROM oids) AND "
-						  "		c.relkind IN ("
-						  CppAsString2(RELKIND_RELATION) ", "
-						  CppAsString2(RELKIND_MATVIEW) ", "
-						  CppAsString2(RELKIND_INDEX) ") AND "
-						  "		c.relnamespace = n.oid AND "
-		/* exclude possible orphaned temp tables */
-						  "		n.nspname !~ '^pg_temp_' AND "
-						  "		n.nspname !~ '^pg_toast_temp_' AND "
-		/* exclude system catalogs, too */
-						  "		n.nspname NOT IN ('pg_catalog', 'information_schema')",
-						  base_query);
-
-		res = executeQueryOrDie(conn, "%s", querybuf.data);
-
-		ntups = PQntuples(res);
-		i_nspname = PQfnumber(res, "nspname");
-		i_relname = PQfnumber(res, "relname");
-		i_attname = PQfnumber(res, "attname");
-		for (rowno = 0; rowno < ntups; rowno++)
+		found = true;
+		if (!db_used)
 		{
-			found = true;
-			if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
-				pg_fatal("could not open file \"%s\": %s", output_path,
-						 strerror(errno));
-			if (!db_used)
-			{
-				fprintf(script, "In database: %s\n", active_db->db_name);
-				db_used = true;
-			}
-			fprintf(script, "  %s.%s.%s\n",
-					PQgetvalue(res, rowno, i_nspname),
-					PQgetvalue(res, rowno, i_relname),
-					PQgetvalue(res, rowno, i_attname));
+			fprintf(script, "In database: %s\n", active_db->db_name);
+			db_used = true;
 		}
-
-		PQclear(res);
-
-		termPQExpBuffer(&querybuf);
-
-		PQfinish(conn);
+		fprintf(script, "  %s.%s.%s\n",
+				PQgetvalue(res, rowno, i_nspname),
+				PQgetvalue(res, rowno, i_relname),
+				PQgetvalue(res, rowno, i_attname));
 	}
 
-	if (script)
-		fclose(script);
+	PQclear(res);
+
+	termPQExpBuffer(&querybuf);
 
 	return found;
 }
@@ -152,7 +137,7 @@ check_for_data_types_usage(ClusterInfo *cluster,
 bool
 check_for_data_type_usage(ClusterInfo *cluster,
 						  const char *type_name,
-						  const char *output_path)
+						  DbInfo *active_db, PGconn *conn, FILE *script)
 {
 	bool		found;
 	char	   *base_query;
@@ -160,7 +145,7 @@ check_for_data_type_usage(ClusterInfo *cluster,
 	base_query = psprintf("SELECT '%s'::pg_catalog.regtype AS oid",
 						  type_name);
 
-	found = check_for_data_types_usage(cluster, base_query, output_path);
+	found = check_for_data_types_usage(cluster, base_query, active_db, conn, script);
 
 	free(base_query);
 
@@ -176,30 +161,12 @@ check_for_data_type_usage(ClusterInfo *cluster,
  *	compile-time switch; as of 9.4 "line" has a different on-disk
  *	representation format.
  */
-void
-old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
+bool
+old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	char		output_path[MAXPGPATH];
-
 	prep_status("Checking for incompatible \"line\" data type");
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_line.txt");
-
-	if (check_for_data_type_usage(cluster, "pg_catalog.line", output_path))
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains the \"line\" data type in user tables.\n"
-				 "This data type changed its internal and input/output format\n"
-				 "between your old and new versions so this\n"
-				 "cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !check_for_data_type_usage(cluster, "pg_catalog.line", active_db, conn, script);
 }
 
 
@@ -216,29 +183,12 @@ old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster)
  *	DDL reload will silently change it to "text" which won't match the
  *	on-disk storage (which is like "cstring").  So we *must* reject that.
  */
-void
-old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster)
+bool
+old_9_6_check_for_unknown_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	char		output_path[MAXPGPATH];
-
 	prep_status("Checking for invalid \"unknown\" user columns");
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_unknown.txt");
-
-	if (check_for_data_type_usage(cluster, "pg_catalog.unknown", output_path))
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains the \"unknown\" data type in user tables.\n"
-				 "This data type is no longer allowed in tables, so this\n"
-				 "cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !check_for_data_type_usage(cluster, "pg_catalog.unknown", active_db, conn, script);
 }
 
 /*
@@ -358,33 +308,15 @@ old_9_6_invalidate_hash_indexes(ClusterInfo *cluster, bool check_mode)
  *	11 -> 12
  *	In 12, the sql_identifier data type was switched from name to varchar,
  *	which does affect the storage (name is by-ref, but not varlena). This
- *	means user tables using sql_identifier for columns are broken because
+ *	means user objects using sql_identifier for columns are broken because
  *	the on-disk format is different.
  */
-void
-old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster)
+bool
+old_11_check_for_sql_identifier_data_type_usage(ClusterInfo *cluster, DbInfo *active_db, PGconn *conn, FILE *script)
 {
-	char		output_path[MAXPGPATH];
-
 	prep_status("Checking for invalid \"sql_identifier\" user columns");
 
-	snprintf(output_path, sizeof(output_path), "%s/%s",
-			 log_opts.basedir,
-			 "tables_using_sql_identifier.txt");
-
-	if (check_for_data_type_usage(cluster, "information_schema.sql_identifier",
-								  output_path))
-	{
-		pg_log(PG_REPORT, "fatal");
-		pg_fatal("Your installation contains the \"sql_identifier\" data type in user tables.\n"
-				 "The on-disk format for this data type has changed, so this\n"
-				 "cluster cannot currently be upgraded.  You can\n"
-				 "drop the problem columns and restart the upgrade.\n"
-				 "A list of the problem columns is in the file:\n"
-				 "    %s", output_path);
-	}
-	else
-		check_ok();
+	return !check_for_data_type_usage(cluster, "information_schema.sql_identifier", active_db, conn, script);
 }
 
 
-- 
2.34.1

