From b64c247210c5a5067b5c76f6ab68c978606b0902 Mon Sep 17 00:00:00 2001
From: Andres Freund <andres@anarazel.de>
Date: Mon, 9 Dec 2024 14:14:13 -0500
Subject: [PATCH v2 04/20] aio: Core AIO implementation

At this point nothing can use AIO - this commit does not include any
implementation of aio subjects / callbacks. That will come in later commits.

Todo:
- lots of cleanup
---
 src/include/storage/aio.h                     | 296 ++++++
 src/include/storage/aio_internal.h            | 244 +++++
 src/include/storage/aio_ref.h                 |  24 +
 src/include/utils/resowner.h                  |   5 +
 src/backend/access/transam/xact.c             |   9 +
 src/backend/storage/aio/Makefile              |   3 +
 src/backend/storage/aio/aio.c                 | 906 ++++++++++++++++++
 src/backend/storage/aio/aio_init.c            | 186 +++-
 src/backend/storage/aio/aio_io.c              | 140 +++
 src/backend/storage/aio/aio_subject.c         | 231 +++++
 src/backend/storage/aio/meson.build           |   3 +
 src/backend/storage/aio/method_sync.c         |  45 +
 .../utils/activity/wait_event_names.txt       |   3 +
 src/backend/utils/resowner/resowner.c         |  30 +
 src/tools/pgindent/typedefs.list              |  18 +
 15 files changed, 2139 insertions(+), 4 deletions(-)
 create mode 100644 src/include/storage/aio_internal.h
 create mode 100644 src/include/storage/aio_ref.h
 create mode 100644 src/backend/storage/aio/aio_io.c
 create mode 100644 src/backend/storage/aio/aio_subject.c
 create mode 100644 src/backend/storage/aio/method_sync.c

diff --git a/src/include/storage/aio.h b/src/include/storage/aio.h
index 0ee9d0043de..b386dabc921 100644
--- a/src/include/storage/aio.h
+++ b/src/include/storage/aio.h
@@ -15,9 +15,305 @@
 #define AIO_H
 
 
+#include "storage/aio_ref.h"
+#include "storage/procnumber.h"
 #include "utils/guc_tables.h"
 
 
+typedef struct PgAioHandle PgAioHandle;
+
+typedef enum PgAioOp
+{
+	/* intentionally the zero value, to help catch zeroed memory etc */
+	PGAIO_OP_INVALID = 0,
+
+	PGAIO_OP_READV,
+	PGAIO_OP_WRITEV,
+
+	/**
+	 * In the near term we'll need at least:
+	 * - fsync / fdatasync
+	 * - flush_range
+	 *
+	 * Eventually we'll additionally want at least:
+	 * - send
+	 * - recv
+	 * - accept
+	 **/
+} PgAioOp;
+
+#define PGAIO_OP_COUNT	(PGAIO_OP_WRITEV + 1)
+
+
+/*
+ * On what is IO being performed.
+ *
+ * Subject specific behaviour should be implemented in
+ * aio_subject.c.
+ */
+typedef enum PgAioSubjectID
+{
+	/* intentionally the zero value, to help catch zeroed memory etc */
+	ASI_INVALID = 0,
+} PgAioSubjectID;
+
+#define ASI_COUNT (ASI_INVALID + 1)
+
+/*
+ * Flags for an IO that can be set with pgaio_io_set_flag().
+ */
+typedef enum PgAioHandleFlags
+{
+	/* hint that IO will be executed synchronously */
+	AHF_SYNCHRONOUS = 1 << 0,
+
+	/* the IO references backend local memory */
+	AHF_REFERENCES_LOCAL = 1 << 1,
+
+	/*
+	 * IO is using buffered IO, used to control heuristic in some IO
+	 * methods. Advantageous to set, if applicable, but not required for
+	 * correctness.
+	 */
+	AHF_BUFFERED = 1 << 2,
+} PgAioHandleFlags;
+
+
+/*
+ * IDs for callbacks that can be registered on an IO.
+ *
+ * Callbacks are identified by an ID rather than a function pointer. There are
+ * two main reasons:
+
+ * 1) Memory within PgAioHandle is precious, due to the number of PgAioHandle
+ *    structs in pre-allocated shared memory.
+
+ * 2) Due to EXEC_BACKEND function pointers are not necessarily stable between
+ *    different backends, therefore function pointers cannot directly be in
+ *    shared memory.
+ *
+ * Without 2), we could fairly easily allow adding new callbacks, by filling a
+ * ID->pointer mapping table on demand. In the presence of 2 that's still
+ * doable, but harder, because every process has to re-register the pointers
+ * so that a local ID->"backend local pointer" mapping can be maintained.
+ */
+typedef enum PgAioHandleSharedCallbackID
+{
+	ASC_INVALID,
+} PgAioHandleSharedCallbackID;
+
+
+/*
+ * Data necessary for basic IO types (PgAioOp).
+ *
+ * NB: Note that the FDs in here may *not* be relied upon for re-issuing
+ * requests (e.g. for partial reads/writes) - the FD might be from another
+ * process, or closed since. That's not a problem for IOs waiting to be issued
+ * only because the queue is flushed when closing an FD.
+ */
+typedef union
+{
+	struct
+	{
+		int			fd;
+		uint16		iov_length;
+		uint64		offset;
+	}			read;
+
+	struct
+	{
+		int			fd;
+		uint16		iov_length;
+		uint64		offset;
+	}			write;
+} PgAioOpData;
+
+
+/* XXX: Perhaps it's worth moving this to a dedicated file? */
+#include "storage/block.h"
+#include "storage/relfilelocator.h"
+
+typedef union PgAioSubjectData
+{
+	/* just as an example placeholder for later */
+	struct
+	{
+		uint32		queue_id;
+	}			wal;
+} PgAioSubjectData;
+
+
+typedef enum PgAioResultStatus
+{
+	ARS_UNKNOWN,	/* not yet completed / uninitialized */
+	ARS_OK,
+	ARS_PARTIAL,	/* did not fully succeed, but no error */
+	ARS_ERROR,
+} PgAioResultStatus;
+
+typedef struct PgAioResult
+{
+	/*
+	 * This is of type PgAioHandleSharedCallbackID, but can't use a bitfield
+	 * of an enum, because some compilers treat enums as signed.
+	 */
+	uint32		id:8;
+
+	/* of type PgAioResultStatus, see above */
+	uint32		status:2;
+
+	/* meaning defined by callback->error */
+	uint32		error_data:22;
+
+	int32		result;
+} PgAioResult;
+
+/*
+ * Result of IO operation, visible only to the initiator of IO.
+ */
+typedef struct PgAioReturn
+{
+	PgAioResult result;
+	PgAioSubjectData subject_data;
+} PgAioReturn;
+
+
+typedef struct PgAioSubjectInfo
+{
+	void		(*reopen) (PgAioHandle *ioh);
+
+#ifdef NOT_YET
+	char	   *(*describe_identity) (PgAioHandle *ioh);
+#endif
+
+	const char *name;
+} PgAioSubjectInfo;
+
+
+typedef PgAioResult (*PgAioHandleSharedCallbackComplete) (PgAioHandle *ioh, PgAioResult prior_result);
+typedef void (*PgAioHandleSharedCallbackPrepare) (PgAioHandle *ioh);
+typedef void (*PgAioHandleSharedCallbackError) (PgAioResult result, const PgAioSubjectData *subject_data, int elevel);
+
+typedef struct PgAioHandleSharedCallbacks
+{
+	PgAioHandleSharedCallbackPrepare prepare;
+	PgAioHandleSharedCallbackComplete complete;
+	PgAioHandleSharedCallbackError error;
+} PgAioHandleSharedCallbacks;
+
+
+
+/*
+ * How many callbacks can be registered for one IO handle. Currently we only
+ * need two, but it's not hard to imagine needing a few more.
+ */
+#define AIO_MAX_SHARED_CALLBACKS	4
+
+
+
+/* AIO API */
+
+
+/* --------------------------------------------------------------------------------
+ * IO Handles
+ * --------------------------------------------------------------------------------
+ */
+
+struct ResourceOwnerData;
+extern PgAioHandle *pgaio_io_get(struct ResourceOwnerData *resowner, PgAioReturn *ret);
+extern PgAioHandle *pgaio_io_get_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret);
+
+extern void pgaio_io_release(PgAioHandle *ioh);
+extern void pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error);
+
+extern void pgaio_io_get_ref(PgAioHandle *ioh, PgAioHandleRef *ior);
+
+extern void pgaio_io_set_subject(PgAioHandle *ioh, PgAioSubjectID subjid);
+extern void pgaio_io_set_flag(PgAioHandle *ioh, PgAioHandleFlags flag);
+
+extern void pgaio_io_add_shared_cb(PgAioHandle *ioh, PgAioHandleSharedCallbackID cbid);
+
+extern void pgaio_io_set_io_data_32(PgAioHandle *ioh, uint32 *data, uint8 len);
+extern void pgaio_io_set_io_data_64(PgAioHandle *ioh, uint64 *data, uint8 len);
+extern uint64 *pgaio_io_get_io_data(PgAioHandle *ioh, uint8 *len);
+
+extern void pgaio_io_prepare(PgAioHandle *ioh, PgAioOp op);
+
+extern int	pgaio_io_get_id(PgAioHandle *ioh);
+struct iovec;
+extern int	pgaio_io_get_iovec(PgAioHandle *ioh, struct iovec **iov);
+extern bool pgaio_io_has_subject(PgAioHandle *ioh);
+
+extern PgAioSubjectData *pgaio_io_get_subject_data(PgAioHandle *ioh);
+extern PgAioOpData *pgaio_io_get_op_data(PgAioHandle *ioh);
+extern ProcNumber pgaio_io_get_owner(PgAioHandle *ioh);
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO References
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_io_ref_clear(PgAioHandleRef *ior);
+extern bool pgaio_io_ref_valid(PgAioHandleRef *ior);
+extern int	pgaio_io_ref_get_id(PgAioHandleRef *ior);
+
+
+extern void pgaio_io_ref_wait(PgAioHandleRef *ior);
+extern bool pgaio_io_ref_check_done(PgAioHandleRef *ior);
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO Result
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_result_log(PgAioResult result, const PgAioSubjectData *subject_data,
+							 int elevel);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Actions on multiple IOs.
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_submit_staged(void);
+extern bool pgaio_have_staged(void);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Low level IO preparation routines
+ *
+ * These will often be called by the code at the lowest level of initiating an
+ * IO. E.g. bufmgr.c may initiate IO for a buffer, but pgaio_io_prep_readv()
+ * will be called from within fd.c.
+ *
+ * Implemented in aio_io.c
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_io_prep_readv(PgAioHandle *ioh,
+								int fd, int iovcnt, uint64 offset);
+
+extern void pgaio_io_prep_writev(PgAioHandle *ioh,
+								 int fd, int iovcnt, uint64 offset);
+
+
+
+/* --------------------------------------------------------------------------------
+ * Other
+ * --------------------------------------------------------------------------------
+ */
+
+extern void pgaio_closing_fd(int fd);
+extern void pgaio_at_xact_end(bool is_subxact, bool is_commit);
+extern void pgaio_at_error(void);
+
+
 /* GUC related */
 extern void assign_io_method(int newval, void *extra);
 
diff --git a/src/include/storage/aio_internal.h b/src/include/storage/aio_internal.h
new file mode 100644
index 00000000000..d600d45b4fd
--- /dev/null
+++ b/src/include/storage/aio_internal.h
@@ -0,0 +1,244 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_internal.h
+ *    aio_internal
+ *
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/aio_internal.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef AIO_INTERNAL_H
+#define AIO_INTERNAL_H
+
+
+#include "lib/ilist.h"
+#include "port/pg_iovec.h"
+#include "storage/aio.h"
+#include "storage/condition_variable.h"
+
+
+#define PGAIO_VERBOSE
+
+
+/* AFIXME */
+#define PGAIO_SUBMIT_BATCH_SIZE 32
+
+
+
+typedef enum PgAioHandleState
+{
+	/* not in use */
+	AHS_IDLE = 0,
+
+	/* returned by pgaio_io_get() */
+	AHS_HANDED_OUT,
+
+	/* pgaio_io_start_*() has been called, but IO hasn't been submitted yet */
+	AHS_DEFINED,
+
+	/* subjects prepare() callback has been called */
+	AHS_PREPARED,
+
+	/* IO is being executed */
+	AHS_IN_FLIGHT,
+
+	/* IO finished, but result has not yet been processed */
+	AHS_REAPED,
+
+	/* IO completed, shared completion has been called */
+	AHS_COMPLETED_SHARED,
+
+	/* IO completed, local completion has been called */
+	AHS_COMPLETED_LOCAL,
+} PgAioHandleState;
+
+
+struct ResourceOwnerData;
+
+/* typedef is in public header */
+struct PgAioHandle
+{
+	/* all state updates should go through pgaio_io_update_state() */
+	PgAioHandleState state:8;
+
+	/* what are we operating on */
+	PgAioSubjectID subject:8;
+
+	/* which operation */
+	PgAioOp		op:8;
+
+	/* bitfield of PgAioHandleFlags */
+	uint8		flags;
+
+	uint8		num_shared_callbacks;
+
+	/* using the proper type here would use more space */
+	uint8		shared_callbacks[AIO_MAX_SHARED_CALLBACKS];
+
+	uint8		iovec_data_len;
+
+	/* XXX: could be optimized out with some pointer math */
+	int32		owner_procno;
+
+	/* FIXME: remove in favor of distilled_result */
+	/* raw result of the IO operation */
+	int32		result;
+
+	/* index into PgAioCtl->iovecs */
+	uint32		iovec_off;
+
+	/**
+	 * In which list the handle is registered, depends on the state:
+	 * - IDLE, in per-backend list
+	 * - HANDED_OUT - not in a list
+	 * - DEFINED - in per-backend staged list
+	 * - PREPARED - in per-backend staged list
+	 * - IN_FLIGHT - in issuer's in_flight list
+	 * - REAPED - in issuer's in_flight list
+	 * - COMPLETED_SHARED - in issuer's in_flight list
+	 * - COMPLETED_LOCAL - in issuer's in_flight list
+	 *
+	 * XXX: It probably makes sense to optimize this out to save on per-io
+	 * memory at the cost of per-backend memory.
+	 **/
+	dlist_node	node;
+
+	struct ResourceOwnerData *resowner;
+	dlist_node	resowner_node;
+
+	/* incremented every time the IO handle is reused */
+	uint64		generation;
+
+	ConditionVariable cv;
+
+	/* result of shared callback, passed to issuer callback */
+	PgAioResult distilled_result;
+
+	PgAioReturn *report_return;
+
+	PgAioOpData op_data;
+
+	/*
+	 * Data necessary for shared completions. Needs to be sufficient to allow
+	 * another backend to retry an IO.
+	 */
+	PgAioSubjectData scb_data;
+};
+
+
+typedef struct PgAioPerBackend
+{
+	/* index into PgAioCtl->io_handles */
+	uint32		io_handle_off;
+
+	/* IO Handles that currently are not used */
+	dclist_head idle_ios;
+
+	/*
+	 * Only one IO may be returned by pgaio_io_get()/pgaio_io_get_nb() without
+	 * having been either defined (by actually associating it with IO) or
+	 * released (with pgaio_io_release()). This restriction is necessary to
+	 * guarantee that we always can acquire an IO. ->handed_out_io is used to
+	 * enforce that rule.
+	 */
+	PgAioHandle *handed_out_io;
+
+	/*
+	 * IOs that are defined, but not yet submitted.
+	 */
+	uint16		num_staged_ios;
+	PgAioHandle *staged_ios[PGAIO_SUBMIT_BATCH_SIZE];
+
+	/*
+	 * List of in-flight IOs. Also contains IOs that aren't strictly speaking
+	 * in-flight anymore, but have been waited-for and completed by another
+	 * backend. Once this backend sees such an IO it'll be reclaimed.
+	 *
+	 * The list is ordered by submission time, with more recently submitted
+	 * IOs being appended at the end.
+	 */
+	dclist_head in_flight_ios;
+} PgAioPerBackend;
+
+
+typedef struct PgAioCtl
+{
+	int			backend_state_count;
+	PgAioPerBackend *backend_state;
+
+	/*
+	 * Array of iovec structs. Each iovec is owned by a specific backend. The
+	 * allocation is in PgAioCtl to allow the maximum number of iovecs for
+	 * individual IOs to be configurable with PGC_POSTMASTER GUC.
+	 */
+	uint64		iovec_count;
+	struct iovec *iovecs;
+
+	/*
+	 * For, e.g., an IO covering multiple buffers in shared / temp buffers, we
+	 * need to get Buffer IDs during completion to be able to change the
+	 * BufferDesc state accordingly. This space can be used to store e.g.
+	 * Buffer IDs.  Note that the actual iovec might be shorter than this,
+	 * because we combine neighboring pages into one larger iovec entry.
+	 */
+	uint64	   *iovecs_data;
+
+	uint64		io_handle_count;
+	PgAioHandle *io_handles;
+} PgAioCtl;
+
+
+
+/*
+ * The set of callbacks that each IO method must implement.
+ */
+typedef struct IoMethodOps
+{
+	/* global initialization */
+	size_t		(*shmem_size) (void);
+	void		(*shmem_init) (bool first_time);
+
+	/* per-backend initialization */
+	void		(*init_backend) (void);
+
+	/* handling of IOs */
+	bool		(*needs_synchronous_execution) (PgAioHandle *ioh);
+	int			(*submit) (uint16 num_staged_ios, PgAioHandle **staged_ios);
+
+	void		(*wait_one) (PgAioHandle *ioh,
+							 uint64 ref_generation);
+} IoMethodOps;
+
+
+extern bool pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state);
+
+extern void pgaio_io_prepare_subject(PgAioHandle *ioh);
+extern void pgaio_io_process_completion_subject(PgAioHandle *ioh);
+extern void pgaio_io_process_completion(PgAioHandle *ioh, int result);
+extern void pgaio_io_prepare_submit(PgAioHandle *ioh);
+
+extern bool pgaio_io_needs_synchronous_execution(PgAioHandle *ioh);
+extern void pgaio_io_perform_synchronously(PgAioHandle *ioh);
+
+extern bool pgaio_io_can_reopen(PgAioHandle *ioh);
+extern void pgaio_io_reopen(PgAioHandle *ioh);
+
+extern const char *pgaio_io_get_subject_name(PgAioHandle *ioh);
+extern const char *pgaio_io_get_op_name(PgAioHandle *ioh);
+extern const char *pgaio_io_get_state_name(PgAioHandle *ioh);
+
+
+/* Declarations for the tables of function pointers exposed by each IO method. */
+extern const IoMethodOps pgaio_sync_ops;
+
+extern const IoMethodOps *pgaio_impl;
+extern PgAioCtl *aio_ctl;
+extern PgAioPerBackend *my_aio;
+
+
+
+#endif							/* AIO_INTERNAL_H */
diff --git a/src/include/storage/aio_ref.h b/src/include/storage/aio_ref.h
new file mode 100644
index 00000000000..ad7e9ad34f3
--- /dev/null
+++ b/src/include/storage/aio_ref.h
@@ -0,0 +1,24 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_ref.h Definition of PgAioHandleRef, which sometimes needs to be used in
+ *    headers.
+ *
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * src/include/storage/aio_ref.h
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef AIO_REF_H
+#define AIO_REF_H
+
+typedef struct PgAioHandleRef
+{
+	uint32		aio_index;
+	uint32		generation_upper;
+	uint32		generation_lower;
+} PgAioHandleRef;
+
+#endif							/* AIO_REF_H */
diff --git a/src/include/utils/resowner.h b/src/include/utils/resowner.h
index 4e534bc3e70..2d55720a54c 100644
--- a/src/include/utils/resowner.h
+++ b/src/include/utils/resowner.h
@@ -164,4 +164,9 @@ struct LOCALLOCK;
 extern void ResourceOwnerRememberLock(ResourceOwner owner, struct LOCALLOCK *locallock);
 extern void ResourceOwnerForgetLock(ResourceOwner owner, struct LOCALLOCK *locallock);
 
+/* special support for AIO */
+struct dlist_node;
+extern void ResourceOwnerRememberAioHandle(ResourceOwner owner, struct dlist_node *ioh_node);
+extern void ResourceOwnerForgetAioHandle(ResourceOwner owner, struct dlist_node *ioh_node);
+
 #endif							/* RESOWNER_H */
diff --git a/src/backend/access/transam/xact.c b/src/backend/access/transam/xact.c
index 3ebd7c40418..0356552c499 100644
--- a/src/backend/access/transam/xact.c
+++ b/src/backend/access/transam/xact.c
@@ -51,6 +51,7 @@
 #include "replication/origin.h"
 #include "replication/snapbuild.h"
 #include "replication/syncrep.h"
+#include "storage/aio.h"
 #include "storage/condition_variable.h"
 #include "storage/fd.h"
 #include "storage/lmgr.h"
@@ -2475,6 +2476,8 @@ CommitTransaction(void)
 	AtEOXact_LogicalRepWorkers(true);
 	pgstat_report_xact_timestamp(0);
 
+	pgaio_at_xact_end( /* is_subxact = */ false, /* is_commit = */ true);
+
 	ResourceOwnerDelete(TopTransactionResourceOwner);
 	s->curTransactionOwner = NULL;
 	CurTransactionResourceOwner = NULL;
@@ -2988,6 +2991,8 @@ AbortTransaction(void)
 		pgstat_report_xact_timestamp(0);
 	}
 
+	pgaio_at_xact_end( /* is_subxact = */ false, /* is_commit = */ false);
+
 	/*
 	 * State remains TRANS_ABORT until CleanupTransaction().
 	 */
@@ -5185,6 +5190,8 @@ CommitSubTransaction(void)
 	AtEOSubXact_PgStat(true, s->nestingLevel);
 	AtSubCommit_Snapshot(s->nestingLevel);
 
+	pgaio_at_xact_end( /* is_subxact = */ true, /* is_commit = */ true);
+
 	/*
 	 * We need to restore the upper transaction's read-only state, in case the
 	 * upper is read-write while the child is read-only; GUC will incorrectly
@@ -5351,6 +5358,8 @@ AbortSubTransaction(void)
 		AtSubAbort_Snapshot(s->nestingLevel);
 	}
 
+	pgaio_at_xact_end( /* is_subxact = */ true, /* is_commit = */ false);
+
 	/*
 	 * Restore the upper transaction's read-only state, too.  This should be
 	 * redundant with GUC's cleanup but we may as well do it for consistency
diff --git a/src/backend/storage/aio/Makefile b/src/backend/storage/aio/Makefile
index eaeaeeee8e3..b253278f3c1 100644
--- a/src/backend/storage/aio/Makefile
+++ b/src/backend/storage/aio/Makefile
@@ -11,6 +11,9 @@ include $(top_builddir)/src/Makefile.global
 OBJS = \
 	aio.o \
 	aio_init.o \
+	aio_io.o \
+	aio_subject.o \
+	method_sync.o \
 	read_stream.o
 
 include $(top_srcdir)/src/backend/common.mk
diff --git a/src/backend/storage/aio/aio.c b/src/backend/storage/aio/aio.c
index 72110c0df3e..3e2ff9718ca 100644
--- a/src/backend/storage/aio/aio.c
+++ b/src/backend/storage/aio/aio.c
@@ -3,6 +3,28 @@
  * aio.c
  *    AIO - Core Logic
  *
+ * For documentation about how AIO works on a higher level, including a
+ * schematic example, see README.md.
+ *
+ *
+ * AIO is a complicated subsystem. To keep things navigable it is split across
+ * a number of files:
+ *
+ * - aio.c - core AIO state handling
+ *
+ * - aio_init.c - initialization
+ *
+ * - aio_io.c - dealing with actual IO, including executing IOs synchronously
+ *
+ * - aio_subject.c - functionality related to executing IO for different
+ *   subjects
+ *
+ * - method_*.c - different ways of executing AIO
+ *
+ * - read_stream.c - helper for accessing buffered relation data with
+ *	 look-ahead
+ *
+ *
  * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
  * Portions Copyright (c) 1994, Regents of the University of California
  *
@@ -14,7 +36,22 @@
 
 #include "postgres.h"
 
+#include "miscadmin.h"
+#include "port/atomics.h"
 #include "storage/aio.h"
+#include "storage/aio_internal.h"
+#include "storage/bufmgr.h"
+#include "utils/resowner.h"
+#include "utils/wait_event_types.h"
+
+
+
+static inline void pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state);
+static void pgaio_io_reclaim(PgAioHandle *ioh);
+static void pgaio_io_resowner_register(PgAioHandle *ioh);
+static void pgaio_io_wait_for_free(void);
+static PgAioHandle *pgaio_io_from_ref(PgAioHandleRef *ior, uint64 *ref_generation);
+
 
 
 /* Options for io_method. */
@@ -27,7 +64,876 @@ int			io_method = DEFAULT_IO_METHOD;
 int			io_max_concurrency = -1;
 
 
+/* global control for AIO */
+PgAioCtl   *aio_ctl;
+
+/* current backend's per-backend state */
+PgAioPerBackend *my_aio;
+
+
+static const IoMethodOps *pgaio_ops_table[] = {
+	[IOMETHOD_SYNC] = &pgaio_sync_ops,
+};
+
+
+const IoMethodOps *pgaio_impl;
+
+
+
+/* --------------------------------------------------------------------------------
+ * "Core" IO Api
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Acquire an AioHandle, waiting for IO completion if necessary.
+ *
+ * Each backend can only have one AIO handle that has been "handed out"
+ * to code, but not yet submitted or released. This restriction is necessary
+ * to ensure that it is possible for code to wait for an unused handle by
+ * waiting for in-flight IO to complete. There is a limited number of handles
+ * in each backend, if multiple handles could be handed out without being
+ * submitted, waiting for all in-flight IO to complete would not guarantee
+ * that handles free up.
+ *
+ * It is cheap to acquire an IO handle, unless all handles are in use. In that
+ * case this function waits for the oldest IO to complete. In case that is not
+ * desirable, see pgaio_io_get_nb().
+ *
+ * If a handle was acquired but then does not turn out to be needed,
+ * e.g. because pgaio_io_get() is called before starting an IO in a critical
+ * section, the handle needs to be released with pgaio_io_release().
+ *
+ *
+ * To react to the completion of the IO as soon as it is known to have
+ * completed, callbacks can be registered with pgaio_io_add_shared_cb().
+ *
+ * To actually execute IO using the returned handle, the pgaio_io_prep_*()
+ * family of functions is used. In many cases the pgaio_io_prep_*() call will
+ * not be done directly by code that acquired the handle, but by lower level
+ * code that gets passed the handle. E.g. if code in bufmgr.c wants to perform
+ * AIO, it typically will pass the handle to smgr.c, which will pass it on to
+ * md.c, on to fd.c, which then finally calls pgaio_io_prep_*().  This
+ * forwarding allows the various layers to react to the IO's completion by
+ * registering callbacks. These callbacks in turn can translate a lower
+ * layer's result into a result understandable by a higher layer.
+ *
+ * Once pgaio_io_prep_*() is called, the IO may be in the process of being
+ * executed and might even complete before the functions return. That is,
+ * however, not guaranteed, to allow IO submission to be batched. To guarantee
+ * IO submission pgaio_submit_staged() needs to be called.
+ *
+ * After pgaio_io_prep_*() the AioHandle is "consumed" and may not be
+ * referenced by the IO issuing code. To e.g. wait for IO, references to the
+ * IO can be established with pgaio_io_get_ref() *before* pgaio_io_prep_*() is
+ * called.  pgaio_io_ref_wait() can be used to wait for the IO to complete.
+ *
+ *
+ * To know if the IO [partially] succeeded or failed, a PgAioReturn * can be
+ * passed to pgaio_io_get(). Once the issuing backend has called
+ * pgaio_io_ref_wait(), the PgAioReturn contains information about whether the
+ * operation succeeded and details about the first failure, if any. The error
+ * can be raised / logged with pgaio_result_log().
+ *
+ * The lifetime of the memory pointed to by *ret needs to be at least as long
+ * as the passed in resowner. If the resowner releases resources before the IO
+ * completes, the reference to *ret will be cleared.
+ */
+PgAioHandle *
+pgaio_io_get(struct ResourceOwnerData *resowner, PgAioReturn *ret)
+{
+	PgAioHandle *h;
+
+	while (true)
+	{
+		h = pgaio_io_get_nb(resowner, ret);
+
+		if (h != NULL)
+			return h;
+
+		/*
+		 * Evidently all handles by this backend are in use. Just wait for
+		 * some to complete.
+		 */
+		pgaio_io_wait_for_free();
+	}
+}
+
+/*
+ * Acquire an AioHandle, returning NULL if no handles are free.
+ *
+ * See pgaio_io_get(). The only difference is that this function will return
+ * NULL if there are no idle handles, instead of blocking.
+ */
+PgAioHandle *
+pgaio_io_get_nb(struct ResourceOwnerData *resowner, PgAioReturn *ret)
+{
+	if (my_aio->num_staged_ios >= PGAIO_SUBMIT_BATCH_SIZE)
+	{
+		Assert(my_aio->num_staged_ios == PGAIO_SUBMIT_BATCH_SIZE);
+		pgaio_submit_staged();
+	}
+
+	if (my_aio->handed_out_io)
+	{
+		ereport(ERROR,
+				errmsg("API violation: Only one IO can be handed out"));
+	}
+
+	if (!dclist_is_empty(&my_aio->idle_ios))
+	{
+		dlist_node *ion = dclist_pop_head_node(&my_aio->idle_ios);
+		PgAioHandle *ioh = dclist_container(PgAioHandle, node, ion);
+
+		Assert(ioh->state == AHS_IDLE);
+		Assert(ioh->owner_procno == MyProcNumber);
+
+		pgaio_io_update_state(ioh, AHS_HANDED_OUT);
+		my_aio->handed_out_io = ioh;
+
+		if (resowner)
+			pgaio_io_resowner_register(ioh);
+
+		if (ret)
+		{
+			ioh->report_return = ret;
+			ret->result.status = ARS_UNKNOWN;
+		}
+
+		return ioh;
+	}
+
+	return NULL;
+}
+
+/*
+ * Release IO handle that turned out to not be required.
+ *
+ * See pgaio_io_get() for more details.
+ */
+void
+pgaio_io_release(PgAioHandle *ioh)
+{
+	if (ioh == my_aio->handed_out_io)
+	{
+		Assert(ioh->state == AHS_HANDED_OUT);
+		Assert(ioh->resowner);
+
+		my_aio->handed_out_io = NULL;
+		pgaio_io_reclaim(ioh);
+	}
+	else
+	{
+		elog(ERROR, "release in unexpected state");
+	}
+}
+
+/*
+ * Release IO handle during resource owner cleanup.
+ */
+void
+pgaio_io_release_resowner(dlist_node *ioh_node, bool on_error)
+{
+	PgAioHandle *ioh = dlist_container(PgAioHandle, resowner_node, ioh_node);
+
+	Assert(ioh->resowner);
+
+	ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
+	ioh->resowner = NULL;
+
+	switch (ioh->state)
+	{
+		case AHS_IDLE:
+			elog(ERROR, "unexpected");
+			break;
+		case AHS_HANDED_OUT:
+			Assert(ioh == my_aio->handed_out_io || my_aio->handed_out_io == NULL);
+
+			if (ioh == my_aio->handed_out_io)
+			{
+				my_aio->handed_out_io = NULL;
+				if (!on_error)
+					elog(WARNING, "leaked AIO handle");
+			}
+
+			pgaio_io_reclaim(ioh);
+			break;
+		case AHS_DEFINED:
+		case AHS_PREPARED:
+			/* XXX: Should we warn about this when is_commit? */
+			pgaio_submit_staged();
+			break;
+		case AHS_IN_FLIGHT:
+		case AHS_REAPED:
+		case AHS_COMPLETED_SHARED:
+			/* this is expected to happen */
+			break;
+		case AHS_COMPLETED_LOCAL:
+			/* XXX: unclear if this ought to be possible? */
+			pgaio_io_reclaim(ioh);
+			break;
+	}
+
+	/*
+	 * Need to unregister the reporting of the IO's result, the memory it's
+	 * referencing likely has gone away.
+	 */
+	if (ioh->report_return)
+		ioh->report_return = NULL;
+}
+
+int
+pgaio_io_get_iovec(PgAioHandle *ioh, struct iovec **iov)
+{
+	Assert(ioh->state == AHS_HANDED_OUT);
+
+	*iov = &aio_ctl->iovecs[ioh->iovec_off];
+
+	/* AFIXME: Needs to be the value at startup time */
+	return io_combine_limit;
+}
+
+PgAioSubjectData *
+pgaio_io_get_subject_data(PgAioHandle *ioh)
+{
+	return &ioh->scb_data;
+}
+
+PgAioOpData *
+pgaio_io_get_op_data(PgAioHandle *ioh)
+{
+	return &ioh->op_data;
+}
+
+ProcNumber
+pgaio_io_get_owner(PgAioHandle *ioh)
+{
+	return ioh->owner_procno;
+}
+
+bool
+pgaio_io_has_subject(PgAioHandle *ioh)
+{
+	return ioh->subject != ASI_INVALID;
+}
+
+void
+pgaio_io_set_flag(PgAioHandle *ioh, PgAioHandleFlags flag)
+{
+	Assert(ioh->state == AHS_HANDED_OUT);
+
+	ioh->flags |= flag;
+}
+
+void
+pgaio_io_set_io_data_32(PgAioHandle *ioh, uint32 *data, uint8 len)
+{
+	Assert(ioh->state == AHS_HANDED_OUT);
+
+	for (int i = 0; i < len; i++)
+		aio_ctl->iovecs_data[ioh->iovec_off + i] = data[i];
+	ioh->iovec_data_len = len;
+}
+
+uint64 *
+pgaio_io_get_io_data(PgAioHandle *ioh, uint8 *len)
+{
+	Assert(ioh->iovec_data_len > 0);
+
+	*len = ioh->iovec_data_len;
+
+	return &aio_ctl->iovecs_data[ioh->iovec_off];
+}
+
+void
+pgaio_io_set_subject(PgAioHandle *ioh, PgAioSubjectID subjid)
+{
+	Assert(ioh->state == AHS_HANDED_OUT);
+
+	ioh->subject = subjid;
+
+	elog(DEBUG3, "io:%d, op %s, subject %s, set subject",
+		 pgaio_io_get_id(ioh),
+		 pgaio_io_get_op_name(ioh),
+		 pgaio_io_get_subject_name(ioh));
+}
+
+void
+pgaio_io_get_ref(PgAioHandle *ioh, PgAioHandleRef *ior)
+{
+	Assert(ioh->state == AHS_HANDED_OUT ||
+		   ioh->state == AHS_DEFINED ||
+		   ioh->state == AHS_PREPARED);
+	Assert(ioh->generation != 0);
+
+	ior->aio_index = ioh - aio_ctl->io_handles;
+	ior->generation_upper = (uint32) (ioh->generation >> 32);
+	ior->generation_lower = (uint32) ioh->generation;
+}
+
+void
+pgaio_io_ref_clear(PgAioHandleRef *ior)
+{
+	ior->aio_index = PG_UINT32_MAX;
+}
+
+bool
+pgaio_io_ref_valid(PgAioHandleRef *ior)
+{
+	return ior->aio_index != PG_UINT32_MAX;
+}
+
+int
+pgaio_io_ref_get_id(PgAioHandleRef *ior)
+{
+	Assert(pgaio_io_ref_valid(ior));
+	return ior->aio_index;
+}
+
+bool
+pgaio_io_was_recycled(PgAioHandle *ioh, uint64 ref_generation, PgAioHandleState *state)
+{
+	*state = ioh->state;
+	pg_read_barrier();
+
+	return ioh->generation != ref_generation;
+}
+
+/*
+ * Wait for the IO referenced by *ior to complete.
+ *
+ * Returns immediately if the handle has already been recycled for a newer
+ * IO, i.e. the referenced IO completed at some earlier point.  If the caller
+ * is the IO's owner, completed IOs are reclaimed eagerly so the issuer
+ * callbacks get executed.
+ */
+void
+pgaio_io_ref_wait(PgAioHandleRef *ior)
+{
+	uint64		ref_generation;
+	PgAioHandleState state;
+	bool		am_owner;
+	PgAioHandle *ioh;
+
+	ioh = pgaio_io_from_ref(ior, &ref_generation);
+
+	am_owner = ioh->owner_procno == MyProcNumber;
+
+	if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+		return;
+
+	if (am_owner)
+	{
+		if (state == AHS_DEFINED || state == AHS_PREPARED)
+		{
+			/* XXX: Arguably this should be prevented by callers? */
+			pgaio_submit_staged();
+		}
+		else if (state != AHS_IN_FLIGHT
+				 && state != AHS_REAPED
+				 && state != AHS_COMPLETED_SHARED
+				 && state != AHS_COMPLETED_LOCAL)
+		{
+			elog(PANIC, "waiting for own IO in wrong state: %d",
+				 state);
+		}
+
+		/*
+		 * Somebody else completed the IO, need to execute issuer callback, so
+		 * reclaim eagerly.
+		 */
+		if (state == AHS_COMPLETED_LOCAL)
+		{
+			pgaio_io_reclaim(ioh);
+
+			return;
+		}
+	}
+
+	while (true)
+	{
+		if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+			return;
+
+		switch (state)
+		{
+			case AHS_IDLE:
+			case AHS_HANDED_OUT:
+				elog(ERROR, "IO in wrong state: %d", state);
+				break;
+
+			case AHS_IN_FLIGHT:
+				/*
+				 * If we need to wait via the IO method, do so now. Don't
+				 * check via the IO method if the issuing backend is executing
+				 * the IO synchronously.
+				 */
+				if (pgaio_impl->wait_one && !(ioh->flags & AHF_SYNCHRONOUS))
+				{
+					pgaio_impl->wait_one(ioh, ref_generation);
+					continue;
+				}
+				/* fallthrough */
+
+				/* waiting for owner to submit */
+			case AHS_PREPARED:
+			case AHS_DEFINED:
+				/* waiting for reaper to complete */
+				/* fallthrough */
+			case AHS_REAPED:
+				/* shouldn't be able to hit this otherwise */
+				Assert(IsUnderPostmaster);
+				/* ensure we're going to get woken up */
+				ConditionVariablePrepareToSleep(&ioh->cv);
+
+				while (!pgaio_io_was_recycled(ioh, ref_generation, &state))
+				{
+					if (state != AHS_REAPED && state != AHS_DEFINED &&
+						state != AHS_IN_FLIGHT)
+						break;
+					ConditionVariableSleep(&ioh->cv, WAIT_EVENT_AIO_COMPLETION);
+				}
+
+				ConditionVariableCancelSleep();
+				break;
+
+			case AHS_COMPLETED_SHARED:
+				/* see above */
+				if (am_owner)
+					pgaio_io_reclaim(ioh);
+				return;
+			case AHS_COMPLETED_LOCAL:
+				return;
+		}
+	}
+}
+
+/*
+ * Check if the referenced IO completed, without blocking.
+ */
+bool
+pgaio_io_ref_check_done(PgAioHandleRef *ior)
+{
+	uint64		ref_generation;
+	PgAioHandleState state;
+	bool		am_owner;
+	PgAioHandle *ioh;
+
+	ioh = pgaio_io_from_ref(ior, &ref_generation);
+
+	/* a recycled handle means the referenced IO completed some time ago */
+	if (pgaio_io_was_recycled(ioh, ref_generation, &state))
+		return true;
+
+	if (state == AHS_IDLE)
+		return true;
+
+	am_owner = ioh->owner_procno == MyProcNumber;
+
+	if (state == AHS_COMPLETED_SHARED || state == AHS_COMPLETED_LOCAL)
+	{
+		/* as the owner, reclaim eagerly so the handle can be reused */
+		if (am_owner)
+			pgaio_io_reclaim(ioh);
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * Return the ID (index into the shared IO handle array) for the IO handle.
+ * Mostly useful for debugging/logging.
+ */
+int
+pgaio_io_get_id(PgAioHandle *ioh)
+{
+	/*
+	 * A valid handle must point at an element of the array, i.e. strictly
+	 * below the one-past-the-end address.
+	 */
+	Assert(ioh >= aio_ctl->io_handles &&
+		   ioh < (aio_ctl->io_handles + aio_ctl->io_handle_count));
+	return ioh - aio_ctl->io_handles;
+}
+
+/*
+ * Return a descriptive name for the handle's current state, for
+ * debugging/logging.  All names are lowercase, matching the style of
+ * pgaio_io_get_op_name().
+ */
+const char *
+pgaio_io_get_state_name(PgAioHandle *ioh)
+{
+	switch (ioh->state)
+	{
+		case AHS_IDLE:
+			return "idle";
+		case AHS_HANDED_OUT:
+			return "handed_out";
+		case AHS_DEFINED:
+			return "defined";
+		case AHS_PREPARED:
+			return "prepared";
+		case AHS_IN_FLIGHT:
+			return "in_flight";
+		case AHS_REAPED:
+			return "reaped";
+		case AHS_COMPLETED_SHARED:
+			return "completed_shared";
+		case AHS_COMPLETED_LOCAL:
+			return "completed_local";
+	}
+	pg_unreachable();
+}
+
+/*
+ * Internal, should only be called from pgaio_io_prep_*().
+ *
+ * Transitions the handle from HANDED_OUT to PREPARED (via DEFINED), running
+ * the subject prepare callbacks along the way.  The IO is then either staged
+ * for batched submission or, if it has to be executed synchronously,
+ * performed immediately.
+ */
+void
+pgaio_io_prepare(PgAioHandle *ioh, PgAioOp op)
+{
+	bool		needs_synchronous;
+
+	Assert(ioh->state == AHS_HANDED_OUT);
+	Assert(pgaio_io_has_subject(ioh));
+
+	ioh->op = op;
+	ioh->result = 0;
+
+	pgaio_io_update_state(ioh, AHS_DEFINED);
+
+	/* allow a new IO to be staged */
+	my_aio->handed_out_io = NULL;
+
+	pgaio_io_prepare_subject(ioh);
+
+	pgaio_io_update_state(ioh, AHS_PREPARED);
+
+	needs_synchronous = pgaio_io_needs_synchronous_execution(ioh);
+
+	elog(DEBUG3, "io:%d: prepared %s, executed synchronously: %d",
+		 pgaio_io_get_id(ioh), pgaio_io_get_op_name(ioh),
+		 needs_synchronous);
+
+	if (!needs_synchronous)
+	{
+		my_aio->staged_ios[my_aio->num_staged_ios++] = ioh;
+		Assert(my_aio->num_staged_ios <= PGAIO_SUBMIT_BATCH_SIZE);
+	}
+	else
+	{
+		pgaio_io_prepare_submit(ioh);
+		pgaio_io_perform_synchronously(ioh);
+	}
+}
+
+/*
+ * Handle IO getting completed by a method.
+ *
+ * Stores the raw result, runs the subject completion callbacks, publishes
+ * the COMPLETED_SHARED state and wakes up any waiters.  If the completing
+ * process is also the IO's owner, the handle is reclaimed right away.
+ */
+void
+pgaio_io_process_completion(PgAioHandle *ioh, int result)
+{
+	Assert(ioh->state == AHS_IN_FLIGHT);
+
+	ioh->result = result;
+
+	pgaio_io_update_state(ioh, AHS_REAPED);
+
+	pgaio_io_process_completion_subject(ioh);
+
+	pgaio_io_update_state(ioh, AHS_COMPLETED_SHARED);
+
+	/* condition variable broadcast ensures state is visible before wakeup */
+	ConditionVariableBroadcast(&ioh->cv);
+
+	if (ioh->owner_procno == MyProcNumber)
+		pgaio_io_reclaim(ioh);
+}
+
+/*
+ * Does this IO have to be executed synchronously?  True either when the
+ * issuer requested it (AHF_SYNCHRONOUS) or when the IO method reports that
+ * it cannot execute the IO asynchronously.
+ */
+bool
+pgaio_io_needs_synchronous_execution(PgAioHandle *ioh)
+{
+	if (ioh->flags & AHF_SYNCHRONOUS)
+	{
+		/* XXX: should we also check if there are other IOs staged? */
+		return true;
+	}
+
+	if (pgaio_impl->needs_synchronous_execution)
+		return pgaio_impl->needs_synchronous_execution(ioh);
+	return false;
+}
+
+/*
+ * Handle IO being processed by IO method.
+ *
+ * Transitions the handle to IN_FLIGHT and tracks it in the owner's
+ * in-flight list, so it can be waited for / reclaimed later.
+ */
+void
+pgaio_io_prepare_submit(PgAioHandle *ioh)
+{
+	pgaio_io_update_state(ioh, AHS_IN_FLIGHT);
+
+	dclist_push_tail(&my_aio->in_flight_ios, &ioh->node);
+}
+
+/*
+ * Set the handle's state, with the memory ordering required for readers in
+ * other backends.
+ */
+static inline void
+pgaio_io_update_state(PgAioHandle *ioh, PgAioHandleState new_state)
+{
+	/*
+	 * Ensure the changes signified by the new state are visible before the
+	 * new state becomes visible.  Pairs with the read barrier in
+	 * pgaio_io_was_recycled().
+	 */
+	pg_write_barrier();
+
+	ioh->state = new_state;
+}
+
+/*
+ * Resolve a PgAioHandleRef to the underlying handle.  The generation the
+ * reference was created for is returned in *ref_generation, allowing the
+ * caller to detect whether the handle has since been recycled.
+ */
+static PgAioHandle *
+pgaio_io_from_ref(PgAioHandleRef *ior, uint64 *ref_generation)
+{
+	PgAioHandle *ioh;
+
+	Assert(ior->aio_index < aio_ctl->io_handle_count);
+
+	ioh = &aio_ctl->io_handles[ior->aio_index];
+
+	/* reassemble the generation from its two 32 bit halves */
+	*ref_generation = ((uint64) ior->generation_upper) << 32 |
+		ior->generation_lower;
+
+	Assert(*ref_generation != 0);
+
+	return ioh;
+}
+
+/*
+ * Register the handle with CurrentResourceOwner, so that it gets released
+ * during resource-owner cleanup (see resowner.c).
+ */
+static void
+pgaio_io_resowner_register(PgAioHandle *ioh)
+{
+	Assert(!ioh->resowner);
+	Assert(CurrentResourceOwner);
+
+	ResourceOwnerRememberAioHandle(CurrentResourceOwner, &ioh->resowner_node);
+	ioh->resowner = CurrentResourceOwner;
+}
+
+/*
+ * Reset a handle owned by this backend and return it to the idle list.
+ * Bumping the generation invalidates any outstanding PgAioHandleRefs
+ * pointing at this handle.
+ */
+static void
+pgaio_io_reclaim(PgAioHandle *ioh)
+{
+	/* This is only ok if it's our IO */
+	Assert(ioh->owner_procno == MyProcNumber);
+
+	ereport(DEBUG3,
+			errmsg("reclaiming io:%d, state: %s, op %s, subject %s, result: %d, distilled_result: AFIXME, report to: %p",
+				   pgaio_io_get_id(ioh),
+				   pgaio_io_get_state_name(ioh),
+				   pgaio_io_get_op_name(ioh),
+				   pgaio_io_get_subject_name(ioh),
+				   ioh->result,
+				   ioh->report_return
+				   ),
+			errhidestmt(true), errhidecontext(true));
+
+	/* if the IO has been defined, we might need to do more work */
+	if (ioh->state != AHS_HANDED_OUT)
+	{
+		dclist_delete_from(&my_aio->in_flight_ios, &ioh->node);
+
+		/* report the distilled result back to the issuer, if requested */
+		if (ioh->report_return)
+		{
+			ioh->report_return->result = ioh->distilled_result;
+			ioh->report_return->subject_data = ioh->scb_data;
+		}
+	}
+
+	if (ioh->resowner)
+	{
+		ResourceOwnerForgetAioHandle(ioh->resowner, &ioh->resowner_node);
+		ioh->resowner = NULL;
+	}
+
+	Assert(!ioh->resowner);
+
+	/* reset per-IO fields for the next use of the handle */
+	ioh->num_shared_callbacks = 0;
+	ioh->iovec_data_len = 0;
+	ioh->report_return = NULL;
+	ioh->flags = 0;
+
+	/* XXX: the barrier is probably superfluous */
+	pg_write_barrier();
+	ioh->generation++;
+
+	pgaio_io_update_state(ioh, AHS_IDLE);
+
+	/*
+	 * We push the IO to the head of the idle IO list, that seems more cache
+	 * efficient in cases where only a few IOs are used.
+	 */
+	dclist_push_head(&my_aio->idle_ios, &ioh->node);
+}
+
+/*
+ * Make at least one IO handle available for reuse: first reclaim any of our
+ * IOs that already completed, otherwise submit staged IOs and wait for the
+ * oldest in-flight IO to complete.
+ */
+static void
+pgaio_io_wait_for_free(void)
+{
+	int			reclaimed = 0;
+
+	elog(DEBUG2,
+		 "waiting for self: %d pending",
+		 my_aio->num_staged_ios);
+
+	/*
+	 * First check if any of our IOs actually have completed - when using
+	 * worker, that'll often be the case. We could do so as part of the loop
+	 * below, but that'd potentially lead us to wait for some IO submitted
+	 * before.
+	 */
+	for (int i = 0; i < io_max_concurrency; i++)
+	{
+		PgAioHandle *ioh = &aio_ctl->io_handles[my_aio->io_handle_off + i];
+
+		if (ioh->state == AHS_COMPLETED_SHARED)
+		{
+			pgaio_io_reclaim(ioh);
+			reclaimed++;
+		}
+	}
+
+	if (reclaimed > 0)
+		return;
+
+	/*
+	 * If we have any unsubmitted IOs, submit them now. We'll start waiting in
+	 * a second, so it's better they're in flight. This also addresses the
+	 * edge-case that all IOs are unsubmitted.
+	 */
+	if (my_aio->num_staged_ios > 0)
+	{
+		elog(DEBUG2, "submitting while acquiring free io");
+		pgaio_submit_staged();
+	}
+
+	/*
+	 * It's possible that we recognized there were free IOs while submitting.
+	 */
+	if (dclist_count(&my_aio->in_flight_ios) == 0)
+	{
+		elog(ERROR, "no free IOs despite no in-flight IOs");
+	}
+
+	/*
+	 * Wait for the oldest in-flight IO to complete.
+	 *
+	 * XXX: Reusing the general IO wait is suboptimal, we don't need to wait
+	 * for that specific IO to complete, we just need *any* IO to complete.
+	 */
+	{
+		PgAioHandle *ioh = dclist_head_element(PgAioHandle, node, &my_aio->in_flight_ios);
+
+		switch (ioh->state)
+		{
+			/* should not be in in-flight list */
+			case AHS_IDLE:
+			case AHS_DEFINED:
+			case AHS_HANDED_OUT:
+			case AHS_PREPARED:
+			case AHS_COMPLETED_LOCAL:
+				elog(ERROR, "shouldn't get here with io:%d in state %d",
+					 pgaio_io_get_id(ioh), ioh->state);
+				break;
+
+			case AHS_REAPED:
+			case AHS_IN_FLIGHT:
+				{
+					PgAioHandleRef ior;
+
+					/* build a reference to the IO, so we can wait for it */
+					ior.aio_index = ioh - aio_ctl->io_handles;
+					ior.generation_upper = (uint32) (ioh->generation >> 32);
+					ior.generation_lower = (uint32) ioh->generation;
+
+					pgaio_io_ref_wait(&ior);
+					elog(DEBUG2, "waited for io:%d",
+						 pgaio_io_get_id(ioh));
+				}
+				break;
+			case AHS_COMPLETED_SHARED:
+				/* it's possible that another backend just finished this IO */
+				pgaio_io_reclaim(ioh);
+				break;
+		}
+
+		if (dclist_count(&my_aio->idle_ios) == 0)
+			elog(PANIC, "no idle IOs after waiting");
+		return;
+	}
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Actions on multiple IOs.
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Submit all staged (i.e. prepared but not yet submitted) IOs to the IO
+ * method.
+ *
+ * Submission happens in a critical section, as once handed to the method
+ * the IOs cannot simply be abandoned.
+ */
+void
+pgaio_submit_staged(void)
+{
+	int			did_submit;
+
+	if (my_aio->num_staged_ios == 0)
+		return;
+
+	START_CRIT_SECTION();
+
+	did_submit = pgaio_impl->submit(my_aio->num_staged_ios, my_aio->staged_ios);
+
+	END_CRIT_SECTION();
+
+	my_aio->num_staged_ios = 0;
+
+#ifdef PGAIO_VERBOSE
+	ereport(DEBUG2,
+			errmsg("submitted %d",
+				   did_submit),
+			errhidestmt(true),
+			errhidecontext(true));
+#endif
+}
+
+/*
+ * Are there any staged but not yet submitted IOs in this backend?
+ */
+bool
+pgaio_have_staged(void)
+{
+	return my_aio->num_staged_ios > 0;
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Other
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Need to submit staged but not yet submitted IOs using the fd, otherwise
+ * the IO would end up targeting something bogus.  Called before closing an
+ * fd; currently all staged IOs are submitted, regardless of 'fd'.
+ */
+void
+pgaio_closing_fd(int fd)
+{
+	/*
+	 * Might be called before AIO is initialized or in a subprocess that
+	 * doesn't use AIO.
+	 */
+	if (!my_aio)
+		return;
+
+	/*
+	 * For now just submit all staged IOs - we could be more selective, but
+	 * it's probably not worth it.
+	 */
+	pgaio_submit_staged();
+}
+
+/*
+ * Check AIO state at (sub)transaction end.  Currently only asserts that no
+ * IO handle is still handed out; the parameters are not yet used.
+ */
+void
+pgaio_at_xact_end(bool is_subxact, bool is_commit)
+{
+	Assert(!my_aio->handed_out_io);
+}
+
+/*
+ * Similar to pgaio_at_xact_end(..., is_commit = false), but for cases where
+ * errors happen outside of transactions.
+ */
+void
+pgaio_at_error(void)
+{
+	Assert(!my_aio->handed_out_io);
+}
+
+
 void
 assign_io_method(int newval, void *extra)
 {
+	/* assign hook for the io_method GUC: switch to the selected IO method */
 	pgaio_impl = pgaio_ops_table[newval];
 }
diff --git a/src/backend/storage/aio/aio_init.c b/src/backend/storage/aio/aio_init.c
index 84e0e37baae..b9bdf51680a 100644
--- a/src/backend/storage/aio/aio_init.c
+++ b/src/backend/storage/aio/aio_init.c
@@ -14,28 +14,206 @@
 
 #include "postgres.h"
 
+#include "miscadmin.h"
+#include "storage/aio.h"
 #include "storage/aio_init.h"
+#include "storage/aio_internal.h"
+#include "storage/bufmgr.h"
+#include "storage/proc.h"
+#include "storage/shmem.h"
 
 
+/* shared memory needed for the PgAioCtl struct itself (sans trailing array) */
+static Size
+AioCtlShmemSize(void)
+{
+	Size		sz;
+
+	/* aio_ctl itself */
+	sz = offsetof(PgAioCtl, io_handles);
+
+	return sz;
+}
+
+/* number of processes that may perform AIO: backends plus aux processes */
+static uint32
+AioProcs(void)
+{
+	return MaxBackends + NUM_AUXILIARY_PROCS;
+}
+
+/* shared memory for per-backend AIO state, one entry per AIO-capable proc */
+static Size
+AioBackendShmemSize(void)
+{
+	return mul_size(AioProcs(), sizeof(PgAioPerBackend));
+}
+
+/* shared memory for the IO handles: io_max_concurrency per proc */
+static Size
+AioHandleShmemSize(void)
+{
+	Size		sz;
+
+	/* ios */
+	sz = mul_size(AioProcs(),
+				  mul_size(io_max_concurrency, sizeof(PgAioHandle)));
+
+	return sz;
+}
+
+/* shared memory for iovecs: io_combine_limit iovecs per IO handle */
+static Size
+AioIOVShmemSize(void)
+{
+	/* FIXME: io_combine_limit is USERSET */
+	return mul_size(sizeof(struct iovec),
+					mul_size(mul_size(io_combine_limit, AioProcs()),
+							 io_max_concurrency));
+}
+
+/* shared memory for per-iovec caller data: one uint64 per iovec */
+static Size
+AioIOVDataShmemSize(void)
+{
+	/* FIXME: io_combine_limit is USERSET */
+	return mul_size(sizeof(uint64),
+					mul_size(mul_size(io_combine_limit, AioProcs()),
+							 io_max_concurrency));
+}
+
+/*
+ * Choose a suitable value for io_max_concurrency.
+ *
+ * It's unlikely that we could have more IOs in flight than buffers that we
+ * would be allowed to pin.
+ *
+ * On the upper end, apply a cap too - just because shared_buffers is large,
+ * it doesn't make sense to have millions of buffers undergo IO concurrently.
+ *
+ * NOTE(review): "Conccurrency" in the function name is misspelled; renaming
+ * would also require updating the caller in AioShmemSize().
+ */
+static int
+AioChooseMaxConccurrency(void)
+{
+	uint32		max_backends;
+	int			max_proportional_pins;
+
+	/* Similar logic to LimitAdditionalPins() */
+	max_backends = MaxBackends + NUM_AUXILIARY_PROCS;
+	max_proportional_pins = NBuffers / max_backends;
+
+	/* always allow at least one in-flight IO per backend */
+	max_proportional_pins = Max(max_proportional_pins, 1);
+
+	/* apply upper limit */
+	return Min(max_proportional_pins, 64);
+}
+
 Size
 AioShmemSize(void)
 {
 	Size		sz = 0;
 
+	/*
+	 * If io_max_concurrency is -1, substitute the computed default.  We
+	 * prefer to report this value's source as PGC_S_DYNAMIC_DEFAULT.
+	 * However, if the DBA explicitly set io_max_concurrency = -1 in the
+	 * config file, then PGC_S_DYNAMIC_DEFAULT will fail to override that and
+	 * we must force the matter with PGC_S_OVERRIDE.
+	 */
+	if (io_max_concurrency == -1)
+	{
+		char		buf[32];
+
+		snprintf(buf, sizeof(buf), "%d", AioChooseMaxConccurrency());
+		SetConfigOption("io_max_concurrency", buf, PGC_POSTMASTER,
+						PGC_S_DYNAMIC_DEFAULT);
+		if (io_max_concurrency == -1)	/* failed to apply it? */
+			SetConfigOption("io_max_concurrency", buf, PGC_POSTMASTER,
+							PGC_S_OVERRIDE);
+	}
+
+	sz = add_size(sz, AioCtlShmemSize());
+	sz = add_size(sz, AioBackendShmemSize());
+	sz = add_size(sz, AioHandleShmemSize());
+	sz = add_size(sz, AioIOVShmemSize());
+	sz = add_size(sz, AioIOVDataShmemSize());
+
+	/* add shared memory required by the configured IO method, if any */
+	if (pgaio_impl->shmem_size)
+		sz = add_size(sz, pgaio_impl->shmem_size());
+
 	return sz;
 }
 
 void
 AioShmemInit(void)
 {
+	bool		found;
+	uint32		io_handle_off = 0;
+	uint32		iovec_off = 0;
+	uint32		per_backend_iovecs = io_max_concurrency * io_combine_limit;
+
+	aio_ctl = (PgAioCtl *)
+		ShmemInitStruct("AioCtl", AioCtlShmemSize(), &found);
+
+	/* if AioCtl already existed, the other structs were initialized too */
+	if (found)
+		goto out;
+
+	memset(aio_ctl, 0, AioCtlShmemSize());
+
+	aio_ctl->io_handle_count = AioProcs() * io_max_concurrency;
+	aio_ctl->iovec_count = AioProcs() * per_backend_iovecs;
+
+	aio_ctl->backend_state = (PgAioPerBackend *)
+		ShmemInitStruct("AioBackend", AioBackendShmemSize(), &found);
+
+	aio_ctl->io_handles = (PgAioHandle *)
+		ShmemInitStruct("AioHandle", AioHandleShmemSize(), &found);
+
+	aio_ctl->iovecs = ShmemInitStruct("AioIOV", AioIOVShmemSize(), &found);
+	aio_ctl->iovecs_data = ShmemInitStruct("AioIOVData", AioIOVDataShmemSize(), &found);
+
+	/* carve out each backend's contiguous range of handles and iovecs */
+	for (int procno = 0; procno < AioProcs(); procno++)
+	{
+		PgAioPerBackend *bs = &aio_ctl->backend_state[procno];
+
+		bs->io_handle_off = io_handle_off;
+		io_handle_off += io_max_concurrency;
+
+		dclist_init(&bs->idle_ios);
+		memset(bs->staged_ios, 0, sizeof(PgAioHandle *) * PGAIO_SUBMIT_BATCH_SIZE);
+		dclist_init(&bs->in_flight_ios);
+
+		/* initialize per-backend IOs */
+		for (int i = 0; i < io_max_concurrency; i++)
+		{
+			PgAioHandle *ioh = &aio_ctl->io_handles[bs->io_handle_off + i];
+
+			/* generation 0 is reserved as invalid, start at 1 */
+			ioh->generation = 1;
+			ioh->owner_procno = procno;
+			ioh->iovec_off = iovec_off;
+			ioh->iovec_data_len = 0;
+			ioh->report_return = NULL;
+			ioh->resowner = NULL;
+			ioh->num_shared_callbacks = 0;
+			ioh->distilled_result.status = ARS_UNKNOWN;
+			ioh->flags = 0;
+
+			ConditionVariableInit(&ioh->cv);
+
+			dclist_push_tail(&bs->idle_ios, &ioh->node);
+			iovec_off += io_combine_limit;
+		}
+	}
+
+out:
+	/* Initialize IO method specific resources. */
+	if (pgaio_impl->shmem_init)
+		pgaio_impl->shmem_init(!found);
 }
 
 void
 pgaio_init_backend(void)
 {
-}
+	/* shouldn't be initialized twice */
+	Assert(!my_aio);
+
+	/* AIO state is indexed by MyProcNumber, so a normal PGPROC is required */
+	if (MyProc == NULL || MyProcNumber >= AioProcs())
+		elog(ERROR, "aio requires a normal PGPROC");
+
+	my_aio = &aio_ctl->backend_state[MyProcNumber];
 
-void
-pgaio_postmaster_child_init_local(void)
-{
+	if (pgaio_impl->init_backend)
+		pgaio_impl->init_backend();
 }
diff --git a/src/backend/storage/aio/aio_io.c b/src/backend/storage/aio/aio_io.c
new file mode 100644
index 00000000000..3c255775833
--- /dev/null
+++ b/src/backend/storage/aio/aio_io.c
@@ -0,0 +1,140 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_io.c
+ *    AIO - Low Level IO Handling
+ *
+ * Functions related to associating IO operations to IO Handles and IO-method
+ * independent support functions for actually performing IO.
+ *
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/aio/aio_io.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+#include "storage/fd.h"
+#include "utils/wait_event.h"
+
+
+static void pgaio_io_before_prep(PgAioHandle *ioh);
+
+
+
+/* --------------------------------------------------------------------------------
+ * "Preparation" routines for individual IO types
+ *
+ * These are called by the place actually initiating an IO, to associate the
+ * IO specific data with an AIO handle.
+ *
+ * Each of the preparation routines first needs to call
+ * pgaio_io_before_prep(), then fill IO specific fields in the handle and then
+ * finally call pgaio_io_prepare().
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Associate a vectored read (fd, iovec count, file offset) with the handle
+ * and hand it to pgaio_io_prepare().
+ */
+void
+pgaio_io_prep_readv(PgAioHandle *ioh,
+					int fd, int iovcnt, uint64 offset)
+{
+	pgaio_io_before_prep(ioh);
+
+	ioh->op_data.read.fd = fd;
+	ioh->op_data.read.offset = offset;
+	ioh->op_data.read.iov_length = iovcnt;
+
+	pgaio_io_prepare(ioh, PGAIO_OP_READV);
+}
+
+/*
+ * Associate a vectored write (fd, iovec count, file offset) with the handle
+ * and hand it to pgaio_io_prepare().
+ */
+void
+pgaio_io_prep_writev(PgAioHandle *ioh,
+					 int fd, int iovcnt, uint64 offset)
+{
+	pgaio_io_before_prep(ioh);
+
+	ioh->op_data.write.fd = fd;
+	ioh->op_data.write.offset = offset;
+	ioh->op_data.write.iov_length = iovcnt;
+
+	pgaio_io_prepare(ioh, PGAIO_OP_WRITEV);
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * Functions implementing IO handle operations that are directly related to IO
+ * operations.
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Execute IO operation synchronously. This is implemented here, not in
+ * method_sync.c, because other IO methods also might use it / fall back to
+ * it.
+ */
+void
+pgaio_io_perform_synchronously(PgAioHandle *ioh)
+{
+	ssize_t		result = 0;
+	struct iovec *iov = &aio_ctl->iovecs[ioh->iovec_off];
+
+	/* Perform IO. */
+	switch (ioh->op)
+	{
+		case PGAIO_OP_READV:
+			pgstat_report_wait_start(WAIT_EVENT_DATA_FILE_READ);
+			result = pg_preadv(ioh->op_data.read.fd, iov,
+							   ioh->op_data.read.iov_length,
+							   ioh->op_data.read.offset);
+			pgstat_report_wait_end();
+			break;
+		case PGAIO_OP_WRITEV:
+			pgstat_report_wait_start(WAIT_EVENT_DATA_FILE_WRITE);
+			result = pg_pwritev(ioh->op_data.write.fd, iov,
+								ioh->op_data.write.iov_length,
+								ioh->op_data.write.offset);
+			pgstat_report_wait_end();
+			break;
+		case PGAIO_OP_INVALID:
+			elog(ERROR, "trying to execute invalid IO operation");
+	}
+
+	/* errors are encoded as negative errno values */
+	ioh->result = result < 0 ? -errno : result;
+
+	pgaio_io_process_completion(ioh, ioh->result);
+}
+
+/*
+ * Return a descriptive name of the IO's operation, for debugging/logging.
+ */
+const char *
+pgaio_io_get_op_name(PgAioHandle *ioh)
+{
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+
+	switch (ioh->op)
+	{
+		case PGAIO_OP_INVALID:
+			return "invalid";
+		case PGAIO_OP_READV:
+			return "read";
+		case PGAIO_OP_WRITEV:
+			return "write";
+	}
+
+	pg_unreachable();
+}
+
+/*
+ * Helper function to be called by IO operation preparation functions, before
+ * any data in the handle is set.  Mostly to centralize assertions: the
+ * handle must still be handed out and a subject must already have been set
+ * via pgaio_io_set_subject().
+ */
+static void
+pgaio_io_before_prep(PgAioHandle *ioh)
+{
+	Assert(ioh->state == AHS_HANDED_OUT);
+	Assert(pgaio_io_has_subject(ioh));
+}
diff --git a/src/backend/storage/aio/aio_subject.c b/src/backend/storage/aio/aio_subject.c
new file mode 100644
index 00000000000..8694cfafcd1
--- /dev/null
+++ b/src/backend/storage/aio/aio_subject.c
@@ -0,0 +1,231 @@
+/*-------------------------------------------------------------------------
+ *
+ * aio_subject.c
+ *	  AIO - Functionality related to executing IO for different subjects
+ *
+ * XXX Write me
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *    src/backend/storage/aio/aio_subject.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+#include "storage/buf_internals.h"
+#include "storage/bufmgr.h"
+#include "storage/smgr.h"
+#include "utils/memutils.h"
+
+
+/*
+ * Registry for entities that can be the target of AIO.
+ *
+ * To support executing using worker processes, the file descriptor for an IO
+ * may need to be reopened in a different process. This is done via the
+ * PgAioSubjectInfo.reopen callback.
+ */
+static const PgAioSubjectInfo *aio_subject_info[] = {
+	[ASI_INVALID] = &(PgAioSubjectInfo) {
+		.name = "invalid",
+	},
+};
+
+
+/* pairs a shared-callback definition with its name, for debugging/logging */
+typedef struct PgAioHandleSharedCallbacksEntry
+{
+	const PgAioHandleSharedCallbacks *const cb;
+	const char *const name;
+} PgAioHandleSharedCallbacksEntry;
+
+/*
+ * Registry of shared callbacks, indexed by PgAioHandleSharedCallbackID.
+ * Entries are added via CALLBACK_ENTRY; none exist yet in this commit.
+ */
+static const PgAioHandleSharedCallbacksEntry aio_shared_cbs[] = {
+#define CALLBACK_ENTRY(id, callback)  [id] = {.cb = &callback, .name = #callback}
+#undef CALLBACK_ENTRY
+};
+
+
+/*
+ * Register callback for the IO handle.
+ *
+ * Only a limited number (AIO_MAX_SHARED_CALLBACKS) of callbacks can be
+ * registered for each IO.
+ *
+ * Callbacks need to be registered before [indirectly] calling
+ * pgaio_io_prep_*(), as the IO may be executed immediately.
+ *
+ *
+ * Note that callbacks are executed in critical sections.  This is necessary
+ * to be able to execute IO in critical sections (consider e.g. WAL
+ * logging). To perform AIO we first need to acquire a handle, which, if there
+ * are no free handles, requires waiting for IOs to complete and to execute
+ * their completion callbacks.
+ *
+ * Callbacks may be executed in the issuing backend but also in another
+ * backend (because that backend is waiting for the IO) or in IO workers (if
+ * io_method=worker is used).
+ *
+ *
+ * See PgAioHandleSharedCallbackID's definition for an explanation for why
+ * callbacks are not identified by a pointer.
+ */
+void
+pgaio_io_add_shared_cb(PgAioHandle *ioh, PgAioHandleSharedCallbackID cbid)
+{
+	const PgAioHandleSharedCallbacksEntry *ce;
+
+	/* validate cbid before using it to index the callback registry */
+	if (cbid >= lengthof(aio_shared_cbs))
+		elog(ERROR, "callback %d is out of range", cbid);
+
+	ce = &aio_shared_cbs[cbid];
+
+	if (ce->cb->complete == NULL)
+		elog(ERROR, "callback %d is undefined", cbid);
+	if (ioh->num_shared_callbacks >= AIO_MAX_SHARED_CALLBACKS)
+		elog(PANIC, "too many callbacks, the max is %d", AIO_MAX_SHARED_CALLBACKS);
+
+	ioh->shared_callbacks[ioh->num_shared_callbacks] = cbid;
+
+	elog(DEBUG3, "io:%d, op %s, subject %s, adding cb #%d, id %d/%s",
+		 pgaio_io_get_id(ioh),
+		 pgaio_io_get_op_name(ioh),
+		 pgaio_io_get_subject_name(ioh),
+		 ioh->num_shared_callbacks + 1,
+		 cbid, ce->name);
+
+	ioh->num_shared_callbacks++;
+}
+
+/*
+ * Return the name for the subject associated with the IO. Mostly useful for
+ * debugging/logging.  ASI_INVALID maps to "invalid".
+ */
+const char *
+pgaio_io_get_subject_name(PgAioHandle *ioh)
+{
+	Assert(ioh->subject >= 0 && ioh->subject < ASI_COUNT);
+
+	return aio_subject_info[ioh->subject]->name;
+}
+
+/*
+ * Internal function which invokes ->prepare for all the registered
+ * callbacks, in reverse order of registration.
+ */
+void
+pgaio_io_prepare_subject(PgAioHandle *ioh)
+{
+	Assert(ioh->subject > ASI_INVALID && ioh->subject < ASI_COUNT);
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+
+	for (int i = ioh->num_shared_callbacks; i > 0; i--)
+	{
+		PgAioHandleSharedCallbackID cbid = ioh->shared_callbacks[i - 1];
+		const PgAioHandleSharedCallbacksEntry *ce = &aio_shared_cbs[cbid];
+
+		/* prepare is optional, skip callbacks that don't provide it */
+		if (!ce->cb->prepare)
+			continue;
+
+		elog(DEBUG3, "io:%d, op %s, subject %s, calling cb #%d %d/%s->prepare",
+			 pgaio_io_get_id(ioh),
+			 pgaio_io_get_op_name(ioh),
+			 pgaio_io_get_subject_name(ioh),
+			 i,
+			 cbid, ce->name);
+		ce->cb->prepare(ioh);
+	}
+}
+
+/*
+ * Internal function which invokes ->complete for all the registered
+ * callbacks, in reverse order of registration.  Each callback receives the
+ * result distilled so far and returns a (possibly updated) result; the
+ * final result is stored in ioh->distilled_result.
+ */
+void
+pgaio_io_process_completion_subject(PgAioHandle *ioh)
+{
+	PgAioResult result;
+
+	Assert(ioh->subject >= 0 && ioh->subject < ASI_COUNT);
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+
+	result.status = ARS_OK;		/* low level IO is always considered OK */
+	result.result = ioh->result;
+	result.id = ASC_INVALID;
+	result.error_data = 0;
+
+	for (int i = ioh->num_shared_callbacks; i > 0; i--)
+	{
+		PgAioHandleSharedCallbackID cbid = ioh->shared_callbacks[i - 1];
+		const PgAioHandleSharedCallbacksEntry *ce = &aio_shared_cbs[cbid];
+
+		elog(DEBUG3, "io:%d, op %s, subject %s, calling cb #%d, id %d/%s->complete with distilled result status %d, id %u, error_data: %d, result: %d",
+			 pgaio_io_get_id(ioh),
+			 pgaio_io_get_op_name(ioh),
+			 pgaio_io_get_subject_name(ioh),
+			 i,
+			 cbid, ce->name,
+			 result.status,
+			 result.id,
+			 result.error_data,
+			 result.result);
+		result = ce->cb->complete(ioh, result);
+	}
+
+	ioh->distilled_result = result;
+
+	elog(DEBUG3, "io:%d, op %s, subject %s, distilled result status %d, id %u, error_data: %d, result: %d, raw_result %d",
+		 pgaio_io_get_id(ioh),
+		 pgaio_io_get_op_name(ioh),
+		 pgaio_io_get_subject_name(ioh),
+		 result.status,
+		 result.id,
+		 result.error_data,
+		 result.result,
+		 ioh->result);
+}
+
+/*
+ * Check if pgaio_io_reopen() is available for the IO, i.e. whether the IO's
+ * subject provides a reopen callback.
+ */
+bool
+pgaio_io_can_reopen(PgAioHandle *ioh)
+{
+	return aio_subject_info[ioh->subject]->reopen != NULL;
+}
+
+/*
+ * Before executing an IO outside of the context of the process the IO has
+ * been prepared in, the file descriptor has to be reopened - any FD
+ * referenced in the IO itself, won't be valid in the separate process.
+ *
+ * Callers should have verified availability with pgaio_io_can_reopen().
+ */
+void
+pgaio_io_reopen(PgAioHandle *ioh)
+{
+	Assert(ioh->subject >= 0 && ioh->subject < ASI_COUNT);
+	Assert(ioh->op >= 0 && ioh->op < PGAIO_OP_COUNT);
+	/* catch callers that didn't check pgaio_io_can_reopen() first */
+	Assert(pgaio_io_can_reopen(ioh));
+
+	aio_subject_info[ioh->subject]->reopen(ioh);
+}
+
+
+
+/* --------------------------------------------------------------------------------
+ * IO Result
+ * --------------------------------------------------------------------------------
+ */
+
+/*
+ * Report a failed IO result at the given error level, via the error
+ * callback registered for the callback id recorded in the result.
+ */
+void
+pgaio_result_log(PgAioResult result, const PgAioSubjectData *subject_data, int elevel)
+{
+	PgAioHandleSharedCallbackID cbid = result.id;
+	const PgAioHandleSharedCallbacksEntry *ce;
+
+	Assert(result.status != ARS_UNKNOWN);
+	Assert(result.status != ARS_OK);
+
+	/* guard against an out-of-range callback id before indexing */
+	if (cbid >= lengthof(aio_shared_cbs))
+		elog(ERROR, "scb id %d is out of range", cbid);
+
+	ce = &aio_shared_cbs[cbid];
+
+	if (ce->cb->error == NULL)
+		elog(ERROR, "scb id %d/%s does not have an error callback",
+			 result.id, ce->name);
+
+	ce->cb->error(result, subject_data, elevel);
+}
diff --git a/src/backend/storage/aio/meson.build b/src/backend/storage/aio/meson.build
index 8d20759ebf8..8339d473aae 100644
--- a/src/backend/storage/aio/meson.build
+++ b/src/backend/storage/aio/meson.build
@@ -3,5 +3,8 @@
 backend_sources += files(
   'aio.c',
   'aio_init.c',
+  'aio_io.c',
+  'aio_subject.c',
+  'method_sync.c',
   'read_stream.c',
 )
diff --git a/src/backend/storage/aio/method_sync.c b/src/backend/storage/aio/method_sync.c
new file mode 100644
index 00000000000..61fd06a277b
--- /dev/null
+++ b/src/backend/storage/aio/method_sync.c
@@ -0,0 +1,45 @@
+/*-------------------------------------------------------------------------
+ *
+ * method_sync.c
+ *    AIO - perform "AIO" by executing it synchronously
+ *
+ * This method is mainly to check if AIO use causes regressions. Other IO
+ * methods might also fall back to the synchronous method for functionality
+ * they cannot provide.
+ *
+ * Portions Copyright (c) 1996-2024, PostgreSQL Global Development Group
+ * Portions Copyright (c) 1994, Regents of the University of California
+ *
+ * IDENTIFICATION
+ *	  src/backend/storage/aio/method_sync.c
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "postgres.h"
+
+#include "storage/aio.h"
+#include "storage/aio_internal.h"
+
+static bool pgaio_sync_needs_synchronous_execution(PgAioHandle *ioh);
+static int	pgaio_sync_submit(uint16 num_staged_ios, PgAioHandle **staged_ios);
+
+
+/* IO method table for io_method=sync */
+const IoMethodOps pgaio_sync_ops = {
+	.needs_synchronous_execution = pgaio_sync_needs_synchronous_execution,
+	.submit = pgaio_sync_submit,
+};
+
+/* with this method, every IO is executed synchronously */
+static bool
+pgaio_sync_needs_synchronous_execution(PgAioHandle *ioh)
+{
+	return true;
+}
+
+/*
+ * Since needs_synchronous_execution() always returns true for this method,
+ * no IO should ever be staged for submission - hence the error.
+ */
+static int
+pgaio_sync_submit(uint16 num_staged_ios, PgAioHandle **staged_ios)
+{
+	elog(ERROR, "should be unreachable");
+
+	return 0;
+}
diff --git a/src/backend/utils/activity/wait_event_names.txt b/src/backend/utils/activity/wait_event_names.txt
index 16144c2b72d..7a2e2b4432e 100644
--- a/src/backend/utils/activity/wait_event_names.txt
+++ b/src/backend/utils/activity/wait_event_names.txt
@@ -190,6 +190,9 @@ ABI_compatibility:
 
 Section: ClassName - WaitEventIO
 
+AIO_SUBMIT	"Waiting for AIO submission."
+AIO_DRAIN	"Waiting for IOs to finish."
+AIO_COMPLETION	"Waiting for completion callback."
 BASEBACKUP_READ	"Waiting for base backup to read from a file."
 BASEBACKUP_SYNC	"Waiting for data written by a base backup to reach durable storage."
 BASEBACKUP_WRITE	"Waiting for base backup to write to a file."
diff --git a/src/backend/utils/resowner/resowner.c b/src/backend/utils/resowner/resowner.c
index 505534ee8d3..5cf14472ebd 100644
--- a/src/backend/utils/resowner/resowner.c
+++ b/src/backend/utils/resowner/resowner.c
@@ -47,6 +47,8 @@
 
 #include "common/hashfn.h"
 #include "common/int.h"
+#include "lib/ilist.h"
+#include "storage/aio.h"
 #include "storage/ipc.h"
 #include "storage/predicate.h"
 #include "storage/proc.h"
@@ -155,6 +157,12 @@ struct ResourceOwnerData
 
 	/* The local locks cache. */
 	LOCALLOCK  *locks[MAX_RESOWNER_LOCKS];	/* list of owned locks */
+
+	/*
+	 * AIO handles need be registered in critical sections and therefore
+	 * cannot use the normal ResoureElem mechanism.
+	 */
+	dlist_head	aio_handles;
 };
 
 
@@ -425,6 +433,8 @@ ResourceOwnerCreate(ResourceOwner parent, const char *name)
 		parent->firstchild = owner;
 	}
 
+	dlist_init(&owner->aio_handles);
+
 	return owner;
 }
 
@@ -725,6 +735,14 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
 		 * so issue warnings.  In the abort case, just clean up quietly.
 		 */
 		ResourceOwnerReleaseAll(owner, phase, isCommit);
+
+		/* XXX: Could probably be a later phase? */
+		while (!dlist_is_empty(&owner->aio_handles))
+		{
+			dlist_node *node = dlist_head_node(&owner->aio_handles);
+
+			pgaio_io_release_resowner(node, !isCommit);
+		}
 	}
 	else if (phase == RESOURCE_RELEASE_LOCKS)
 	{
@@ -1082,3 +1100,15 @@ ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
 	elog(ERROR, "lock reference %p is not owned by resource owner %s",
 		 locallock, owner->name);
 }
+
+/*
+ * Track an AIO handle in the resource owner's dedicated dlist.  A plain
+ * dlist is used instead of the generic ResourceElem mechanism because AIO
+ * handles need to be registered in critical sections.
+ */
+void
+ResourceOwnerRememberAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
+{
+	dlist_push_tail(&owner->aio_handles, ioh_node);
+}
+
+/*
+ * Remove an AIO handle from the resource owner's list, e.g. once the handle
+ * has been reclaimed.
+ */
+void
+ResourceOwnerForgetAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
+{
+	dlist_delete_from(&owner->aio_handles, ioh_node);
+}
diff --git a/src/tools/pgindent/typedefs.list b/src/tools/pgindent/typedefs.list
index 2586d1cf53f..bc1acbb98ee 100644
--- a/src/tools/pgindent/typedefs.list
+++ b/src/tools/pgindent/typedefs.list
@@ -1263,6 +1263,7 @@ InvalMessageArray
 InvalidationInfo
 InvalidationMsgsGroup
 IoMethod
+IoMethodOps
 IpcMemoryId
 IpcMemoryKey
 IpcMemoryState
@@ -2100,6 +2101,23 @@ Permutation
 PermutationStep
 PermutationStepBlocker
 PermutationStepBlockerType
+PgAioCtl
+PgAioHandle
+PgAioHandleFlags
+PgAioHandleRef
+PgAioHandleSharedCallbackID
+PgAioHandleSharedCallbacks
+PgAioHandleSharedCallbacksEntry
+PgAioHandleState
+PgAioOp
+PgAioOpData
+PgAioPerBackend
+PgAioResult
+PgAioResultStatus
+PgAioReturn
+PgAioSubjectData
+PgAioSubjectID
+PgAioSubjectInfo
 PgArchData
 PgBackendGSSStatus
 PgBackendSSLStatus
-- 
2.45.2.746.g06e570c0df.dirty

