Separate memory contexts for relcache and catcache
Hi hackers,
Most catcache and relcache entries (other than index info etc.) currently
go straight into CacheMemoryContext. And I believe these two caches can be
the ones with the largest contribution to the memory usage of
CacheMemoryContext most of the time. For example, in cases where we have
lots of database objects accessed in a long-lived connection,
CacheMemoryContext tends to increase significantly.
While I've been working on another patch for pg_backend_memory_contexts
view, we thought that it would also be better to see the memory usages of
different kinds of caches broken down into their own contexts. The attached
patch implements this and aims to easily keep track of the memory used by
relcache and catcache.
To quickly show what pg_backend_memory_contexts would look like, I did the
following:
-Create some tables:
SELECT 'BEGIN;' UNION ALL SELECT format('CREATE TABLE %1$s(id serial
primary key, data text not null unique)', 'test_'||g.i) FROM
generate_series(0, 1000) g(i) UNION ALL SELECT 'COMMIT;';\gexec
-Open a new connection and query pg_backend_memory_contexts [1]:
This is what you'll see before and after the patch.
-- HEAD:
name | used_bytes | free_bytes | total_bytes
--------------------+------------+------------+-------------
CacheMemoryContext | 467656 | 56632 | 524288
index info | 111760 | 46960 | 158720
relation rules | 4416 | 3776 | 8192
(3 rows)
-- Patch:
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
CatCacheMemoryContext | 217696 | 44448 | 262144
RelCacheMemoryContext | 248264 | 13880 | 262144
index info | 111760 | 46960 | 158720
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)
- Run select on all tables
SELECT format('SELECT count(*) FROM %1$s', 'test_'||g.i) FROM
generate_series(0, 1000) g(i);\gexec
- Then check pg_backend_memory_contexts [1] again:
--HEAD
name | used_bytes | free_bytes | total_bytes
--------------------+------------+------------+-------------
CacheMemoryContext | 8197344 | 257056 | 8454400
index info | 2102160 | 113776 | 2215936
relation rules | 4416 | 3776 | 8192
(3 rows)
--Patch
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
RelCacheMemoryContext | 4706464 | 3682144 | 8388608
CatCacheMemoryContext | 3489384 | 770712 | 4260096
index info | 2102160 | 113776 | 2215936
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)
You can see that CacheMemoryContext does not use much memory without
catcache and relcache (at least in cases similar to above), and it's easy
to bloat catcache and relcache. That's why I think it would be useful to
see their usage separately.
Any feedback would be appreciated.
[1]:
SELECT
name,
sum(used_bytes) AS used_bytes,
sum(free_bytes) AS free_bytes,
sum(total_bytes) AS total_bytes
FROM pg_backend_memory_contexts
WHERE name LIKE '%CacheMemoryContext%' OR parent LIKE '%CacheMemoryContext%'
GROUP BY name
ORDER BY total_bytes DESC;
Thanks,
--
Melih Mutlu
Microsoft
Attachments:
0001-Separate-memory-contexts-for-relcache-and-catcache.patch (application/octet-stream) — Download
From 7589a1d666ead07ac838b155685e37a16e497f1b Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Tue, 13 Jun 2023 16:43:24 +0300
Subject: [PATCH] Separate memory contexts for relcache and catcache
This patch introduces two new memory contexts under CacheMemoryContext
for relcache and catcache. Most of the time relcache and catcache
constitutes a large part of CacheMemoryContext. Before this patch, it
was hard to see the contribution of both caches in memory usage of
CacheMemoryContext. Having separate contexts
for these two aims to help to understand more about memory usage of
contexts.
---
src/backend/utils/cache/catcache.c | 35 ++++++++---
src/backend/utils/cache/relcache.c | 99 +++++++++++++++++-------------
2 files changed, 82 insertions(+), 52 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 1aacb736c2..ce30bdbc0a 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -99,6 +99,9 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
+/* Separate memory context for catcache */
+static MemoryContext CatCacheMemoryContext = NULL;
+static void CreateCatCacheMemoryContext(void);
/*
* internal support functions
@@ -782,10 +785,10 @@ InitCatCache(int id,
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!CatCacheMemoryContext)
+ CreateCatCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* if first time through, initialize the cache group header
@@ -864,7 +867,7 @@ RehashCatCache(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheMemoryContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nbuckets; i++)
@@ -931,9 +934,9 @@ CatalogCacheInitializeCache(CatCache *cache)
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
- Assert(CacheMemoryContext != NULL);
+ Assert(CatCacheMemoryContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -994,7 +997,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ CatCacheMemoryContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1695,7 +1698,7 @@ SearchCatCacheList(CatCache *cache,
table_close(relation, AccessShareLock);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -1827,7 +1830,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -1862,7 +1865,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
else
{
Assert(negative);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
@@ -2086,3 +2089,15 @@ PrintCatCacheListLeakWarning(CatCList *list)
list->my_cache->cc_relname, list->my_cache->id,
list, list->refcount);
}
+
+static void
+CreateCatCacheMemoryContext()
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!CatCacheMemoryContext)
+ CatCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "CatCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
\ No newline at end of file
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 8e28335915..8eaa6df9ac 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -318,6 +318,9 @@ static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
static void RelationCacheInitFileRemoveInDir(const char *tblspcpath);
static void unlink_initfile(const char *initfilename, int elevel);
+/* Separate memory context for relcache */
+static MemoryContext RelCacheMemoryContext = NULL;
+static void CreateRelCacheMemoryContext(void);
/*
* ScanPgRelation
@@ -410,8 +413,8 @@ AllocateRelationDesc(Form_pg_class relp)
MemoryContext oldcxt;
Form_pg_class relationForm;
- /* Relcache entries must live in CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* Relcache entries must live in RelCacheMemoryContext */
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate and zero space for new relation descriptor
@@ -495,14 +498,14 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
options = extractRelOptions(tuple, GetPgClassDescriptor(), amoptsfn);
/*
- * Copy parsed data into CacheMemoryContext. To guard against the
+ * Copy parsed data into RelCacheMemoryContext. To guard against the
* possibility of leaks in the reloptions code, we want to do the actual
* parsing in the caller's memory context and copy the results into
- * CacheMemoryContext after the fact.
+ * RelCacheMemoryContext after the fact.
*/
if (options)
{
- relation->rd_options = MemoryContextAlloc(CacheMemoryContext,
+ relation->rd_options = MemoryContextAlloc(RelCacheMemoryContext,
VARSIZE(options));
memcpy(relation->rd_options, options, VARSIZE(options));
pfree(options);
@@ -532,7 +535,7 @@ RelationBuildTupleDesc(Relation relation)
relation->rd_rel->reltype ? relation->rd_rel->reltype : RECORDOID;
relation->rd_att->tdtypmod = -1; /* just to be sure */
- constr = (TupleConstr *) MemoryContextAllocZero(CacheMemoryContext,
+ constr = (TupleConstr *) MemoryContextAllocZero(RelCacheMemoryContext,
sizeof(TupleConstr));
constr->has_not_null = false;
constr->has_generated_stored = false;
@@ -613,7 +616,7 @@ RelationBuildTupleDesc(Relation relation)
if (attrmiss == NULL)
attrmiss = (AttrMissing *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
relation->rd_rel->relnatts *
sizeof(AttrMissing));
@@ -634,7 +637,7 @@ RelationBuildTupleDesc(Relation relation)
else
{
/* otherwise copy in the correct context */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
attrmiss[attnum - 1].am_value = datumCopy(missval,
attp->attbyval,
attp->attlen);
@@ -745,7 +748,7 @@ RelationBuildRuleLock(Relation relation)
/*
* Make the private context. Assume it'll not contain much data.
*/
- rulescxt = AllocSetContextCreate(CacheMemoryContext,
+ rulescxt = AllocSetContextCreate(RelCacheMemoryContext,
"relation rules",
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
@@ -1441,7 +1444,7 @@ RelationInitIndexAccessInfo(Relation relation)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u",
RelationGetRelid(relation));
- oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcontext = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_indextuple = heap_copytuple(tuple);
relation->rd_index = (Form_pg_index) GETSTRUCT(relation->rd_indextuple);
MemoryContextSwitchTo(oldcontext);
@@ -1470,7 +1473,7 @@ RelationInitIndexAccessInfo(Relation relation)
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
@@ -1652,9 +1655,9 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure RelCacheMemoryContext exists */
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
@@ -1701,7 +1704,7 @@ LookupOpclassInfo(Oid operatorClassOid,
*/
if (opcentry->supportProcs == NULL && numSupport > 0)
opcentry->supportProcs = (RegProcedure *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
numSupport * sizeof(RegProcedure));
/*
@@ -1860,7 +1863,7 @@ RelationInitTableAccessMethod(Relation relation)
* during bootstrap or before RelationCacheInitializePhase3 runs, and none of
* these properties matter then...)
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static void
formrdesc(const char *relationName, Oid relationReltype,
@@ -3059,7 +3062,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
MemoryContext oldcxt;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
EOXactTupleDescArrayLen = 16;
@@ -3523,10 +3526,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* switch to the cache context to create the relcache entry.
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate a new relation descriptor and fill in basic state fields.
@@ -3655,7 +3658,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationInitTableAccessMethod will do syscache lookups, so we mustn't
- * run it in CacheMemoryContext. Fortunately, the remaining steps don't
+ * run it in RelCacheMemoryContext. Fortunately, the remaining steps don't
* require a long-lived current context.
*/
MemoryContextSwitchTo(oldcxt);
@@ -3940,8 +3943,8 @@ RelationCacheInitialize(void)
/*
* make sure cache memory context exists
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
/*
* create hashtable that indexes the relcache
@@ -3956,7 +3959,7 @@ RelationCacheInitialize(void)
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(RelCacheMemoryContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
@@ -3995,9 +3998,9 @@ RelationCacheInitializePhase2(void)
return;
/*
- * switch to cache memory context
+ * switch to relcache memory context
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4050,9 +4053,9 @@ RelationCacheInitializePhase3(void)
RelationMapInitializePhase3();
/*
- * switch to cache memory context
+ * switch to relcache memory context
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
@@ -4362,7 +4365,7 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs)
MemoryContext oldcxt;
int i;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
result = CreateTemplateTupleDesc(natts);
result->tdtypeid = RECORDOID; /* not right, but we don't care */
@@ -4432,7 +4435,7 @@ AttrDefaultFetch(Relation relation, int ndef)
/* Allocate array with room for as many entries as expected */
attrdef = (AttrDefault *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ndef * sizeof(AttrDefault));
/* Search pg_attrdef for relevant entries */
@@ -4471,7 +4474,7 @@ AttrDefaultFetch(Relation relation, int ndef)
char *s = TextDatumGetCString(val);
attrdef[found].adnum = adform->adnum;
- attrdef[found].adbin = MemoryContextStrdup(CacheMemoryContext, s);
+ attrdef[found].adbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4528,7 +4531,7 @@ CheckConstraintFetch(Relation relation)
/* Allocate array with room for as many entries as expected */
check = (ConstrCheck *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ncheck * sizeof(ConstrCheck));
/* Search pg_constraint for relevant entries */
@@ -4561,7 +4564,7 @@ CheckConstraintFetch(Relation relation)
check[found].ccvalid = conform->convalidated;
check[found].ccnoinherit = conform->connoinherit;
- check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
+ check[found].ccname = MemoryContextStrdup(RelCacheMemoryContext,
NameStr(conform->conname));
/* Grab and test conbin is actually set */
@@ -4576,7 +4579,7 @@ CheckConstraintFetch(Relation relation)
/* detoast and convert to cstring in caller's context */
char *s = TextDatumGetCString(val);
- check[found].ccbin = MemoryContextStrdup(CacheMemoryContext, s);
+ check[found].ccbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4693,7 +4696,7 @@ RelationGetFKeyList(Relation relation)
table_close(conrel, AccessShareLock);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_fkeylist;
relation->rd_fkeylist = copyObject(result);
relation->rd_fkeyvalid = true;
@@ -4815,7 +4818,7 @@ RelationGetIndexList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_indexlist;
relation->rd_indexlist = list_copy(result);
relation->rd_pkindex = pkeyIndex;
@@ -4906,7 +4909,7 @@ RelationGetStatExtList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_statlist;
relation->rd_statlist = list_copy(result);
@@ -5396,7 +5399,7 @@ restart:
* leave the relcache entry looking like the other ones are valid but
* empty.
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_keyattr = bms_copy(uindexattrs);
relation->rd_pkattr = bms_copy(pkindexattrs);
relation->rd_idattr = bms_copy(idindexattrs);
@@ -5496,7 +5499,7 @@ RelationGetIdentityKeyBitmap(Relation relation)
relation->rd_idattr = NULL;
/* Now save copy of the bitmap in the relcache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_idattr = bms_copy(idindexattrs);
MemoryContextSwitchTo(oldcxt);
@@ -5787,7 +5790,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
}
/* Now save copy of the descriptor in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_pubdesc = palloc(sizeof(PublicationDesc));
memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc));
MemoryContextSwitchTo(oldcxt);
@@ -6019,7 +6022,7 @@ errtableconstraint(Relation rel, const char *conname)
* criticalSharedRelcachesBuilt to true.
* If not successful, return false.
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static bool
load_relcache_init_file(bool shared)
@@ -6188,7 +6191,7 @@ load_relcache_init_file(bool shared)
* prepare index info context --- parameters should match
* RelationInitIndexAccessInfo
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
@@ -6814,3 +6817,15 @@ unlink_initfile(const char *initfilename, int elevel)
initfilename)));
}
}
+
+static void
+CreateRelCacheMemoryContext()
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!RelCacheMemoryContext)
+ RelCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "RelCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
\ No newline at end of file
--
2.25.1
Most catcache and relcache entries (other than index info etc.) currently
go straight into CacheMemoryContext. And I believe these two caches can be
the ones with the largest contribution to the memory usage of
CacheMemoryContext most of the time. For example, in cases where we have
lots of database objects accessed in a long-lived connection,
CacheMemoryContext tends to increase significantly.
While I've been working on another patch for pg_backend_memory_contexts
view, we thought that it would also be better to see the memory usages of
different kinds of caches broken down into their own contexts. The attached
patch implements this and aims to easily keep track of the memory used by
relcache and catcache.
+ 1 for the idea, this would be pretty useful as a proof of which
context is consuming most of the memory and it doesn't cost
much. It would be handy than estimating that by something
like select count(*) from pg_class.
I think, for example, if we find relcache using too much memory,
it is a signal that the user may use too many partitioned tables.
--
Best Regards
Andy Fan
On 2023-Aug-09, Melih Mutlu wrote:
--Patch
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
RelCacheMemoryContext | 4706464 | 3682144 | 8388608
CatCacheMemoryContext | 3489384 | 770712 | 4260096
index info | 2102160 | 113776 | 2215936
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)
Hmm, is this saying that there's too much fragmentation in the relcache
context? Maybe it would improve things to make it a SlabContext instead
of AllocSet. Or, more precisely, a bunch of SlabContexts, each with the
appropriate chunkSize for the object being stored. (I don't say this
because I know for a fact that Slab is better for these purposes; it's
just that I happened to read its comments yesterday and they stated that
it behaves better in terms of fragmentation. Maybe Andres or Tomas have
an opinion on this.)
--
Álvaro Herrera 48°01'N 7°57'E — https://www.EnterpriseDB.com/
"I love the Postgres community. It's all about doing things _properly_. :-)"
(David Garamond)
On Thu, 10 Aug 2023 at 01:23, Alvaro Herrera <alvherre@alvh.no-ip.org> wrote:
On 2023-Aug-09, Melih Mutlu wrote:
--Patch
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
RelCacheMemoryContext | 4706464 | 3682144 | 8388608
CatCacheMemoryContext | 3489384 | 770712 | 4260096
index info | 2102160 | 113776 | 2215936
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)

Hmm, is this saying that there's too much fragmentation in the relcache
context?
free_bytes is just the space in the blocks that are not being used by
any allocated chunks or chunks on the freelist.
It looks like RelCacheMemoryContext has 10 blocks including the 8kb
initial block:
postgres=# select 8192 + sum(8192*power(2,x)) as total_bytes from
generate_series(0,9) x;
total_bytes
-------------
8388608
The first 2 blocks are 8KB as we only start doubling after we malloc
the first 8kb block after the keeper block.
If there was 1 fewer block then total_bytes would be 4194304, which is
less than the used_bytes for that context, so those 10 block look
needed.
Maybe it would improve things to make it a SlabContext instead
of AllocSet. Or, more precisely, a bunch of SlabContexts, each with the
appropriate chunkSize for the object being stored.
It would at least save from having to do the power of 2 rounding that
aset does. However, on a quick glance, it seems not all the size
requests in relcache.c are fixed. I see a datumCopy() in
RelationBuildTupleDesc() for the attmissingval stuff, so we couldn't
SlabAlloc that.
It could be worth looking at the size classes of the fixed-sized
allocations to estimate how much memory we might save by using slab to
avoid the power-2 rounding that aset.c does. However, if there are too
many contexts then we may end up using more memory with all the
mostly-empty contexts for backends that only query a tiny number of
tables. That might not be good. Slab also does not do block doubling
like aset does, so it might be hard to choose a good block size.
(I don't say this
because I know for a fact that Slab is better for these purposes; it's
just that I happened to read its comments yesterday and they stated that
it behaves better in terms of fragmentation. Maybe Andres or Tomas have
an opinion on this.)
I'm not sure of the exact comment, but I was in the recently and
there's a chance that I wrote that comment. Slab priorities putting
new chunks on fuller blocks and may free() blocks once they become
empty of any chunks. Aset does no free()ing of blocks unless a block
was malloc()ed especially for a chunk above allocChunkLimit. That
means aset might hold a lot of malloc'ed memory for chunks that just
sit on freelists which might never be used ever again, meanwhile,
other request sizes may have to malloc new blocks.
David
Hi,
On 2023-08-09 15:02:31 +0300, Melih Mutlu wrote:
To quickly show how pg_backend_memory_contexts would look like, I did the
following:
-Create some tables:
SELECT 'BEGIN;' UNION ALL SELECT format('CREATE TABLE %1$s(id serial
primary key, data text not null unique)', 'test_'||g.i) FROM
generate_series(0, 1000) g(i) UNION ALL SELECT 'COMMIT;';\gexec
-Open a new connection and query pg_backend_memory_contexts [1]:
This is what you'll see before and after the patch.
-- HEAD:
name | used_bytes | free_bytes | total_bytes
--------------------+------------+------------+-------------
CacheMemoryContext | 467656 | 56632 | 524288
index info | 111760 | 46960 | 158720
relation rules | 4416 | 3776 | 8192
(3 rows)
-- Patch:
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
CatCacheMemoryContext | 217696 | 44448 | 262144
RelCacheMemoryContext | 248264 | 13880 | 262144
index info | 111760 | 46960 | 158720
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)
Have you checked what the source of the remaining allocations in
CacheMemoryContext are?
One thing that I had observed previously and reproduced with this patch, is
that the first backend starting after a restart uses considerably more memory:
first:
┌───────────────────────┬────────────┬────────────┬─────────────┐
│ name │ used_bytes │ free_bytes │ total_bytes │
├───────────────────────┼────────────┼────────────┼─────────────┤
│ CatCacheMemoryContext │ 370112 │ 154176 │ 524288 │
│ RelCacheMemoryContext │ 244136 │ 18008 │ 262144 │
│ index info │ 104392 │ 45112 │ 149504 │
│ CacheMemoryContext │ 2304 │ 5888 │ 8192 │
│ relation rules │ 3856 │ 240 │ 4096 │
└───────────────────────┴────────────┴────────────┴─────────────┘
second:
┌───────────────────────┬────────────┬────────────┬─────────────┐
│ name │ used_bytes │ free_bytes │ total_bytes │
├───────────────────────┼────────────┼────────────┼─────────────┤
│ CatCacheMemoryContext │ 215072 │ 47072 │ 262144 │
│ RelCacheMemoryContext │ 243856 │ 18288 │ 262144 │
│ index info │ 104944 │ 47632 │ 152576 │
│ CacheMemoryContext │ 2304 │ 5888 │ 8192 │
│ relation rules │ 3856 │ 240 │ 4096 │
└───────────────────────┴────────────┴────────────┴─────────────┘
This isn't caused by this patch, but it does make it easier to pinpoint than
before. The reason is fairly simple: On the first start we start without
being able to use relcache init files, in later starts we can. The reason the
size increase is in CatCacheMemoryContext, rather than RelCacheMemoryContext,
is simple: When using the init file the catcache isn't used, when not, we have
to query the catcache a lot to build the initial relcache contents.
Given the size of both CatCacheMemoryContext and RelCacheMemoryContext in a
new backend, I think it might be worth using non-default aset parameters. A
bit ridiculous to increase block sizes from 8k upwards in every single
connection made to postgres ever.
- Run select on all tables
SELECT format('SELECT count(*) FROM %1$s', 'test_'||g.i) FROM
generate_series(0, 1000) g(i);\gexec
- Then check pg_backend_memory_contexts [1] again:
--HEAD
name | used_bytes | free_bytes | total_bytes
--------------------+------------+------------+-------------
CacheMemoryContext | 8197344 | 257056 | 8454400
index info | 2102160 | 113776 | 2215936
relation rules | 4416 | 3776 | 8192
(3 rows)
--Patch
name | used_bytes | free_bytes | total_bytes
-----------------------+------------+------------+-------------
RelCacheMemoryContext | 4706464 | 3682144 | 8388608
CatCacheMemoryContext | 3489384 | 770712 | 4260096
index info | 2102160 | 113776 | 2215936
CacheMemoryContext | 2336 | 5856 | 8192
relation rules | 4416 | 3776 | 8192
(5 rows)

You can see that CacheMemoryContext does not use much memory without
catcache and relcache (at least in cases similar to above), and it's easy
to bloat catcache and relcache. That's why I think it would be useful to
see their usage separately.
Yes, I think it'd be quite useful. There's ways to bloat particularly catcache
much further, and it's hard to differentiate that from other sources of bloat
right now.
+static void
+CreateCatCacheMemoryContext()
We typically use (void) to differentiate from an older way of function
declarations that didn't have argument types.
+{
+	if (!CacheMemoryContext)
+		CreateCacheMemoryContext();
I wish we just made sure that cache memory context were created in the right
place, instead of spreading this check everywhere...
@@ -3995,9 +3998,9 @@ RelationCacheInitializePhase2(void)
 	return;

 	/*
-	 * switch to cache memory context
+	 * switch to relcache memory context
 	 */
-	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+	oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);

 	/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4050,9 +4053,9 @@ RelationCacheInitializePhase3(void)
 	RelationMapInitializePhase3();

 	/*
-	 * switch to cache memory context
+	 * switch to relcache memory context
 	 */
-	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+	oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);

 	/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
I'd just delete these comments, they're just pointlessly restating the code.
Greetings,
Andres Freund
Hi,
I also think this change would be helpful.
I imagine you're working on the Andres's comments and you already notice
this, but v1 patch cannot be applied to HEAD.
For the convenience of other reviewers, I marked it 'Waiting on Author'.
--
Regards,
--
Atsushi Torikoshi
NTT DATA Group Corporation
Hi,
torikoshia <torikoshia@oss.nttdata.com>, 4 Ara 2023 Pzt, 07:59 tarihinde
şunu yazdı:
Hi,
I also think this change would be helpful.
I imagine you're working on the Andres's comments and you already notice
this, but v1 patch cannot be applied to HEAD.
For the convenience of other reviewers, I marked it 'Waiting on Author'.
Thanks for letting me know. I rebased the patch. PFA new version.
Andres Freund <andres@anarazel.de>, 12 Eki 2023 Per, 20:01 tarihinde şunu
yazdı:
Hi,
Have you checked what the source of the remaining allocations in
CacheMemoryContext are?
It's mostly typecache, around 2K. Do you think typecache also needs a
separate context?
Given the size of both CatCacheMemoryContext and RelCacheMemoryContext in a
new backend, I think it might be worth using non-default aset parameters. A
bit ridiculous to increase block sizes from 8k upwards in every single
connection made to postgres ever.
Considering it starts from ~262K, what would be better for init size?
256K?
+static void
+CreateCatCacheMemoryContext()
We typically use (void) to differentiate from an older way of function
declarations that didn't have argument types.
Done.
+{
+	if (!CacheMemoryContext)
+		CreateCacheMemoryContext();

I wish we just made sure that cache memory context were created in the
right
place, instead of spreading this check everywhere...
That would be nice. Do you have a suggestion about where that right place
would be?
I'd just delete these comments, they're just pointlessly restating the code.
Done.
Thanks,
--
Melih Mutlu
Microsoft
Attachments:
v2-0001-Separate-memory-contexts-for-relcache-and-catcach.patch (application/octet-stream) — Download
From 7a7c972a51aa9dc74245d89b7a26334f8707743c Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Tue, 13 Jun 2023 16:43:24 +0300
Subject: [PATCH v2] Separate memory contexts for relcache and catcache
This patch introduces two new memory contexts under CacheMemoryContext
for relcache and catcache. Most of the time relcache and catcache
constitutes a large part of CacheMemoryContext. Before this patch, it
was hard to see the contribution of both caches in memory usage of
CacheMemoryContext. Having separate contexts
for these two aims to help to understand more about memory usage of
contexts.
---
src/backend/utils/cache/catcache.c | 35 +++++++---
src/backend/utils/cache/relcache.c | 101 ++++++++++++++++-------------
2 files changed, 80 insertions(+), 56 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 2e2e4d9f1f..2c42112de9 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -102,6 +102,9 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
+/* Separate memory context for catcache */
+static MemoryContext CatCacheMemoryContext = NULL;
+static void CreateCatCacheMemoryContext(void);
/*
* internal support functions
@@ -835,10 +838,10 @@ InitCatCache(int id,
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!CatCacheMemoryContext)
+ CreateCatCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* if first time through, initialize the cache group header
@@ -917,7 +920,7 @@ RehashCatCache(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheMemoryContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nbuckets; i++)
@@ -984,9 +987,9 @@ CatalogCacheInitializeCache(CatCache *cache)
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
- Assert(CacheMemoryContext != NULL);
+ Assert(CatCacheMemoryContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -1047,7 +1050,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ CatCacheMemoryContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1756,7 +1759,7 @@ SearchCatCacheList(CatCache *cache,
ResourceOwnerEnlarge(CurrentResourceOwner);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -1895,7 +1898,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -1930,7 +1933,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
else
{
Assert(negative);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
@@ -2166,3 +2169,15 @@ ResOwnerPrintCatCacheList(Datum res)
list->my_cache->cc_relname, list->my_cache->id,
list, list->refcount);
}
+
+static void
+CreateCatCacheMemoryContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!CatCacheMemoryContext)
+ CatCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "CatCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
\ No newline at end of file
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index b3faccbefe..0a797fde1a 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -320,6 +320,9 @@ static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
static void RelationCacheInitFileRemoveInDir(const char *tblspcpath);
static void unlink_initfile(const char *initfilename, int elevel);
+/* Separate memory context for relcache */
+static MemoryContext RelCacheMemoryContext = NULL;
+static void CreateRelCacheMemoryContext(void);
/*
* ScanPgRelation
@@ -412,8 +415,8 @@ AllocateRelationDesc(Form_pg_class relp)
MemoryContext oldcxt;
Form_pg_class relationForm;
- /* Relcache entries must live in CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* Relcache entries must live in RelCacheMemoryContext */
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate and zero space for new relation descriptor
@@ -497,14 +500,14 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
options = extractRelOptions(tuple, GetPgClassDescriptor(), amoptsfn);
/*
- * Copy parsed data into CacheMemoryContext. To guard against the
+ * Copy parsed data into RelCacheMemoryContext. To guard against the
* possibility of leaks in the reloptions code, we want to do the actual
* parsing in the caller's memory context and copy the results into
- * CacheMemoryContext after the fact.
+ * RelCacheMemoryContext after the fact.
*/
if (options)
{
- relation->rd_options = MemoryContextAlloc(CacheMemoryContext,
+ relation->rd_options = MemoryContextAlloc(RelCacheMemoryContext,
VARSIZE(options));
memcpy(relation->rd_options, options, VARSIZE(options));
pfree(options);
@@ -534,7 +537,7 @@ RelationBuildTupleDesc(Relation relation)
relation->rd_rel->reltype ? relation->rd_rel->reltype : RECORDOID;
relation->rd_att->tdtypmod = -1; /* just to be sure */
- constr = (TupleConstr *) MemoryContextAllocZero(CacheMemoryContext,
+ constr = (TupleConstr *) MemoryContextAllocZero(RelCacheMemoryContext,
sizeof(TupleConstr));
constr->has_not_null = false;
constr->has_generated_stored = false;
@@ -615,7 +618,7 @@ RelationBuildTupleDesc(Relation relation)
if (attrmiss == NULL)
attrmiss = (AttrMissing *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
relation->rd_rel->relnatts *
sizeof(AttrMissing));
@@ -636,7 +639,7 @@ RelationBuildTupleDesc(Relation relation)
else
{
/* otherwise copy in the correct context */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
attrmiss[attnum - 1].am_value = datumCopy(missval,
attp->attbyval,
attp->attlen);
@@ -747,7 +750,7 @@ RelationBuildRuleLock(Relation relation)
/*
* Make the private context. Assume it'll not contain much data.
*/
- rulescxt = AllocSetContextCreate(CacheMemoryContext,
+ rulescxt = AllocSetContextCreate(RelCacheMemoryContext,
"relation rules",
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
@@ -1443,7 +1446,7 @@ RelationInitIndexAccessInfo(Relation relation)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u",
RelationGetRelid(relation));
- oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcontext = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_indextuple = heap_copytuple(tuple);
relation->rd_index = (Form_pg_index) GETSTRUCT(relation->rd_indextuple);
MemoryContextSwitchTo(oldcontext);
@@ -1472,7 +1475,7 @@ RelationInitIndexAccessInfo(Relation relation)
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
@@ -1654,9 +1657,9 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure RelCacheMemoryContext exists */
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
@@ -1703,7 +1706,7 @@ LookupOpclassInfo(Oid operatorClassOid,
*/
if (opcentry->supportProcs == NULL && numSupport > 0)
opcentry->supportProcs = (RegProcedure *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
numSupport * sizeof(RegProcedure));
/*
@@ -1862,7 +1865,7 @@ RelationInitTableAccessMethod(Relation relation)
* during bootstrap or before RelationCacheInitializePhase3 runs, and none of
* these properties matter then...)
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static void
formrdesc(const char *relationName, Oid relationReltype,
@@ -3092,7 +3095,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
MemoryContext oldcxt;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
EOXactTupleDescArrayLen = 16;
@@ -3556,10 +3559,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* switch to the cache context to create the relcache entry.
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate a new relation descriptor and fill in basic state fields.
@@ -3688,7 +3691,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationInitTableAccessMethod will do syscache lookups, so we mustn't
- * run it in CacheMemoryContext. Fortunately, the remaining steps don't
+ * run it in RelCacheMemoryContext. Fortunately, the remaining steps don't
* require a long-lived current context.
*/
MemoryContextSwitchTo(oldcxt);
@@ -3973,8 +3976,8 @@ RelationCacheInitialize(void)
/*
* make sure cache memory context exists
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
/*
* create hashtable that indexes the relcache
@@ -3989,7 +3992,7 @@ RelationCacheInitialize(void)
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(RelCacheMemoryContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
@@ -4027,10 +4030,7 @@ RelationCacheInitializePhase2(void)
if (IsBootstrapProcessingMode())
return;
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4082,10 +4082,7 @@ RelationCacheInitializePhase3(void)
*/
RelationMapInitializePhase3();
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
@@ -4395,7 +4392,7 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs)
MemoryContext oldcxt;
int i;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
result = CreateTemplateTupleDesc(natts);
result->tdtypeid = RECORDOID; /* not right, but we don't care */
@@ -4465,7 +4462,7 @@ AttrDefaultFetch(Relation relation, int ndef)
/* Allocate array with room for as many entries as expected */
attrdef = (AttrDefault *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ndef * sizeof(AttrDefault));
/* Search pg_attrdef for relevant entries */
@@ -4504,7 +4501,7 @@ AttrDefaultFetch(Relation relation, int ndef)
char *s = TextDatumGetCString(val);
attrdef[found].adnum = adform->adnum;
- attrdef[found].adbin = MemoryContextStrdup(CacheMemoryContext, s);
+ attrdef[found].adbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4561,7 +4558,7 @@ CheckConstraintFetch(Relation relation)
/* Allocate array with room for as many entries as expected */
check = (ConstrCheck *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ncheck * sizeof(ConstrCheck));
/* Search pg_constraint for relevant entries */
@@ -4594,7 +4591,7 @@ CheckConstraintFetch(Relation relation)
check[found].ccvalid = conform->convalidated;
check[found].ccnoinherit = conform->connoinherit;
- check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
+ check[found].ccname = MemoryContextStrdup(RelCacheMemoryContext,
NameStr(conform->conname));
/* Grab and test conbin is actually set */
@@ -4609,7 +4606,7 @@ CheckConstraintFetch(Relation relation)
/* detoast and convert to cstring in caller's context */
char *s = TextDatumGetCString(val);
- check[found].ccbin = MemoryContextStrdup(CacheMemoryContext, s);
+ check[found].ccbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4726,7 +4723,7 @@ RelationGetFKeyList(Relation relation)
table_close(conrel, AccessShareLock);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_fkeylist;
relation->rd_fkeylist = copyObject(result);
relation->rd_fkeyvalid = true;
@@ -4870,7 +4867,7 @@ RelationGetIndexList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_indexlist;
relation->rd_indexlist = list_copy(result);
relation->rd_pkindex = pkeyIndex;
@@ -4961,7 +4958,7 @@ RelationGetStatExtList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_statlist;
relation->rd_statlist = list_copy(result);
@@ -5457,7 +5454,7 @@ restart:
* leave the relcache entry looking like the other ones are valid but
* empty.
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_keyattr = bms_copy(uindexattrs);
relation->rd_pkattr = bms_copy(pkindexattrs);
relation->rd_idattr = bms_copy(idindexattrs);
@@ -5557,7 +5554,7 @@ RelationGetIdentityKeyBitmap(Relation relation)
relation->rd_idattr = NULL;
/* Now save copy of the bitmap in the relcache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_idattr = bms_copy(idindexattrs);
MemoryContextSwitchTo(oldcxt);
@@ -5848,7 +5845,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
}
/* Now save copy of the descriptor in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_pubdesc = palloc(sizeof(PublicationDesc));
memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc));
MemoryContextSwitchTo(oldcxt);
@@ -6051,7 +6048,7 @@ errtableconstraint(Relation rel, const char *conname)
* criticalSharedRelcachesBuilt to true.
* If not successful, return false.
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static bool
load_relcache_init_file(bool shared)
@@ -6220,7 +6217,7 @@ load_relcache_init_file(bool shared)
* prepare index info context --- parameters should match
* RelationInitIndexAccessInfo
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
@@ -6873,3 +6870,15 @@ ResOwnerReleaseRelation(Datum res)
RelationCloseCleanup((Relation) res);
}
+
+static void
+CreateRelCacheMemoryContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!RelCacheMemoryContext)
+ RelCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "RelCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
--
2.34.1
On Wed, 3 Jan 2024 at 16:56, Melih Mutlu <m.melihmutlu@gmail.com> wrote:
Hi,
torikoshia <torikoshia@oss.nttdata.com>, 4 Ara 2023 Pzt, 07:59 tarihinde şunu yazdı:
Hi,
I also think this change would be helpful.
I imagine you're working on the Andres's comments and you already notice
this, but v1 patch cannot be applied to HEAD.
For the convenience of other reviewers, I marked it 'Waiting on Author'.

Thanks for letting me know. I rebased the patch. PFA new version.
CFBot shows that the patch does not apply anymore as in [1]http://cfbot.cputube.org/patch_46_4554.log:
=== Applying patches on top of PostgreSQL commit ID
729439607ad210dbb446e31754e8627d7e3f7dda ===
=== applying patch
./v2-0001-Separate-memory-contexts-for-relcache-and-catcach.patch
patching file src/backend/utils/cache/catcache.c
...
Hunk #8 FAILED at 1933.
Hunk #9 succeeded at 2253 (offset 84 lines).
1 out of 9 hunks FAILED -- saving rejects to file
src/backend/utils/cache/catcache.c.rej
Please post an updated version for the same.
[1]: http://cfbot.cputube.org/patch_46_4554.log
Regards,
Vignesh
vignesh C <vignesh21@gmail.com>, 27 Oca 2024 Cmt, 06:01 tarihinde şunu
yazdı:
On Wed, 3 Jan 2024 at 16:56, Melih Mutlu <m.melihmutlu@gmail.com> wrote:
CFBot shows that the patch does not apply anymore as in [1]:
=== Applying patches on top of PostgreSQL commit ID
729439607ad210dbb446e31754e8627d7e3f7dda ===
=== applying patch
./v2-0001-Separate-memory-contexts-for-relcache-and-catcach.patch
patching file src/backend/utils/cache/catcache.c
...
Hunk #8 FAILED at 1933.
Hunk #9 succeeded at 2253 (offset 84 lines).
1 out of 9 hunks FAILED -- saving rejects to file
src/backend/utils/cache/catcache.c.rej

Please post an updated version for the same.
[1] - http://cfbot.cputube.org/patch_46_4554.log
Regards,
Vignesh
Rebased. PSA.
--
Melih Mutlu
Microsoft
Attachments:
v3-0001-Separate-memory-contexts-for-relcache-and-catcach.patchapplication/octet-stream; name=v3-0001-Separate-memory-contexts-for-relcache-and-catcach.patchDownload
From b97d6d3220f59249b7730d014b80ff7b575fd3d6 Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Tue, 13 Jun 2023 16:43:24 +0300
Subject: [PATCH v3] Separate memory contexts for relcache and catcache
This patch introduces two new memory contexts under CacheMemoryContext
for relcache and catcache. Most of the time, relcache and catcache
constitute a large part of CacheMemoryContext. Before this patch, it
was hard to see the contribution of each cache to the memory usage of
CacheMemoryContext. Having separate contexts for these two aims to make
it easier to understand the memory usage of each cache.
---
src/backend/utils/cache/catcache.c | 35 +++++++---
src/backend/utils/cache/relcache.c | 101 ++++++++++++++++-------------
2 files changed, 80 insertions(+), 56 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 569f51cb33..7ca8deb259 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -103,6 +103,9 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
+/* Separate memory context for catcache */
+static MemoryContext CatCacheMemoryContext = NULL;
+static void CreateCatCacheMemoryContext(void);
/*
* internal support functions
@@ -852,10 +855,10 @@ InitCatCache(int id,
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!CatCacheMemoryContext)
+ CreateCatCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* if first time through, initialize the cache group header
@@ -942,7 +945,7 @@ RehashCatCache(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheMemoryContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nbuckets; i++)
@@ -1047,9 +1050,9 @@ CatalogCacheInitializeCache(CatCache *cache)
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
- Assert(CacheMemoryContext != NULL);
+ Assert(CatCacheMemoryContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -1110,7 +1113,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ CatCacheMemoryContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1897,7 +1900,7 @@ SearchCatCacheList(CatCache *cache,
ResourceOwnerEnlarge(CurrentResourceOwner);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -2074,7 +2077,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -2109,7 +2112,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
else
{
/* Set up keys for a negative cache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheMemoryContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
@@ -2345,3 +2348,15 @@ ResOwnerPrintCatCacheList(Datum res)
list->my_cache->cc_relname, list->my_cache->id,
list, list->refcount);
}
+
+static void
+CreateCatCacheMemoryContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!CatCacheMemoryContext)
+ CatCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "CatCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
\ No newline at end of file
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 3fe74dabd0..a9e226870d 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -319,6 +319,9 @@ static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
static void RelationCacheInitFileRemoveInDir(const char *tblspcpath);
static void unlink_initfile(const char *initfilename, int elevel);
+/* Separate memory context for relcache */
+static MemoryContext RelCacheMemoryContext = NULL;
+static void CreateRelCacheMemoryContext(void);
/*
* ScanPgRelation
@@ -411,8 +414,8 @@ AllocateRelationDesc(Form_pg_class relp)
MemoryContext oldcxt;
Form_pg_class relationForm;
- /* Relcache entries must live in CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* Relcache entries must live in RelCacheMemoryContext */
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate and zero space for new relation descriptor
@@ -496,14 +499,14 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
options = extractRelOptions(tuple, GetPgClassDescriptor(), amoptsfn);
/*
- * Copy parsed data into CacheMemoryContext. To guard against the
+ * Copy parsed data into RelCacheMemoryContext. To guard against the
* possibility of leaks in the reloptions code, we want to do the actual
* parsing in the caller's memory context and copy the results into
- * CacheMemoryContext after the fact.
+ * RelCacheMemoryContext after the fact.
*/
if (options)
{
- relation->rd_options = MemoryContextAlloc(CacheMemoryContext,
+ relation->rd_options = MemoryContextAlloc(RelCacheMemoryContext,
VARSIZE(options));
memcpy(relation->rd_options, options, VARSIZE(options));
pfree(options);
@@ -533,7 +536,7 @@ RelationBuildTupleDesc(Relation relation)
relation->rd_rel->reltype ? relation->rd_rel->reltype : RECORDOID;
relation->rd_att->tdtypmod = -1; /* just to be sure */
- constr = (TupleConstr *) MemoryContextAllocZero(CacheMemoryContext,
+ constr = (TupleConstr *) MemoryContextAllocZero(RelCacheMemoryContext,
sizeof(TupleConstr));
constr->has_not_null = false;
constr->has_generated_stored = false;
@@ -614,7 +617,7 @@ RelationBuildTupleDesc(Relation relation)
if (attrmiss == NULL)
attrmiss = (AttrMissing *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
relation->rd_rel->relnatts *
sizeof(AttrMissing));
@@ -635,7 +638,7 @@ RelationBuildTupleDesc(Relation relation)
else
{
/* otherwise copy in the correct context */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
attrmiss[attnum - 1].am_value = datumCopy(missval,
attp->attbyval,
attp->attlen);
@@ -746,7 +749,7 @@ RelationBuildRuleLock(Relation relation)
/*
* Make the private context. Assume it'll not contain much data.
*/
- rulescxt = AllocSetContextCreate(CacheMemoryContext,
+ rulescxt = AllocSetContextCreate(RelCacheMemoryContext,
"relation rules",
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
@@ -1449,7 +1452,7 @@ RelationInitIndexAccessInfo(Relation relation)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u",
RelationGetRelid(relation));
- oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcontext = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_indextuple = heap_copytuple(tuple);
relation->rd_index = (Form_pg_index) GETSTRUCT(relation->rd_indextuple);
MemoryContextSwitchTo(oldcontext);
@@ -1478,7 +1481,7 @@ RelationInitIndexAccessInfo(Relation relation)
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
@@ -1660,9 +1663,9 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure RelCacheMemoryContext exists */
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
@@ -1709,7 +1712,7 @@ LookupOpclassInfo(Oid operatorClassOid,
*/
if (opcentry->supportProcs == NULL && numSupport > 0)
opcentry->supportProcs = (RegProcedure *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
numSupport * sizeof(RegProcedure));
/*
@@ -1868,7 +1871,7 @@ RelationInitTableAccessMethod(Relation relation)
* during bootstrap or before RelationCacheInitializePhase3 runs, and none of
* these properties matter then...)
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static void
formrdesc(const char *relationName, Oid relationReltype,
@@ -3070,7 +3073,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
MemoryContext oldcxt;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
EOXactTupleDescArrayLen = 16;
@@ -3534,10 +3537,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* switch to the cache context to create the relcache entry.
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* allocate a new relation descriptor and fill in basic state fields.
@@ -3666,7 +3669,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationInitTableAccessMethod will do syscache lookups, so we mustn't
- * run it in CacheMemoryContext. Fortunately, the remaining steps don't
+ * run it in RelCacheMemoryContext. Fortunately, the remaining steps don't
* require a long-lived current context.
*/
MemoryContextSwitchTo(oldcxt);
@@ -3951,8 +3954,8 @@ RelationCacheInitialize(void)
/*
* make sure cache memory context exists
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheMemoryContext)
+ CreateRelCacheMemoryContext();
/*
* create hashtable that indexes the relcache
@@ -3967,7 +3970,7 @@ RelationCacheInitialize(void)
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(RelCacheMemoryContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
@@ -4005,10 +4008,7 @@ RelationCacheInitializePhase2(void)
if (IsBootstrapProcessingMode())
return;
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4060,10 +4060,7 @@ RelationCacheInitializePhase3(void)
*/
RelationMapInitializePhase3();
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
@@ -4377,7 +4374,7 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs)
MemoryContext oldcxt;
int i;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
result = CreateTemplateTupleDesc(natts);
result->tdtypeid = RECORDOID; /* not right, but we don't care */
@@ -4447,7 +4444,7 @@ AttrDefaultFetch(Relation relation, int ndef)
/* Allocate array with room for as many entries as expected */
attrdef = (AttrDefault *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ndef * sizeof(AttrDefault));
/* Search pg_attrdef for relevant entries */
@@ -4486,7 +4483,7 @@ AttrDefaultFetch(Relation relation, int ndef)
char *s = TextDatumGetCString(val);
attrdef[found].adnum = adform->adnum;
- attrdef[found].adbin = MemoryContextStrdup(CacheMemoryContext, s);
+ attrdef[found].adbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4543,7 +4540,7 @@ CheckConstraintFetch(Relation relation)
/* Allocate array with room for as many entries as expected */
check = (ConstrCheck *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheMemoryContext,
ncheck * sizeof(ConstrCheck));
/* Search pg_constraint for relevant entries */
@@ -4576,7 +4573,7 @@ CheckConstraintFetch(Relation relation)
check[found].ccvalid = conform->convalidated;
check[found].ccnoinherit = conform->connoinherit;
- check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
+ check[found].ccname = MemoryContextStrdup(RelCacheMemoryContext,
NameStr(conform->conname));
/* Grab and test conbin is actually set */
@@ -4591,7 +4588,7 @@ CheckConstraintFetch(Relation relation)
/* detoast and convert to cstring in caller's context */
char *s = TextDatumGetCString(val);
- check[found].ccbin = MemoryContextStrdup(CacheMemoryContext, s);
+ check[found].ccbin = MemoryContextStrdup(RelCacheMemoryContext, s);
pfree(s);
found++;
}
@@ -4708,7 +4705,7 @@ RelationGetFKeyList(Relation relation)
table_close(conrel, AccessShareLock);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_fkeylist;
relation->rd_fkeylist = copyObject(result);
relation->rd_fkeyvalid = true;
@@ -4859,7 +4856,7 @@ RelationGetIndexList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_indexlist;
relation->rd_indexlist = list_copy(result);
relation->rd_pkindex = pkeyIndex;
@@ -4951,7 +4948,7 @@ RelationGetStatExtList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
oldlist = relation->rd_statlist;
relation->rd_statlist = list_copy(result);
@@ -5452,7 +5449,7 @@ restart:
* leave the relcache entry looking like the other ones are valid but
* empty.
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_keyattr = bms_copy(uindexattrs);
relation->rd_pkattr = bms_copy(pkindexattrs);
relation->rd_idattr = bms_copy(idindexattrs);
@@ -5552,7 +5549,7 @@ RelationGetIdentityKeyBitmap(Relation relation)
relation->rd_idattr = NULL;
/* Now save copy of the bitmap in the relcache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_idattr = bms_copy(idindexattrs);
MemoryContextSwitchTo(oldcxt);
@@ -5849,7 +5846,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
}
/* Now save copy of the descriptor in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheMemoryContext);
relation->rd_pubdesc = palloc(sizeof(PublicationDesc));
memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc));
MemoryContextSwitchTo(oldcxt);
@@ -6052,7 +6049,7 @@ errtableconstraint(Relation rel, const char *conname)
* criticalSharedRelcachesBuilt to true.
* If not successful, return false.
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheMemoryContext.
*/
static bool
load_relcache_init_file(bool shared)
@@ -6221,7 +6218,7 @@ load_relcache_init_file(bool shared)
* prepare index info context --- parameters should match
* RelationInitIndexAccessInfo
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheMemoryContext,
"index info",
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
@@ -6882,3 +6879,15 @@ ResOwnerReleaseRelation(Datum res)
RelationCloseCleanup((Relation) res);
}
+
+static void
+CreateRelCacheMemoryContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!RelCacheMemoryContext)
+ RelCacheMemoryContext = AllocSetContextCreate(CacheMemoryContext,
+ "RelCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
--
2.34.1
On Wed, 2024-04-03 at 16:12 +0300, Melih Mutlu wrote:
Rebased. PSA.
Thank you. I missed your patch and came up with a similar patch over
here:
/messages/by-id/78599c442380ddb5990117e281a4fa65a74231af.camel@j-davis.com
I closed my thread and we can continue this one.
One difference is that I tried to capture almost all uses of
CacheMemoryContext so that it would become just a parent context
without many allocations of its own.
The plan cache and SPI caches can be important, too. Or, one of the
other caches that we expect to be small might grow in some edge cases
(or due to a bug), and it would be good to be able to see that.
I agree with others that we should look at changing the initial size or
type of the contexts, but that should be a separate commit.
Regards,
Jeff Davis
Hi Jeff,
Jeff Davis <pgsql@j-davis.com>, 30 Eki 2024 Çar, 01:00 tarihinde şunu yazdı:
On Wed, 2024-04-03 at 16:12 +0300, Melih Mutlu wrote:
Rebased. PSA.
Thank you. I missed your patch and came up with a similar patch over
here:/messages/by-id/78599c442380ddb5990117e281a4fa65a74231af.camel@j-davis.com
I closed my thread and we can continue this one.
Thanks for being interested in this patch. I simply merged your patch and
mine, which was pretty easy as your patch is quite similar to mine but
covers many more caches.
One difference is that I tried to capture almost all uses of
CacheMemoryContext so that it would become just a parent context
without many allocations of its own.The plan cache and SPI caches can be important, too. Or, one of the
other caches that we expect to be small might grow in some edge cases
(or due to a bug), and it would be good to be able to see that.
My only concern would be allocating too much memory for each cache
type unnecessarily. Especially the ones that are expected to be small most
of the time, would we see cases where we waste too much memory when the
number of backends increases? Or maybe having separate contexts for each
cache wouldn't hurt at all. I don't really have enough knowledge about each
cache type.
I only quickly checked the memory usage right after starting a backend to
ensure that the patch does not slow down backend starts. We now have an
additional TypCacheContext, which was previously using CacheMemoryContext,
with 8KB init size. I'm not sure how much we should be worried about this
additional 8KB. I think we can reduce the initial size of
CacheMemoryContext instead,
assuming that CacheMemoryContext wouldn't have many allocations of its own
anymore.
I agree with others that we should look at changing the initial size or
type of the contexts, but that should be a separate commit.
Attached a separate patch to change initial sizes for relcache and catcache
contexts as they grow large from the start. This was suggested in the
thread previously [1].
Also changed CacheMemoryContext to use ALLOCSET_START_SMALL_SIZES, so it
starts from 1KB.
Regards,
--
Melih Mutlu
Microsoft
Attachments:
v4-0002-Adjusting-cache-memory-context-sizes.patch (application/octet-stream)
From 91346d94a67f78b1540046373283bbc1bfcd30c9 Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Thu, 31 Oct 2024 16:34:34 +0300
Subject: [PATCH v4 2/2] Adjusting cache memory context sizes
---
src/backend/utils/cache/catcache.c | 6 ++++--
src/backend/utils/cache/relcache.c | 4 +++-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index db41a26a9d..0c3b85cd28 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -688,7 +688,7 @@ CreateCacheMemoryContext(void)
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_START_SMALL_SIZES);
}
static void
@@ -700,7 +700,9 @@ CreateCatCacheContext(void)
if (!CatCacheContext)
CatCacheContext = AllocSetContextCreate(CacheMemoryContext,
"CatCacheContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_DEFAULT_MINSIZE,
+ 128 * 1024,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
/*
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 9392060e25..2e4eb88c58 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1631,7 +1631,9 @@ CreateRelCacheContext(void)
if (!RelCacheContext)
RelCacheContext = AllocSetContextCreate(CacheMemoryContext,
"RelCacheContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_DEFAULT_MINSIZE,
+ 128 * 1024,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
/*
--
2.34.1
v4-0001-Separate-memory-contexts-for-caches.patch (application/octet-stream)
From b96af51a05f2db585beb3a9006be88bbe958cf1c Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Tue, 13 Jun 2023 16:43:24 +0300
Subject: [PATCH v4 1/2] Separate memory contexts for caches
This patch introduces new memory contexts under CacheMemoryContext
for different types of caches such as CatCacheContext and
RelCacheContext. Most of the time, relcache and catcache constitute
a large part of CacheMemoryContext, and this can make the memory usage of
other caches invisible. Having separate contexts aims to make it easier to
understand the memory usage of cache-related contexts.
---
src/backend/catalog/namespace.c | 5 +-
src/backend/commands/policy.c | 4 +-
src/backend/commands/tablecmds.c | 2 +-
src/backend/commands/trigger.c | 4 +-
src/backend/executor/spi.c | 25 ++++-
src/backend/foreign/foreign.c | 4 +-
src/backend/partitioning/partdesc.c | 6 +-
src/backend/replication/pgoutput/pgoutput.c | 14 ++-
src/backend/utils/adt/pg_locale.c | 6 +-
src/backend/utils/cache/attoptcache.c | 8 +-
src/backend/utils/cache/catcache.c | 41 +++++---
src/backend/utils/cache/partcache.c | 10 +-
src/backend/utils/cache/plancache.c | 36 ++++---
src/backend/utils/cache/relcache.c | 102 +++++++++++---------
src/backend/utils/cache/spccache.c | 7 +-
src/backend/utils/cache/ts_cache.c | 47 +++++----
src/backend/utils/cache/typcache.c | 62 +++++++-----
src/include/utils/relcache.h | 3 +
18 files changed, 239 insertions(+), 147 deletions(-)
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index 30807f9190..f582325ac1 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -321,8 +321,11 @@ spcache_init(void)
if (SearchPathCacheContext == NULL)
{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
/* Make the context we'll keep search path cache hashtable in */
- SearchPathCacheContext = AllocSetContextCreate(TopMemoryContext,
+ SearchPathCacheContext = AllocSetContextCreate(CacheMemoryContext,
"search_path processing cache",
ALLOCSET_DEFAULT_SIZES);
}
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index 6ff3eba824..f83dc4c153 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -313,10 +313,10 @@ RelationBuildRowSecurity(Relation relation)
/*
* Success. Reparent the descriptor's memory context under
- * CacheMemoryContext so that it will live indefinitely, then attach the
+ * RelCacheContext so that it will live indefinitely, then attach the
* policy descriptor to the relcache entry.
*/
- MemoryContextSetParent(rscxt, CacheMemoryContext);
+ MemoryContextSetParent(rscxt, RelCacheContext);
relation->rd_rsdesc = rsdesc;
}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 4345b96de5..eefbde326a 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -17616,7 +17616,7 @@ register_on_commit_action(Oid relid, OnCommitAction action)
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oc = (OnCommitItem *) palloc(sizeof(OnCommitItem));
oc->relid = relid;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 09356e46d1..7ffb0ecd3a 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1847,7 +1847,7 @@ EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
* Build trigger data to attach to the given relcache entry.
*
* Note that trigger data attached to a relcache entry must be stored in
- * CacheMemoryContext to ensure it survives as long as the relcache entry.
+ * RelCacheContext to ensure it survives as long as the relcache entry.
* But we should be running in a less long-lived working context. To avoid
* leaking cache memory if this routine fails partway through, we build a
* temporary TriggerDesc in working memory and then copy the completed
@@ -1994,7 +1994,7 @@ RelationBuildTriggers(Relation relation)
SetTriggerFlags(trigdesc, &(triggers[i]));
/* Copy completed trigdesc into cache storage */
- oldContext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldContext = MemoryContextSwitchTo(RelCacheContext);
relation->trigdesc = CopyTriggerDesc(trigdesc);
MemoryContextSwitchTo(oldContext);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index 2fb2e73604..63f2116dd4 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -50,6 +50,8 @@ static _SPI_connection *_SPI_current = NULL;
static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_connected = -1; /* current stack index */
+static MemoryContext SPICacheContext = NULL;
+
typedef struct SPICallbackArg
{
const char *query;
@@ -972,6 +974,15 @@ SPI_prepare_params(const char *src,
return result;
}
+static void
+CreateSPICacheContext(void)
+{
+ if (!SPICacheContext)
+ SPICacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "SPICacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
int
SPI_keepplan(SPIPlanPtr plan)
{
@@ -981,13 +992,16 @@ SPI_keepplan(SPIPlanPtr plan)
plan->saved || plan->oneshot)
return SPI_ERROR_ARGUMENT;
+ if (!SPICacheContext)
+ CreateSPICacheContext();
+
/*
- * Mark it saved, reparent it under CacheMemoryContext, and mark all the
+ * Mark it saved, reparent it under SPICacheContext, and mark all the
* component CachedPlanSources as saved. This sequence cannot fail
* partway through, so there's no risk of long-term memory leakage.
*/
plan->saved = true;
- MemoryContextSetParent(plan->plancxt, CacheMemoryContext);
+ MemoryContextSetParent(plan->plancxt, SPICacheContext);
foreach(lc, plan->plancache_list)
{
@@ -3255,13 +3269,16 @@ _SPI_save_plan(SPIPlanPtr plan)
MemoryContextSwitchTo(oldcxt);
+ if (!SPICacheContext)
+ CreateSPICacheContext();
+
/*
- * Mark it saved, reparent it under CacheMemoryContext, and mark all the
+ * Mark it saved, reparent it under SPICacheContext, and mark all the
* component CachedPlanSources as saved. This sequence cannot fail
* partway through, so there's no risk of long-term memory leakage.
*/
newplan->saved = true;
- MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);
+ MemoryContextSetParent(newplan->plancxt, SPICacheContext);
foreach(lc, newplan->plancache_list)
{
diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c
index 4c06e1ff1c..dbc34dabd8 100644
--- a/src/backend/foreign/foreign.c
+++ b/src/backend/foreign/foreign.c
@@ -449,8 +449,8 @@ GetFdwRoutineForRelation(Relation relation, bool makecopy)
/* Get the info by consulting the catalogs and the FDW code */
fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(relation));
- /* Save the data for later reuse in CacheMemoryContext */
- cfdwroutine = (FdwRoutine *) MemoryContextAlloc(CacheMemoryContext,
+ /* Save the data for later reuse in RelCacheContext */
+ cfdwroutine = (FdwRoutine *) MemoryContextAlloc(RelCacheContext,
sizeof(FdwRoutine));
memcpy(cfdwroutine, fdwroutine, sizeof(FdwRoutine));
relation->rd_fdwroutine = cfdwroutine;
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index b4e0ed0e71..e08a5731d1 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -115,10 +115,10 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached)
*
* Partition descriptor is a complex structure; to avoid complicated logic to
* free individual elements whenever the relcache entry is flushed, we give it
- * its own memory context, a child of CacheMemoryContext, which can easily be
+ * its own memory context, a child of RelCacheContext, which can easily be
* deleted on its own. To avoid leaking memory in that context in case of an
* error partway through this function, the context is initially created as a
- * child of CurTransactionContext and only re-parented to CacheMemoryContext
+ * child of CurTransactionContext and only re-parented to RelCacheContext
* at the end, when no further errors are possible. Also, we don't make this
* context the current context except in very brief code sections, out of fear
* that some of our callees allocate memory on their own which would be leaked
@@ -373,7 +373,7 @@ retry:
* We have a fully valid partdesc. Reparent it so that it has the right
* lifespan.
*/
- MemoryContextSetParent(new_pdcxt, CacheMemoryContext);
+ MemoryContextSetParent(new_pdcxt, RelCacheContext);
/*
* Store it into relcache.
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 12c1735906..736d128b20 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -209,6 +209,8 @@ typedef struct PGOutputTxnData
/* Map used to remember which relation schemas we sent. */
static HTAB *RelationSyncCache = NULL;
+static MemoryContext PgOutputCacheContext = NULL;
+
static void init_rel_sync_cache(MemoryContext cachectx);
static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit);
static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data,
@@ -435,6 +437,10 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
/* This plugin uses binary protocol. */
opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
+ PgOutputCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "pgoutput cache context",
+ ALLOCSET_DEFAULT_SIZES);
+
/*
* This is replication start and not slot initialization.
*
@@ -521,7 +527,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
}
/* Initialize relation schema cache. */
- init_rel_sync_cache(CacheMemoryContext);
+ init_rel_sync_cache(PgOutputCacheContext);
}
else
{
@@ -1169,7 +1175,7 @@ init_tuple_slot(PGOutputData *data, Relation relation,
TupleDesc outdesc = RelationGetDescr(ancestor);
/* Map must live as long as the session does. */
- oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ oldctx = MemoryContextSwitchTo(PgOutputCacheContext);
entry->attrmap = build_attrmap_by_name_if_req(indesc, outdesc, false);
@@ -1963,7 +1969,7 @@ set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
{
MemoryContext oldctx;
- oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ oldctx = MemoryContextSwitchTo(PgOutputCacheContext);
entry->streamed_txns = lappend_xid(entry->streamed_txns, xid);
@@ -2034,7 +2040,7 @@ get_rel_sync_entry(PGOutputData *data, Relation relation)
/* Reload publications if needed before use. */
if (!publications_valid)
{
- oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ oldctx = MemoryContextSwitchTo(PgOutputCacheContext);
if (data->publications)
{
list_free_deep(data->publications);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index d4e89663ec..3653d43c1d 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -62,6 +62,7 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
+#include "utils/catcache.h"
#include "utils/formatting.h"
#include "utils/guc_hooks.h"
#include "utils/lsyscache.h"
@@ -1464,7 +1465,10 @@ pg_newlocale_from_collation(Oid collid)
if (CollationCache == NULL)
{
- CollationCacheContext = AllocSetContextCreate(TopMemoryContext,
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ CollationCacheContext = AllocSetContextCreate(CacheMemoryContext,
"collation cache",
ALLOCSET_DEFAULT_SIZES);
CollationCache = collation_cache_create(CollationCacheContext,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 259865d5b3..b12526d4be 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -113,9 +113,9 @@ InitializeAttoptCache(void)
hash_create("Attopt cache", 256, &ctl,
HASH_ELEM | HASH_FUNCTION);
- /* Make sure we've initialized CacheMemoryContext. */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Make sure we've initialized RelCacheContext. */
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/* Watch for invalidation events. */
CacheRegisterSyscacheCallback(ATTNUM,
@@ -178,7 +178,7 @@ get_attribute_options(Oid attrelid, int attnum)
{
bytea *bytea_opts = attribute_reloptions(datum, false);
- opts = MemoryContextAlloc(CacheMemoryContext,
+ opts = MemoryContextAlloc(RelCacheContext,
VARSIZE(bytea_opts));
memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
}
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index ee303dc501..db41a26a9d 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -64,6 +64,9 @@
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
+static MemoryContext CatCacheContext = NULL;
+static void CreateCatCacheContext(void);
+
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
int nkeys,
Datum v1, Datum v2,
@@ -104,7 +107,6 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
-
/*
* internal support functions
*/
@@ -689,6 +691,17 @@ CreateCacheMemoryContext(void)
ALLOCSET_DEFAULT_SIZES);
}
+static void
+CreateCatCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!CatCacheContext)
+ CatCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "CatCacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
/*
* ResetCatalogCache
@@ -853,10 +866,10 @@ InitCatCache(int id,
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!CatCacheContext)
+ CreateCatCacheContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
/*
* if first time through, initialize the cache group header
@@ -943,7 +956,7 @@ RehashCatCache(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nbuckets; i++)
@@ -981,7 +994,7 @@ RehashCatCacheLists(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nlbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nlbuckets; i++)
@@ -1048,9 +1061,9 @@ CatalogCacheInitializeCache(CatCache *cache)
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
- Assert(CacheMemoryContext != NULL);
+ Assert(CatCacheContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -1111,7 +1124,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ CatCacheContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1694,7 +1707,7 @@ SearchCatCacheList(CatCache *cache,
int nbuckets = 16;
cache->cc_lbucket = (dlist_head *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(CatCacheContext,
nbuckets * sizeof(dlist_head));
/* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
cache->cc_nlbuckets = nbuckets;
@@ -1896,7 +1909,7 @@ SearchCatCacheList(CatCache *cache,
ResourceOwnerEnlarge(CurrentResourceOwner);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -2110,7 +2123,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -2145,7 +2158,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, SysScanDesc scandesc,
else
{
/* Set up keys for a negative cache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
@@ -2381,4 +2394,4 @@ ResOwnerPrintCatCacheList(Datum res)
return psprintf("cache %s (%d), list %p has count %d",
list->my_cache->cc_relname, list->my_cache->id,
list, list->refcount);
}
diff --git a/src/backend/utils/cache/partcache.c b/src/backend/utils/cache/partcache.c
index beec6cddbc..76b60c40f6 100644
--- a/src/backend/utils/cache/partcache.c
+++ b/src/backend/utils/cache/partcache.c
@@ -65,11 +65,11 @@ RelationGetPartitionKey(Relation rel)
*
* Partitioning key data is a complex structure; to avoid complicated logic to
* free individual elements whenever the relcache entry is flushed, we give it
- * its own memory context, a child of CacheMemoryContext, which can easily be
+ * its own memory context, a child of RelCacheContext, which can easily be
* deleted on its own. To avoid leaking memory in that context in case of an
* error partway through this function, the context is initially created as a
- * child of CurTransactionContext and only re-parented to CacheMemoryContext
- * at the end, when no further errors are possible. Also, we don't make this
+ * child of CurTransactionContext and only re-parented to RelCacheContext at
+ * the end, when no further errors are possible. Also, we don't make this
* context the current context except in very brief code sections, out of fear
* that some of our callees allocate memory on their own which would be leaked
* permanently.
@@ -263,7 +263,7 @@ RelationBuildPartitionKey(Relation relation)
* Success --- reparent our context and make the relcache point to the
* newly constructed key
*/
- MemoryContextSetParent(partkeycxt, CacheMemoryContext);
+ MemoryContextSetParent(partkeycxt, RelCacheContext);
relation->rd_partkeycxt = partkeycxt;
relation->rd_partkey = key;
}
@@ -411,7 +411,7 @@ generate_partition_qual(Relation rel)
*/
if (result != NIL)
{
- rel->rd_partcheckcxt = AllocSetContextCreate(CacheMemoryContext,
+ rel->rd_partcheckcxt = AllocSetContextCreate(RelCacheContext,
"partition constraint",
ALLOCSET_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(rel->rd_partcheckcxt,
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 5af1a168ec..cebf4e9483 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -99,6 +99,8 @@ static dlist_head saved_plan_list = DLIST_STATIC_INIT(saved_plan_list);
*/
static dlist_head cached_expression_list = DLIST_STATIC_INIT(cached_expression_list);
+static MemoryContext PlanCacheContext = NULL;
+
static void ReleaseGenericPlan(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource,
QueryEnvironment *queryEnv);
@@ -463,10 +465,19 @@ CompleteCachedPlan(CachedPlanSource *plansource,
plansource->is_valid = true;
}
+static void
+CreatePlanCacheContext(void)
+{
+ if (!PlanCacheContext)
+ PlanCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "PlanCacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
/*
* SaveCachedPlan: save a cached plan permanently
*
- * This function moves the cached plan underneath CacheMemoryContext (making
+ * This function moves the cached plan underneath PlanCacheContext (making
* it live for the life of the backend, unless explicitly dropped), and adds
* it to the list of cached plans that are checked for invalidation when an
* sinval event occurs.
@@ -493,18 +504,21 @@ SaveCachedPlan(CachedPlanSource *plansource)
/*
* In typical use, this function would be called before generating any
* plans from the CachedPlanSource. If there is a generic plan, moving it
- * into CacheMemoryContext would be pretty risky since it's unclear
+ * into PlanCacheContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
* long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
+ if (!PlanCacheContext)
+ CreatePlanCacheContext();
+
/*
- * Reparent the source memory context under CacheMemoryContext so that it
+ * Reparent the source memory context under PlanCacheContext so that it
* will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
- MemoryContextSetParent(plansource->context, CacheMemoryContext);
+ MemoryContextSetParent(plansource->context, PlanCacheContext);
/*
* Add the entry to the global list of cached plans.
@@ -1205,8 +1219,8 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/* Immediately reparent into appropriate context */
if (plansource->is_saved)
{
- /* saved plans all live under CacheMemoryContext */
- MemoryContextSetParent(plan->context, CacheMemoryContext);
+ /* saved plans all live under PlanCacheContext */
+ MemoryContextSetParent(plan->context, PlanCacheContext);
plan->is_saved = true;
}
else
@@ -1262,14 +1276,14 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
ResourceOwnerRememberPlanCacheRef(owner, plan);
/*
- * Saved plans should be under CacheMemoryContext so they will not go away
+ * Saved plans should be under PlanCacheContext so they will not go away
* until their reference count goes to zero. In the generic-plan cases we
* already took care of that, but for a custom plan, do it as soon as we
* have created a reference-counted link.
*/
if (customplan && plansource->is_saved)
{
- MemoryContextSetParent(plan->context, CacheMemoryContext);
+ MemoryContextSetParent(plan->context, PlanCacheContext);
plan->is_saved = true;
}
@@ -1492,7 +1506,7 @@ CachedPlanIsSimplyValid(CachedPlanSource *plansource, CachedPlan *plan,
* CachedPlanSetParentContext: move a CachedPlanSource to a new memory context
*
* This can only be applied to unsaved plans; once saved, a plan always
- * lives underneath CacheMemoryContext.
+ * lives underneath PlanCacheContext.
*/
void
CachedPlanSetParentContext(CachedPlanSource *plansource,
@@ -1713,10 +1727,10 @@ GetCachedExpression(Node *expr)
MemoryContextSwitchTo(oldcxt);
/*
- * Reparent the expr's memory context under CacheMemoryContext so that it
+ * Reparent the expr's memory context under PlanCacheContext so that it
* will live indefinitely.
*/
- MemoryContextSetParent(cexpr_context, CacheMemoryContext);
+ MemoryContextSetParent(cexpr_context, PlanCacheContext);
/*
* Add the entry to the global list of cached expressions.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index c326f687eb..9392060e25 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -145,6 +145,8 @@ bool criticalRelcachesBuilt = false;
*/
bool criticalSharedRelcachesBuilt = false;
+MemoryContext RelCacheContext = NULL;
+
/*
* This counter counts relcache inval events received since backend startup
* (but only for rels that are actually in cache). Presently, we use it only
@@ -270,7 +272,6 @@ typedef struct opclasscacheent
static HTAB *OpClassCache = NULL;
-
/* non-export function prototypes */
static void RelationCloseCleanup(Relation relation);
@@ -320,7 +321,6 @@ static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
static void RelationCacheInitFileRemoveInDir(const char *tblspcpath);
static void unlink_initfile(const char *initfilename, int elevel);
-
/*
* ScanPgRelation
*
@@ -412,8 +412,8 @@ AllocateRelationDesc(Form_pg_class relp)
MemoryContext oldcxt;
Form_pg_class relationForm;
- /* Relcache entries must live in CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* Relcache entries must live in RelCacheContext */
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* allocate and zero space for new relation descriptor
@@ -497,14 +497,14 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
options = extractRelOptions(tuple, GetPgClassDescriptor(), amoptsfn);
/*
- * Copy parsed data into CacheMemoryContext. To guard against the
+ * Copy parsed data into RelCacheContext. To guard against the
* possibility of leaks in the reloptions code, we want to do the actual
* parsing in the caller's memory context and copy the results into
- * CacheMemoryContext after the fact.
+ * RelCacheContext after the fact.
*/
if (options)
{
- relation->rd_options = MemoryContextAlloc(CacheMemoryContext,
+ relation->rd_options = MemoryContextAlloc(RelCacheContext,
VARSIZE(options));
memcpy(relation->rd_options, options, VARSIZE(options));
pfree(options);
@@ -534,7 +534,7 @@ RelationBuildTupleDesc(Relation relation)
relation->rd_rel->reltype ? relation->rd_rel->reltype : RECORDOID;
relation->rd_att->tdtypmod = -1; /* just to be sure */
- constr = (TupleConstr *) MemoryContextAllocZero(CacheMemoryContext,
+ constr = (TupleConstr *) MemoryContextAllocZero(RelCacheContext,
sizeof(TupleConstr));
/*
@@ -613,7 +613,7 @@ RelationBuildTupleDesc(Relation relation)
if (attrmiss == NULL)
attrmiss = (AttrMissing *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
relation->rd_rel->relnatts *
sizeof(AttrMissing));
@@ -634,7 +634,7 @@ RelationBuildTupleDesc(Relation relation)
else
{
/* otherwise copy in the correct context */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
attrmiss[attnum - 1].am_value = datumCopy(missval,
attp->attbyval,
attp->attlen);
@@ -745,7 +745,7 @@ RelationBuildRuleLock(Relation relation)
/*
* Make the private context. Assume it'll not contain much data.
*/
- rulescxt = AllocSetContextCreate(CacheMemoryContext,
+ rulescxt = AllocSetContextCreate(RelCacheContext,
"relation rules",
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
@@ -1448,7 +1448,7 @@ RelationInitIndexAccessInfo(Relation relation)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u",
RelationGetRelid(relation));
- oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcontext = MemoryContextSwitchTo(RelCacheContext);
relation->rd_indextuple = heap_copytuple(tuple);
relation->rd_index = (Form_pg_index) GETSTRUCT(relation->rd_indextuple);
MemoryContextSwitchTo(oldcontext);
@@ -1477,7 +1477,7 @@ RelationInitIndexAccessInfo(Relation relation)
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheContext,
"index info",
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
@@ -1622,6 +1622,18 @@ IndexSupportInitialize(oidvector *indclass,
}
}
+void
+CreateRelCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!RelCacheContext)
+ RelCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "RelCacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
/*
* LookupOpclassInfo
*
@@ -1659,9 +1671,9 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure RelCacheContext exists */
+ if (!RelCacheContext)
+ CreateRelCacheContext();
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
@@ -1708,7 +1720,7 @@ LookupOpclassInfo(Oid operatorClassOid,
*/
if (opcentry->supportProcs == NULL && numSupport > 0)
opcentry->supportProcs = (RegProcedure *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
numSupport * sizeof(RegProcedure));
/*
@@ -1867,7 +1879,7 @@ RelationInitTableAccessMethod(Relation relation)
* during bootstrap or before RelationCacheInitializePhase3 runs, and none of
* these properties matter then...)
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheContext.
*/
static void
formrdesc(const char *relationName, Oid relationReltype,
@@ -3116,7 +3128,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
MemoryContext oldcxt;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
EOXactTupleDescArrayLen = 16;
@@ -3580,10 +3592,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* switch to the cache context to create the relcache entry.
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* allocate a new relation descriptor and fill in basic state fields.
@@ -3712,7 +3724,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationInitTableAccessMethod will do syscache lookups, so we mustn't
- * run it in CacheMemoryContext. Fortunately, the remaining steps don't
+ * run it in RelCacheContext. Fortunately, the remaining steps don't
* require a long-lived current context.
*/
MemoryContextSwitchTo(oldcxt);
@@ -4000,8 +4012,8 @@ RelationCacheInitialize(void)
/*
* make sure cache memory context exists
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/*
* create hashtable that indexes the relcache
@@ -4016,7 +4028,7 @@ RelationCacheInitialize(void)
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(RelCacheContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
@@ -4054,10 +4066,7 @@ RelationCacheInitializePhase2(void)
if (IsBootstrapProcessingMode())
return;
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4109,10 +4118,7 @@ RelationCacheInitializePhase3(void)
*/
RelationMapInitializePhase3();
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
@@ -4426,7 +4432,7 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs)
MemoryContext oldcxt;
int i;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
result = CreateTemplateTupleDesc(natts);
result->tdtypeid = RECORDOID; /* not right, but we don't care */
@@ -4496,7 +4502,7 @@ AttrDefaultFetch(Relation relation, int ndef)
/* Allocate array with room for as many entries as expected */
attrdef = (AttrDefault *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
ndef * sizeof(AttrDefault));
/* Search pg_attrdef for relevant entries */
@@ -4535,7 +4541,7 @@ AttrDefaultFetch(Relation relation, int ndef)
char *s = TextDatumGetCString(val);
attrdef[found].adnum = adform->adnum;
- attrdef[found].adbin = MemoryContextStrdup(CacheMemoryContext, s);
+ attrdef[found].adbin = MemoryContextStrdup(RelCacheContext, s);
pfree(s);
found++;
}
@@ -4592,7 +4598,7 @@ CheckConstraintFetch(Relation relation)
/* Allocate array with room for as many entries as expected */
check = (ConstrCheck *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
ncheck * sizeof(ConstrCheck));
/* Search pg_constraint for relevant entries */
@@ -4625,7 +4631,7 @@ CheckConstraintFetch(Relation relation)
check[found].ccvalid = conform->convalidated;
check[found].ccnoinherit = conform->connoinherit;
- check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
+ check[found].ccname = MemoryContextStrdup(RelCacheContext,
NameStr(conform->conname));
/* Grab and test conbin is actually set */
@@ -4640,7 +4646,7 @@ CheckConstraintFetch(Relation relation)
/* detoast and convert to cstring in caller's context */
char *s = TextDatumGetCString(val);
- check[found].ccbin = MemoryContextStrdup(CacheMemoryContext, s);
+ check[found].ccbin = MemoryContextStrdup(RelCacheContext, s);
pfree(s);
found++;
}
@@ -4757,7 +4763,7 @@ RelationGetFKeyList(Relation relation)
table_close(conrel, AccessShareLock);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_fkeylist;
relation->rd_fkeylist = copyObject(result);
relation->rd_fkeyvalid = true;
@@ -4880,7 +4886,7 @@ RelationGetIndexList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_indexlist;
relation->rd_indexlist = list_copy(result);
relation->rd_pkindex = pkeyIndex;
@@ -4972,7 +4978,7 @@ RelationGetStatExtList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_statlist;
relation->rd_statlist = list_copy(result);
@@ -5473,7 +5479,7 @@ restart:
* leave the relcache entry looking like the other ones are valid but
* empty.
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_keyattr = bms_copy(uindexattrs);
relation->rd_pkattr = bms_copy(pkindexattrs);
relation->rd_idattr = bms_copy(idindexattrs);
@@ -5573,7 +5579,7 @@ RelationGetIdentityKeyBitmap(Relation relation)
relation->rd_idattr = NULL;
/* Now save copy of the bitmap in the relcache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_idattr = bms_copy(idindexattrs);
MemoryContextSwitchTo(oldcxt);
@@ -5870,7 +5876,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
}
/* Now save copy of the descriptor in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_pubdesc = palloc(sizeof(PublicationDesc));
memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc));
MemoryContextSwitchTo(oldcxt);
@@ -6073,7 +6079,7 @@ errtableconstraint(Relation rel, const char *conname)
* criticalSharedRelcachesBuilt to true.
* If not successful, return false.
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheContext.
*/
static bool
load_relcache_init_file(bool shared)
@@ -6242,7 +6248,7 @@ load_relcache_init_file(bool shared)
* prepare index info context --- parameters should match
* RelationInitIndexAccessInfo
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheContext,
"index info",
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index ec63cdc8e5..2be8d4a792 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -86,9 +86,8 @@ InitializeTableSpaceCache(void)
hash_create("TableSpace cache", 16, &ctl,
HASH_ELEM | HASH_BLOBS);
- /* Make sure we've initialized CacheMemoryContext. */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/* Watch for invalidation events. */
CacheRegisterSyscacheCallback(TABLESPACEOID,
@@ -151,7 +150,7 @@ get_tablespace(Oid spcid)
{
bytea *bytea_opts = tablespace_reloptions(datum, false);
- opts = MemoryContextAlloc(CacheMemoryContext, VARSIZE(bytea_opts));
+ opts = MemoryContextAlloc(RelCacheContext, VARSIZE(bytea_opts));
memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
}
ReleaseSysCache(tp);
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 54de33eadd..4429bd9f2d 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -75,6 +75,7 @@ static TSConfigCacheEntry *lastUsedConfig = NULL;
*/
char *TSCurrentConfig = NULL;
+static MemoryContext TextSearchCacheContext = NULL;
static Oid TSCurrentConfigCache = InvalidOid;
@@ -106,6 +107,18 @@ InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
TSCurrentConfigCache = InvalidOid;
}
+static void
+CreateTextSearchCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!TextSearchCacheContext)
+ TextSearchCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "TextSearchCacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
/*
* Fetch parser cache entry
*/
@@ -127,9 +140,9 @@ lookup_ts_parser_cache(Oid prsId)
CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
PointerGetDatum(TSParserCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/* Check single-entry cache */
@@ -186,12 +199,12 @@ lookup_ts_parser_cache(Oid prsId)
ReleaseSysCache(tp);
- fmgr_info_cxt(entry->startOid, &entry->prsstart, CacheMemoryContext);
- fmgr_info_cxt(entry->tokenOid, &entry->prstoken, CacheMemoryContext);
- fmgr_info_cxt(entry->endOid, &entry->prsend, CacheMemoryContext);
+ fmgr_info_cxt(entry->startOid, &entry->prsstart, TextSearchCacheContext);
+ fmgr_info_cxt(entry->tokenOid, &entry->prstoken, TextSearchCacheContext);
+ fmgr_info_cxt(entry->endOid, &entry->prsend, TextSearchCacheContext);
if (OidIsValid(entry->headlineOid))
fmgr_info_cxt(entry->headlineOid, &entry->prsheadline,
- CacheMemoryContext);
+ TextSearchCacheContext);
entry->isvalid = true;
}
@@ -224,9 +237,9 @@ lookup_ts_dictionary_cache(Oid dictId)
CacheRegisterSyscacheCallback(TSTEMPLATEOID, InvalidateTSCacheCallBack,
PointerGetDatum(TSDictionaryCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/* Check single-entry cache */
@@ -291,7 +304,7 @@ lookup_ts_dictionary_cache(Oid dictId)
Assert(!found); /* it wasn't there a moment ago */
/* Create private memory context the first time through */
- saveCtx = AllocSetContextCreate(CacheMemoryContext,
+ saveCtx = AllocSetContextCreate(TextSearchCacheContext,
"TS dictionary",
ALLOCSET_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(saveCtx, NameStr(dict->dictname));
@@ -373,9 +386,9 @@ init_ts_config_cache(void)
CacheRegisterSyscacheCallback(TSCONFIGMAP, InvalidateTSCacheCallBack,
PointerGetDatum(TSConfigCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/*
@@ -498,7 +511,7 @@ lookup_ts_config_cache(Oid cfgId)
{
maplists[maxtokentype].len = ndicts;
maplists[maxtokentype].dictIds = (Oid *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(Oid) * ndicts);
memcpy(maplists[maxtokentype].dictIds, mapdicts,
sizeof(Oid) * ndicts);
@@ -525,14 +538,14 @@ lookup_ts_config_cache(Oid cfgId)
/* save the last token type's dictionaries */
maplists[maxtokentype].len = ndicts;
maplists[maxtokentype].dictIds = (Oid *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(Oid) * ndicts);
memcpy(maplists[maxtokentype].dictIds, mapdicts,
sizeof(Oid) * ndicts);
/* and save the overall map */
entry->lenmap = maxtokentype + 1;
entry->map = (ListDictionary *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(ListDictionary) * entry->lenmap);
memcpy(entry->map, maplists,
sizeof(ListDictionary) * entry->lenmap);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 1972bd1944..c80a3e1f20 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -78,6 +78,8 @@
/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;
+static MemoryContext TypCacheContext = NULL;
+
/*
* The mapping of relation's OID to the corresponding composite type OID.
* We're keeping the map entry when the corresponding typentry has something
@@ -362,6 +364,18 @@ type_cache_syshash(const void *key, Size keysize)
return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
}
+static void
+CreateTypCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!TypCacheContext)
+ TypCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "TypCacheContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
/*
* lookup_type_cache
*
@@ -421,16 +435,16 @@ lookup_type_cache(Oid type_id, int flags)
CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TypCacheContext exists */
+ if (!TypCacheContext)
+ CreateTypCacheContext();
/*
* reserve enough in_progress_list slots for many cases
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TypCacheContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
}
@@ -854,7 +868,7 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
+ * Note: we tell fmgr the finfo structures live in TypCacheContext,
* which is not quite right (they're really in the hash table's private
* memory context) but this will do for our purposes.
*
@@ -872,21 +886,21 @@ lookup_type_cache(Oid type_id, int flags)
eq_opr_func = get_opcode(typentry->eq_opr);
if (eq_opr_func != InvalidOid)
fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
typentry->cmp_proc != InvalidOid)
{
fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
typentry->hash_proc_finfo.fn_oid == InvalidOid &&
typentry->hash_proc != InvalidOid)
{
fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
@@ -894,7 +908,7 @@ lookup_type_cache(Oid type_id, int flags)
{
fmgr_info_cxt(typentry->hash_extended_proc,
&typentry->hash_extended_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
/*
@@ -1039,13 +1053,13 @@ load_rangetype_info(TypeCacheEntry *typentry)
/* set up cached fmgrinfo structs */
fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
if (OidIsValid(canonicalOid))
fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
- CacheMemoryContext);
+ TypCacheContext);
if (OidIsValid(subdiffOid))
fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
- CacheMemoryContext);
+ TypCacheContext);
/* Lastly, set up link to the element type --- this marks data valid */
typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
@@ -1074,7 +1088,7 @@ load_multirangetype_info(TypeCacheEntry *typentry)
* Note: we assume we're called in a relatively short-lived context, so it's
* okay to leak data into the current context while scanning pg_constraint.
* We build the new DomainConstraintCache data in a context underneath
- * CurrentMemoryContext, and reparent it under CacheMemoryContext when
+ * CurrentMemoryContext, and reparent it under TypCacheContext when
* complete.
*/
static void
@@ -1296,12 +1310,12 @@ load_domaintype_info(TypeCacheEntry *typentry)
}
/*
- * If we made a constraint object, move it into CacheMemoryContext and
+ * If we made a constraint object, move it into TypCacheContext and
* attach it to the typcache entry.
*/
if (dcc)
{
- MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
+ MemoryContextSetParent(dcc->dccContext, TypCacheContext);
typentry->domainData = dcc;
dcc->dccRefCount++; /* count the typcache's reference */
}
@@ -1799,7 +1813,7 @@ ensure_record_cache_typmod_slot_exists(int32 typmod)
if (RecordCacheArray == NULL)
{
RecordCacheArray = (RecordCacheArrayEntry *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(TypCacheContext,
64 * sizeof(RecordCacheArrayEntry));
RecordCacheArrayLen = 64;
}
@@ -2059,9 +2073,9 @@ assign_record_type_typmod(TupleDesc tupDesc)
&ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TypCacheContext exists */
+ if (!TypCacheContext)
+ CreateTypCacheContext();
}
/*
@@ -2079,7 +2093,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
}
/* Not present, so need to manufacture an entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(TypCacheContext);
/* Look in the SharedRecordTypmodRegistry, if attached */
entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
@@ -2746,8 +2760,8 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
- * leaking memory from CacheMemoryContext in the event of an error partway
+ * permanent memory in TypCacheContext. This minimizes the risk of
+ * leaking memory from TypCacheContext in the event of an error partway
* through.
*/
maxitems = 64;
@@ -2851,8 +2865,8 @@ load_enum_cache_data(TypeCacheEntry *tcache)
break;
}
- /* OK, copy the data into CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* OK, copy the data into TypCacheContext */
+ oldcxt = MemoryContextSwitchTo(TypCacheContext);
enumdata = (TypeCacheEnumData *)
palloc(offsetof(TypeCacheEnumData, enum_values) +
numitems * sizeof(EnumItem));
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index 18c32ea700..ca089d3dbc 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -95,6 +95,7 @@ extern int errtableconstraint(Relation rel, const char *conname);
/*
* Routines for backend startup
*/
+extern void CreateRelCacheContext(void);
extern void RelationCacheInitialize(void);
extern void RelationCacheInitializePhase2(void);
extern void RelationCacheInitializePhase3(void);
@@ -152,4 +153,6 @@ extern PGDLLIMPORT bool criticalRelcachesBuilt;
/* should be used only by relcache.c and postinit.c */
extern PGDLLIMPORT bool criticalSharedRelcachesBuilt;
+extern PGDLLIMPORT MemoryContext RelCacheContext;
+
#endif /* RELCACHE_H */
--
2.34.1
Hi,
On 2024-10-29 15:00:02 -0700, Jeff Davis wrote:
On Wed, 2024-04-03 at 16:12 +0300, Melih Mutlu wrote:
Rebased. PSA.
Thank you. I missed your patch and came up with a similar patch over
here: /messages/by-id/78599c442380ddb5990117e281a4fa65a74231af.camel@j-davis.com
I closed my thread and we can continue this one.
One difference is that I tried to capture almost all uses of
CacheMemoryContext so that it would become just a parent context
without many allocations of its own.
I'm a bit worried about the increase in "wasted" memory we might end up when
creating one aset for *everything*. Just splitting out Relcache and CatCache
isn't a big deal from that angle, they're always used reasonably much. But
creating a bunch of barely used contexts does have the potential for lots of
memory being wasted at the end of a page and on freelists. It might be ok as
far as what you proposed in the above email, I haven't analyzed that in depth
yet.
I agree with others that we should look at changing the initial size or
type of the contexts, but that should be a separate commit.
It needs to be done close together though, otherwise we'll increase the
new-connection-memory-usage of postgres measurably.
I've previously proposed creating a type of memory context that's intended for
places where we never expect to allocate much which allocates from either a
superior memory context or just from the system allocator and tracks memory
via linked lists. That'd allow us to use fairly granular memory contexts with
low overhead, which we e.g. could use to actually create each catcache &
relcache entry in its own context.
One concern that was voiced about that idea was that it'd perform badly if
such a context did end up being used hotly - I'm not sure that's a real
problem, but we could address it by switching to a different allocation scheme
once a certain size is reached.
Greetings,
Andres Freund
On Fri, 2024-11-01 at 15:19 -0400, Andres Freund wrote:
I'm a bit worried about the increase in "wasted" memory we might end
up when
creating one aset for *everything*. Just splitting out Relcache and
CatCache
isn't a big deal from that angle, they're always used reasonably
much. But
creating a bunch of barely used contexts does have the potential for
lots of
memory being wasted at the end of a page and on freelists. It might
be ok as
far was what you proposed in the above email, I haven't analyzed that
in depth
yet.
Melih raised similar concerns. The new contexts that my patch created
were CatCacheContext, RelCacheContext, SPICacheContext,
PgOutputContext, PlanCacheContext, TextSearchCacheContext, and
TypCacheContext.
Those are all created lazily, so you need to at least be using the
relevant feature before it has any cost (with the exception of the
first two).
I agree with others that we should look at changing the initial
size or
type of the contexts, but that should be a separate commit.
It needs to be done close together though, otherwise we'll increase
the
new-connection-memory-usage of postgres measurably.
I don't have a strong opinion here; that was a passing comment. But I'm
curious: why it would increase the per-connection memory usage much to
just have a couple new memory contexts?
I've previously proposed creating a type of memory context that's
intended for
places where we never expect to allocate much which allocates from
either a
superior memory context or just from the system allocator and tracks
memory
via linked lists.
Why not just use ALLOCSET_SMALL_SIZES?
Regards,
Jeff Davis
Hi,
On 2024-11-01 14:47:37 -0700, Jeff Davis wrote:
On Fri, 2024-11-01 at 15:19 -0400, Andres Freund wrote:
I'm a bit worried about the increase in "wasted" memory we might end
up when
creating one aset for *everything*. Just splitting out Relcache and
CatCache
isn't a big deal from that angle, they're always used reasonably
much. But
creating a bunch of barely used contexts does have the potential for
lots of
memory being wasted at the end of a page and on freelists. It might
be ok as
far as what you proposed in the above email, I haven't analyzed that
in depth
yet.
Melih raised similar concerns. The new contexts that my patch created
were CatCacheContext, RelCacheContext, SPICacheContext,
PgOutputContext, PlanCacheContext, TextSearchCacheContext, and
TypCacheContext.
Those are all created lazily, so you need to at least be using the
relevant feature before it has any cost (with the exception of the
first two).
Well, you can't get very far without using at least CatCacheContext,
RelCacheContext, PlanCacheContext, TypCacheContext. The others are indeed much
more specific and not really worth worrying about.
I agree with others that we should look at changing the initial
size or
type of the contexts, but that should be a separate commit.
It needs to be done close together though, otherwise we'll increase
the
new-connection-memory-usage of postgres measurably.
I don't have a strong opinion here; that was a passing comment. But I'm
curious: why it would increase the per-connection memory usage much to
just have a couple new memory contexts?
"much" is maybe too strong. But the memory usage in a new connection is fairly
low, it doesn't take a large increase to be noticeable percentage-wise. And
given how much people love having poolers full of idle connections, it shows
up in aggregate.
I've previously proposed creating a type of memory context that's
intended for
places where we never expect to allocate much which allocates from
either a
superior memory context or just from the system allocator and tracks
memory
via linked lists.
Why not just use ALLOCSET_SMALL_SIZES?
That helps some, but not *that* much. You still end up with a bunch of partially
filled blocks. Here's e.g. an excerpt with your patch applied:
│ name │ ident │ type │ level │ path │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├──────────────────────────────┼────────────────────────────────────────────────┼──────────┼───────┼───────────────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ CacheMemoryContext │ (null) │ AllocSet │ 2 │ {1,19} │ 8192 │ 1 │ 7952 │ 0 │ 240 │
│ TypCacheContext │ (null) │ AllocSet │ 3 │ {1,19,28} │ 8192 │ 1 │ 4816 │ 0 │ 3376 │
│ search_path processing cache │ (null) │ AllocSet │ 3 │ {1,19,29} │ 8192 │ 1 │ 5280 │ 7 │ 2912 │
│ CatCacheContext │ (null) │ AllocSet │ 3 │ {1,19,30} │ 262144 │ 6 │ 14808 │ 0 │ 247336 │
│ RelCacheContext │ (null) │ AllocSet │ 3 │ {1,19,31} │ 262144 │ 6 │ 8392 │ 2 │ 253752 │
│ relation rules │ pg_backend_memory_contexts │ AllocSet │ 4 │ {1,19,31,34} │ 8192 │ 4 │ 3280 │ 1 │ 4912 │
│ index info │ manyrows_pkey │ AllocSet │ 4 │ {1,19,31,35} │ 2048 │ 2 │ 864 │ 1 │ 1184 │
│ index info │ pg_statistic_ext_relid_index │ AllocSet │ 4 │ {1,19,31,36} │ 2048 │ 2 │ 928 │ 1 │ 1120 │
│ index info │ pg_class_tblspc_relfilenode_index │ AllocSet │ 4 │ {1,19,31,37} │ 2048 │ 2 │ 440 │ 1 │ 1608 │
(this is a tiny bit misleading as "search_path processing cache" was just moved)
You can quickly see that the various contexts have a decent amount of free
space — in some cases a sizable fraction of their total space.
We've already been more aggressive about using separate contexts for indexes -
and in aggregate that memory usage shows up:
postgres[1088243][1]=# SELECT count(*), sum(total_bytes) as total_bytes, sum(total_nblocks) as total_nblocks, sum(free_bytes) free_bytes, sum(free_chunks) as free_chunks, sum(used_bytes) used_bytes FROM pg_backend_memory_contexts WHERE path @> (SELECT path FROM pg_backend_memory_contexts WHERE name = 'CacheMemoryContext') and name = 'index info'
┌───────┬─────────────┬───────────────┬────────────┬─────────────┬────────────┐
│ count │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├───────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ 87 │ 162816 │ 144 │ 48736 │ 120 │ 114080 │
└───────┴─────────────┴───────────────┴────────────┴─────────────┴────────────┘
And it's not just the partially filled blocks that are an "issue", it's also
the freelists that are much less likely to be used soon if they're split very
granularly. Often we'll end up with memory in freelists that are created while
building some information that then will not be used again.
Without your patch:
┌────────────────────┬────────────────────────────────────────────────┬──────────┬───────┬────────────┬─────────────┬───────────────┬────────────┬─────────────┬────────────┐
│ name │ ident │ type │ level │ path │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├────────────────────┼────────────────────────────────────────────────┼──────────┼───────┼────────────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ CacheMemoryContext │ (null) │ AllocSet │ 2 │ {1,17} │ 524288 │ 7 │ 75448 │ 0 │ 448840 │
│ relation rules │ pg_backend_memory_contexts │ AllocSet │ 3 │ {1,17,27} │ 8192 │ 4 │ 3472 │ 4 │ 4720 │
...
Greetings,
Andres Freund
On Sat, Nov 2, 2024 at 3:17 AM Jeff Davis <pgsql@j-davis.com> wrote:
On Fri, 2024-11-01 at 15:19 -0400, Andres Freund wrote:
I'm a bit worried about the increase in "wasted" memory we might end
up when
creating one aset for *everything*. Just splitting out Relcache and
CatCache
isn't a big deal from that angle, they're always used reasonably
much. But
creating a bunch of barely used contexts does have the potential for
lots of
memory being wasted at the end of a page and on freelists. It might
be ok as
far as what you proposed in the above email, I haven't analyzed that
in depth
yet.
Melih raised similar concerns. The new contexts that my patch created
were CatCacheContext, RelCacheContext, SPICacheContext,
PgOutputContext, PlanCacheContext, TextSearchCacheContext, and
TypCacheContext.
Those are all created lazily, so you need to at least be using the
relevant feature before it has any cost (with the exception of the
first two).I agree with others that we should look at changing the initial
size or
type of the contexts, but that should be a separate commit. It needs to be done close together though, otherwise we'll increase
the
new-connection-memory-usage of postgres measurably. I don't have a strong opinion here; that was a passing comment. But I'm
curious: why it would increase the per-connection memory usage much to
just have a couple new memory contexts?
Without patch
First backend
SELECT count(*), pg_size_pretty(sum(total_bytes)) as total_bytes,
sum(total_nblocks) as total_nblocks, pg_size_pretty(sum(free_bytes))
free_bytes, sum(free_chunks) as free_chunks,
pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts();
count | total_bytes | total_nblocks | free_bytes | free_chunks | used_bytes
-------+-------------+---------------+------------+-------------+------------
121 | 1917 kB | 208 | 716 kB | 128 | 1201 kB
(1 row)
Second backend
SELECT count(*), pg_size_pretty(sum(total_bytes)) as total_bytes,
sum(total_nblocks) as total_nblocks, pg_size_pretty(sum(free_bytes))
free_bytes, sum(free_chunks) as free_chunks,
pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts();
count | total_bytes | total_nblocks | free_bytes | free_chunks | used_bytes
-------+-------------+---------------+------------+-------------+------------
121 | 1408 kB | 210 | 384 kB | 186 | 1024 kB
(1 row)
With both patches from Melih applied
First backend
SELECT count(*), pg_size_pretty(sum(total_bytes)) as total_bytes,
sum(total_nblocks) as total_nblocks, pg_size_pretty(sum(free_bytes))
free_bytes, sum(free_chunks) as free_chunks,
pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts();
count | total_bytes | total_nblocks | free_bytes | free_chunks | used_bytes
-------+-------------+---------------+------------+-------------+------------
124 | 1670 kB | 207 | 467 kB | 128 | 1203 kB
(1 row)
Second backend
SELECT count(*), pg_size_pretty(sum(total_bytes)) as total_bytes,
sum(total_nblocks) as total_nblocks, pg_size_pretty(sum(free_bytes))
free_bytes, sum(free_chunks) as free_chunks,
pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts();
count | total_bytes | total_nblocks | free_bytes | free_chunks | used_bytes
-------+-------------+---------------+------------+-------------+------------
124 | 1417 kB | 209 | 391 kB | 187 | 1026 kB
(1 row)
So it looks like the patches do reduce memory allocated at the start
of a backend. That is better as far as the conditions just after the
backend start are concerned.
The chunks of memory allocated in a given context will more likely
have similar sizes since they will be allocated for the same types of
objects as compared to one big context where chunks are allocated for
many different kinds of objects. I believe this will lead to a better
utilization of freelist.
--
Best Wishes,
Ashutosh Bapat
On Sat, Nov 2, 2024 at 4:18 AM Andres Freund <andres@anarazel.de> wrote:
I've previously proposed creating a type of memory context that's
intended for
places where we never expect to allocate much which allocates from
either a
superior memory context or just from the system allocator and tracks
memory
via linked lists.Why not just use ALLOCSET_SMALL_SIZES?
That helps some, but not *that* much. You still end up with a bunch of partially
filled blocks. Here's e.g. an excerpt with your patch applied:│ name │ ident │ type │ level │ path │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├──────────────────────────────┼────────────────────────────────────────────────┼──────────┼───────┼───────────────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ CacheMemoryContext │ (null) │ AllocSet │ 2 │ {1,19} │ 8192 │ 1 │ 7952 │ 0 │ 240 │
│ TypCacheContext │ (null) │ AllocSet │ 3 │ {1,19,28} │ 8192 │ 1 │ 4816 │ 0 │ 3376 │
│ search_path processing cache │ (null) │ AllocSet │ 3 │ {1,19,29} │ 8192 │ 1 │ 5280 │ 7 │ 2912 │
│ CatCacheContext │ (null) │ AllocSet │ 3 │ {1,19,30} │ 262144 │ 6 │ 14808 │ 0 │ 247336 │
│ RelCacheContext │ (null) │ AllocSet │ 3 │ {1,19,31} │ 262144 │ 6 │ 8392 │ 2 │ 253752 │
│ relation rules │ pg_backend_memory_contexts │ AllocSet │ 4 │ {1,19,31,34} │ 8192 │ 4 │ 3280 │ 1 │ 4912 │
│ index info │ manyrows_pkey │ AllocSet │ 4 │ {1,19,31,35} │ 2048 │ 2 │ 864 │ 1 │ 1184 │
│ index info │ pg_statistic_ext_relid_index │ AllocSet │ 4 │ {1,19,31,36} │ 2048 │ 2 │ 928 │ 1 │ 1120 │
│ index info │ pg_class_tblspc_relfilenode_index │ AllocSet │ 4 │ {1,19,31,37} │ 2048 │ 2 │ 440 │ 1 │ 1608 │(this is a tiny bit misleading as "search_path processing cache" was just moved")
You can quickly see that the various contexts have a decent amount of free
space, some of their space. We've already been more aggressive about using separate contexts for indexes -
and in aggregate that memory usage shows up:postgres[1088243][1]=# SELECT count(*), sum(total_bytes) as total_bytes, sum(total_nblocks) as total_nblocks, sum(free_bytes) free_bytes, sum(free_chunks) as free_chunks, sum(used_bytes) used_bytes FROM pg_backend_memory_contexts WHERE path @> (SELECT path FROM pg_backend_memory_contexts WHERE name = 'CacheMemoryContext') and name = 'index info'
┌───────┬─────────────┬───────────────┬────────────┬─────────────┬────────────┐
│ count │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├───────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ 87 │ 162816 │ 144 │ 48736 │ 120 │ 114080 │
└───────┴─────────────┴───────────────┴────────────┴─────────────┴────────────┘And it's not just the partially filled blocks that are an "issue", it's also
the freelists that are much less likely to be used soon if they're split very
granularly. Often we'll end up with memory in freelists that are created while
building some information that then will not be used again.Without your patch:
┌────────────────────┬────────────────────────────────────────────────┬──────────┬───────┬────────────┬─────────────┬───────────────┬────────────┬─────────────┬────────────┐
│ name │ ident │ type │ level │ path │ total_bytes │ total_nblocks │ free_bytes │ free_chunks │ used_bytes │
├────────────────────┼────────────────────────────────────────────────┼──────────┼───────┼────────────┼─────────────┼───────────────┼────────────┼─────────────┼────────────┤
│ CacheMemoryContext │ (null) │ AllocSet │ 2 │ {1,17} │ 524288 │ 7 │ 75448 │ 0 │ 448840 │
│ relation rules │ pg_backend_memory_contexts │ AllocSet │ 3 │ {1,17,27} │ 8192 │ 4 │ 3472 │ 4 │ 4720 │
...
If these caches are not used at all, this might be a problem. But I
think the applications which use TextSearchCacheContext, let's say,
are likely to use it so frequently that the free chunks will be
recycled. So, I don't know whether that will be a huge problem with
partial blocks and freelists.
However, we agree that it's generally good to have (at least some)
specific contexts as children of cache memory context. It will be good
to move ahead with the ones we all agree for now. Looking at all the
emails, those will be CatCacheContext,
RelCacheContext, PlanCacheContext, TypCacheContext. If we go with
fewer context, it will be good not to lose the work Jeff did for other
contexts though. I like those Create*CacheContext() functions. They
identify various specific uses of CacheMemoryContext. In future, if we
think that we need specific contexts for some of those, these will be
the functions where we will create specific contexts. We might need to
change the name of those functions to Get*CacheContext() instead of
Create since they won't create a context right now.
--
Best Wishes,
Ashutosh Bapat
On Mon, 2024-11-11 at 17:05 +0530, Ashutosh Bapat wrote:
It will be good
to move ahead with the ones we all agree for now. Looking at all the
emails, those will be CatCacheContext,
RelCacheContext, PlanCacheContext, TypCacheContext.
I'm not sure we have consensus on all of those yet. Andres's concern,
IIUC, is that the additional memory contexts will cause additional
fragmentation.
I believe we have a rough consensus that CatCacheContext and
RelCacheContext are wanted, but we're trying to find ways to mitigate
the fragmentation.
Regards,
Jeff Davis
On Tue, Nov 12, 2024 at 2:57 AM Jeff Davis <pgsql@j-davis.com> wrote:
On Mon, 2024-11-11 at 17:05 +0530, Ashutosh Bapat wrote:
It will be good
to move ahead with the ones we all agree for now. Looking at all the
emails, those will be CatCacheContext,
RelCacheContext, PlanCacheContext, TypCacheContext.I'm not sure we have consensus on all of those yet. Andres's concern,
IIUC, is that the additional memory contexts will cause additional
fragmentation.I believe we have a rough consensus that CatCacheContext and
RelCacheContext are wanted, but we're trying to find ways to mitigate
the fragmentation.
The totals (free_bytes, total_bytes, used_bytes) of memory contexts
separated from CacheMemoryContext and those without separate are
(35968, 540672, 504704) vs (75448,524288,448840). There's about 20K
increased in used_bytes and total_bytes. And we guess/know that that
increase is because of fragmentation. Am I right? But I don't find any
reference to what load Andres ran which resulted in this state [1]/messages/by-id/dywwv6v6vq3wfqyebypspq7kuez44tnycbvqjspgsqypuunbzn@mzixkn6g47y2. So
can not make a judgement of whether that increase represents a typical
case or not.
I experimented with the plan cache context. I created 1000 tables
using Melih's [2]/messages/by-id/CAGPVpCTJWEQLt2eOSDGTDtRbQPUQ9b9JtZWro9osJubTyWAEMA@mail.gmail.com queries. But moved them into a single partitioned
table.
With no prepared statement
#SELECT name, count(*), pg_size_pretty(sum(total_bytes)) as
total_bytes, sum(total_nblocks) as total_nblocks,
pg_size_pretty(sum(free_bytes)) free_by
tes, sum(free_chunks) as free_chunks, pg_size_pretty(sum(used_bytes))
used_bytes from pg_get_backend_memory_contexts() where name like
'CachedPlan%' or name = 'PlanCacheContext' group by name;
name | count | total_bytes | total_nblocks | free_bytes |
free_chunks | used_bytes
------------------+-------+-------------+---------------+------------+-------------+------------
PlanCacheContext | 1 | 8192 bytes | 1 | 7952 bytes |
0 | 240 bytes
(1 row)
With 10 prepared statement each selecting from the partitioned table
#SELECT format('prepare all_tables_%s as SELECT count(*) FROM test',
g.i) from generate_series(1, 10) g(i); \gexec
#SELECT name, count(*), pg_size_pretty(sum(total_bytes)) as
total_bytes, sum(total_nblocks) as total_nblocks,
pg_size_pretty(sum(free_bytes)) free_bytes, sum(free_chunks) as
free_chunks, pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts() where name like 'CachedPlan%' or name
= 'PlanCacheContext' group by name;
name | count | total_bytes | total_nblocks | free_bytes |
free_chunks | used_bytes
------------------+-------+-------------+---------------+------------+-------------+------------
CachedPlanQuery | 10 | 40 kB | 30 | 17 kB |
0 | 23 kB
CachedPlanSource | 10 | 20 kB | 20 | 3920 bytes |
0 | 16 kB
PlanCacheContext | 1 | 8192 bytes | 1 | 7952 bytes |
0 | 240 bytes
(3 rows)
After executing all those 10 statements
#SELECT format('execute all_tables_%s', g.i) from generate_series(1,
10) g(i); \gexec
#SELECT name, count(*), pg_size_pretty(sum(total_bytes)) as
total_bytes, sum(total_nblocks) as total_nblocks,
pg_size_pretty(sum(free_bytes)) free_bytes, sum(free_chunks) as
free_chunks, pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts() where name like 'CachedPlan%' or name
= 'PlanCacheContext' group by name;
name | count | total_bytes | total_nblocks | free_bytes |
free_chunks | used_bytes
------------------+-------+-------------+---------------+------------+-------------+------------
CachedPlan | 10 | 20 MB | 124 | 9388 kB |
28 | 11 MB
CachedPlanQuery | 10 | 40 kB | 30 | 17 kB |
0 | 23 kB
CachedPlanSource | 10 | 20 kB | 20 | 3920 bytes |
0 | 16 kB
PlanCacheContext | 1 | 8192 bytes | 1 | 7952 bytes |
0 | 240 bytes
(4 rows)
PlanCacheContext is never used for actual planned statements. In fact
I am not sure whether those 8K bytes it's consuming are real or just
context overhead. The real memory is used from CachedPlan* contexts
which are created and destroyed for each prepared statement.
The only use of the shell context is to be able to query memory
context statistics of cached plans, in case we change the names of
contexts for individual planned queries in future.
SELECT name, count(*), pg_size_pretty(sum(total_bytes)) as
total_bytes, sum(total_nblocks) as total_nblocks,
pg_size_pretty(sum(free_bytes)) free_bytes, sum(free_chunks) as
free_chunks, pg_size_pretty(sum(used_bytes)) used_bytes from
pg_get_backend_memory_contexts() where path @> (select path from
pg_get_backend_memory_contexts() where name = 'PlanCacheContext')
group by name;
So separating PlanCacheContext seems to have little use.
[1]: /messages/by-id/dywwv6v6vq3wfqyebypspq7kuez44tnycbvqjspgsqypuunbzn@mzixkn6g47y2
[2]: /messages/by-id/CAGPVpCTJWEQLt2eOSDGTDtRbQPUQ9b9JtZWro9osJubTyWAEMA@mail.gmail.com
--
Best Wishes,
Ashutosh Bapat
Hi Melih, Jeff,
I tested the v4 patch along with the memory statistics reporting patch from
[1]: PostgreSQL: Enhancing Memory Context Statistics Reporting </messages/by-id/CAH2L28v8mc9HDt8QoSJ8TRmKau_8FM_HKS41NeO9-6ZAkuZKXw@mail.gmail.com>
[image: Memorycontext.drawio.png]
Observations:
1. While there are a number of child contexts like index info of
RelCacheContext,
CatCacheContext does not have any children.
2. While there is a bunch of used memory in RelCacheContext and
CatCacheContext,
SPICacheContext and PlanCacheContext do not have any allocations of their
own
and serve only as parents for SPI and CachedPlan related contexts
respectively.
Having reviewed the discussion regarding potential fragmentation issues
caused by
creating a large number of new contexts in each backend, I would like to
take a step
back and better understand the motivation behind separating these contexts.
IIUC, segregating cache memory allocations into RelCacheContext and
CatCacheContext
allows for grouping a large number of memory allocations under a
common context, which, in turn, aids in monitoring memory consumption.
However,
I believe this reasoning does not apply to SPICacheContext and
PlanCacheContext,
as these contexts do not have any allocations of their own.
How, then, does separating these contexts from CacheMemoryContext improve
monitoring?
Additionally, IIUC, these contexts are created as long-lived contexts, so
they are not designed
to optimize deletion of all their children via MemoryContextDelete on the
parent.
Attached a separate patch to change initial sizes for relcache and
catcache contexts as they grow
large from the start. This was suggested in the thread previously [1].
Also changed CacheMemoryContext to use ALLOCSET_START_SMALL_SIZES, so it
starts from 1KB.
Applying the same change to use ALLOCSET_START_SMALL_SIZES would be
beneficial for
SPICacheContext and PlanCacheContext contexts as well.
On documentation front, the newly added contexts would require a mention in
src/backend/utils/mmgr/README.
[1]: PostgreSQL: Enhancing Memory Context Statistics Reporting </messages/by-id/CAH2L28v8mc9HDt8QoSJ8TRmKau_8FM_HKS41NeO9-6ZAkuZKXw@mail.gmail.com>
</messages/by-id/CAH2L28v8mc9HDt8QoSJ8TRmKau_8FM_HKS41NeO9-6ZAkuZKXw@mail.gmail.com>
[2]: Memorycontext.png - Page-1 <https://viewer.diagrams.net/index.html?tags=%7B%7D&lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=Memorycontext.png#R%3Cmxfile%3E%3Cdiagram%20id%3D%22prtHgNgQTEPvFCAcTncT%22%20name%3D%22Page-1%22%3E7Ztbc9o4FMc%2FDY9kLF%2FhMSHptjNNhy17aZ92hBFYHWN5ZRFgP%2F1KtoSvgCE2mAwvrXWs6%2Fn%2Fjmwdk54xWm5%2BozD0XskM%2BT1dm216xnNP152Bzf8Vhm1iMG0nMSwoniUmkBom%2BD8kjZq0rvAMRbmKjBCf4TBvdEkQIJflbJBSss5XmxM%2FP2oIF6hkmLjQL1v%2FxjPmJdaB7qT2zwgvPDUysIfJnSVUleVKIg%2FOyDpjMl56xogSwpKr5WaEfOE75Zek3ac9d3cToyhgdRqQyVibPgZgYn3%2FFv6Fwx9%2FzmEfyG7eoL%2BSK5azZVvlAjTjHpFFQplHFiSA%2FktqfaJkFcyQGEfjpbTOV0JCbgTc%2BAsxtpXywhUj3OSxpS%2FvJmOKgfYuTpoisqIuOrAiOX8G6QKxA%2FWcnQQcXUSWiNEtb0eRDxl%2By88DSogWu3qpn%2FmFdPUJbtcrvG77fLpPUQiDnPvtf1eCkKc5CVg%2Fih34yCsAELLYcZqPA9T3JIPxLY2vzYpvqsb8aiH%2BH0HXQ69oSeh2xPtDG6bG5ctIhk4qliDIS7z2MEOTEMZKrHngV8n5higf4bCgZQFkA0OFjdw3bFlcp0EIlM3LBKCyNS6ZsVcyoUwtycxwU6nKH9swFqYsSdJ1RyQpKAKcCkn0S0pinh5FpwRLQd85dPN9fUb%2BG2LYhdmm78BgBFktDLoZsMDqGh5WexE79mFwEyE76Joo9l5RpkXCp%2FQcC59VqadQvCD6MIpyYr9G5BuhS%2BgfDf3zQSnP8zvyT4vysNRXM4463ZnvgRn6eBHwa5eTi2gzdFud23Kciz6RzsdyMv5yw48aE5gd033YrO5arOMcLrG%2FTW6OePxMKe7pI9EZDKJ%2BhCiet0tJhCB1vX9CcZLNbBGfMgPBpZA3mEZhnpcqjN49n5ASF0URDhY9oW67g7kiPm4sMOxh1zZElcY5mF8IZo8iUyOeDuIhjd28Y%2FJeRBvMfojrB0uWfsp64vp5k6n2vFWFgK8laWQaQ2X4GRs0y1aGtHFcyrUe83DjLhGPrtjYu3zGwqrWPqOtVSGtstVObMgRxgTHEaTO4wW0Ssgky5StsmmoQkemWegIFDpK%2FFDqKMZvt%2Bx3EFmVe2mISDAY1GeSF4pQZTi1cpQeIfQKMJp3GJuAsSqr1BCMORQfNPChcbTvODaBY1VGrRkcMzBqD451mMWUOzvLXV9TGN%2FA09q4E9kEkVVJvEaIBDkiawNp2KCIpGneCJLDayJpa9UfdE4l0nD0XD9ANy9LZFUG86onGuHyPJGWcWUirZpEgsGH2CUNxaDqyLrwLjmoYLKhPEicJ5yJDx5d%2FcgBCh85DPWMylA0PEBR80mP%2FenABuWYJEHYhihtJOsNs2MqqTxYKxu5Uzh9JXv0uacv2ypt8eBmtvirvnV8mBdhtfQ29pTJ%2BAu%2F3eUtvvjFZ4fHscS22druoZf0wNwlG9EumJOre8yyteMeAxf1WIu5rrNenK%2BV03Jq7pz7FL7vnKdx115S67a4s%2Btyp9%2B5a4K71lJX4itm4fXSOfn1skGwPsgrnuEMcx2Z2oWB2f%2FbuNq%2FCj9%2BbPx9hYQ3W%2FgBwyVOjTtNmj818mL6xxiJpulftBgv%2FwM%3D%3C%2Fdiagram%3E%3C%2Fmxfile%3E#%7B%22pageId%22%3A%
22prtHgNgQTEPvFCAcTncT%22%7D>
<https://viewer.diagrams.net/index.html?tags=%7B%7D&lightbox=1&highlight=0000ff&edit=_blank&layers=1&nav=1&title=Memorycontext.png#R%3Cmxfile%3E%3Cdiagram%20id%3D%22prtHgNgQTEPvFCAcTncT%22%20name%3D%22Page-1%22%3E7Ztbc9o4FMc%2FDY9kLF%2FhMSHptjNNhy17aZ92hBFYHWN5ZRFgP%2F1KtoSvgCE2mAwvrXWs6%2Fn%2Fjmwdk54xWm5%2BozD0XskM%2BT1dm216xnNP152Bzf8Vhm1iMG0nMSwoniUmkBom%2BD8kjZq0rvAMRbmKjBCf4TBvdEkQIJflbJBSss5XmxM%2FP2oIF6hkmLjQL1v%2FxjPmJdaB7qT2zwgvPDUysIfJnSVUleVKIg%2FOyDpjMl56xogSwpKr5WaEfOE75Zek3ac9d3cToyhgdRqQyVibPgZgYn3%2FFv6Fwx9%2FzmEfyG7eoL%2BSK5azZVvlAjTjHpFFQplHFiSA%2FktqfaJkFcyQGEfjpbTOV0JCbgTc%2BAsxtpXywhUj3OSxpS%2FvJmOKgfYuTpoisqIuOrAiOX8G6QKxA%2FWcnQQcXUSWiNEtb0eRDxl%2By88DSogWu3qpn%2FmFdPUJbtcrvG77fLpPUQiDnPvtf1eCkKc5CVg%2Fih34yCsAELLYcZqPA9T3JIPxLY2vzYpvqsb8aiH%2BH0HXQ69oSeh2xPtDG6bG5ctIhk4qliDIS7z2MEOTEMZKrHngV8n5higf4bCgZQFkA0OFjdw3bFlcp0EIlM3LBKCyNS6ZsVcyoUwtycxwU6nKH9swFqYsSdJ1RyQpKAKcCkn0S0pinh5FpwRLQd85dPN9fUb%2BG2LYhdmm78BgBFktDLoZsMDqGh5WexE79mFwEyE76Joo9l5RpkXCp%2FQcC59VqadQvCD6MIpyYr9G5BuhS%2BgfDf3zQSnP8zvyT4vysNRXM4463ZnvgRn6eBHwa5eTi2gzdFud23Kciz6RzsdyMv5yw48aE5gd033YrO5arOMcLrG%2FTW6OePxMKe7pI9EZDKJ%2BhCiet0tJhCB1vX9CcZLNbBGfMgPBpZA3mEZhnpcqjN49n5ASF0URDhY9oW67g7kiPm4sMOxh1zZElcY5mF8IZo8iUyOeDuIhjd28Y%2FJeRBvMfojrB0uWfsp64vp5k6n2vFWFgK8laWQaQ2X4GRs0y1aGtHFcyrUe83DjLhGPrtjYu3zGwqrWPqOtVSGtstVObMgRxgTHEaTO4wW0Ssgky5StsmmoQkemWegIFDpK%2FFDqKMZvt%2Bx3EFmVe2mISDAY1GeSF4pQZTi1cpQeIfQKMJp3GJuAsSqr1BCMORQfNPChcbTvODaBY1VGrRkcMzBqD451mMWUOzvLXV9TGN%2FA09q4E9kEkVVJvEaIBDkiawNp2KCIpGneCJLDayJpa9UfdE4l0nD0XD9ANy9LZFUG86onGuHyPJGWcWUirZpEgsGH2CUNxaDqyLrwLjmoYLKhPEicJ5yJDx5d%2FcgBCh85DPWMylA0PEBR80mP%2FenABuWYJEHYhihtJOsNs2MqqTxYKxu5Uzh9JXv0uacv2ypt8eBmtvirvnV8mBdhtfQ29pTJ%2BAu%2F3eUtvvjFZ4fHscS22druoZf0wNwlG9EumJOre8yyteMeAxf1WIu5rrNenK%2BV03Jq7pz7FL7vnKdx115S67a4s%2Btyp9%2B5a4K71lJX4itm4fXSOfn1skGwPsgrnuEMcx2Z2oWB2f%2FbuNq%2FCj9%2BbPx9hYQ3W%2FgBwyVOjTtNmj818mL6xxiJpulftBgv%2FwM%3D%3C%2Fdiagram%3E%3C%2Fmxfile%3E#%7B%22pageId%22%3A%22prtHgNgQTEPvFCAcTncT%22%7D>
Thank you,
Rahila Syed
Attachments:
On Tue, Nov 26, 2024 at 4:10 PM Rahila Syed <rahilasyed90@gmail.com> wrote:
Having reviewed the discussion regarding potential fragmentation issues
caused by
creating a large number of new contexts in each backend, I would like to
take a step
back and better understand the motivation behind separating these contexts.IIUC, segregating cache memory allocations into RelCacheContext and
CatCacheContext
allows for grouping a large number of memory allocations under a
common context, which, in turn, aids in monitoring memory consumption.
However,
I believe this reasoning does not apply to SPICacheContext and
PlanCacheContext,
as these contexts do not have any allocations of their own.How, then, does separating these contexts from CacheMemoryContext improve
monitoring?
A query which accumulates statistics based on the (context) path prefix
(path of PlanCacheContext or SPICacheContext) can be used to provide total
memory allocated for plans. This will work even if we change the names of
child context e.g. CachedPlanContext, CachedQueryContext or if we add more
child contexts. Probably such a change is mostly unlikely. Whether that
advantage is worth spending extra memory in fragmentation? Probably not.
But I just wanted to note some use.
--
Best Wishes,
Ashutosh Bapat
Hi Rahila,
Rahila Syed <rahilasyed90@gmail.com>, 26 Kas 2024 Sal, 13:40 tarihinde şunu
yazdı:
Observations:
1. While there are a number of child contexts like index info of
RelCacheContext,
CatCacheContext does not have any children.
2. While there is a bunch of used memory in RelCacheContext and
CatCacheContext,
SPICacheContext and PlanCacheContext do not have any allocations of their
own
and serve only as parents for SPI and CachedPlan related contexts
respectively.
Thanks for sharing your observations and the diagram.
Having reviewed the discussion regarding potential fragmentation issues
caused by
creating a large number of new contexts in each backend, I would like to
take a step
back and better understand the motivation behind separating these contexts.IIUC, segregating cache memory allocations into RelCacheContext and
CatCacheContext
allows for grouping a large number of memory allocations under a
common context, which, in turn, aids in monitoring memory consumption.
However,
I believe this reasoning does not apply to SPICacheContext and
PlanCacheContext,
as these contexts do not have any allocations of their own.How, then, does separating these contexts from CacheMemoryContext improve
monitoring?
Additionally, IIUC, these contexts are created as long-lived contexts, so
they are not designed
to optimize deletion of all their children via MemoryContextDelete on the
parent.
I think it all depends on the level of granularity we want in grouping
cache-related memory contexts. Currently, we have relatively low
granularity, and increasing it comes with additional memory usage due to
the newly introduced memory contexts. Ideally, having separate contexts for
each cache type would allow us to see how much memory is allocated for
each, as Ashutosh mentioned [1]/messages/by-id/CAExHW5vLRUk+9ZxF4FgaqdfmU2e8JbWES7ijhA0Bd6_bekr=Kw@mail.gmail.com. Even if a context does not have any
allocations of its own, its children might still use some memory. I
understand that we can already see total memory usage in, e.g.,
PlanCacheContext, since all of its children are named CachedPlan* and we
can query based on this naming. However, this may not always hold true or
could change in the future (though I’m not sure how likely that is).
That said, these changes come with a cost, and it may not be worth it to
separate every single cache into its own context. IIUC, introducing
contexts for heavily used caches results in much less fragmentation. If
that’s the case, then I believe we should focus on RelCache and CatCache,
as they are heavily used since the backend starts. I see that you and
Ashutosh [2]/messages/by-id/CAExHW5skNdLG-kDiKe5k0EHUjc9xumjogHOWtEJKgS_xMB2Vcg@mail.gmail.com mentioned that PlanCacheContext is less likely to be heavily
used, so we could consider leaving that context out for now.
Attached a separate patch to change initial sizes for relcache and
catcache contexts as they grow large from the start. This was suggested in the thread previously [1].
Also changed CacheMemoryContext to use ALLOCSET_START_SMALL_SIZES, so it
starts from 1KB.Applying the same change to use ALLOCSET_START_SMALL_SIZES would be
beneficial for
SPICacheContext and PlanCacheContext contexts as well.
We can even use "ALLOCSET_SMALL_SIZES" if a context rarely has its own
allocations, or some non-default sizes. I'm also okay to not introduce
those new contexts at all, if that'd be what everyone agrees on.
[1]: /messages/by-id/CAExHW5vLRUk+9ZxF4FgaqdfmU2e8JbWES7ijhA0Bd6_bekr=Kw@mail.gmail.com
/messages/by-id/CAExHW5vLRUk+9ZxF4FgaqdfmU2e8JbWES7ijhA0Bd6_bekr=Kw@mail.gmail.com
[2]: /messages/by-id/CAExHW5skNdLG-kDiKe5k0EHUjc9xumjogHOWtEJKgS_xMB2Vcg@mail.gmail.com
/messages/by-id/CAExHW5skNdLG-kDiKe5k0EHUjc9xumjogHOWtEJKgS_xMB2Vcg@mail.gmail.com
Thanks,
--
Melih Mutlu
Microsoft
On Tue, Nov 26, 2024 at 05:11:16PM +0300, Melih Mutlu wrote:
That said, these changes come with a cost, and it may not be worth it to
separate every single cache into its own context. IIUC, introducing
contexts for heavily used caches results in much less fragmentation. If
that’s the case, then I believe we should focus on RelCache and CatCache,
as they are heavily used since the backend starts. I see that you and
Ashutosh [2] mentioned that PlanCacheContext is less likely to be heavily
used, so we could consider leaving that context out for now.
Looking at the tail of this thread, some feedback has been provided.
These are not that easy to look at, so I've marked the patch as
returned with feedback for now, and there were a few concerns raised
as well. Feel free to adjust as you feel if you think this is not
adapted.
--
Michael
Hi,
I rebased and updated the patch to address some concerns raised before and
see if anyone is still interested in this.
I believe that there is a general consensus around RelCacheContext and
CatCacheContext, considering that these two caches are fairly used. For the
rest, I followed Ashutosh's suggestion [1]/messages/by-id/CAExHW5sH4NZnHi4S5ai0uFQgfS_R=rts_+LK5JeEQ-dVzwKRfQ@mail.gmail.com and kept Get*CacheContext()
functions. But those functions do not create a separate context, simply use
CacheMemoryContext instead. It'd be easier to change those to actually
create new memory contexts, if it's decided to have more granularity in
CacheMemoryContext in the future.
What are your thoughts?
[1]: /messages/by-id/CAExHW5sH4NZnHi4S5ai0uFQgfS_R=rts_+LK5JeEQ-dVzwKRfQ@mail.gmail.com
/messages/by-id/CAExHW5sH4NZnHi4S5ai0uFQgfS_R=rts_+LK5JeEQ-dVzwKRfQ@mail.gmail.com
Thanks,
--
Melih Mutlu
Microsoft
Attachments:
v5-0001-Separate-memory-contexts-for-caches.patchapplication/octet-stream; name=v5-0001-Separate-memory-contexts-for-caches.patchDownload
From 4a2972855e12b245e4eb914ff1bebf1d6484fef4 Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Tue, 13 Jun 2023 16:43:24 +0300
Subject: [PATCH v5 1/2] Separate memory contexts for caches
This patch introduces new memory contexts under CacheMemoryContext
for different types of caches such as CatCacheContext and
RelCacheContext. Most of the time relcache and catcache constitute
a large part of CacheMemoryContext and this can make memory usages from
other caches invisible. Having separate contexts aims to help to
understand more about memory usage of cache related contexts.
---
.vscode/c_cpp_properties.json | 16 +++
.vscode/launch.json | 68 +++++++++++++
.vscode/settings.json | 62 ++++++++++++
.vscode/tasks.json | 28 ++++++
src/backend/catalog/namespace.c | 5 +-
src/backend/commands/policy.c | 4 +-
src/backend/commands/tablecmds.c | 2 +-
src/backend/commands/trigger.c | 4 +-
src/backend/executor/spi.c | 26 ++++-
src/backend/foreign/foreign.c | 4 +-
src/backend/partitioning/partdesc.c | 6 +-
src/backend/replication/pgoutput/pgoutput.c | 20 +++-
src/backend/utils/adt/pg_locale.c | 6 +-
src/backend/utils/cache/attoptcache.c | 8 +-
src/backend/utils/cache/catcache.c | 41 +++++---
src/backend/utils/cache/partcache.c | 10 +-
src/backend/utils/cache/plancache.c | 38 +++++---
src/backend/utils/cache/relcache.c | 102 +++++++++++---------
src/backend/utils/cache/spccache.c | 7 +-
src/backend/utils/cache/ts_cache.c | 49 ++++++----
src/backend/utils/cache/typcache.c | 64 +++++++-----
src/include/utils/relcache.h | 3 +
22 files changed, 428 insertions(+), 145 deletions(-)
create mode 100755 .vscode/c_cpp_properties.json
create mode 100755 .vscode/launch.json
create mode 100755 .vscode/settings.json
create mode 100755 .vscode/tasks.json
diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
new file mode 100755
index 0000000000..4039befa76
--- /dev/null
+++ b/.vscode/c_cpp_properties.json
@@ -0,0 +1,16 @@
+{
+ "configurations": [
+ {
+ "name": "Linux",
+ "includePath": [
+ "${workspaceFolder}/**"
+ ],
+ "defines": [],
+ "compilerPath": "/usr/bin/gcc",
+ "cStandard": "c17",
+ "cppStandard": "gnu++17",
+ "intelliSenseMode": "linux-gcc-x64"
+ }
+ ],
+ "version": 4
+}
\ No newline at end of file
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100755
index 0000000000..4fc22e17f0
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,68 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+
+ {
+ "name": "(gdb) Attach",
+ "type": "cppdbg",
+ "request": "attach",
+ "program": "/home/melih/build/src/backend/postgres",
+ "MIMode": "gdb",
+ "setupCommands": [
+ {
+ "description": "Enable pretty-printing for gdb",
+ "text": "-enable-pretty-printing",
+ "ignoreFailures": true
+ },
+ {
+ "description": "Set Disassembly Flavor to Intel",
+ "text": "-gdb-set disassembly-flavor intel",
+ "ignoreFailures": true
+ }
+ ]
+ },
+
+ {
+ "name": "(gdb) psql Attach",
+ "type": "cppdbg",
+ "request": "attach",
+ "program": "/home/melih/build/src/bin/psql/psql",
+ "MIMode": "gdb",
+ "setupCommands": [
+ {
+ "description": "Enable pretty-printing for gdb",
+ "text": "-enable-pretty-printing",
+ "ignoreFailures": true
+ },
+ {
+ "description": "Set Disassembly Flavor to Intel",
+ "text": "-gdb-set disassembly-flavor intel",
+ "ignoreFailures": true
+ }
+ ]
+ },
+
+ {
+ "name": "(gdb) pg_basebackup Attach",
+ "type": "cppdbg",
+ "request": "attach",
+ "program": "/home/melih/build/src/bin/pg_basebackup/pg_basebackup",
+ "MIMode": "gdb",
+ "setupCommands": [
+ {
+ "description": "Enable pretty-printing for gdb",
+ "text": "-enable-pretty-printing",
+ "ignoreFailures": true
+ },
+ {
+ "description": "Set Disassembly Flavor to Intel",
+ "text": "-gdb-set disassembly-flavor intel",
+ "ignoreFailures": true
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/.vscode/settings.json b/.vscode/settings.json
new file mode 100755
index 0000000000..cdc8495d97
--- /dev/null
+++ b/.vscode/settings.json
@@ -0,0 +1,62 @@
+{
+ "files.associations": {
+ "errno.h": "c",
+ "pqformat.h": "c",
+ "stringinfo.h": "c",
+ "libpq-be.h": "c",
+ "latch.h": "c",
+ "in.h": "c",
+ "array": "c",
+ "string": "c",
+ "string_view": "c",
+ "bitset": "c",
+ "initializer_list": "c",
+ "utility": "c",
+ "random": "cpp",
+ "libpq-fe.h": "c",
+ "fmgr.h": "c",
+ "libpq.h": "c",
+ "protocol.h": "c",
+ "atomic": "c",
+ "bit": "c",
+ "*.tcc": "c",
+ "chrono": "c",
+ "cmath": "c",
+ "compare": "c",
+ "concepts": "c",
+ "condition_variable": "c",
+ "cstddef": "c",
+ "deque": "c",
+ "list": "c",
+ "unordered_map": "c",
+ "vector": "c",
+ "exception": "c",
+ "functional": "c",
+ "iterator": "c",
+ "memory": "c",
+ "memory_resource": "c",
+ "numeric": "c",
+ "ratio": "c",
+ "system_error": "c",
+ "type_traits": "c",
+ "algorithm": "c",
+ "limits": "c",
+ "mutex": "c",
+ "ostream": "c",
+ "semaphore": "c",
+ "shared_mutex": "c",
+ "stop_token": "c",
+ "streambuf": "c",
+ "thread": "c",
+ "tuple": "c",
+ "typeinfo": "c",
+ "*.inc": "c",
+ "shmem.h": "c",
+ "walreceiver.h": "c",
+ "bgwriter.h": "c",
+ "xloginsert.h": "c",
+ "pg_bswap.h": "c",
+ "varatt.h": "c",
+ "varlena.h": "c"
+ }
+}
\ No newline at end of file
diff --git a/.vscode/tasks.json b/.vscode/tasks.json
new file mode 100755
index 0000000000..08d9005bbb
--- /dev/null
+++ b/.vscode/tasks.json
@@ -0,0 +1,28 @@
+{
+ "tasks": [
+ {
+ "type": "cppbuild",
+ "label": "C/C++: gcc build active file",
+ "command": "/usr/bin/gcc",
+ "args": [
+ "-fdiagnostics-color=always",
+ "-g",
+ "${file}",
+ "-o",
+ "${fileDirname}/${fileBasenameNoExtension}"
+ ],
+ "options": {
+ "cwd": "${fileDirname}"
+ },
+ "problemMatcher": [
+ "$gcc"
+ ],
+ "group": {
+ "kind": "build",
+ "isDefault": true
+ },
+ "detail": "Task generated by Debugger."
+ }
+ ],
+ "version": "2.0.0"
+}
\ No newline at end of file
diff --git a/src/backend/catalog/namespace.c b/src/backend/catalog/namespace.c
index d97d632a7e..04f7d5051c 100644
--- a/src/backend/catalog/namespace.c
+++ b/src/backend/catalog/namespace.c
@@ -321,8 +321,11 @@ spcache_init(void)
if (SearchPathCacheContext == NULL)
{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
/* Make the context we'll keep search path cache hashtable in */
- SearchPathCacheContext = AllocSetContextCreate(TopMemoryContext,
+ SearchPathCacheContext = AllocSetContextCreate(CacheMemoryContext,
"search_path processing cache",
ALLOCSET_DEFAULT_SIZES);
}
diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c
index 83056960fe..1c32ff8889 100644
--- a/src/backend/commands/policy.c
+++ b/src/backend/commands/policy.c
@@ -313,10 +313,10 @@ RelationBuildRowSecurity(Relation relation)
/*
* Success. Reparent the descriptor's memory context under
- * CacheMemoryContext so that it will live indefinitely, then attach the
+ * RelCacheContext so that it will live indefinitely, then attach the
* policy descriptor to the relcache entry.
*/
- MemoryContextSetParent(rscxt, CacheMemoryContext);
+ MemoryContextSetParent(rscxt, RelCacheContext);
relation->rd_rsdesc = rsdesc;
}
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 5823fce934..d33206f2eb 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -18333,7 +18333,7 @@ register_on_commit_action(Oid relid, OnCommitAction action)
if (action == ONCOMMIT_NOOP || action == ONCOMMIT_PRESERVE_ROWS)
return;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oc = (OnCommitItem *) palloc(sizeof(OnCommitItem));
oc->relid = relid;
diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c
index 97c087929f..837ceb8ee7 100644
--- a/src/backend/commands/trigger.c
+++ b/src/backend/commands/trigger.c
@@ -1851,7 +1851,7 @@ EnableDisableTrigger(Relation rel, const char *tgname, Oid tgparent,
* Build trigger data to attach to the given relcache entry.
*
* Note that trigger data attached to a relcache entry must be stored in
- * CacheMemoryContext to ensure it survives as long as the relcache entry.
+ * RelCacheContext to ensure it survives as long as the relcache entry.
* But we should be running in a less long-lived working context. To avoid
* leaking cache memory if this routine fails partway through, we build a
* temporary TriggerDesc in working memory and then copy the completed
@@ -1998,7 +1998,7 @@ RelationBuildTriggers(Relation relation)
SetTriggerFlags(trigdesc, &(triggers[i]));
/* Copy completed trigdesc into cache storage */
- oldContext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldContext = MemoryContextSwitchTo(RelCacheContext);
relation->trigdesc = CopyTriggerDesc(trigdesc);
MemoryContextSwitchTo(oldContext);
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index ecb2e4ccaa..60a3a5ed64 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -50,6 +50,8 @@ static _SPI_connection *_SPI_current = NULL;
static int _SPI_stack_depth = 0; /* allocated size of _SPI_stack */
static int _SPI_connected = -1; /* current stack index */
+static MemoryContext SPICacheContext = NULL;
+
typedef struct SPICallbackArg
{
const char *query;
@@ -972,6 +974,16 @@ SPI_prepare_params(const char *src,
return result;
}
+static void
+CreateSPICacheContext(void)
+{
+ /*
+ * SPI does not yet have its own memory context; fall back to CacheMemoryContext.
+ */
+ if (!SPICacheContext)
+ SPICacheContext = CacheMemoryContext;
+}
+
int
SPI_keepplan(SPIPlanPtr plan)
{
@@ -981,13 +993,16 @@ SPI_keepplan(SPIPlanPtr plan)
plan->saved || plan->oneshot)
return SPI_ERROR_ARGUMENT;
+ if (!SPICacheContext)
+ CreateSPICacheContext();
+
/*
- * Mark it saved, reparent it under CacheMemoryContext, and mark all the
+ * Mark it saved, reparent it under SPICacheContext, and mark all the
* component CachedPlanSources as saved. This sequence cannot fail
* partway through, so there's no risk of long-term memory leakage.
*/
plan->saved = true;
- MemoryContextSetParent(plan->plancxt, CacheMemoryContext);
+ MemoryContextSetParent(plan->plancxt, SPICacheContext);
foreach(lc, plan->plancache_list)
{
@@ -3255,13 +3270,16 @@ _SPI_save_plan(SPIPlanPtr plan)
MemoryContextSwitchTo(oldcxt);
+ if (!SPICacheContext)
+ CreateSPICacheContext();
+
/*
- * Mark it saved, reparent it under CacheMemoryContext, and mark all the
+ * Mark it saved, reparent it under SPICacheContext, and mark all the
* component CachedPlanSources as saved. This sequence cannot fail
* partway through, so there's no risk of long-term memory leakage.
*/
newplan->saved = true;
- MemoryContextSetParent(newplan->plancxt, CacheMemoryContext);
+ MemoryContextSetParent(newplan->plancxt, SPICacheContext);
foreach(lc, newplan->plancache_list)
{
diff --git a/src/backend/foreign/foreign.c b/src/backend/foreign/foreign.c
index f0835fc307..cd544d3b3a 100644
--- a/src/backend/foreign/foreign.c
+++ b/src/backend/foreign/foreign.c
@@ -449,8 +449,8 @@ GetFdwRoutineForRelation(Relation relation, bool makecopy)
/* Get the info by consulting the catalogs and the FDW code */
fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(relation));
- /* Save the data for later reuse in CacheMemoryContext */
- cfdwroutine = (FdwRoutine *) MemoryContextAlloc(CacheMemoryContext,
+ /* Save the data for later reuse in RelCacheContext */
+ cfdwroutine = (FdwRoutine *) MemoryContextAlloc(RelCacheContext,
sizeof(FdwRoutine));
memcpy(cfdwroutine, fdwroutine, sizeof(FdwRoutine));
relation->rd_fdwroutine = cfdwroutine;
diff --git a/src/backend/partitioning/partdesc.c b/src/backend/partitioning/partdesc.c
index 328b4d450e..a59ca34200 100644
--- a/src/backend/partitioning/partdesc.c
+++ b/src/backend/partitioning/partdesc.c
@@ -115,10 +115,10 @@ RelationGetPartitionDesc(Relation rel, bool omit_detached)
*
* Partition descriptor is a complex structure; to avoid complicated logic to
* free individual elements whenever the relcache entry is flushed, we give it
- * its own memory context, a child of CacheMemoryContext, which can easily be
+ * its own memory context, a child of RelCacheContext, which can easily be
* deleted on its own. To avoid leaking memory in that context in case of an
* error partway through this function, the context is initially created as a
- * child of CurTransactionContext and only re-parented to CacheMemoryContext
+ * child of CurTransactionContext and only re-parented to RelCacheContext
* at the end, when no further errors are possible. Also, we don't make this
* context the current context except in very brief code sections, out of fear
* that some of our callees allocate memory on their own which would be leaked
@@ -373,7 +373,7 @@ retry:
* We have a fully valid partdesc. Reparent it so that it has the right
* lifespan.
*/
- MemoryContextSetParent(new_pdcxt, CacheMemoryContext);
+ MemoryContextSetParent(new_pdcxt, RelCacheContext);
/*
* Store it into relcache.
diff --git a/src/backend/replication/pgoutput/pgoutput.c b/src/backend/replication/pgoutput/pgoutput.c
index 7d464f656a..a1dcc1fdd7 100644
--- a/src/backend/replication/pgoutput/pgoutput.c
+++ b/src/backend/replication/pgoutput/pgoutput.c
@@ -216,6 +216,8 @@ typedef struct PGOutputTxnData
/* Map used to remember which relation schemas we sent. */
static HTAB *RelationSyncCache = NULL;
+static MemoryContext PgOutputCacheContext = NULL;
+
static void init_rel_sync_cache(MemoryContext cachectx);
static void cleanup_rel_sync_cache(TransactionId xid, bool is_commit);
static RelationSyncEntry *get_rel_sync_entry(PGOutputData *data,
@@ -421,6 +423,17 @@ parse_output_parameters(List *options, PGOutputData *data)
errmsg("option \"%s\" missing", "publication_names"));
}
+static void
+CreatePgOutputCacheContext(void)
+{
+ /*
+ * pgoutput does not yet have its own memory context; fall back to
+ * CacheMemoryContext.
+ */
+ if (!PgOutputCacheContext)
+ PgOutputCacheContext = CacheMemoryContext;
+}
+
/*
* Initialize this plugin
*/
@@ -449,6 +462,9 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
/* This plugin uses binary protocol. */
opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT;
+ if (!PgOutputCacheContext)
+ CreatePgOutputCacheContext();
+
/*
* This is replication start and not slot initialization.
*
@@ -535,7 +551,7 @@ pgoutput_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt,
}
/* Initialize relation schema cache. */
- init_rel_sync_cache(CacheMemoryContext);
+ init_rel_sync_cache(PgOutputCacheContext);
}
else
{
@@ -2003,7 +2019,7 @@ set_schema_sent_in_streamed_txn(RelationSyncEntry *entry, TransactionId xid)
{
MemoryContext oldctx;
- oldctx = MemoryContextSwitchTo(CacheMemoryContext);
+ oldctx = MemoryContextSwitchTo(PgOutputCacheContext);
entry->streamed_txns = lappend_xid(entry->streamed_txns, xid);
diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c
index 7d92f580a5..e573a435b6 100644
--- a/src/backend/utils/adt/pg_locale.c
+++ b/src/backend/utils/adt/pg_locale.c
@@ -62,6 +62,7 @@
#include "mb/pg_wchar.h"
#include "miscadmin.h"
#include "utils/builtins.h"
+#include "utils/catcache.h"
#include "utils/formatting.h"
#include "utils/guc_hooks.h"
#include "utils/lsyscache.h"
@@ -1345,7 +1346,10 @@ pg_newlocale_from_collation(Oid collid)
if (CollationCache == NULL)
{
- CollationCacheContext = AllocSetContextCreate(TopMemoryContext,
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ CollationCacheContext = AllocSetContextCreate(CacheMemoryContext,
"collation cache",
ALLOCSET_DEFAULT_SIZES);
CollationCache = collation_cache_create(CollationCacheContext,
diff --git a/src/backend/utils/cache/attoptcache.c b/src/backend/utils/cache/attoptcache.c
index 5c8360c08b..9709199727 100644
--- a/src/backend/utils/cache/attoptcache.c
+++ b/src/backend/utils/cache/attoptcache.c
@@ -113,9 +113,9 @@ InitializeAttoptCache(void)
hash_create("Attopt cache", 256, &ctl,
HASH_ELEM | HASH_FUNCTION);
- /* Make sure we've initialized CacheMemoryContext. */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Make sure we've initialized RelCacheContext. */
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/* Watch for invalidation events. */
CacheRegisterSyscacheCallback(ATTNUM,
@@ -178,7 +178,7 @@ get_attribute_options(Oid attrelid, int attnum)
{
bytea *bytea_opts = attribute_reloptions(datum, false);
- opts = MemoryContextAlloc(CacheMemoryContext,
+ opts = MemoryContextAlloc(RelCacheContext,
VARSIZE(bytea_opts));
memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
}
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 9ad7681f15..7c6a755e35 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -83,6 +83,9 @@ static CatCInProgress *catcache_in_progress_stack = NULL;
/* Cache management header --- pointer is NULL until created */
static CatCacheHeader *CacheHdr = NULL;
+static MemoryContext CatCacheContext = NULL;
+static void CreateCatCacheContext(void);
+
static inline HeapTuple SearchCatCacheInternal(CatCache *cache,
int nkeys,
Datum v1, Datum v2,
@@ -122,7 +125,6 @@ static void CatCacheFreeKeys(TupleDesc tupdesc, int nkeys, int *attnos,
static void CatCacheCopyKeys(TupleDesc tupdesc, int nkeys, int *attnos,
Datum *srckeys, Datum *dstkeys);
-
/*
* internal support functions
*/
@@ -717,6 +719,17 @@ CreateCacheMemoryContext(void)
ALLOCSET_DEFAULT_SIZES);
}
+static void
+CreateCatCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!CatCacheContext)
+ CatCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "CatCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
/*
* ResetCatalogCache
@@ -903,10 +916,10 @@ InitCatCache(int id,
* first switch to the cache context so our allocations do not vanish at
* the end of a transaction
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!CatCacheContext)
+ CreateCatCacheContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
/*
* if first time through, initialize the cache group header
@@ -993,7 +1006,7 @@ RehashCatCache(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nbuckets; i++)
@@ -1031,7 +1044,7 @@ RehashCatCacheLists(CatCache *cp)
/* Allocate a new, larger, hash table. */
newnbuckets = cp->cc_nlbuckets * 2;
- newbucket = (dlist_head *) MemoryContextAllocZero(CacheMemoryContext, newnbuckets * sizeof(dlist_head));
+ newbucket = (dlist_head *) MemoryContextAllocZero(CatCacheContext, newnbuckets * sizeof(dlist_head));
/* Move all entries from old hash table to new. */
for (i = 0; i < cp->cc_nlbuckets; i++)
@@ -1098,9 +1111,9 @@ CatalogCacheInitializeCache(CatCache *cache)
* switch to the cache context so our allocations do not vanish at the end
* of a transaction
*/
- Assert(CacheMemoryContext != NULL);
+ Assert(CatCacheContext != NULL);
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
/*
* copy the relcache's tuple descriptor to permanent cache storage
@@ -1161,7 +1174,7 @@ CatalogCacheInitializeCache(CatCache *cache)
*/
fmgr_info_cxt(eqfunc,
&cache->cc_skey[i].sk_func,
- CacheMemoryContext);
+ CatCacheContext);
/* Initialize sk_attno suitably for HeapKeyTest() and heap scans */
cache->cc_skey[i].sk_attno = cache->cc_keyno[i];
@@ -1746,7 +1759,7 @@ SearchCatCacheList(CatCache *cache,
int nbuckets = 16;
cache->cc_lbucket = (dlist_head *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(CatCacheContext,
nbuckets * sizeof(dlist_head));
/* Don't set cc_nlbuckets if we get OOM allocating cc_lbucket */
cache->cc_nlbuckets = nbuckets;
@@ -1977,7 +1990,7 @@ SearchCatCacheList(CatCache *cache,
ResourceOwnerEnlarge(CurrentResourceOwner);
/* Now we can build the CatCList entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
nmembers = list_length(ctlist);
cl = (CatCList *)
palloc(offsetof(CatCList, members) + nmembers * sizeof(CatCTup *));
@@ -2178,7 +2191,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
dtp = ntp;
/* Allocate memory for CatCTup and the cached tuple in one go */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
ct = (CatCTup *) palloc(sizeof(CatCTup) +
MAXIMUM_ALIGNOF + dtp->t_len);
@@ -2213,7 +2226,7 @@ CatalogCacheCreateEntry(CatCache *cache, HeapTuple ntp, Datum *arguments,
else
{
/* Set up keys for a negative cache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(CatCacheContext);
ct = (CatCTup *) palloc(sizeof(CatCTup));
/*
@@ -2449,4 +2462,4 @@ ResOwnerPrintCatCacheList(Datum res)
return psprintf("cache %s (%d), list %p has count %d",
list->my_cache->cc_relname, list->my_cache->id,
list, list->refcount);
-}
+}
\ No newline at end of file
diff --git a/src/backend/utils/cache/partcache.c b/src/backend/utils/cache/partcache.c
index f5d7d70def..aed33576f0 100644
--- a/src/backend/utils/cache/partcache.c
+++ b/src/backend/utils/cache/partcache.c
@@ -65,11 +65,11 @@ RelationGetPartitionKey(Relation rel)
*
* Partitioning key data is a complex structure; to avoid complicated logic to
* free individual elements whenever the relcache entry is flushed, we give it
- * its own memory context, a child of CacheMemoryContext, which can easily be
+ * its own memory context, a child of RelCacheContext, which can easily be
* deleted on its own. To avoid leaking memory in that context in case of an
* error partway through this function, the context is initially created as a
- * child of CurTransactionContext and only re-parented to CacheMemoryContext
- * at the end, when no further errors are possible. Also, we don't make this
+ * child of CurTransactionContext and only re-parented to RelCacheContext at
+ * the end, when no further errors are possible. Also, we don't make this
* context the current context except in very brief code sections, out of fear
* that some of our callees allocate memory on their own which would be leaked
* permanently.
@@ -263,7 +263,7 @@ RelationBuildPartitionKey(Relation relation)
* Success --- reparent our context and make the relcache point to the
* newly constructed key
*/
- MemoryContextSetParent(partkeycxt, CacheMemoryContext);
+ MemoryContextSetParent(partkeycxt, RelCacheContext);
relation->rd_partkeycxt = partkeycxt;
relation->rd_partkey = key;
}
@@ -411,7 +411,7 @@ generate_partition_qual(Relation rel)
*/
if (result != NIL)
{
- rel->rd_partcheckcxt = AllocSetContextCreate(CacheMemoryContext,
+ rel->rd_partcheckcxt = AllocSetContextCreate(RelCacheContext,
"partition constraint",
ALLOCSET_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(rel->rd_partcheckcxt,
diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c
index 55db8f5370..362180109d 100644
--- a/src/backend/utils/cache/plancache.c
+++ b/src/backend/utils/cache/plancache.c
@@ -99,6 +99,8 @@ static dlist_head saved_plan_list = DLIST_STATIC_INIT(saved_plan_list);
*/
static dlist_head cached_expression_list = DLIST_STATIC_INIT(cached_expression_list);
+static MemoryContext PlanCacheContext = NULL;
+
static void ReleaseGenericPlan(CachedPlanSource *plansource);
static List *RevalidateCachedQuery(CachedPlanSource *plansource,
QueryEnvironment *queryEnv);
@@ -463,10 +465,21 @@ CompleteCachedPlan(CachedPlanSource *plansource,
plansource->is_valid = true;
}
+static void
+CreatePlanCacheContext(void)
+{
+ /*
+ * The plan cache does not yet have its own memory context; fall back to
+ * CacheMemoryContext.
+ */
+ if (!PlanCacheContext)
+ PlanCacheContext = CacheMemoryContext;
+}
+
/*
* SaveCachedPlan: save a cached plan permanently
*
- * This function moves the cached plan underneath CacheMemoryContext (making
+ * This function moves the cached plan underneath PlanCacheContext (making
* it live for the life of the backend, unless explicitly dropped), and adds
* it to the list of cached plans that are checked for invalidation when an
* sinval event occurs.
@@ -493,18 +506,21 @@ SaveCachedPlan(CachedPlanSource *plansource)
/*
* In typical use, this function would be called before generating any
* plans from the CachedPlanSource. If there is a generic plan, moving it
- * into CacheMemoryContext would be pretty risky since it's unclear
+ * into PlanCacheContext would be pretty risky since it's unclear
* whether the caller has taken suitable care with making references
* long-lived. Best thing to do seems to be to discard the plan.
*/
ReleaseGenericPlan(plansource);
+ if (!PlanCacheContext)
+ CreatePlanCacheContext();
+
/*
- * Reparent the source memory context under CacheMemoryContext so that it
+ * Reparent the source memory context under PlanCacheContext so that it
* will live indefinitely. The query_context follows along since it's
* already a child of the other one.
*/
- MemoryContextSetParent(plansource->context, CacheMemoryContext);
+ MemoryContextSetParent(plansource->context, PlanCacheContext);
/*
* Add the entry to the global list of cached plans.
@@ -1205,8 +1221,8 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
/* Immediately reparent into appropriate context */
if (plansource->is_saved)
{
- /* saved plans all live under CacheMemoryContext */
- MemoryContextSetParent(plan->context, CacheMemoryContext);
+ /* saved plans all live under PlanCacheContext */
+ MemoryContextSetParent(plan->context, PlanCacheContext);
plan->is_saved = true;
}
else
@@ -1262,14 +1278,14 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
ResourceOwnerRememberPlanCacheRef(owner, plan);
/*
- * Saved plans should be under CacheMemoryContext so they will not go away
+ * Saved plans should be under PlanCacheContext so they will not go away
* until their reference count goes to zero. In the generic-plan cases we
* already took care of that, but for a custom plan, do it as soon as we
* have created a reference-counted link.
*/
if (customplan && plansource->is_saved)
{
- MemoryContextSetParent(plan->context, CacheMemoryContext);
+ MemoryContextSetParent(plan->context, PlanCacheContext);
plan->is_saved = true;
}
@@ -1492,7 +1508,7 @@ CachedPlanIsSimplyValid(CachedPlanSource *plansource, CachedPlan *plan,
* CachedPlanSetParentContext: move a CachedPlanSource to a new memory context
*
* This can only be applied to unsaved plans; once saved, a plan always
- * lives underneath CacheMemoryContext.
+ * lives underneath PlanCacheContext.
*/
void
CachedPlanSetParentContext(CachedPlanSource *plansource,
@@ -1713,10 +1729,10 @@ GetCachedExpression(Node *expr)
MemoryContextSwitchTo(oldcxt);
/*
- * Reparent the expr's memory context under CacheMemoryContext so that it
+ * Reparent the expr's memory context under PlanCacheContext so that it
* will live indefinitely.
*/
- MemoryContextSetParent(cexpr_context, CacheMemoryContext);
+ MemoryContextSetParent(cexpr_context, PlanCacheContext);
/*
* Add the entry to the global list of cached expressions.
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 398114373e..82e25b5ac3 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -145,6 +145,8 @@ bool criticalRelcachesBuilt = false;
*/
bool criticalSharedRelcachesBuilt = false;
+MemoryContext RelCacheContext = NULL;
+
/*
* This counter counts relcache inval events received since backend startup
* (but only for rels that are actually in cache). Presently, we use it only
@@ -270,7 +272,6 @@ typedef struct opclasscacheent
static HTAB *OpClassCache = NULL;
-
/* non-export function prototypes */
static void RelationCloseCleanup(Relation relation);
@@ -321,7 +322,6 @@ static OpClassCacheEnt *LookupOpclassInfo(Oid operatorClassOid,
static void RelationCacheInitFileRemoveInDir(const char *tblspcpath);
static void unlink_initfile(const char *initfilename, int elevel);
-
/*
* ScanPgRelation
*
@@ -413,8 +413,8 @@ AllocateRelationDesc(Form_pg_class relp)
MemoryContext oldcxt;
Form_pg_class relationForm;
- /* Relcache entries must live in CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* Relcache entries must live in RelCacheContext */
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* allocate and zero space for new relation descriptor
@@ -498,14 +498,14 @@ RelationParseRelOptions(Relation relation, HeapTuple tuple)
options = extractRelOptions(tuple, GetPgClassDescriptor(), amoptsfn);
/*
- * Copy parsed data into CacheMemoryContext. To guard against the
+ * Copy parsed data into RelCacheContext. To guard against the
* possibility of leaks in the reloptions code, we want to do the actual
* parsing in the caller's memory context and copy the results into
- * CacheMemoryContext after the fact.
+ * RelCacheContext after the fact.
*/
if (options)
{
- relation->rd_options = MemoryContextAlloc(CacheMemoryContext,
+ relation->rd_options = MemoryContextAlloc(RelCacheContext,
VARSIZE(options));
memcpy(relation->rd_options, options, VARSIZE(options));
pfree(options);
@@ -535,7 +535,7 @@ RelationBuildTupleDesc(Relation relation)
relation->rd_rel->reltype ? relation->rd_rel->reltype : RECORDOID;
relation->rd_att->tdtypmod = -1; /* just to be sure */
- constr = (TupleConstr *) MemoryContextAllocZero(CacheMemoryContext,
+ constr = (TupleConstr *) MemoryContextAllocZero(RelCacheContext,
sizeof(TupleConstr));
/*
@@ -618,7 +618,7 @@ RelationBuildTupleDesc(Relation relation)
if (attrmiss == NULL)
attrmiss = (AttrMissing *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
relation->rd_rel->relnatts *
sizeof(AttrMissing));
@@ -639,7 +639,7 @@ RelationBuildTupleDesc(Relation relation)
else
{
/* otherwise copy in the correct context */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
attrmiss[attnum - 1].am_value = datumCopy(missval,
attp->attbyval,
attp->attlen);
@@ -737,7 +737,7 @@ RelationBuildRuleLock(Relation relation)
/*
* Make the private context. Assume it'll not contain much data.
*/
- rulescxt = AllocSetContextCreate(CacheMemoryContext,
+ rulescxt = AllocSetContextCreate(RelCacheContext,
"relation rules",
ALLOCSET_SMALL_SIZES);
relation->rd_rulescxt = rulescxt;
@@ -1440,7 +1440,7 @@ RelationInitIndexAccessInfo(Relation relation)
if (!HeapTupleIsValid(tuple))
elog(ERROR, "cache lookup failed for index %u",
RelationGetRelid(relation));
- oldcontext = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcontext = MemoryContextSwitchTo(RelCacheContext);
relation->rd_indextuple = heap_copytuple(tuple);
relation->rd_index = (Form_pg_index) GETSTRUCT(relation->rd_indextuple);
MemoryContextSwitchTo(oldcontext);
@@ -1469,7 +1469,7 @@ RelationInitIndexAccessInfo(Relation relation)
* a context, and not just a couple of pallocs, is so that we won't leak
* any subsidiary info attached to fmgr lookup records.
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheContext,
"index info",
ALLOCSET_SMALL_SIZES);
relation->rd_indexcxt = indexcxt;
@@ -1614,6 +1614,18 @@ IndexSupportInitialize(oidvector *indclass,
}
}
+void
+CreateRelCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ if (!RelCacheContext)
+ RelCacheContext = AllocSetContextCreate(CacheMemoryContext,
+ "RelCacheMemoryContext",
+ ALLOCSET_DEFAULT_SIZES);
+}
+
/*
* LookupOpclassInfo
*
@@ -1651,9 +1663,9 @@ LookupOpclassInfo(Oid operatorClassOid,
/* First time through: initialize the opclass cache */
HASHCTL ctl;
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure RelCacheContext exists */
+ if (!RelCacheContext)
+ CreateRelCacheContext();
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(OpClassCacheEnt);
@@ -1700,7 +1712,7 @@ LookupOpclassInfo(Oid operatorClassOid,
*/
if (opcentry->supportProcs == NULL && numSupport > 0)
opcentry->supportProcs = (RegProcedure *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
numSupport * sizeof(RegProcedure));
/*
@@ -1859,7 +1871,7 @@ RelationInitTableAccessMethod(Relation relation)
* during bootstrap or before RelationCacheInitializePhase3 runs, and none of
* these properties matter then...)
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheContext.
*/
static void
formrdesc(const char *relationName, Oid relationReltype,
@@ -3063,7 +3075,7 @@ RememberToFreeTupleDescAtEOX(TupleDesc td)
{
MemoryContext oldcxt;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
EOXactTupleDescArray = (TupleDesc *) palloc(16 * sizeof(TupleDesc));
EOXactTupleDescArrayLen = 16;
@@ -3527,10 +3539,10 @@ RelationBuildLocalRelation(const char *relname,
/*
* switch to the cache context to create the relcache entry.
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* allocate a new relation descriptor and fill in basic state fields.
@@ -3660,7 +3672,7 @@ RelationBuildLocalRelation(const char *relname,
/*
* RelationInitTableAccessMethod will do syscache lookups, so we mustn't
- * run it in CacheMemoryContext. Fortunately, the remaining steps don't
+ * run it in RelCacheContext. Fortunately, the remaining steps don't
* require a long-lived current context.
*/
MemoryContextSwitchTo(oldcxt);
@@ -3954,8 +3966,8 @@ RelationCacheInitialize(void)
/*
* make sure cache memory context exists
*/
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/*
* create hashtable that indexes the relcache
@@ -3970,7 +3982,7 @@ RelationCacheInitialize(void)
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(RelCacheContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
@@ -4008,10 +4020,7 @@ RelationCacheInitializePhase2(void)
if (IsBootstrapProcessingMode())
return;
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* Try to load the shared relcache cache file. If unsuccessful, bootstrap
@@ -4063,10 +4072,7 @@ RelationCacheInitializePhase3(void)
*/
RelationMapInitializePhase3();
- /*
- * switch to cache memory context
- */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
/*
* Try to load the local relcache cache file. If unsuccessful, bootstrap
@@ -4380,7 +4386,7 @@ BuildHardcodedDescriptor(int natts, const FormData_pg_attribute *attrs)
MemoryContext oldcxt;
int i;
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
result = CreateTemplateTupleDesc(natts);
result->tdtypeid = RECORDOID; /* not right, but we don't care */
@@ -4450,7 +4456,7 @@ AttrDefaultFetch(Relation relation, int ndef)
/* Allocate array with room for as many entries as expected */
attrdef = (AttrDefault *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
ndef * sizeof(AttrDefault));
/* Search pg_attrdef for relevant entries */
@@ -4489,7 +4495,7 @@ AttrDefaultFetch(Relation relation, int ndef)
char *s = TextDatumGetCString(val);
attrdef[found].adnum = adform->adnum;
- attrdef[found].adbin = MemoryContextStrdup(CacheMemoryContext, s);
+ attrdef[found].adbin = MemoryContextStrdup(RelCacheContext, s);
pfree(s);
found++;
}
@@ -4546,7 +4552,7 @@ CheckConstraintFetch(Relation relation)
/* Allocate array with room for as many entries as expected */
check = (ConstrCheck *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(RelCacheContext,
ncheck * sizeof(ConstrCheck));
/* Search pg_constraint for relevant entries */
@@ -4580,7 +4586,7 @@ CheckConstraintFetch(Relation relation)
check[found].ccenforced = conform->conenforced;
check[found].ccvalid = conform->convalidated;
check[found].ccnoinherit = conform->connoinherit;
- check[found].ccname = MemoryContextStrdup(CacheMemoryContext,
+ check[found].ccname = MemoryContextStrdup(RelCacheContext,
NameStr(conform->conname));
/* Grab and test conbin is actually set */
@@ -4595,7 +4601,7 @@ CheckConstraintFetch(Relation relation)
/* detoast and convert to cstring in caller's context */
char *s = TextDatumGetCString(val);
- check[found].ccbin = MemoryContextStrdup(CacheMemoryContext, s);
+ check[found].ccbin = MemoryContextStrdup(RelCacheContext, s);
pfree(s);
found++;
}
@@ -4712,7 +4718,7 @@ RelationGetFKeyList(Relation relation)
table_close(conrel, AccessShareLock);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_fkeylist;
relation->rd_fkeylist = copyObject(result);
relation->rd_fkeyvalid = true;
@@ -4855,7 +4861,7 @@ RelationGetIndexList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_indexlist;
relation->rd_indexlist = list_copy(result);
relation->rd_pkindex = pkeyIndex;
@@ -4947,7 +4953,7 @@ RelationGetStatExtList(Relation relation)
list_sort(result, list_oid_cmp);
/* Now save a copy of the completed list in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
oldlist = relation->rd_statlist;
relation->rd_statlist = list_copy(result);
@@ -5452,7 +5458,7 @@ restart:
* leave the relcache entry looking like the other ones are valid but
* empty.
*/
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_keyattr = bms_copy(uindexattrs);
relation->rd_pkattr = bms_copy(pkindexattrs);
relation->rd_idattr = bms_copy(idindexattrs);
@@ -5552,7 +5558,7 @@ RelationGetIdentityKeyBitmap(Relation relation)
relation->rd_idattr = NULL;
/* Now save copy of the bitmap in the relcache entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_idattr = bms_copy(idindexattrs);
MemoryContextSwitchTo(oldcxt);
@@ -5881,7 +5887,7 @@ RelationBuildPublicationDesc(Relation relation, PublicationDesc *pubdesc)
}
/* Now save copy of the descriptor in the relcache entry. */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(RelCacheContext);
relation->rd_pubdesc = palloc(sizeof(PublicationDesc));
memcpy(relation->rd_pubdesc, pubdesc, sizeof(PublicationDesc));
MemoryContextSwitchTo(oldcxt);
@@ -6084,7 +6090,7 @@ errtableconstraint(Relation rel, const char *conname)
* criticalSharedRelcachesBuilt to true.
* If not successful, return false.
*
- * NOTE: we assume we are already switched into CacheMemoryContext.
+ * NOTE: we assume we are already switched into RelCacheContext.
*/
static bool
load_relcache_init_file(bool shared)
@@ -6255,7 +6261,7 @@ load_relcache_init_file(bool shared)
* prepare index info context --- parameters should match
* RelationInitIndexAccessInfo
*/
- indexcxt = AllocSetContextCreate(CacheMemoryContext,
+ indexcxt = AllocSetContextCreate(RelCacheContext,
"index info",
ALLOCSET_SMALL_SIZES);
rel->rd_indexcxt = indexcxt;
diff --git a/src/backend/utils/cache/spccache.c b/src/backend/utils/cache/spccache.c
index 2345859929..6543f4df0f 100644
--- a/src/backend/utils/cache/spccache.c
+++ b/src/backend/utils/cache/spccache.c
@@ -86,9 +86,8 @@ InitializeTableSpaceCache(void)
hash_create("TableSpace cache", 16, &ctl,
HASH_ELEM | HASH_BLOBS);
- /* Make sure we've initialized CacheMemoryContext. */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ if (!RelCacheContext)
+ CreateRelCacheContext();
/* Watch for invalidation events. */
CacheRegisterSyscacheCallback(TABLESPACEOID,
@@ -151,7 +150,7 @@ get_tablespace(Oid spcid)
{
bytea *bytea_opts = tablespace_reloptions(datum, false);
- opts = MemoryContextAlloc(CacheMemoryContext, VARSIZE(bytea_opts));
+ opts = MemoryContextAlloc(RelCacheContext, VARSIZE(bytea_opts));
memcpy(opts, bytea_opts, VARSIZE(bytea_opts));
}
ReleaseSysCache(tp);
diff --git a/src/backend/utils/cache/ts_cache.c b/src/backend/utils/cache/ts_cache.c
index 18cccd778f..e372874317 100644
--- a/src/backend/utils/cache/ts_cache.c
+++ b/src/backend/utils/cache/ts_cache.c
@@ -75,6 +75,7 @@ static TSConfigCacheEntry *lastUsedConfig = NULL;
*/
char *TSCurrentConfig = NULL;
+static MemoryContext TextSearchCacheContext = NULL;
static Oid TSCurrentConfigCache = InvalidOid;
@@ -106,6 +107,20 @@ InvalidateTSCacheCallBack(Datum arg, int cacheid, uint32 hashvalue)
TSCurrentConfigCache = InvalidOid;
}
+static void
+CreateTextSearchCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ /*
+ * The text search caches do not have their own memory context;
+ * CacheMemoryContext is used instead.
+ */
+ if (!TextSearchCacheContext)
+ TextSearchCacheContext = CacheMemoryContext;
+}
+
/*
* Fetch parser cache entry
*/
@@ -127,9 +142,9 @@ lookup_ts_parser_cache(Oid prsId)
CacheRegisterSyscacheCallback(TSPARSEROID, InvalidateTSCacheCallBack,
PointerGetDatum(TSParserCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/* Check single-entry cache */
@@ -186,12 +201,12 @@ lookup_ts_parser_cache(Oid prsId)
ReleaseSysCache(tp);
- fmgr_info_cxt(entry->startOid, &entry->prsstart, CacheMemoryContext);
- fmgr_info_cxt(entry->tokenOid, &entry->prstoken, CacheMemoryContext);
- fmgr_info_cxt(entry->endOid, &entry->prsend, CacheMemoryContext);
+ fmgr_info_cxt(entry->startOid, &entry->prsstart, TextSearchCacheContext);
+ fmgr_info_cxt(entry->tokenOid, &entry->prstoken, TextSearchCacheContext);
+ fmgr_info_cxt(entry->endOid, &entry->prsend, TextSearchCacheContext);
if (OidIsValid(entry->headlineOid))
fmgr_info_cxt(entry->headlineOid, &entry->prsheadline,
- CacheMemoryContext);
+ TextSearchCacheContext);
entry->isvalid = true;
}
@@ -224,9 +239,9 @@ lookup_ts_dictionary_cache(Oid dictId)
CacheRegisterSyscacheCallback(TSTEMPLATEOID, InvalidateTSCacheCallBack,
PointerGetDatum(TSDictionaryCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/* Check single-entry cache */
@@ -291,7 +306,7 @@ lookup_ts_dictionary_cache(Oid dictId)
Assert(!found); /* it wasn't there a moment ago */
/* Create private memory context the first time through */
- saveCtx = AllocSetContextCreate(CacheMemoryContext,
+ saveCtx = AllocSetContextCreate(TextSearchCacheContext,
"TS dictionary",
ALLOCSET_SMALL_SIZES);
MemoryContextCopyAndSetIdentifier(saveCtx, NameStr(dict->dictname));
@@ -373,9 +388,9 @@ init_ts_config_cache(void)
CacheRegisterSyscacheCallback(TSCONFIGMAP, InvalidateTSCacheCallBack,
PointerGetDatum(TSConfigCacheHash));
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TextSearchCacheContext exists */
+ if (!TextSearchCacheContext)
+ CreateTextSearchCacheContext();
}
/*
@@ -498,7 +513,7 @@ lookup_ts_config_cache(Oid cfgId)
{
maplists[maxtokentype].len = ndicts;
maplists[maxtokentype].dictIds = (Oid *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(Oid) * ndicts);
memcpy(maplists[maxtokentype].dictIds, mapdicts,
sizeof(Oid) * ndicts);
@@ -525,14 +540,14 @@ lookup_ts_config_cache(Oid cfgId)
/* save the last token type's dictionaries */
maplists[maxtokentype].len = ndicts;
maplists[maxtokentype].dictIds = (Oid *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(Oid) * ndicts);
memcpy(maplists[maxtokentype].dictIds, mapdicts,
sizeof(Oid) * ndicts);
/* and save the overall map */
entry->lenmap = maxtokentype + 1;
entry->map = (ListDictionary *)
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TextSearchCacheContext,
sizeof(ListDictionary) * entry->lenmap);
memcpy(entry->map, maplists,
sizeof(ListDictionary) * entry->lenmap);
diff --git a/src/backend/utils/cache/typcache.c b/src/backend/utils/cache/typcache.c
index 5a3b3788d0..9c0bed45cb 100644
--- a/src/backend/utils/cache/typcache.c
+++ b/src/backend/utils/cache/typcache.c
@@ -78,6 +78,8 @@
/* The main type cache hashtable searched by lookup_type_cache */
static HTAB *TypeCacheHash = NULL;
+static MemoryContext TypCacheContext = NULL;
+
/*
* The mapping of relation's OID to the corresponding composite type OID.
* We're keeping the map entry when the corresponding typentry has something
@@ -362,6 +364,20 @@ type_cache_syshash(const void *key, Size keysize)
return GetSysCacheHashValue1(TYPEOID, ObjectIdGetDatum(*(const Oid *) key));
}
+static void
+CreateTypCacheContext(void)
+{
+ if (!CacheMemoryContext)
+ CreateCacheMemoryContext();
+
+ /*
+ * The type cache does not have a separate memory context;
+ * CacheMemoryContext is used instead.
+ */
+ if (!TypCacheContext)
+ TypCacheContext = CacheMemoryContext;
+}
+
/*
* lookup_type_cache
*
@@ -421,16 +437,16 @@ lookup_type_cache(Oid type_id, int flags)
CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TypCacheContext exists */
+ if (!TypCacheContext)
+ CreateTypCacheContext();
/*
* reserve enough in_progress_list slots for many cases
*/
allocsize = 4;
in_progress_list =
- MemoryContextAlloc(CacheMemoryContext,
+ MemoryContextAlloc(TypCacheContext,
allocsize * sizeof(*in_progress_list));
in_progress_list_maxlen = allocsize;
}
@@ -854,7 +870,7 @@ lookup_type_cache(Oid type_id, int flags)
/*
* Set up fmgr lookup info as requested
*
- * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
+ * Note: we tell fmgr the finfo structures live in TypCacheContext,
* which is not quite right (they're really in the hash table's private
* memory context) but this will do for our purposes.
*
@@ -872,21 +888,21 @@ lookup_type_cache(Oid type_id, int flags)
eq_opr_func = get_opcode(typentry->eq_opr);
if (eq_opr_func != InvalidOid)
fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
typentry->cmp_proc != InvalidOid)
{
fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
typentry->hash_proc_finfo.fn_oid == InvalidOid &&
typentry->hash_proc != InvalidOid)
{
fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
if ((flags & TYPECACHE_HASH_EXTENDED_PROC_FINFO) &&
typentry->hash_extended_proc_finfo.fn_oid == InvalidOid &&
@@ -894,7 +910,7 @@ lookup_type_cache(Oid type_id, int flags)
{
fmgr_info_cxt(typentry->hash_extended_proc,
&typentry->hash_extended_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
}
/*
@@ -1039,13 +1055,13 @@ load_rangetype_info(TypeCacheEntry *typentry)
/* set up cached fmgrinfo structs */
fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
- CacheMemoryContext);
+ TypCacheContext);
if (OidIsValid(canonicalOid))
fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
- CacheMemoryContext);
+ TypCacheContext);
if (OidIsValid(subdiffOid))
fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
- CacheMemoryContext);
+ TypCacheContext);
/* Lastly, set up link to the element type --- this marks data valid */
typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
@@ -1074,7 +1090,7 @@ load_multirangetype_info(TypeCacheEntry *typentry)
* Note: we assume we're called in a relatively short-lived context, so it's
* okay to leak data into the current context while scanning pg_constraint.
* We build the new DomainConstraintCache data in a context underneath
- * CurrentMemoryContext, and reparent it under CacheMemoryContext when
+ * CurrentMemoryContext, and reparent it under TypCacheContext when
* complete.
*/
static void
@@ -1296,12 +1312,12 @@ load_domaintype_info(TypeCacheEntry *typentry)
}
/*
- * If we made a constraint object, move it into CacheMemoryContext and
+ * If we made a constraint object, move it into TypCacheContext and
* attach it to the typcache entry.
*/
if (dcc)
{
- MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
+ MemoryContextSetParent(dcc->dccContext, TypCacheContext);
typentry->domainData = dcc;
dcc->dccRefCount++; /* count the typcache's reference */
}
@@ -1799,7 +1815,7 @@ ensure_record_cache_typmod_slot_exists(int32 typmod)
if (RecordCacheArray == NULL)
{
RecordCacheArray = (RecordCacheArrayEntry *)
- MemoryContextAllocZero(CacheMemoryContext,
+ MemoryContextAllocZero(TypCacheContext,
64 * sizeof(RecordCacheArrayEntry));
RecordCacheArrayLen = 64;
}
@@ -2059,9 +2075,9 @@ assign_record_type_typmod(TupleDesc tupDesc)
&ctl,
HASH_ELEM | HASH_FUNCTION | HASH_COMPARE);
- /* Also make sure CacheMemoryContext exists */
- if (!CacheMemoryContext)
- CreateCacheMemoryContext();
+ /* Also make sure TypCacheContext exists */
+ if (!TypCacheContext)
+ CreateTypCacheContext();
}
/*
@@ -2079,7 +2095,7 @@ assign_record_type_typmod(TupleDesc tupDesc)
}
/* Not present, so need to manufacture an entry */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ oldcxt = MemoryContextSwitchTo(TypCacheContext);
/* Look in the SharedRecordTypmodRegistry, if attached */
entDesc = find_or_make_matching_shared_tupledesc(tupDesc);
@@ -2746,8 +2762,8 @@ load_enum_cache_data(TypeCacheEntry *tcache)
/*
* Read all the information for members of the enum type. We collect the
* info in working memory in the caller's context, and then transfer it to
- * permanent memory in CacheMemoryContext. This minimizes the risk of
- * leaking memory from CacheMemoryContext in the event of an error partway
+ * permanent memory in TypCacheContext. This minimizes the risk of
+ * leaking memory from TypCacheContext in the event of an error partway
* through.
*/
maxitems = 64;
@@ -2851,8 +2867,8 @@ load_enum_cache_data(TypeCacheEntry *tcache)
break;
}
- /* OK, copy the data into CacheMemoryContext */
- oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
+ /* OK, copy the data into TypCacheContext */
+ oldcxt = MemoryContextSwitchTo(TypCacheContext);
enumdata = (TypeCacheEnumData *)
palloc(offsetof(TypeCacheEnumData, enum_values) +
numitems * sizeof(EnumItem));
diff --git a/src/include/utils/relcache.h b/src/include/utils/relcache.h
index a7c55db339..07d8ec4cd8 100644
--- a/src/include/utils/relcache.h
+++ b/src/include/utils/relcache.h
@@ -95,6 +95,7 @@ extern int errtableconstraint(Relation rel, const char *conname);
/*
* Routines for backend startup
*/
+extern void CreateRelCacheContext(void);
extern void RelationCacheInitialize(void);
extern void RelationCacheInitializePhase2(void);
extern void RelationCacheInitializePhase3(void);
@@ -152,4 +153,6 @@ extern PGDLLIMPORT bool criticalRelcachesBuilt;
/* should be used only by relcache.c and postinit.c */
extern PGDLLIMPORT bool criticalSharedRelcachesBuilt;
+extern PGDLLIMPORT MemoryContext RelCacheContext;
+
#endif /* RELCACHE_H */
--
2.34.1
v5-0002-Adjusting-cache-memory-context-sizes.patchapplication/octet-stream; name=v5-0002-Adjusting-cache-memory-context-sizes.patchDownload
From 7cdf157a3c74b095832d38985c1048bc07677ff3 Mon Sep 17 00:00:00 2001
From: Melih Mutlu <m.melihmutlu@gmail.com>
Date: Thu, 31 Oct 2024 16:34:34 +0300
Subject: [PATCH v5 2/2] Adjusting cache memory context sizes
---
src/backend/utils/cache/catcache.c | 6 ++++--
src/backend/utils/cache/relcache.c | 4 +++-
2 files changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/backend/utils/cache/catcache.c b/src/backend/utils/cache/catcache.c
index 7c6a755e35..c2b435b651 100644
--- a/src/backend/utils/cache/catcache.c
+++ b/src/backend/utils/cache/catcache.c
@@ -716,7 +716,7 @@ CreateCacheMemoryContext(void)
if (!CacheMemoryContext)
CacheMemoryContext = AllocSetContextCreate(TopMemoryContext,
"CacheMemoryContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_START_SMALL_SIZES);
}
static void
@@ -728,7 +728,9 @@ CreateCatCacheContext(void)
if (!CatCacheContext)
CatCacheContext = AllocSetContextCreate(CacheMemoryContext,
"CatCacheContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_DEFAULT_MINSIZE,
+ 128 * 1024,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
/*
diff --git a/src/backend/utils/cache/relcache.c b/src/backend/utils/cache/relcache.c
index 82e25b5ac3..b0be345224 100644
--- a/src/backend/utils/cache/relcache.c
+++ b/src/backend/utils/cache/relcache.c
@@ -1623,7 +1623,9 @@ CreateRelCacheContext(void)
if (!RelCacheContext)
RelCacheContext = AllocSetContextCreate(CacheMemoryContext,
"RelCacheContext",
- ALLOCSET_DEFAULT_SIZES);
+ ALLOCSET_DEFAULT_MINSIZE,
+ 128 * 1024,
+ ALLOCSET_DEFAULT_MAXSIZE);
}
/*
--
2.34.1