Save a few bytes in pg_attribute
After the discussion in [0] ff., I was looking around in pg_attribute
and noticed that we could possibly save 4 bytes. We could change both
attstattarget and attinhcount from int4 to int2, which together with
some reordering would save 4 bytes from the fixed portion.
attstattarget is already limited to 10000, so this wouldn't lose
anything. For attinhcount, I don't see any documented limits. But it
seems unlikely to me that someone would need more than 32k immediate
inheritance parents on a column. (Maybe an overflow check would be
useful, though, to prevent shenanigans.)
The attached patch seems to work. Thoughts?
[0]: /messages/by-id/20230313204119.4mkepdvixcxrwpsc@awork3.anarazel.de
/messages/by-id/20230313204119.4mkepdvixcxrwpsc@awork3.anarazel.de
Attachments:
0001-Save-a-few-bytes-in-pg_attribute.patch (text/plain; charset=UTF-8) — Download
From 400f44d18875c40605147c8d81793c6dcc46f8a6 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Mon, 20 Mar 2023 09:51:20 +0100
Subject: [PATCH] Save a few bytes in pg_attribute
attstattarget and attinhcount can be converted to int16 (from int32).
---
doc/src/sgml/catalogs.sgml | 36 +++++++++++++++---------------
src/backend/catalog/heap.c | 4 ++--
src/backend/catalog/index.c | 2 +-
src/include/catalog/pg_attribute.h | 20 ++++++++---------
4 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 746baf5053..ad5d15ec49 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -1164,23 +1164,6 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
- <row>
- <entry role="catalog_table_entry"><para role="column_definition">
- <structfield>attstattarget</structfield> <type>int4</type>
- </para>
- <para>
- <structfield>attstattarget</structfield> controls the level of detail
- of statistics accumulated for this column by
- <link linkend="sql-analyze"><command>ANALYZE</command></link>.
- A zero value indicates that no statistics should be collected.
- A negative value says to use the system default statistics target.
- The exact meaning of positive values is data type-dependent.
- For scalar data types, <structfield>attstattarget</structfield>
- is both the target number of <quote>most common values</quote>
- to collect, and the target number of histogram bins to create.
- </para></entry>
- </row>
-
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attlen</structfield> <type>int2</type>
@@ -1362,7 +1345,7 @@ <title><structname>pg_attribute</structname> Columns</title>
<row>
<entry role="catalog_table_entry"><para role="column_definition">
- <structfield>attinhcount</structfield> <type>int4</type>
+ <structfield>attinhcount</structfield> <type>int2</type>
</para>
<para>
The number of direct ancestors this column has. A column with a
@@ -1370,6 +1353,23 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>attstattarget</structfield> <type>int2</type>
+ </para>
+ <para>
+ <structfield>attstattarget</structfield> controls the level of detail
+ of statistics accumulated for this column by
+ <link linkend="sql-analyze"><command>ANALYZE</command></link>.
+ A zero value indicates that no statistics should be collected.
+ A negative value says to use the system default statistics target.
+ The exact meaning of positive values is data type-dependent.
+ For scalar data types, <structfield>attstattarget</structfield>
+ is both the target number of <quote>most common values</quote>
+ to collect, and the target number of histogram bins to create.
+ </para></entry>
+ </row>
+
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attcollation</structfield> <type>oid</type>
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 4f006820b8..c67dfffa56 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -732,7 +732,6 @@ InsertPgAttributeTuples(Relation pg_attribute_rel,
slot[slotCount]->tts_values[Anum_pg_attribute_attname - 1] = NameGetDatum(&attrs->attname);
slot[slotCount]->tts_values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(attrs->atttypid);
- slot[slotCount]->tts_values[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(attrs->attstattarget);
slot[slotCount]->tts_values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(attrs->attlen);
slot[slotCount]->tts_values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attrs->attnum);
slot[slotCount]->tts_values[Anum_pg_attribute_attndims - 1] = Int32GetDatum(attrs->attndims);
@@ -749,7 +748,8 @@ InsertPgAttributeTuples(Relation pg_attribute_rel,
slot[slotCount]->tts_values[Anum_pg_attribute_attgenerated - 1] = CharGetDatum(attrs->attgenerated);
slot[slotCount]->tts_values[Anum_pg_attribute_attisdropped - 1] = BoolGetDatum(attrs->attisdropped);
slot[slotCount]->tts_values[Anum_pg_attribute_attislocal - 1] = BoolGetDatum(attrs->attislocal);
- slot[slotCount]->tts_values[Anum_pg_attribute_attinhcount - 1] = Int32GetDatum(attrs->attinhcount);
+ slot[slotCount]->tts_values[Anum_pg_attribute_attinhcount - 1] = Int16GetDatum(attrs->attinhcount);
+ slot[slotCount]->tts_values[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(attrs->attstattarget);
slot[slotCount]->tts_values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(attrs->attcollation);
if (attoptions && attoptions[natts] != (Datum) 0)
slot[slotCount]->tts_values[Anum_pg_attribute_attoptions - 1] = attoptions[natts];
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 7777e7ec77..bccea1c749 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1812,7 +1812,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
memset(repl_repl, false, sizeof(repl_repl));
repl_repl[Anum_pg_attribute_attstattarget - 1] = true;
- repl_val[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(attstattarget);
+ repl_val[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(attstattarget);
newTuple = heap_modify_tuple(attrTuple,
RelationGetDescr(pg_attribute),
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index b561e17781..a0d6409036 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -52,15 +52,6 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
*/
Oid atttypid BKI_LOOKUP_OPT(pg_type);
- /*
- * attstattarget is the target number of statistics datapoints to collect
- * during VACUUM ANALYZE of this column. A zero here indicates that we do
- * not wish to collect any stats about this column. A "-1" here indicates
- * that no value has been explicitly set for this column, so ANALYZE
- * should use the default setting.
- */
- int32 attstattarget BKI_DEFAULT(-1);
-
/*
* attlen is a copy of the typlen field from pg_type for this attribute.
* See atttypid comments above.
@@ -165,7 +156,16 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
bool attislocal BKI_DEFAULT(t);
/* Number of times inherited from direct parent relation(s) */
- int32 attinhcount BKI_DEFAULT(0);
+ int16 attinhcount BKI_DEFAULT(0);
+
+ /*
+ * attstattarget is the target number of statistics datapoints to collect
+ * during VACUUM ANALYZE of this column. A zero here indicates that we do
+ * not wish to collect any stats about this column. A "-1" here indicates
+ * that no value has been explicitly set for this column, so ANALYZE
+ * should use the default setting.
+ */
+ int16 attstattarget BKI_DEFAULT(-1);
/* attribute's collation, if any */
Oid attcollation BKI_LOOKUP_OPT(pg_collation);
base-commit: 0b51d423e974557e821d890c0a3a49e419a19caa
--
2.39.2
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
After the discussion in [0] ff., I was looking around in pg_attribute
and noticed that we could possibly save 4 bytes. We could change both
attstattarget and attinhcount from int4 to int2, which together with
some reordering would save 4 bytes from the fixed portion.
attstattarget is already limited to 10000, so this wouldn't lose
anything. For attinhcount, I don't see any documented limits. But it
seems unlikely to me that someone would need more than 32k immediate
inheritance parents on a column. (Maybe an overflow check would be
useful, though, to prevent shenanigans.)
The attached patch seems to work. Thoughts?
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
regards, tom lane
On 3/20/23 15:37, Tom Lane wrote:
Peter Eisentraut <peter.eisentraut@enterprisedb.com> writes:
After the discussion in [0] ff., I was looking around in pg_attribute
and noticed that we could possibly save 4 bytes. We could change both
attstattarget and attinhcount from int4 to int2, which together with
some reordering would save 4 bytes from the fixed portion.
attstattarget is already limited to 10000, so this wouldn't lose
anything. For attinhcount, I don't see any documented limits. But it
seems unlikely to me that someone would need more than 32k immediate
inheritance parents on a column. (Maybe an overflow check would be
useful, though, to prevent shenanigans.)
The attached patch seems to work. Thoughts?
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
Yeah, I don't think it'd be wise to make it harder to increase the
statistics target limit.
IMHO it'd be much better to just not store the statistics target for
attributes that have it default (which we now identify by -1), or for
system attributes (where we store 0). I'd bet vast majority of systems
will just use the default / GUC value. So if we're interested in saving
these bytes, we could just store NULL in these cases, no?
regards
--
Tomas Vondra
EnterpriseDB: http://www.enterprisedb.com
The Enterprise PostgreSQL Company
Tomas Vondra <tomas.vondra@enterprisedb.com> writes:
IMHO it'd be much better to just not store the statistics target for
attributes that have it default (which we now identify by -1), or for
system attributes (where we store 0). I'd bet vast majority of systems
will just use the default / GUC value. So if we're interested in saving
these bytes, we could just store NULL in these cases, no?
Hmm, we'd have to move it to the nullable part of the row and expend
more code to fetch it; but I don't think it's touched in many places,
so that might be a good tradeoff. Couple of notes:
* As things stand I think we have a null bitmap in every row of
pg_attribute already (surely attfdwoptions and attmissingval are never
both filled), so there's no extra cost there.
* Putting it in the variable part of the row means it wouldn't appear
in tuple descriptors, but that seems fine.
I wonder if the same is true of attinhcount. Since it's nonzero for
partitions' attributes, it might be non-null in a fairly large fraction
of pg_attribute rows in some use-cases, but it still seems like we'd not
be losing anything. It wouldn't need to be touched in any
high-performance code paths AFAICS.
regards, tom lane
On Mon, 20 Mar 2023, 11:00 pm Peter Eisentraut,
<peter.eisentraut@enterprisedb.com> wrote:
After the discussion in [0] ff., I was looking around in pg_attribute
and noticed that we could possibly save 4 bytes. We could change both
attstattarget and attinhcount from int4 to int2, which together with
some reordering would save 4 bytes from the fixed portion.
I just want to highlight 1ef61ddce9, which fixed a very long-standing
bug that meant that pg_inherits.inhseqno was effectively just 16-bit.
Perhaps because nobody seemed to report that as a limitation 16 bits
is enough space. I only noticed that as a bug from code reading.
David
Hi,
On 2023-03-20 10:37:36 -0400, Tom Lane wrote:
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
Hard to believe that'd happen in a minor version - and I don't think there'd
an issue with widening it again in a major version?
I doubt we'll ever go to 100k without a major redesign of stats storage/access
- the size of the stats datums would make that pretty impractical right now.
Greetings,
Andres Freund
Andres Freund <andres@anarazel.de> writes:
On 2023-03-20 10:37:36 -0400, Tom Lane wrote:
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
Hard to believe that'd happen in a minor version - and I don't think there'd
an issue with widening it again in a major version?
True. However, I think Tomas' idea of making these columns nullable
is even better than narrowing them.
regards, tom lane
Hi,
On 2023-03-20 11:00:12 +0100, Peter Eisentraut wrote:
After the discussion in [0] ff., I was looking around in pg_attribute and
noticed that we could possibly save 4 bytes. We could change both
attstattarget and attinhcount from int4 to int2, which together with some
reordering would save 4 bytes from the fixed portion.
attndims seems like another good candidate to shrink.
Greetings,
Andres Freund
On 21.03.23 00:51, Tom Lane wrote:
Andres Freund <andres@anarazel.de> writes:
On 2023-03-20 10:37:36 -0400, Tom Lane wrote:
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
Hard to believe that'd happen in a minor version - and I don't think there'd
an issue with widening it again in a major version?
True. However, I think Tomas' idea of making these columns nullable
is even better than narrowing them.
The context of my message was to do the proposed change for PG16 to buy
back a few bytes that are being added by another feature, and then
consider doing a larger detangling of pg_attribute and tuple descriptors
in PG17, which might well involve taking the attstattarget out of the
hot path. Making attstattarget nullable (i.e., not part of the fixed
part of pg_attribute) would require fairly significant surgery, so I
think it would be better done as part of a more comprehensive change
that would allow the same treatment for other columns as well.
Hi,
On 2023-03-21 17:36:48 +0100, Peter Eisentraut wrote:
On 21.03.23 00:51, Tom Lane wrote:
Andres Freund <andres@anarazel.de> writes:
On 2023-03-20 10:37:36 -0400, Tom Lane wrote:
I agree that attinhcount could be narrowed, but I have some concern
about attstattarget. IIRC, the limit on attstattarget was once 1000
and then we raised it to 10000. Is it inconceivable that we might
want to raise it to 100000 someday?
Hard to believe that'd happen in a minor version - and I don't think there'd
an issue with widening it again in a major version?
True. However, I think Tomas' idea of making these columns nullable
is even better than narrowing them.
Why not do both?
The context of my message was to do the proposed change for PG16 to buy back
a few bytes that are being added by another feature
How much would you need to buy back to "reach parity"?
Greetings,
Andres Freund
On 21.03.23 17:43, Andres Freund wrote:
The context of my message was to do the proposed change for PG16 to buy back
a few bytes that are being added by another feature
How much would you need to buy back to "reach parity"?
I don't think we can find enough to make the impact zero bytes. It's
also not clear exactly what the impact of each byte would be (compared
to possible complications in other parts of the code, for example). But
if there are a few low-hanging fruit, it seems like we could pick them,
to hold us over until we have a better solution to the underlying issue.
Hi,
On 2023-03-21 18:15:40 +0100, Peter Eisentraut wrote:
On 21.03.23 17:43, Andres Freund wrote:
The context of my message was to do the proposed change for PG16 to buy back
a few bytes that are being added by another feature
How much would you need to buy back to "reach parity"?
I don't think we can find enough to make the impact zero bytes. It's also
not clear exactly what the impact of each byte would be (compared to
possible complications in other parts of the code, for example). But if
there are a few low-hanging fruit, it seems like we could pick them, to hold
us over until we have a better solution to the underlying issue.
attndims 4->2
attstattarget 4->2
attinhcount 4->2
+ some reordering only gets you from 112->108 unfortunately, due to a 1 byte
alignment hole and 2 bytes of trailing padding.
before:
/* size: 112, cachelines: 2, members: 22 */
/* sum members: 111, holes: 1, sum holes: 1 */
/* last cacheline: 48 bytes */
after:
/* size: 108, cachelines: 2, members: 22 */
/* sum members: 105, holes: 1, sum holes: 1 */
/* padding: 2 */
/* last cacheline: 44 bytes */
You might be able to fill the hole + padding with your data - but IIRC that
was 3 4byte integers?
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to to update tupledescs continually.
Not for your current goal, but I do wonder how hard it'd be to make it work to
store multiple booleans as bitmasks. Probably ties into the discussion around
not relying on struct "mapping" for catalog tables (which we IIRC decided is
the sensible way to address the NAMEDATALEN restriction).
E.g. pg_attribute has 6 booleans, and attgenerated effectively is a boolean
too, and attidentity could easily be modeled as such as well.
If were to not rely on struct mapping anymore, we could possibly transparently
do this as part of forming/deforming heap tuples. Using something like
TYPALIGN_BIT. The question is whether it'd be too expensive to decode...
Greetings,
Andres Freund
Andres Freund <andres@anarazel.de> writes:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
I'd be for that if we can convince ourselves there's not a material
speed penalty. As you say, it's quite ugly.
regards, tom lane
On Tue, 21 Mar 2023 at 19:55, Tom Lane <tgl@sss.pgh.pa.us> wrote:
Andres Freund <andres@anarazel.de> writes:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
I'd be for that if we can convince ourselves there's not a material
speed penalty. As you say, it's quite ugly.
Yes, attcacheoff is a tremendous performance boon in many cases. But
all is not lost:
When I was working on other improvements I experimented with storing
the attributes used in (de)serializing tuples to disk in a separate
structured array in the TupleDesc, a prototype patch of which I shared
here [0]. I didn't see a speed difference back then so I didn't
further venture into that path (as it adds complexity without
performance benefits), but I think it can be relevant to this thread
because with that patch we actually don't need the attcacheoff in the
pg_atttribute struct: it only needs to be present in the derived
"TupleAttrAlignData" structs which carry the
length/alignment/storage/byval info.
Kind regards,
Matthias van de Meent
[0]: /messages/by-id/CAEze2Wh8-metSryZX_Ubj-uv6kb+2YnzHAejmEdubjhmGusBAg@mail.gmail.com
Matthias van de Meent <boekewurm+postgres@gmail.com> writes:
... with that patch we actually don't need the attcacheoff in the
pg_atttribute struct: it only needs to be present in the derived
"TupleAttrAlignData" structs which carry the
length/alignment/storage/byval info.
Yeah, I was wondering about that too: keeping attcacheoff as local
state in slots might get us all its win without so much conceptual
dirtiness.
regards, tom lane
Hi,
On 2023-03-21 20:20:40 +0100, Matthias van de Meent wrote:
On Tue, 21 Mar 2023 at 19:55, Tom Lane <tgl@sss.pgh.pa.us> wrote:
Andres Freund <andres@anarazel.de> writes:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
I'd be for that if we can convince ourselves there's not a material
speed penalty. As you say, it's quite ugly.
Yes, attcacheoff is a tremendous performance boon in many cases.
Which? We don't use fastgetattr() in many places these days. And in some quick
measurements it's a wash or small loss when deforming slot tuples, even when
the attcacheoff optimization would apply, because the branches for managing it
add more overhead than they save.
Greetings,
Andres Freund
On Tue, 21 Mar 2023 at 20:58, Andres Freund <andres@anarazel.de> wrote:
Hi,
On 2023-03-21 20:20:40 +0100, Matthias van de Meent wrote:
On Tue, 21 Mar 2023 at 19:55, Tom Lane <tgl@sss.pgh.pa.us> wrote:
Andres Freund <andres@anarazel.de> writes:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
I'd be for that if we can convince ourselves there's not a material
speed penalty. As you say, it's quite ugly.
Yes, attcacheoff is a tremendous performance boon in many cases.
Which? We don't use fastgetattr() in many places these days. And in some quick
measurements it's a wash or small loss when deforming slot tuples, even when
the attcacheoff optimization would apply, because the branches for managing it
add more overhead than they save.
My experience with attcacheoff performance is in indexes, specifically
index_getattr(). Sure, multi-column indexes are uncommon, but the
difference between have and have-not for cached attribute offsets is
several %.
Kind regards,
Matthias van de Meent
Hi,
On 2023-03-21 15:26:38 -0400, Tom Lane wrote:
Matthias van de Meent <boekewurm+postgres@gmail.com> writes:
... with that patch we actually don't need the attcacheoff in the
pg_atttribute struct: it only needs to be present in the derived
"TupleAttrAlignData" structs which carry the
length/alignment/storage/byval info.
Yeah, I was wondering about that too: keeping attcacheoff as local
state in slots might get us all its win without so much conceptual
dirtiness.
It's also the place where it's the least likely to help - afaict attcacheoff
is only really beneficial for fastgetattr(). Which conditions it's use more
strictly - not only can there not be any NULLs before the accessed column,
there may not be any NULLs in the tuple at all.
Greetings,
Andres Freund
Hi,
On 2023-03-21 21:02:08 +0100, Matthias van de Meent wrote:
On Tue, 21 Mar 2023 at 20:58, Andres Freund <andres@anarazel.de> wrote:
On 2023-03-21 20:20:40 +0100, Matthias van de Meent wrote:
Yes, attcacheoff is a tremendous performance boon in many cases.
Which? We don't use fastgetattr() in many places these days. And in some quick
measurements it's a wash or small loss when deforming slot tuples, even when
the attcacheoff optimization would apply, because the branches for managing it
add more overhead than they save.
My experience with attcacheoff performance is in indexes, specifically
index_getattr(). Sure, multi-column indexes are uncommon, but the
difference between have and have-not for cached attribute offsets is
several %.
I did indeed not think of index_getattr(), just heap related things.
Do you have a good test workload handy - I'm kinda curious to compare the cost
of removing attcacheoff vs the gain of not maintaining it for index workloads.
It looks like many of the index_getattr() cases could be made faster without
attcacheoff. A lot of places seem to loop over all attributes, and the key to
accelerating that is to keep state between the iterations. Attcacheoff is
that, but quite stunted, because it only works if there aren't any NULLs (even
if the NULL is in a later column).
Greetings,
Andres Freund
On Tue, 21 Mar 2023 at 23:05, Andres Freund <andres@anarazel.de> wrote:
Hi,
On 2023-03-21 21:02:08 +0100, Matthias van de Meent wrote:
On Tue, 21 Mar 2023 at 20:58, Andres Freund <andres@anarazel.de> wrote:
On 2023-03-21 20:20:40 +0100, Matthias van de Meent wrote:
Yes, attcacheoff is a tremendous performance boon in many cases.
Which? We don't use fastgetattr() in many places these days. And in some quick
measurements it's a wash or small loss when deforming slot tuples, even when
the attcacheoff optimization would apply, because the branches for managing it
add more overhead than they save.
My experience with attcacheoff performance is in indexes, specifically
index_getattr(). Sure, multi-column indexes are uncommon, but the
difference between have and have-not for cached attribute offsets is
several %.
I did indeed not think of index_getattr(), just heap related things.
Do you have a good test workload handy - I'm kinda curious to compare the cost
of removing attcacheoff vs the gain of not maintaining it for index workloads.
Rebuilding indexes has been my go-to workload for comparing
attribute-related btree performance optimizations in [0] and [1].
Results of tests from '21 in which we're always calculating offsets
from 0 show a slowdown of 4-18% in attcacheoff-enabled workloads if
we're calculating offsets dynamically.
It looks like many of the index_getattr() cases could be made faster without
attcacheoff. A lot of places seem to loop over all attributes, and the key to
accelerating that is to keep state between the iterations.
Indeed, it's not great. You can take a look at [1], which is where I'm
trying to optimize btree's handling of comparing tuples; which
includes work on reducing overhead for attribute accesses.
Note that each btree page should be able to do with comparing at most
2*log(ntups) columns, where this is currently natts * log(ntups).
Attcacheoff is
that, but quite stunted, because it only works if there aren't any NULLs (even
if the NULL is in a later column).
Yes, that isn't great either, but most indexes I've seen have tuples
that are either all NULL, or have no nulls; only seldom I see indexes
that have mixed NULL/not-null index tuple attributes.
Kind regards,
Matthias van de Meent.
[0]: /messages/by-id/CAEze2WhyBT2bKZRdj_U0KS2Sbewa1XoO_BzgpzLC09sa5LUROg@mail.gmail.com
[1]: /messages/by-id/CAEze2Wg52tsSWA9Fy7OCXx-K7pPLMNxA_fmQ6-+_pzR-AoODDA@mail.gmail.com
On 21.03.23 18:46, Andres Freund wrote:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
Btw., could attcacheoff be int16?
On Wed, 22 Mar 2023 at 10:42, Peter Eisentraut
<peter.eisentraut@enterprisedb.com> wrote:
On 21.03.23 18:46, Andres Freund wrote:
FWIW, I think we should consider getting rid of attcacheoff. I doubt it's
worth its weight these days, because deforming via slots starts at the
beginning anyway. The overhead of maintaining it is not insubstantial, and
it's just architecturally ugly to update tupledescs continually.
Btw., could attcacheoff be int16?
I had the same thought in '21, and in the patch linked upthread [0] I
added an extra comment on the field:
+ Note: Although the maximum offset encountered in stored tuples is
+ limited to the max BLCKSZ (2**15), FormData_pg_attribute is used for
+ all internal tuples as well, so attcacheoff may be larger for those
+ tuples, and it is therefore not safe to use int16.
So, we can't reduce its size while we use attcacheoff for
(de)serialization of tuples with up to MaxAttributeNumber (=INT16_MAX)
of attributes which each can be larger than one byte (such as in
tuplestore, tuplesort, spilling hash aggregates, ...)
Kind regards,
Matthias van de Meent
[0]: /messages/by-id/CAEze2Wh8-metSryZX_Ubj-uv6kb+2YnzHAejmEdubjhmGusBAg@mail.gmail.com
On 21.03.23 18:46, Andres Freund wrote:
I don't think we can find enough to make the impact zero bytes. It's also
not clear exactly what the impact of each byte would be (compared to
possible complications in other parts of the code, for example). But if
there are a few low-hanging fruit, it seems like we could pick them, to hold
us over until we have a better solution to the underlying issue.
attndims 4->2
attstattarget 4->2
attinhcount 4->2
+ some reordering only gets you from 112->108 unfortunately, due to a 1 byte
alignment hole and 2 bytes of trailing padding.
before:
/* size: 112, cachelines: 2, members: 22 */
/* sum members: 111, holes: 1, sum holes: 1 */
/* last cacheline: 48 bytes */
after:
/* size: 108, cachelines: 2, members: 22 */
/* sum members: 105, holes: 1, sum holes: 1 */
/* padding: 2 */
/* last cacheline: 44 bytes */
You might be able to fill the hole + padding with your data - but IIRC that
was 3 4byte integers?
Here is an updated patch that handles those three fields, including some
overflow checks. I also changed coninhcount to match attinhcount.
I structured the inhcount overflow checks to be independent of the
integer size, but maybe others find this approach weird.
Given the calculation shown, there is no value in reducing all three
fields versus just two, but I don't find compelling reasons to leave out
one or the other field. (attstattarget got the most discussion, but
that one is actually the easiest part of the patch.)
I took another hard look at some of the other proposals, including
moving some fields to the variable length part or combining some bool or
char fields. Those changes all appear to have a really long tail of
issues all over the code that I wouldn't want to attack them now in an
ad hoc way.
My suggestion is to use this patch and then consider the column
encryption patch as it stands now.
The discussion about attcacheoff seems to be still ongoing. But it
seems whatever the outcome would be independent of this patch: Either we
keep it or we remove it; there is no proposal to resize it.
Attachments:
v2-0001-Save-a-few-bytes-in-pg_attribute.patch (text/plain; charset=UTF-8) — Download
From cff1daee2f12b4d625b18e6a89cb5e033f7fdfa5 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut <peter@eisentraut.org>
Date: Thu, 23 Mar 2023 11:46:05 +0100
Subject: [PATCH v2] Save a few bytes in pg_attribute
attndims, attstattarget, and attinhcount can be converted to int16
(from int32).
---
doc/src/sgml/catalogs.sgml | 60 ++++++++++++++---------------
src/backend/access/common/tupdesc.c | 8 ++++
src/backend/catalog/heap.c | 11 ++++--
src/backend/catalog/index.c | 2 +-
src/backend/catalog/pg_constraint.c | 6 ++-
src/backend/commands/tablecmds.c | 28 ++++++++++++++
src/include/catalog/pg_attribute.h | 34 ++++++++--------
src/include/catalog/pg_constraint.h | 2 +-
8 files changed, 99 insertions(+), 52 deletions(-)
diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml
index 746baf5053..7c09ab3000 100644
--- a/doc/src/sgml/catalogs.sgml
+++ b/doc/src/sgml/catalogs.sgml
@@ -1164,23 +1164,6 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
- <row>
- <entry role="catalog_table_entry"><para role="column_definition">
- <structfield>attstattarget</structfield> <type>int4</type>
- </para>
- <para>
- <structfield>attstattarget</structfield> controls the level of detail
- of statistics accumulated for this column by
- <link linkend="sql-analyze"><command>ANALYZE</command></link>.
- A zero value indicates that no statistics should be collected.
- A negative value says to use the system default statistics target.
- The exact meaning of positive values is data type-dependent.
- For scalar data types, <structfield>attstattarget</structfield>
- is both the target number of <quote>most common values</quote>
- to collect, and the target number of histogram bins to create.
- </para></entry>
- </row>
-
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attlen</structfield> <type>int2</type>
@@ -1202,17 +1185,6 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
- <row>
- <entry role="catalog_table_entry"><para role="column_definition">
- <structfield>attndims</structfield> <type>int4</type>
- </para>
- <para>
- Number of dimensions, if the column is an array type; otherwise 0.
- (Presently, the number of dimensions of an array is not enforced,
- so any nonzero value effectively means <quote>it's an array</quote>.)
- </para></entry>
- </row>
-
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attcacheoff</structfield> <type>int4</type>
@@ -1237,6 +1209,17 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>attndims</structfield> <type>int2</type>
+ </para>
+ <para>
+ Number of dimensions, if the column is an array type; otherwise 0.
+ (Presently, the number of dimensions of an array is not enforced,
+ so any nonzero value effectively means <quote>it's an array</quote>.)
+ </para></entry>
+ </row>
+
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attbyval</structfield> <type>bool</type>
@@ -1362,7 +1345,7 @@ <title><structname>pg_attribute</structname> Columns</title>
<row>
<entry role="catalog_table_entry"><para role="column_definition">
- <structfield>attinhcount</structfield> <type>int4</type>
+ <structfield>attinhcount</structfield> <type>int2</type>
</para>
<para>
The number of direct ancestors this column has. A column with a
@@ -1370,6 +1353,23 @@ <title><structname>pg_attribute</structname> Columns</title>
</para></entry>
</row>
+ <row>
+ <entry role="catalog_table_entry"><para role="column_definition">
+ <structfield>attstattarget</structfield> <type>int2</type>
+ </para>
+ <para>
+ <structfield>attstattarget</structfield> controls the level of detail
+ of statistics accumulated for this column by
+ <link linkend="sql-analyze"><command>ANALYZE</command></link>.
+ A zero value indicates that no statistics should be collected.
+ A negative value says to use the system default statistics target.
+ The exact meaning of positive values is data type-dependent.
+ For scalar data types, <structfield>attstattarget</structfield>
+ is both the target number of <quote>most common values</quote>
+ to collect, and the target number of histogram bins to create.
+ </para></entry>
+ </row>
+
<row>
<entry role="catalog_table_entry"><para role="column_definition">
<structfield>attcollation</structfield> <type>oid</type>
@@ -2691,7 +2691,7 @@ <title><structname>pg_constraint</structname> Columns</title>
<row>
<entry role="catalog_table_entry"><para role="column_definition">
- <structfield>coninhcount</structfield> <type>int4</type>
+ <structfield>coninhcount</structfield> <type>int2</type>
</para>
<para>
The number of direct inheritance ancestors this constraint has.
diff --git a/src/backend/access/common/tupdesc.c b/src/backend/access/common/tupdesc.c
index 72a2c3d3db..7c5c390503 100644
--- a/src/backend/access/common/tupdesc.c
+++ b/src/backend/access/common/tupdesc.c
@@ -597,6 +597,8 @@ TupleDescInitEntry(TupleDesc desc,
Assert(PointerIsValid(desc));
Assert(attributeNumber >= 1);
Assert(attributeNumber <= desc->natts);
+ Assert(attdim >= 0);
+ Assert(attdim <= PG_INT16_MAX);
/*
* initialize the attribute fields
@@ -667,6 +669,8 @@ TupleDescInitBuiltinEntry(TupleDesc desc,
Assert(PointerIsValid(desc));
Assert(attributeNumber >= 1);
Assert(attributeNumber <= desc->natts);
+ Assert(attdim >= 0);
+ Assert(attdim <= PG_INT16_MAX);
/* initialize the attribute fields */
att = TupleDescAttr(desc, attributeNumber - 1);
@@ -827,6 +831,10 @@ BuildDescForRelation(List *schema)
attcollation = GetColumnDefCollation(NULL, entry, atttypid);
attdim = list_length(entry->typeName->arrayBounds);
+ if (attdim > PG_INT16_MAX)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many array dimensions"));
if (entry->typeName->setof)
ereport(ERROR,
diff --git a/src/backend/catalog/heap.c b/src/backend/catalog/heap.c
index 4f006820b8..2a0d82aedd 100644
--- a/src/backend/catalog/heap.c
+++ b/src/backend/catalog/heap.c
@@ -732,12 +732,11 @@ InsertPgAttributeTuples(Relation pg_attribute_rel,
slot[slotCount]->tts_values[Anum_pg_attribute_attname - 1] = NameGetDatum(&attrs->attname);
slot[slotCount]->tts_values[Anum_pg_attribute_atttypid - 1] = ObjectIdGetDatum(attrs->atttypid);
- slot[slotCount]->tts_values[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(attrs->attstattarget);
slot[slotCount]->tts_values[Anum_pg_attribute_attlen - 1] = Int16GetDatum(attrs->attlen);
slot[slotCount]->tts_values[Anum_pg_attribute_attnum - 1] = Int16GetDatum(attrs->attnum);
- slot[slotCount]->tts_values[Anum_pg_attribute_attndims - 1] = Int32GetDatum(attrs->attndims);
slot[slotCount]->tts_values[Anum_pg_attribute_attcacheoff - 1] = Int32GetDatum(-1);
slot[slotCount]->tts_values[Anum_pg_attribute_atttypmod - 1] = Int32GetDatum(attrs->atttypmod);
+ slot[slotCount]->tts_values[Anum_pg_attribute_attndims - 1] = Int16GetDatum(attrs->attndims);
slot[slotCount]->tts_values[Anum_pg_attribute_attbyval - 1] = BoolGetDatum(attrs->attbyval);
slot[slotCount]->tts_values[Anum_pg_attribute_attalign - 1] = CharGetDatum(attrs->attalign);
slot[slotCount]->tts_values[Anum_pg_attribute_attstorage - 1] = CharGetDatum(attrs->attstorage);
@@ -749,7 +748,8 @@ InsertPgAttributeTuples(Relation pg_attribute_rel,
slot[slotCount]->tts_values[Anum_pg_attribute_attgenerated - 1] = CharGetDatum(attrs->attgenerated);
slot[slotCount]->tts_values[Anum_pg_attribute_attisdropped - 1] = BoolGetDatum(attrs->attisdropped);
slot[slotCount]->tts_values[Anum_pg_attribute_attislocal - 1] = BoolGetDatum(attrs->attislocal);
- slot[slotCount]->tts_values[Anum_pg_attribute_attinhcount - 1] = Int32GetDatum(attrs->attinhcount);
+ slot[slotCount]->tts_values[Anum_pg_attribute_attinhcount - 1] = Int16GetDatum(attrs->attinhcount);
+ slot[slotCount]->tts_values[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(attrs->attstattarget);
slot[slotCount]->tts_values[Anum_pg_attribute_attcollation - 1] = ObjectIdGetDatum(attrs->attcollation);
if (attoptions && attoptions[natts] != (Datum) 0)
slot[slotCount]->tts_values[Anum_pg_attribute_attoptions - 1] = attoptions[natts];
@@ -2615,6 +2615,11 @@ MergeWithExistingConstraint(Relation rel, const char *ccname, Node *expr,
con->conislocal = true;
else
con->coninhcount++;
+
+ if (con->coninhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
}
if (is_no_inherit)
diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c
index 33e3d0ec05..4992ca1fce 100644
--- a/src/backend/catalog/index.c
+++ b/src/backend/catalog/index.c
@@ -1813,7 +1813,7 @@ index_concurrently_swap(Oid newIndexId, Oid oldIndexId, const char *oldName)
memset(repl_repl, false, sizeof(repl_repl));
repl_repl[Anum_pg_attribute_attstattarget - 1] = true;
- repl_val[Anum_pg_attribute_attstattarget - 1] = Int32GetDatum(attstattarget);
+ repl_val[Anum_pg_attribute_attstattarget - 1] = Int16GetDatum(attstattarget);
newTuple = heap_modify_tuple(attrTuple,
RelationGetDescr(pg_attribute),
diff --git a/src/backend/catalog/pg_constraint.c b/src/backend/catalog/pg_constraint.c
index 7392c72e90..fb684edfa9 100644
--- a/src/backend/catalog/pg_constraint.c
+++ b/src/backend/catalog/pg_constraint.c
@@ -190,7 +190,7 @@ CreateConstraintEntry(const char *constraintName,
values[Anum_pg_constraint_confdeltype - 1] = CharGetDatum(foreignDeleteType);
values[Anum_pg_constraint_confmatchtype - 1] = CharGetDatum(foreignMatchType);
values[Anum_pg_constraint_conislocal - 1] = BoolGetDatum(conIsLocal);
- values[Anum_pg_constraint_coninhcount - 1] = Int32GetDatum(conInhCount);
+ values[Anum_pg_constraint_coninhcount - 1] = Int16GetDatum(conInhCount);
values[Anum_pg_constraint_connoinherit - 1] = BoolGetDatum(conNoInherit);
if (conkeyArray)
@@ -805,6 +805,10 @@ ConstraintSetParentConstraint(Oid childConstrId,
constrForm->conislocal = false;
constrForm->coninhcount++;
+ if (constrForm->coninhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
constrForm->conparentid = parentConstrId;
CatalogTupleUpdate(constrRel, &tuple->t_self, newtup);
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 3e2c5f797c..27860e313e 100644
--- a/src/backend/commands/tablecmds.c
+++ b/src/backend/commands/tablecmds.c
@@ -2649,6 +2649,10 @@ MergeAttributes(List *schema, List *supers, char relpersistence,
*/
def->inhcount++;
+ if (def->inhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
newattmap->attnums[parent_attno - 1] = exist_attno;
}
@@ -3172,6 +3176,10 @@ MergeCheckConstraint(List *constraints, char *name, Node *expr)
{
/* OK to merge */
ccon->inhcount++;
+ if (ccon->inhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
return true;
}
@@ -6787,6 +6795,10 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
/* Bump the existing child att's inhcount */
childatt->attinhcount++;
+ if (childatt->attinhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
CatalogTupleUpdate(attrdesc, &tuple->t_self, tuple);
heap_freetuple(tuple);
@@ -6878,6 +6890,10 @@ ATExecAddColumn(List **wqueue, AlteredTableInfo *tab, Relation rel,
attribute.attstattarget = (newattnum > 0) ? -1 : 0;
attribute.attlen = tform->typlen;
attribute.attnum = newattnum;
+ if (list_length(colDef->typeName->arrayBounds) > PG_INT16_MAX)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many array dimensions"));
attribute.attndims = list_length(colDef->typeName->arrayBounds);
attribute.atttypmod = typmod;
attribute.attbyval = tform->typbyval;
@@ -12890,6 +12906,10 @@ ATExecAlterColumnType(AlteredTableInfo *tab, Relation rel,
attTup->atttypid = targettype;
attTup->atttypmod = targettypmod;
attTup->attcollation = targetcollid;
+ if (list_length(typeName->arrayBounds) > PG_INT16_MAX)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many array dimensions"));
attTup->attndims = list_length(typeName->arrayBounds);
attTup->attlen = tform->typlen;
attTup->attbyval = tform->typbyval;
@@ -15124,6 +15144,10 @@ MergeAttributesIntoExisting(Relation child_rel, Relation parent_rel)
* later on, this change will just roll back.)
*/
childatt->attinhcount++;
+ if (childatt->attinhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
/*
* In case of partitions, we must enforce that value of attislocal
@@ -15261,6 +15285,10 @@ MergeConstraintsIntoExisting(Relation child_rel, Relation parent_rel)
child_copy = heap_copytuple(child_tuple);
child_con = (Form_pg_constraint) GETSTRUCT(child_copy);
child_con->coninhcount++;
+ if (child_con->coninhcount < 0)
+ ereport(ERROR,
+ errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+ errmsg("too many inheritance parents"));
/*
* In case of partitions, an inherited constraint must be
diff --git a/src/include/catalog/pg_attribute.h b/src/include/catalog/pg_attribute.h
index b561e17781..f8b4861b94 100644
--- a/src/include/catalog/pg_attribute.h
+++ b/src/include/catalog/pg_attribute.h
@@ -52,15 +52,6 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
*/
Oid atttypid BKI_LOOKUP_OPT(pg_type);
- /*
- * attstattarget is the target number of statistics datapoints to collect
- * during VACUUM ANALYZE of this column. A zero here indicates that we do
- * not wish to collect any stats about this column. A "-1" here indicates
- * that no value has been explicitly set for this column, so ANALYZE
- * should use the default setting.
- */
- int32 attstattarget BKI_DEFAULT(-1);
-
/*
* attlen is a copy of the typlen field from pg_type for this attribute.
* See atttypid comments above.
@@ -82,12 +73,6 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
*/
int16 attnum;
- /*
- * attndims is the declared number of dimensions, if an array type,
- * otherwise zero.
- */
- int32 attndims;
-
/*
* fastgetattr() uses attcacheoff to cache byte offsets of attributes in
* heap tuples. The value actually stored in pg_attribute (-1) indicates
@@ -105,6 +90,12 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
*/
int32 atttypmod BKI_DEFAULT(-1);
+ /*
+ * attndims is the declared number of dimensions, if an array type,
+ * otherwise zero.
+ */
+ int16 attndims;
+
/*
* attbyval is a copy of the typbyval field from pg_type for this
* attribute. See atttypid comments above.
@@ -165,7 +156,18 @@ CATALOG(pg_attribute,1249,AttributeRelationId) BKI_BOOTSTRAP BKI_ROWTYPE_OID(75,
bool attislocal BKI_DEFAULT(t);
/* Number of times inherited from direct parent relation(s) */
- int32 attinhcount BKI_DEFAULT(0);
+ int16 attinhcount BKI_DEFAULT(0);
+
+ /*
+ * attstattarget is the target number of statistics datapoints to collect
+ * during VACUUM ANALYZE of this column. A zero here indicates that we do
+ * not wish to collect any stats about this column. A "-1" here indicates
+ * that no value has been explicitly set for this column, so ANALYZE
+ * should use the default setting.
+ *
+ * int16 is sufficient because the max value is currently 10000.
+ */
+ int16 attstattarget BKI_DEFAULT(-1);
/* attribute's collation, if any */
Oid attcollation BKI_LOOKUP_OPT(pg_collation);
diff --git a/src/include/catalog/pg_constraint.h b/src/include/catalog/pg_constraint.h
index 96889fddfa..16bf5f5576 100644
--- a/src/include/catalog/pg_constraint.h
+++ b/src/include/catalog/pg_constraint.h
@@ -102,7 +102,7 @@ CATALOG(pg_constraint,2606,ConstraintRelationId)
bool conislocal;
/* Number of times inherited from direct parent relation(s) */
- int32 coninhcount;
+ int16 coninhcount;
/* Has a local definition and cannot be inherited */
bool connoinherit;
base-commit: ecb696527c01908d54b7a7aa2bd9179585b46459
--
2.40.0