Use compiler intrinsics for bit ops in hash
Folks,
The recent patch for distinct windowing aggregates contained a partial
fix of the FIXME that didn't seem entirely right, so I extracted that
part, changed it to use compiler intrinsics, and submit it here.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v1-0001-Use-compiler-intrinsics-for-bit-ops-in-hash.patch (text/x-diff; charset=us-ascii) — Download
From 2d0022be8cb117da0eadc769033330d6558853e4 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Tue, 14 Jan 2020 09:32:15 -0800
Subject: [PATCH v1] Use compiler intrinsics for bit ops in hash
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
- In passing, fix the FIXME by centralizing those calls.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..b1e04c0136 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -543,11 +544,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
+ i = pg_leftmost_one_pos32(metap->hashm_bsize);
Assert(i > 0);
metap->hashm_bmsize = 1 << i;
metap->hashm_bmshift = i + BYTE_TO_BIT;
@@ -570,7 +567,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +654,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9fc0696096..298c05e6fe 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..cc9338da25 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -145,4 +145,32 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* calculate first power of 2 >= num
+ * per https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ * using BSR where available */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
Hi David,
On Tue, Jan 14, 2020 at 9:36 AM David Fetter <david@fetter.org> wrote:
Folks,
The recent patch for distinct windowing aggregates contained a partial
fix of the FIXME that didn't seem entirely right, so I extracted that
part, changed it to use compiler intrinsics, and submit it here.
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..cc9338da25 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -145,4 +145,32 @@ pg_rotate_right32(uint32 word, int n)
 	return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
 }
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+	return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+	return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* calculate first power of 2 >= num
+ * per https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ * using BSR where available */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+	return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+	return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
 #endif							/* PG_BITUTILS_H */
1. Is ceil_log2_64 dead code?
2. The new utilities added here (ceil_log2_32 and company,
next_power_of_2_32 and company) all require num > 1, but don't clearly
Assert (or at the very least document) so.
3. A couple of the callers can actively pass in an argument of 1, e.g.
from _hash_spareindex in hashutil.c, while some other callers are iffy
at best (simplehash.h maybe?)
4. It seems like you *really* would like an operation like LZCNT in x86
(first appearing in Haswell) that is well defined on zero input. ISTM
the alternatives are:
a) Special case 1. That seems straightforward, but the branching cost
on a seemingly unlikely condition seems to be a lot of performance
loss
b) Use architecture specific intrinsic (and possibly with CPUID
shenanigans) like __builtin_ia32_lzcnt_u64 on x86 and use the CLZ
intrinsic elsewhere. The CLZ GCC intrinsic seems to map to
instructions that are well defined on zero in most ISA's other than
x86, so maybe we can get away with special-casing x86?
Cheers,
Jesse
On Tue, Jan 14, 2020 at 12:21:41PM -0800, Jesse Zhang wrote:
Hi David,
On Tue, Jan 14, 2020 at 9:36 AM David Fetter <david@fetter.org> wrote:
Folks,
The recent patch for distinct windowing aggregates contained a partial
fix of the FIXME that didn't seem entirely right, so I extracted that
part, changed it to use compiler intrinsics, and submit it here.
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:
Thanks for looking at this!
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..cc9338da25 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -145,4 +145,32 @@ pg_rotate_right32(uint32 word, int n)
 	return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
 }
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+	return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+	return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* calculate first power of 2 >= num
+ * per https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ * using BSR where available */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+	return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+	return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
 #endif							/* PG_BITUTILS_H */
1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.
2. The new utilities added here (ceil_log2_32 and company,
next_power_of_2_32 and company) all require num > 1, but don't clearly
Assert (or at the very least document) so.
Assert()ed.
3. A couple of the callers can actively pass in an argument of 1, e.g.
from _hash_spareindex in hashutil.c, while some other callers are iffy
at best (simplehash.h maybe?)
What would you recommend be done about this?
4. It seems like you *really* would like an operation like LZCNT in x86
(first appearing in Haswell) that is well defined on zero input. ISTM
the alternatives are:
a) Special case 1. That seems straightforward, but the branching cost
on a seemingly unlikely condition seems to be a lot of performance
loss
b) Use architecture specific intrinsic (and possibly with CPUID
shenanigans) like __builtin_ia32_lzcnt_u64 on x86 and use the CLZ
intrinsic elsewhere. The CLZ GCC intrinsic seems to map to
instructions that are well defined on zero in most ISA's other than
x86, so maybe we can get away with special-casing x86?
b) seems much more attractive. Is there some way to tilt the tools so
that this happens? What should I be reading up on?
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v2-0001-Use-compiler-intrinsics-for-bit-ops-in-hash.patch (text/x-diff; charset=us-ascii) — Download
From b542c9efb4f4ec3999a9e6a6b180d98fca3a5101 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Tue, 14 Jan 2020 09:32:15 -0800
Subject: [PATCH v2] Use compiler intrinsics for bit ops in hash
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
- In passing, fix the FIXME by centralizing those calls.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..b1e04c0136 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -543,11 +544,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
+ i = pg_leftmost_one_pos32(metap->hashm_bsize);
Assert(i > 0);
metap->hashm_bmsize = 1 << i;
metap->hashm_bmshift = i + BYTE_TO_BIT;
@@ -570,7 +567,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +654,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9fc0696096..298c05e6fe 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..f968bd65d7 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -145,4 +145,36 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ Assert(num > 1);
+ return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ Assert(num > 1);
+ return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* calculate first power of 2 >= num
+ * per https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
+ * using BSR where available */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ Assert(num > 1);
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ Assert(num > 1);
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:
1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.
While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions).
On Tue, Jan 14, 2020 at 12:21:41PM -0800, Jesse Zhang wrote:
4. It seems like you *really* would like an operation like LZCNT in x86
(first appearing in Haswell) that is well defined on zero input. ISTM
the alternatives are:
a) Special case 1. That seems straightforward, but the branching cost
on a seemingly unlikely condition seems to be a lot of performance
loss
b) Use architecture specific intrinsic (and possibly with CPUID
shenanigans) like __builtin_ia32_lzcnt_u64 on x86 and use the CLZ
intrinsic elsewhere. The CLZ GCC intrinsic seems to map to
instructions that are well defined on zero in most ISA's other than
x86, so maybe we can get away with special-casing x86?
i. We can detect LZCNT instruction by checking one of the
"extended feature" (EAX=80000001) bits using CPUID. Unlike the
"basic features" (EAX=1), extended feature flags have been more
vendor-specific, but fortunately it seems that the feature bit
for LZCNT is the same [1][2].
[1] "How to detect New Instruction support in the 4th generation Intel® Core™ processor family" https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
[2] "Bit Manipulation Instruction Sets" https://en.wikipedia.org/wiki/Bit_Manipulation_Instruction_Sets
ii. We'll most likely still need to provide a fallback
implementation for processors that don't have LZCNT (either
because they are from a different vendor, or an older Intel/AMD
processor). I wonder if simply checking for 1 is "good enough".
Maybe a micro benchmark is in order?
Is there some way to tilt the tools so that this happens?
We have a couple options here:
1. Use a separate object (a la our SSE 4.2 implemenation of
CRC). On Clang and GCC (I don't have MSVC at hand), -mabm or
-mlzcnt should cause __builtin_clz to generate the LZCNT
instruction, which is well defined on zero input. The default
configuration would translate __builtin_clz to code that
subtracts BSR from the width of the input, but BSR leaves the
destination undefined on zero input.
2. (My least favorite) use inline asm (a la our popcount
implementation).
b) seems much more attractive. Is there some way to tilt the tools so
that this happens? What should I be reading up on?
The enclosed references hopefully are good places to start. Let
me know if you have more ideas.
Cheers,
Jesse
References:
[1]: "How to detect New Instruction support in the 4th generation Intel® Core™ processor family" https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
Core™ processor family"
https://software.intel.com/en-us/articles/how-to-detect-new-instruction-support-in-the-4th-generation-intel-core-processor-family
[2]: "Bit Manipulation Instruction Sets" https://en.wikipedia.org/wiki/Bit_Manipulation_Instruction_Sets
https://en.wikipedia.org/wiki/Bit_Manipulation_Instruction_Sets
On Wed, Jan 15, 2020 at 6:09 AM David Fetter <david@fetter.org> wrote:
[v2 patch]
Hi David,
I have a stylistic comment on this snippet:
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
+ i = pg_leftmost_one_pos32(metap->hashm_bsize);
Assert(i > 0);
metap->hashm_bmsize = 1 << i;
metap->hashm_bmshift = i + BYTE_TO_BIT;
Naming the variable "i" made sense when it was a loop counter, but it
seems out of place now. Same with the Assert.
Also, this
+ * using BSR where available */
is not directly tied to anything in this function, or even in the
function it calls, and could get out of date easily.
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Sat, Jan 18, 2020 at 11:46:24AM +0800, John Naylor wrote:
On Wed, Jan 15, 2020 at 6:09 AM David Fetter <david@fetter.org> wrote:
[v2 patch]
Hi David,
I have a stylistic comment on this snippet:
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
-     if ((1 << i) <= metap->hashm_bsize)
-         break;
- }
+ i = pg_leftmost_one_pos32(metap->hashm_bsize);
  Assert(i > 0);
  metap->hashm_bmsize = 1 << i;
  metap->hashm_bmshift = i + BYTE_TO_BIT;
Naming the variable "i" made sense when it was a loop counter, but it
seems out of place now. Same with the Assert.
Fixed by removing the variable entirely.
Also, this
+ * using BSR where available */
is not directly tied to anything in this function, or even in the
function it calls, and could get out of date easily.
Removed.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v3-0001-Use-compiler-intrinsics-for-bit-ops-in-hash.patch (text/x-diff; charset=us-ascii) — Download
From e7716fad797362308a19d4a78304e0e167d37f48 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Tue, 14 Jan 2020 09:32:15 -0800
Subject: [PATCH v3] Use compiler intrinsics for bit ops in hash
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
- In passing, fix the FIXME by centralizing those calls.
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..29dca21be2 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -502,7 +503,6 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -543,14 +543,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ metap->hashm_bmsize = 1 << pg_leftmost_one_pos32(metap->hashm_bsize);
+ metap->hashm_bmshift = pg_leftmost_one_pos32(metap->hashm_bsize) + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +564,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +651,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9fc0696096..298c05e6fe 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..2ba608f520 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -145,4 +145,34 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ Assert(num > 1);
+ return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ Assert(num > 1);
+ return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* Calculate the first power of 2 >= num */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ Assert(num > 1);
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ Assert(num > 1);
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
On Wed, Jan 15, 2020 at 03:45:12PM -0800, Jesse Zhang wrote:
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:

1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.

While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions).
Done in the expanded patches attached.
On Tue, Jan 14, 2020 at 12:21:41PM -0800, Jesse Zhang wrote:
4. It seems like you *really* would like an operation like LZCNT in x86
(first appearing in Haswell) that is well defined on zero input. ISTM
the alternatives are:

a) Special case 1. That seems straightforward, but the branching cost
on a seemingly unlikely condition seems to be a lot of performance
loss

b) Use architecture specific intrinsic (and possibly with CPUID
shenanigans) like __builtin_ia32_lzcnt_u64 on x86 and use the CLZ
intrinsic elsewhere. The CLZ GCC intrinsic seems to map to
instructions that are well defined on zero in most ISA's other than
x86, so maybe we can get away with special-casing x86?i. We can detect LZCNT instruction by checking one of the
"extended feature" (EAX=80000001) bits using CPUID. Unlike the
"basic features" (EAX=1), extended feature flags have been more
vendor-specific, but fortunately it seems that the feature bit
for LZCNT is the same [1][2].

ii. We'll most likely still need to provide a fallback
implementation for processors that don't have LZCNT (either
because they are from a different vendor, or an older Intel/AMD
processor). I wonder if simply checking for 1 is "good enough".
Maybe a micro benchmark is in order?
I'm not sure how I'd run one on the architectures we support. What
I've done here is generalize our implementation to be basically like
LZCNT and TZCNT at the cost of a brief branch that might go away at
runtime.
2. (My least favorite) use inline asm (a la our popcount
implementation).
Yeah, I'd like to fix that, but I kept the scope of this one
relatively narrow.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v4-0001-de-long-ify.patchtext/x-diff; charset=us-asciiDownload
From 5fcaa74146206e4de05ca8cbd863aca20bba94bf Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Wed, 29 Jan 2020 02:09:59 -0800
Subject: [PATCH v4 1/2] de-long-ify
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 4b562d8d3f..482a569814 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -34,11 +34,11 @@ static void gistPlaceItupToPage(GISTNodeBufferPage *pageBuffer,
IndexTuple item);
static void gistGetItupFromPage(GISTNodeBufferPage *pageBuffer,
IndexTuple *item);
-static long gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
-static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum);
+static uint64 gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
+static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum);
-static void ReadTempFileBlock(BufFile *file, long blknum, void *ptr);
-static void WriteTempFileBlock(BufFile *file, long blknum, void *ptr);
+static void ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
+static void WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
/*
@@ -64,7 +64,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
/* Initialize free page management. */
gfbb->nFreeBlocks = 0;
gfbb->freeBlocksLen = 32;
- gfbb->freeBlocks = (long *) palloc(gfbb->freeBlocksLen * sizeof(long));
+ gfbb->freeBlocks = (int64 *) palloc(gfbb->freeBlocksLen * sizeof(int64));
/*
* Current memory context will be used for all in-memory data structures
@@ -469,7 +469,7 @@ gistPopItupFromNodeBuffer(GISTBuildBuffers *gfbb, GISTNodeBuffer *nodeBuffer,
/*
* Select a currently unused block for writing to.
*/
-static long
+static uint64
gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
{
/*
@@ -487,7 +487,7 @@ gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
* Return a block# to the freelist.
*/
static void
-gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
+gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum)
{
int ndx;
@@ -495,9 +495,9 @@ gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
if (gfbb->nFreeBlocks >= gfbb->freeBlocksLen)
{
gfbb->freeBlocksLen *= 2;
- gfbb->freeBlocks = (long *) repalloc(gfbb->freeBlocks,
+ gfbb->freeBlocks = (int64 *) repalloc(gfbb->freeBlocks,
gfbb->freeBlocksLen *
- sizeof(long));
+ sizeof(uint64));
}
/* Add blocknum to array */
@@ -755,7 +755,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
*/
static void
-ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
+ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
@@ -764,7 +764,7 @@ ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
}
static void
-WriteTempFileBlock(BufFile *file, long blknum, void *ptr)
+WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index c46764bf42..4fc478640a 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -70,7 +70,7 @@ static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
static void _SPI_error_callback(void *arg);
static void _SPI_cursor_operation(Portal portal,
- FetchDirection direction, long count,
+ FetchDirection direction, uint64 count,
DestReceiver *dest);
static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
@@ -493,7 +493,7 @@ SPI_inside_nonatomic_context(void)
/* Parse, plan, and execute a query string */
int
-SPI_execute(const char *src, bool read_only, long tcount)
+SPI_execute(const char *src, bool read_only, int64 tcount)
{
_SPI_plan plan;
int res;
@@ -521,7 +521,7 @@ SPI_execute(const char *src, bool read_only, long tcount)
/* Obsolete version of SPI_execute */
int
-SPI_exec(const char *src, long tcount)
+SPI_exec(const char *src, int64 tcount)
{
return SPI_execute(src, false, tcount);
}
@@ -529,7 +529,7 @@ SPI_exec(const char *src, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -555,7 +555,7 @@ SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
/* Obsolete version of SPI_execute_plan */
int
-SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
+SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, int64 tcount)
{
return SPI_execute_plan(plan, Values, Nulls, false, tcount);
}
@@ -563,7 +563,7 @@ SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -599,7 +599,7 @@ int
SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount)
+ bool read_only, bool fire_triggers, int64 tcount)
{
int res;
@@ -633,7 +633,7 @@ int
SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
_SPI_plan plan;
@@ -1530,7 +1530,7 @@ SPI_cursor_find(const char *name)
* Fetch rows in a cursor
*/
void
-SPI_cursor_fetch(Portal portal, bool forward, long count)
+SPI_cursor_fetch(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1545,7 +1545,7 @@ SPI_cursor_fetch(Portal portal, bool forward, long count)
* Move in a cursor
*/
void
-SPI_cursor_move(Portal portal, bool forward, long count)
+SPI_cursor_move(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1559,7 +1559,7 @@ SPI_cursor_move(Portal portal, bool forward, long count)
* Fetch rows in a scrollable cursor
*/
void
-SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal,
direction, count,
@@ -1574,7 +1574,7 @@ SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
* Move in a scrollable cursor
*/
void
-SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_move(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal, direction, count, None_Receiver);
}
@@ -2567,7 +2567,7 @@ _SPI_error_callback(void *arg)
* Do a FETCH or MOVE in a cursor
*/
static void
-_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
+_SPI_cursor_operation(Portal portal, FetchDirection direction, uint64 count,
DestReceiver *dest)
{
uint64 nfetched;
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 35e8f12e62..db73a9f159 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -749,7 +749,7 @@ BufFileTell(BufFile *file, int *fileno, off_t *offset)
* impossible seek is attempted.
*/
int
-BufFileSeekBlock(BufFile *file, long blknum)
+BufFileSeekBlock(BufFile *file, uint64 blknum)
{
return BufFileSeek(file,
(int) (blknum / BUFFILE_SEG_SIZE),
@@ -760,13 +760,11 @@ BufFileSeekBlock(BufFile *file, long blknum)
#ifdef NOT_USED
/*
* BufFileTellBlock --- block-oriented tell
- *
- * Any fractional part of a block in the current seek position is ignored.
*/
-long
+uint64
BufFileTellBlock(BufFile *file)
{
- long blknum;
+ uint64 blknum;
blknum = (file->curOffset + file->pos) / BLCKSZ;
blknum += file->curFile * BUFFILE_SEG_SIZE;
@@ -820,10 +818,10 @@ BufFileSize(BufFile *file)
* begins. Caller should apply this as an offset when working off block
* positions that are in terms of the original BufFile space.
*/
-long
+uint64
BufFileAppend(BufFile *target, BufFile *source)
{
- long startBlock = target->numFiles * BUFFILE_SEG_SIZE;
+ uint64 startBlock = target->numFiles * BUFFILE_SEG_SIZE;
int newNumFiles = target->numFiles + source->numFiles;
int i;
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index d677ffbda7..29219690f8 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -328,7 +328,7 @@ DisownLatch(Latch *latch)
* function returns immediately.
*
* The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
- * is given. Although it is declared as "long", we don't actually support
+ * is given. Although it is declared as "int64", we don't actually support
* timeouts longer than INT_MAX milliseconds. Note that some extra overhead
* is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
*
@@ -341,7 +341,7 @@ DisownLatch(Latch *latch)
* we return all of them in one call, but we will return at least one.
*/
int
-WaitLatch(Latch *latch, int wakeEvents, long timeout,
+WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info)
{
return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout,
@@ -367,7 +367,7 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout,
*/
int
WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
- long timeout, uint32 wait_event_info)
+ int64 timeout, uint32 wait_event_info)
{
int ret = 0;
int rc;
@@ -950,14 +950,14 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
* values associated with the registered event.
*/
int
-WaitEventSetWait(WaitEventSet *set, long timeout,
+WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info)
{
int returned_events = 0;
instr_time start_time;
instr_time cur_time;
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
Assert(nevents > 0);
@@ -1042,7 +1042,7 @@ WaitEventSetWait(WaitEventSet *set, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - INSTR_TIME_GET_MILLISEC(cur_time);
if (cur_timeout <= 0)
break;
}
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 2892a573e4..582c63a5e6 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -335,8 +335,8 @@ InitShmemIndex(void)
*/
HTAB *
ShmemInitHash(const char *name, /* table string name for shmem index */
- long init_size, /* initial table size */
- long max_size, /* max size of the table */
+ uint64 init_size, /* initial table size */
+ uint64 max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
int hash_flags) /* info about infoP */
{
diff --git a/src/backend/storage/lmgr/condition_variable.c b/src/backend/storage/lmgr/condition_variable.c
index 37b6a4eecd..43c662aaeb 100644
--- a/src/backend/storage/lmgr/condition_variable.c
+++ b/src/backend/storage/lmgr/condition_variable.c
@@ -129,10 +129,10 @@ ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
* See ConditionVariableSleep() for general usage.
*/
bool
-ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info)
{
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
instr_time start_time;
instr_time cur_time;
@@ -217,7 +217,7 @@ ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - (int64) INSTR_TIME_GET_MILLISEC(cur_time);
/* Have we crossed the timeout threshold? */
if (cur_timeout <= 0)
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 0a6f80963b..9bb9c1fda9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -109,7 +109,7 @@ int PostAuthDelay = 0;
*/
/* max_stack_depth converted to bytes for speed of checking */
-static long max_stack_depth_bytes = 100 * 1024L;
+static uint64 max_stack_depth_bytes = 100 * 1024UL;
/*
* Stack base pointer -- initialized by PostmasterMain and inherited by
@@ -2016,7 +2016,7 @@ exec_bind_message(StringInfo input_message)
* Process an "Execute" message for a portal
*/
static void
-exec_execute_message(const char *portal_name, long max_rows)
+exec_execute_message(const char *portal_name, uint64 max_rows)
{
CommandDest dest;
DestReceiver *receiver;
@@ -2302,7 +2302,7 @@ check_log_duration(char *msec_str, bool was_logged)
if (log_duration || log_min_duration_sample >= 0 ||
log_min_duration_statement >= 0 || xact_is_sampled)
{
- long secs;
+ int64 secs;
int usecs;
int msecs;
bool exceeded_duration;
@@ -3302,12 +3302,12 @@ bool
stack_is_too_deep(void)
{
char stack_top_loc;
- long stack_depth;
+ int64 stack_depth;
/*
* Compute distance from reference point to my local variables
*/
- stack_depth = (long) (stack_base_ptr - &stack_top_loc);
+ stack_depth = (int64) (stack_base_ptr - &stack_top_loc);
/*
* Take abs value, since stacks grow up on some machines, down on others
@@ -3336,7 +3336,7 @@ stack_is_too_deep(void)
* Note we assume that the same max_stack_depth applies to both stacks.
*/
#if defined(__ia64__) || defined(__ia64)
- stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
+ stack_depth = (int64) (ia64_get_bsp() - register_stack_base_ptr);
if (stack_depth > max_stack_depth_bytes &&
register_stack_base_ptr != NULL)
@@ -3350,8 +3350,8 @@ stack_is_too_deep(void)
bool
check_max_stack_depth(int *newval, void **extra, GucSource source)
{
- long newval_bytes = *newval * 1024L;
- long stack_rlimit = get_stack_depth_rlimit();
+ uint64 newval_bytes = *newval * 1024UL;
+ uint64 stack_rlimit = get_stack_depth_rlimit();
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
@@ -3367,7 +3367,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
void
assign_max_stack_depth(int newval, void *extra)
{
- long newval_bytes = newval * 1024L;
+ uint64 newval_bytes = newval * 1024UL;
max_stack_depth_bytes = newval_bytes;
}
@@ -4696,7 +4696,7 @@ static void
log_disconnections(int code, Datum arg)
{
Port *port = MyProcPort;
- long secs;
+ int64 secs;
int usecs;
int msecs;
int hours,
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 0f5801e046..f519ce7def 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -44,7 +44,7 @@ static void ProcessQuery(PlannedStmt *plan,
static void FillPortalStore(Portal portal, bool isTopLevel);
static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
DestReceiver *dest);
-static uint64 PortalRunSelect(Portal portal, bool forward, long count,
+static uint64 PortalRunSelect(Portal portal, bool forward, int64 count,
DestReceiver *dest);
static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
bool isTopLevel, bool setHoldSnapshot,
@@ -55,7 +55,7 @@ static void PortalRunMulti(Portal portal,
char *completionTag);
static uint64 DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
static void DoPortalRewind(Portal portal);
@@ -683,7 +683,7 @@ PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
* suspended due to exhaustion of the count parameter.
*/
bool
-PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
+PortalRun(Portal portal, int64 count, bool isTopLevel, bool run_once,
DestReceiver *dest, DestReceiver *altdest,
char *completionTag)
{
@@ -871,7 +871,7 @@ PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
static uint64
PortalRunSelect(Portal portal,
bool forward,
- long count,
+ int64 count,
DestReceiver *dest)
{
QueryDesc *queryDesc;
@@ -1391,7 +1391,7 @@ PortalRunMulti(Portal portal,
uint64
PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
uint64 result;
@@ -1493,7 +1493,7 @@ PortalRunFetch(Portal portal,
static uint64
DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
bool forward;
@@ -1531,7 +1531,7 @@ DoPortalRunFetch(Portal portal,
* In practice, if the goal is less than halfway back to the
* start, it's better to scan from where we are.
*
- * Also, if current portalPos is outside the range of "long",
+ * Also, if current portalPos is outside the range of "int64",
* do it the hard way to avoid possible overflow of the count
* argument to PortalRunSelect. We must exclude exactly
* LONG_MAX, as well, lest the count look like FETCH_ALL.
@@ -1549,7 +1549,7 @@ DoPortalRunFetch(Portal portal,
}
else
{
- long pos = (long) portal->portalPos;
+ int64 pos = (int64) portal->portalPos;
if (portal->atEnd)
pos++; /* need one extra fetch if off end */
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 0b6c9d5ea8..ddd471c059 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -1655,7 +1655,7 @@ timeofday(PG_FUNCTION_ARGS)
*/
void
TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs)
+ int64 *secs, int *microsecs)
{
TimestampTz diff = stop_time - start_time;
@@ -1666,7 +1666,7 @@ TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
}
else
{
- *secs = (long) (diff / USECS_PER_SEC);
+ *secs = (int64) (diff / USECS_PER_SEC);
*microsecs = (int) (diff % USECS_PER_SEC);
}
}
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index c9026f0e1a..43d5584eab 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -141,7 +141,7 @@ typedef HASHBUCKET *HASHSEGMENT;
typedef struct
{
slock_t mutex; /* spinlock for this freelist */
- long nentries; /* number of entries in associated buckets */
+ uint64 nentries; /* number of entries in associated buckets */
HASHELEMENT *freeList; /* chain of free elements */
} FreeListData;
@@ -169,8 +169,8 @@ struct HASHHDR
/* These fields can change, but not in a partitioned table */
/* Also, dsize can't change in a shared table, even if unpartitioned */
- long dsize; /* directory size */
- long nsegs; /* number of allocated segments (<= dsize) */
+ uint64 dsize; /* directory size */
+ uint64 nsegs; /* number of allocated segments (<= dsize) */
uint32 max_bucket; /* ID of maximum bucket in use */
uint32 high_mask; /* mask to modulo into entire table */
uint32 low_mask; /* mask to modulo into lower half of table */
@@ -178,10 +178,10 @@ struct HASHHDR
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
- long ffactor; /* target fill factor */
- long max_dsize; /* 'dsize' limit if directory is fixed size */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 num_partitions; /* # partitions (must be power of 2), or 0 */
+ uint64 ffactor; /* target fill factor */
+ uint64 max_dsize; /* 'dsize' limit if directory is fixed size */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
@@ -191,8 +191,8 @@ struct HASHHDR
* Count statistics here. NB: stats code doesn't bother with mutex, so
* counts could be corrupted a bit in a partitioned table.
*/
- long accesses;
- long collisions;
+ uint64 accesses;
+ uint64 collisions;
#endif
};
@@ -223,7 +223,7 @@ struct HTAB
/* We keep local copies of these fixed values to reduce contention */
Size keysize; /* hash key length in bytes */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
};
@@ -244,9 +244,9 @@ struct HTAB
#define MOD(x,y) ((x) & ((y)-1))
#ifdef HASH_STATISTICS
-static long hash_accesses,
- hash_collisions,
- hash_expansions;
+static uint64 hash_accesses,
+ hash_collisions,
+ hash_expansions;
#endif
/*
@@ -260,10 +260,10 @@ static bool expand_table(HTAB *hashp);
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
static void hdefault(HTAB *hashp);
static int choose_nelem_alloc(Size entrysize);
-static bool init_htab(HTAB *hashp, long nelem);
+static bool init_htab(HTAB *hashp, uint64 nelem);
static void hash_corrupted(HTAB *hashp);
-static long next_pow2_long(long num);
-static int next_pow2_int(long num);
+static uint64 next_pow2_uint64(uint64 num);
+static int next_pow2_int(uint64 num);
static void register_seq_scan(HTAB *hashp);
static void deregister_seq_scan(HTAB *hashp);
static bool has_seq_scans(HTAB *hashp);
@@ -313,7 +313,7 @@ string_compare(const char *key1, const char *key2, Size keysize)
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
-hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, uint64 nelem, HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
@@ -633,7 +633,7 @@ choose_nelem_alloc(Size entrysize)
* arrays
*/
static bool
-init_htab(HTAB *hashp, long nelem)
+init_htab(HTAB *hashp, uint64 nelem)
{
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT *segp;
@@ -729,10 +729,10 @@ init_htab(HTAB *hashp, long nelem)
* NB: assumes that all hash structure parameters have default values!
*/
Size
-hash_estimate_size(long num_entries, Size entrysize)
+hash_estimate_size(uint64 num_entries, Size entrysize)
{
Size size;
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries,
nElementAllocs,
@@ -740,9 +740,9 @@ hash_estimate_size(long num_entries, Size entrysize)
elementAllocCnt;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -775,17 +775,17 @@ hash_estimate_size(long num_entries, Size entrysize)
*
* XXX this had better agree with the behavior of init_htab()...
*/
-long
-hash_select_dirsize(long num_entries)
+uint64
+hash_select_dirsize(uint64 num_entries)
{
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -836,8 +836,8 @@ hash_stats(const char *where, HTAB *hashp)
fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
where, hashp->hctl->accesses, hashp->hctl->collisions);
- fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
- hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
+ fprintf(stderr, "hash_stats: entries %ld keysize %llu maxp %u segmentcount %ld\n",
+ hash_get_num_entries(hashp), (uint64) hashp->hctl->keysize,
hashp->hctl->max_bucket, hashp->hctl->nsegs);
fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
where, hash_accesses, hash_collisions);
@@ -926,8 +926,8 @@ hash_search_with_hash_value(HTAB *hashp,
int freelist_idx = FREELIST_IDX(hctl, hashvalue);
Size keysize;
uint32 bucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -954,7 +954,7 @@ hash_search_with_hash_value(HTAB *hashp,
* order of these tests is to try to check cheaper conditions first.
*/
if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
- hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+ hctl->freeList[0].nentries / (uint64) (hctl->max_bucket + 1) >= hctl->ffactor &&
!has_seq_scans(hashp))
(void) expand_table(hashp);
}
@@ -1122,8 +1122,8 @@ hash_update_hash_key(HTAB *hashp,
Size keysize;
uint32 bucket;
uint32 newbucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -1331,11 +1331,11 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
/*
* hash_get_num_entries -- get the number of entries in a hashtable
*/
-long
+uint64
hash_get_num_entries(HTAB *hashp)
{
int i;
- long sum = hashp->hctl->freeList[0].nentries;
+ uint64 sum = hashp->hctl->freeList[0].nentries;
/*
* We currently don't bother with acquiring the mutexes; it's only
@@ -1391,9 +1391,9 @@ hash_seq_search(HASH_SEQ_STATUS *status)
HTAB *hashp;
HASHHDR *hctl;
uint32 max_bucket;
- long ssize;
- long segment_num;
- long segment_ndx;
+ uint64 ssize;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
uint32 curBucket;
HASHELEMENT *curElem;
@@ -1504,11 +1504,11 @@ expand_table(HTAB *hashp)
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT old_seg,
new_seg;
- long old_bucket,
+ uint64 old_bucket,
new_bucket;
- long new_segnum,
+ uint64 new_segnum,
new_segndx;
- long old_segnum,
+ uint64 old_segnum,
old_segndx;
HASHBUCKET *oldlink,
*newlink;
@@ -1576,7 +1576,7 @@ expand_table(HTAB *hashp)
currElement = nextElement)
{
nextElement = currElement->link;
- if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
+ if ((uint64) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
{
*oldlink = currElement;
oldlink = &currElement->link;
@@ -1600,9 +1600,9 @@ dir_realloc(HTAB *hashp)
{
HASHSEGMENT *p;
HASHSEGMENT *old_p;
- long new_dsize;
- long old_dirsize;
- long new_dirsize;
+ uint64 new_dsize;
+ uint64 old_dirsize;
+ uint64 new_dirsize;
if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
return false;
@@ -1715,10 +1715,10 @@ hash_corrupted(HTAB *hashp)
/* calculate ceil(log base 2) of num */
int
-my_log2(long num)
+my_log2(uint64 num)
{
int i;
- long limit;
+ uint64 limit;
/* guard against too-large input, which would put us into infinite loop */
- if (num > LONG_MAX / 2)
+ if (num > PG_UINT64_MAX / 2)
@@ -1729,9 +1729,9 @@ my_log2(long num)
return i;
}
-/* calculate first power of 2 >= num, bounded to what will fit in a long */
-static long
-next_pow2_long(long num)
+/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
+static uint64
+next_pow2_uint64(uint64 num)
{
/* my_log2's internal range check is sufficient */
- return 1L << my_log2(num);
+ return UINT64CONST(1) << my_log2(num);
@@ -1739,7 +1739,7 @@ next_pow2_long(long num)
/* calculate first power of 2 >= num, bounded to what will fit in an int */
static int
-next_pow2_int(long num)
+next_pow2_int(uint64 num)
{
if (num > INT_MAX / 2)
num = INT_MAX / 2;
diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c
index 361c15614e..05d2fd581c 100644
--- a/src/backend/utils/misc/sampling.c
+++ b/src/backend/utils/misc/sampling.c
@@ -37,7 +37,7 @@
*/
BlockNumber
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize,
- long randseed)
+ uint64 randseed)
{
bs->N = nblocks; /* measured table size */
@@ -230,7 +230,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n)
*----------
*/
void
-sampler_random_init_state(long seed, SamplerRandomState randstate)
+sampler_random_init_state(uint64 seed, SamplerRandomState randstate)
{
randstate[0] = 0x330e; /* same as pg_erand48, but could be anything */
randstate[1] = (unsigned short) seed;
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 42cfb1f9f9..818117e4a0 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -97,9 +97,9 @@
*/
typedef struct TapeBlockTrailer
{
- long prev; /* previous block on this tape, or -1 on first
+ int64 prev; /* previous block on this tape, or -1 on first
* block */
- long next; /* next block on this tape, or # of valid
+ int64 next; /* next block on this tape, or # of valid
* bytes on last block (if < 0) */
} TapeBlockTrailer;
@@ -142,10 +142,10 @@ typedef struct LogicalTape
* When concatenation of worker tape BufFiles is performed, an offset to
* the first block in the unified BufFile space is applied during reads.
*/
- long firstBlockNumber;
- long curBlockNumber;
- long nextBlockNumber;
- long offsetBlockNumber;
+ int64 firstBlockNumber;
+ int64 curBlockNumber;
+ int64 nextBlockNumber;
+ int64 offsetBlockNumber;
/*
* Buffer for current data block(s).
@@ -177,9 +177,9 @@ struct LogicalTapeSet
* blocks that are in unused holes between worker spaces following BufFile
* concatenation.
*/
- long nBlocksAllocated; /* # of blocks allocated */
- long nBlocksWritten; /* # of blocks used in underlying file */
- long nHoleBlocks; /* # of "hole" blocks left */
+ uint64 nBlocksAllocated; /* # of blocks allocated */
+ uint64 nBlocksWritten; /* # of blocks used in underlying file */
+ uint64 nHoleBlocks; /* # of "hole" blocks left */
/*
* We store the numbers of recycled-and-available blocks in freeBlocks[].
@@ -196,7 +196,7 @@ struct LogicalTapeSet
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
bool blocksSorted; /* is freeBlocks[] currently in order? */
- long *freeBlocks; /* resizable array */
+ uint64 *freeBlocks; /* resizable array */
int nFreeBlocks; /* # of currently free blocks */
int freeBlocksLen; /* current allocated length of freeBlocks[] */
@@ -205,10 +205,10 @@ struct LogicalTapeSet
LogicalTape tapes[FLEXIBLE_ARRAY_MEMBER]; /* has nTapes nentries */
};
-static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static void ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static long ltsGetFreeBlock(LogicalTapeSet *lts);
-static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum);
+static void ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static void ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static int64 ltsGetFreeBlock(LogicalTapeSet *lts);
+static void ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum);
static void ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset);
@@ -219,7 +219,7 @@ static void ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
* No need for an error return convention; we ereport() on any error.
*/
static void
-ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
/*
* BufFile does not support "holes", so if we're about to write a block
@@ -267,7 +267,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
* module should never attempt to read a block it doesn't know is there.
*/
static void
-ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
BufFileRead(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
@@ -291,7 +291,7 @@ ltsReadFillBuffer(LogicalTapeSet *lts, LogicalTape *lt)
do
{
char *thisbuf = lt->buffer + lt->nbytes;
- long datablocknum = lt->nextBlockNumber;
+ int64 datablocknum = lt->nextBlockNumber;
/* Fetch next block number */
if (datablocknum == -1L)
@@ -327,10 +327,10 @@ ltsReadFillBuffer(LogicalTapeSet *lts, LogicalTape *lt)
static int
freeBlocks_cmp(const void *a, const void *b)
{
- long ablk = *((const long *) a);
- long bblk = *((const long *) b);
+ int64 ablk = *((const int64 *) a);
+ int64 bblk = *((const int64 *) b);
- /* can't just subtract because long might be wider than int */
+ /* can't just subtract because int64 might be wider than int */
if (ablk < bblk)
return 1;
if (ablk > bblk)
@@ -341,7 +341,7 @@ freeBlocks_cmp(const void *a, const void *b)
/*
* Select a currently unused block for writing to.
*/
-static long
+static int64
ltsGetFreeBlock(LogicalTapeSet *lts)
{
/*
@@ -354,7 +354,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts)
if (!lts->blocksSorted)
{
qsort((void *) lts->freeBlocks, lts->nFreeBlocks,
- sizeof(long), freeBlocks_cmp);
+ sizeof(int64), freeBlocks_cmp);
lts->blocksSorted = true;
}
return lts->freeBlocks[--lts->nFreeBlocks];
@@ -367,7 +367,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts)
* Return a block# to the freelist.
*/
static void
-ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
+ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum)
{
int ndx;
@@ -383,8 +383,8 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
if (lts->nFreeBlocks >= lts->freeBlocksLen)
{
lts->freeBlocksLen *= 2;
- lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocks = (uint64 *) repalloc(lts->freeBlocks,
+ lts->freeBlocksLen * sizeof(uint64));
}
/*
@@ -410,8 +410,8 @@ ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset)
{
LogicalTape *lt = NULL;
- long tapeblocks = 0L;
- long nphysicalblocks = 0L;
+ int64 tapeblocks = 0L;
+ int64 nphysicalblocks = 0L;
int i;
/* Should have at least one worker tape, plus leader's tape */
@@ -526,7 +526,7 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset,
lts->forgetFreeSpace = false;
lts->blocksSorted = true; /* a zero-length array is sorted ... */
lts->freeBlocksLen = 32; /* reasonable initial guess */
- lts->freeBlocks = (long *) palloc(lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocks = (uint64 *) palloc(lts->freeBlocksLen * sizeof(uint64));
lts->nFreeBlocks = 0;
lts->nTapes = ntapes;
@@ -652,7 +652,7 @@ LogicalTapeWrite(LogicalTapeSet *lts, int tapenum,
if (lt->pos >= TapeBlockPayloadSize)
{
/* Buffer full, dump it out */
- long nextBlockNumber;
+ int64 nextBlockNumber;
if (!lt->dirty)
{
@@ -984,7 +984,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
seekpos = (size_t) lt->pos; /* part within this block */
while (size > seekpos)
{
- long prev = TapeBlockGetTrailer(lt->buffer)->prev;
+ int64 prev = TapeBlockGetTrailer(lt->buffer)->prev;
if (prev == -1L)
{
@@ -1029,7 +1029,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
*/
void
LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
- long blocknum, int offset)
+ int64 blocknum, int offset)
{
LogicalTape *lt;
@@ -1060,7 +1060,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
- long *blocknum, int *offset)
+ int64 *blocknum, int *offset)
{
LogicalTape *lt;
@@ -1078,7 +1078,7 @@ LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
/*
* Obtain total disk space currently used by a LogicalTapeSet, in blocks.
*/
-long
+int64
LogicalTapeSetBlocks(LogicalTapeSet *lts)
{
return lts->nBlocksAllocated - lts->nHoleBlocks;
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 62a342f77c..488f12dc6f 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -37,8 +37,8 @@ static bool still_sending = true; /* feedback still needs to be sent? */
static PGresult *HandleCopyStream(PGconn *conn, StreamCtl *stream,
XLogRecPtr *stoppos);
-static int CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket);
-static int CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+static int CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket);
+static int CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer);
static bool ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf,
int len, XLogRecPtr blockpos, TimestampTz *last_status);
@@ -48,7 +48,7 @@ static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *co
XLogRecPtr blockpos, XLogRecPtr *stoppos);
static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
XLogRecPtr *stoppos);
-static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
+static int64 CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status);
static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
@@ -742,7 +742,7 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream,
{
int r;
TimestampTz now;
- long sleeptime;
+ int64 sleeptime;
/*
* Check if we should continue streaming, or abort at this point.
@@ -858,7 +858,7 @@ error:
* or interrupted by signal or stop_socket input, and -1 on an error.
*/
static int
-CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
+CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket)
{
int ret;
fd_set input_mask;
@@ -920,7 +920,7 @@ CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
* -1 on error. -2 if the server ended the COPY.
*/
static int
-CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer)
{
char *copybuf = NULL;
@@ -1228,12 +1228,12 @@ CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
/*
* Calculate how long send/receive loops should sleep
*/
-static long
+static int64
CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status)
{
TimestampTz status_targettime = 0;
- long sleeptime;
+ int64 sleeptime;
if (standby_message_timeout && still_sending)
status_targettime = last_status +
@@ -1241,7 +1241,7 @@ CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
if (status_targettime > 0)
{
- long secs;
+ int64 secs;
int usecs;
feTimestampDifference(now,
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 06de20ada5..e54c968aca 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -84,24 +84,24 @@ extern PGDLLIMPORT int SPI_result;
extern int SPI_connect(void);
extern int SPI_connect_ext(int options);
extern int SPI_finish(void);
-extern int SPI_execute(const char *src, bool read_only, long tcount);
+extern int SPI_execute(const char *src, bool read_only, int64 tcount);
extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern int SPI_execute_plan_with_paramlist(SPIPlanPtr plan,
ParamListInfo params,
- bool read_only, long tcount);
-extern int SPI_exec(const char *src, long tcount);
+ bool read_only, int64 tcount);
+extern int SPI_exec(const char *src, int64 tcount);
extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- long tcount);
+ int64 tcount);
extern int SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount);
+ bool read_only, bool fire_triggers, int64 tcount);
extern int SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern SPIPlanPtr SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern SPIPlanPtr SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
int cursorOptions);
@@ -151,10 +151,10 @@ extern Portal SPI_cursor_open_with_args(const char *name,
extern Portal SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
ParamListInfo params, bool read_only);
extern Portal SPI_cursor_find(const char *name);
-extern void SPI_cursor_fetch(Portal portal, bool forward, long count);
-extern void SPI_cursor_move(Portal portal, bool forward, long count);
-extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, long count);
-extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, long count);
+extern void SPI_cursor_fetch(Portal portal, bool forward, int64 count);
+extern void SPI_cursor_move(Portal portal, bool forward, int64 count);
+extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, int64 count);
+extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, int64 count);
extern void SPI_cursor_close(Portal portal);
extern int SPI_register_relation(EphemeralNamedRelation enr);
diff --git a/src/include/pgtime.h b/src/include/pgtime.h
index 0fc76d0e60..3d2fdedd89 100644
--- a/src/include/pgtime.h
+++ b/src/include/pgtime.h
@@ -33,7 +33,7 @@ struct pg_tm
int tm_wday;
int tm_yday;
int tm_isdst;
- long int tm_gmtoff;
+ int64 tm_gmtoff;
const char *tm_zone;
};
@@ -48,18 +48,18 @@ typedef struct pg_tzenum pg_tzenum;
extern struct pg_tm *pg_localtime(const pg_time_t *timep, const pg_tz *tz);
extern struct pg_tm *pg_gmtime(const pg_time_t *timep);
extern int pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz);
extern bool pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz);
-extern bool pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff);
+extern bool pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff);
extern const char *pg_get_timezone_name(pg_tz *tz);
extern bool pg_tz_acceptable(pg_tz *tz);
@@ -75,7 +75,7 @@ extern pg_tz *log_timezone;
extern void pg_timezone_initialize(void);
extern pg_tz *pg_tzset(const char *tzname);
-extern pg_tz *pg_tzset_offset(long gmtoffset);
+extern pg_tz *pg_tzset_offset(int64 gmtoffset);
extern pg_tzenum *pg_tzenumerate_start(void);
extern pg_tz *pg_tzenumerate_next(pg_tzenum *dir);
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index d6459327cc..0a51120fc4 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -136,7 +136,7 @@ typedef struct timespec instr_time;
(((double) (t).tv_sec) + ((double) (t).tv_nsec) / 1000000000.0)
#define INSTR_TIME_GET_MILLISEC(t) \
- (((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
+ (((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
#define INSTR_TIME_GET_MICROSEC(t) \
(((uint64) (t).tv_sec * (uint64) 1000000) + (uint64) ((t).tv_nsec / 1000))
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 60433f35b4..7178247396 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -42,9 +42,9 @@ extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
extern size_t BufFileWrite(BufFile *file, void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
-extern int BufFileSeekBlock(BufFile *file, long blknum);
+extern int BufFileSeekBlock(BufFile *file, uint64 blknum);
extern int64 BufFileSize(BufFile *file);
-extern long BufFileAppend(BufFile *target, BufFile *source);
+extern uint64 BufFileAppend(BufFile *target, BufFile *source);
extern BufFile *BufFileCreateShared(SharedFileSet *fileset, const char *name);
extern void BufFileExportShared(BufFile *file);
diff --git a/src/include/storage/condition_variable.h b/src/include/storage/condition_variable.h
index bfe5c89b54..cb245c09c5 100644
--- a/src/include/storage/condition_variable.h
+++ b/src/include/storage/condition_variable.h
@@ -43,7 +43,7 @@ extern void ConditionVariableInit(ConditionVariable *cv);
* the condition variable.
*/
extern void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info);
-extern bool ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+extern bool ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info);
extern void ConditionVariableCancelSleep(void);
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 46ae56cae3..3c013ca860 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -169,13 +169,13 @@ extern int AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd,
Latch *latch, void *user_data);
extern void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch);
-extern int WaitEventSetWait(WaitEventSet *set, long timeout,
+extern int WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info);
-extern int WaitLatch(Latch *latch, int wakeEvents, long timeout,
+extern int WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info);
extern int WaitLatchOrSocket(Latch *latch, int wakeEvents,
- pgsocket sock, long timeout, uint32 wait_event_info);
+ pgsocket sock, int64 timeout, uint32 wait_event_info);
/*
* Unix implementation uses SIGUSR1 for inter-process signaling.
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index 0c1af89206..4b75f33be9 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -39,7 +39,7 @@ extern void *ShmemAllocNoError(Size size);
extern void *ShmemAllocUnlocked(Size size);
extern bool ShmemAddrIsValid(const void *addr);
extern void InitShmemIndex(void);
-extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size,
+extern HTAB *ShmemInitHash(const char *name, uint64 init_size, uint64 max_size,
HASHCTL *infoP, int hash_flags);
extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr);
extern Size add_size(Size s1, Size s2);
diff --git a/src/include/tcop/pquery.h b/src/include/tcop/pquery.h
index 4ad6324e2d..367c31cabb 100644
--- a/src/include/tcop/pquery.h
+++ b/src/include/tcop/pquery.h
@@ -33,13 +33,13 @@ extern void PortalStart(Portal portal, ParamListInfo params,
extern void PortalSetResultFormat(Portal portal, int nFormats,
int16 *formats);
-extern bool PortalRun(Portal portal, long count, bool isTopLevel,
+extern bool PortalRun(Portal portal, int64 count, bool isTopLevel,
bool run_once, DestReceiver *dest, DestReceiver *altdest,
char *completionTag);
extern uint64 PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
#endif /* PQUERY_H */
diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h
index 768b952176..0c75534897 100644
--- a/src/include/utils/dynahash.h
+++ b/src/include/utils/dynahash.h
@@ -14,6 +14,6 @@
#ifndef DYNAHASH_H
#define DYNAHASH_H
-extern int my_log2(long num);
+extern int my_log2(uint64 num);
#endif /* DYNAHASH_H */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index b0077b7827..4559cda0f8 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -64,11 +64,11 @@ typedef struct HTAB HTAB;
/* Only those fields indicated by hash_flags need be set */
typedef struct HASHCTL
{
- long num_partitions; /* # partitions (must be power of 2) */
- long ssize; /* segment size */
- long dsize; /* (initial) directory size */
- long max_dsize; /* limit to dsize if dir size is limited */
- long ffactor; /* fill factor */
+ uint64 num_partitions; /* # partitions (must be power of 2) */
+ uint64 ssize; /* segment size */
+ uint64 dsize; /* (initial) directory size */
+ uint64 max_dsize; /* limit to dsize if dir size is limited */
+ uint64 ffactor; /* fill factor */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
HashValueFunc hash; /* hash function */
@@ -119,7 +119,7 @@ typedef struct
/*
* prototypes for functions in dynahash.c
*/
-extern HTAB *hash_create(const char *tabname, long nelem,
+extern HTAB *hash_create(const char *tabname, uint64 nelem,
HASHCTL *info, int flags);
extern void hash_destroy(HTAB *hashp);
extern void hash_stats(const char *where, HTAB *hashp);
@@ -131,13 +131,13 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
bool *foundPtr);
extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry,
const void *newKeyPtr);
-extern long hash_get_num_entries(HTAB *hashp);
+extern uint64 hash_get_num_entries(HTAB *hashp);
extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
extern void *hash_seq_search(HASH_SEQ_STATUS *status);
extern void hash_seq_term(HASH_SEQ_STATUS *status);
extern void hash_freeze(HTAB *hashp);
-extern Size hash_estimate_size(long num_entries, Size entrysize);
-extern long hash_select_dirsize(long num_entries);
+extern Size hash_estimate_size(uint64 num_entries, Size entrysize);
+extern uint64 hash_select_dirsize(uint64 num_entries);
extern Size hash_get_shared_size(HASHCTL *info, int flags);
extern void AtEOXact_HashTables(bool isCommit);
extern void AtEOSubXact_HashTables(bool isCommit, int nestDepth);
diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h
index 74646846b2..cbcbeb6fd7 100644
--- a/src/include/utils/sampling.h
+++ b/src/include/utils/sampling.h
@@ -19,7 +19,7 @@
/* Random generator for sampling code */
typedef unsigned short SamplerRandomState[3];
-extern void sampler_random_init_state(long seed,
+extern void sampler_random_init_state(uint64 seed,
SamplerRandomState randstate);
extern double sampler_random_fract(SamplerRandomState randstate);
@@ -38,7 +38,7 @@ typedef struct
typedef BlockSamplerData *BlockSampler;
extern BlockNumber BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
- int samplesize, long randseed);
+ int samplesize, uint64 randseed);
extern bool BlockSampler_HasMore(BlockSampler bs);
extern BlockNumber BlockSampler_Next(BlockSampler bs);
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 03a1de569f..78fdf05583 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -71,7 +71,7 @@ extern TimestampTz GetCurrentTimestamp(void);
extern TimestampTz GetSQLCurrentTimestamp(int32 typmod);
extern Timestamp GetSQLLocalTimestamp(int32 typmod);
extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs);
+ int64 *secs, int *microsecs);
extern bool TimestampDifferenceExceeds(TimestampTz start_time,
TimestampTz stop_time,
int msec);
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 5acf604f63..3ce5848247 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -358,7 +358,7 @@ static Datum exec_eval_expr(PLpgSQL_execstate *estate,
Oid *rettype,
int32 *rettypmod);
static int exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP);
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP);
static int exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
Portal portal, bool prefetch_ok);
static ParamListInfo setup_param_list(PLpgSQL_execstate *estate,
@@ -4085,7 +4085,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
PLpgSQL_stmt_execsql *stmt)
{
ParamListInfo paramLI;
- long tcount;
+ int64 tcount;
int rc;
PLpgSQL_expr *expr = stmt->sqlstmt;
int too_many_rows_level = 0;
@@ -4683,7 +4683,7 @@ static int
exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
{
PLpgSQL_var *curvar;
- long how_many = stmt->how_many;
+ int64 how_many = stmt->how_many;
SPITupleTable *tuptab;
Portal portal;
char *curname;
@@ -5831,7 +5831,7 @@ exec_eval_expr(PLpgSQL_execstate *estate,
*/
static int
exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP)
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP)
{
ParamListInfo paramLI;
int rc;
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index b599f67fc5..e12429e5f2 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -798,7 +798,7 @@ typedef struct PLpgSQL_stmt_fetch
PLpgSQL_variable *target; /* target (record or row) */
int curvar; /* cursor variable to fetch from */
FetchDirection direction; /* fetch direction */
- long how_many; /* count, if constant (expr is NULL) */
+ int64 how_many; /* count, if constant (expr is NULL) */
PLpgSQL_expr *expr; /* count, if expression */
bool is_move; /* is this a fetch or move? */
bool returns_multiple_rows; /* can return more than one row? */
@@ -1026,7 +1026,7 @@ typedef struct PLpgSQL_function
/* these fields change when the function is used */
struct PLpgSQL_execstate *cur_estate;
- unsigned long use_count;
+ uint64 use_count;
} PLpgSQL_function;
/*
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index 99c1b4f28f..91079f4c99 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -25,7 +25,7 @@
#include "utils/memutils.h"
#include "utils/syscache.h"
-static PyObject *PLy_spi_execute_query(char *query, long limit);
+static PyObject *PLy_spi_execute_query(char *query, uint64 limit);
static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
@@ -158,7 +158,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
char *query;
PyObject *plan;
PyObject *list = NULL;
- long limit = 0;
+ uint64 limit = 0;
- if (PyArg_ParseTuple(args, "s|l", &query, &limit))
+ if (PyArg_ParseTuple(args, "s|K", &query, &limit))
return PLy_spi_execute_query(query, limit);
@@ -174,7 +174,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
}
PyObject *
-PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
+PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit)
{
volatile int nargs;
int i,
@@ -305,7 +305,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
}
static PyObject *
-PLy_spi_execute_query(char *query, long limit)
+PLy_spi_execute_query(char *query, uint64 limit)
{
int rv;
volatile MemoryContext oldcontext;
diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h
index a5e2e60da7..66e8a43aae 100644
--- a/src/pl/plpython/plpy_spi.h
+++ b/src/pl/plpython/plpy_spi.h
@@ -10,7 +10,7 @@
extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args);
extern PyObject *PLy_spi_execute(PyObject *self, PyObject *args);
-extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
+extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit);
typedef struct PLyExceptionEntry
{
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 333f27300a..926c677c41 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -1623,10 +1623,10 @@ increment_overflow_time(pg_time_t *tp, int32 j)
*/
int
pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz)
{
@@ -1771,7 +1771,7 @@ pg_next_dst_boundary(const pg_time_t *timep,
bool
pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz)
{
@@ -1863,7 +1863,7 @@ pg_interpret_timezone_abbrev(const char *abbrev,
* into *gmtoff and return true, else return false.
*/
bool
-pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
+pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
--------------2.24.1--
v4-0002-Spread-bitutils-into-hashing.patchtext/x-diff; charset=us-asciiDownload
From 590eafcd095ad385079d5ad93a6dcf7abe094b6a Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Fri, 31 Jan 2020 07:08:48 -0800
Subject: [PATCH v4 2/2] Spread bitutils into hashing
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..29dca21be2 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -502,7 +503,6 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -543,14 +543,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ metap->hashm_bmsize = 1 << pg_leftmost_one_pos32(metap->hashm_bsize);
+ metap->hashm_bmshift = pg_leftmost_one_pos32(metap->hashm_bsize) + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +564,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +651,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 43d5584eab..89b394916d 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -86,6 +86,7 @@
#include <limits.h>
#include "access/xact.h"
+#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
@@ -1717,16 +1718,11 @@ hash_corrupted(HTAB *hashp)
int
my_log2(uint64 num)
{
- int i;
- uint64 limit;
-
/* guard against too-large input, which would put us into infinite loop */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
+ return ceil_log2_64(num);
}
/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9fc0696096..298c05e6fe 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..88a9ea5b7f 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -20,19 +20,18 @@ extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
/*
* pg_leftmost_one_pos32
* Returns the position of the most significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_leftmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
- return 31 - __builtin_clz(word);
+ return word ? 31 - __builtin_clz(word) : 0;
#else
int shift = 32 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -49,19 +48,18 @@ static inline int
pg_leftmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return 63 - __builtin_clzl(word);
+ return word ? 63 - __builtin_clzl(word) : 0;
#elif defined(HAVE_LONG_LONG_INT_64)
- return 63 - __builtin_clzll(word);
+ return word ? 63 - __builtin_clzll(word) : 0;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CLZ */
int shift = 64 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -73,19 +71,18 @@ pg_leftmost_one_pos64(uint64 word)
/*
* pg_rightmost_one_pos32
* Returns the position of the least significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_rightmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
- return __builtin_ctz(word);
+ return word ? __builtin_ctz(word) : 32;
#else
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 32;
while ((word & 255) == 0)
{
@@ -105,19 +102,18 @@ static inline int
pg_rightmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return __builtin_ctzl(word);
+ return word ? __builtin_ctzl(word) : 64;
#elif defined(HAVE_LONG_LONG_INT_64)
- return __builtin_ctzll(word);
+ return word ? __builtin_ctzll(word) : 64;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CTZ */
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 64;
while ((word & 255) == 0)
{
@@ -145,4 +141,34 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ Assert(num > 0);
+ return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ Assert(num > 0);
+ return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* Calculate the first power of 2 >= num */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ Assert(num > 0);
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ Assert(num > 0);
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
On Fri, Jan 31, 2020 at 04:59:18PM +0100, David Fetter wrote:
On Wed, Jan 15, 2020 at 03:45:12PM -0800, Jesse Zhang wrote:
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:
1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.

While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions).

Done in the expanded patches attached.
These bit-rotted a little, so I've updated them.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v5-0001-de-long-ify.patchtext/x-diff; charset=us-asciiDownload
From 90d3575b0716839b76953023bb63242b1d0698ef Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Wed, 29 Jan 2020 02:09:59 -0800
Subject: [PATCH v5 1/3] de-long-ify
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 4b562d8d3f..482a569814 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -34,11 +34,11 @@ static void gistPlaceItupToPage(GISTNodeBufferPage *pageBuffer,
IndexTuple item);
static void gistGetItupFromPage(GISTNodeBufferPage *pageBuffer,
IndexTuple *item);
-static long gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
-static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum);
+static uint64 gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
+static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum);
-static void ReadTempFileBlock(BufFile *file, long blknum, void *ptr);
-static void WriteTempFileBlock(BufFile *file, long blknum, void *ptr);
+static void ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
+static void WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
/*
@@ -64,7 +64,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
/* Initialize free page management. */
gfbb->nFreeBlocks = 0;
gfbb->freeBlocksLen = 32;
- gfbb->freeBlocks = (long *) palloc(gfbb->freeBlocksLen * sizeof(long));
+ gfbb->freeBlocks = (int64 *) palloc(gfbb->freeBlocksLen * sizeof(int64));
/*
* Current memory context will be used for all in-memory data structures
@@ -469,7 +469,7 @@ gistPopItupFromNodeBuffer(GISTBuildBuffers *gfbb, GISTNodeBuffer *nodeBuffer,
/*
* Select a currently unused block for writing to.
*/
-static long
+static uint64
gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
{
/*
@@ -487,7 +487,7 @@ gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
* Return a block# to the freelist.
*/
static void
-gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
+gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum)
{
int ndx;
@@ -495,9 +495,9 @@ gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
if (gfbb->nFreeBlocks >= gfbb->freeBlocksLen)
{
gfbb->freeBlocksLen *= 2;
- gfbb->freeBlocks = (long *) repalloc(gfbb->freeBlocks,
+ gfbb->freeBlocks = (int64 *) repalloc(gfbb->freeBlocks,
gfbb->freeBlocksLen *
- sizeof(long));
+ sizeof(uint64));
}
/* Add blocknum to array */
@@ -755,7 +755,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
*/
static void
-ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
+ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
@@ -764,7 +764,7 @@ ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
}
static void
-WriteTempFileBlock(BufFile *file, long blknum, void *ptr)
+WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index c46764bf42..4fc478640a 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -70,7 +70,7 @@ static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
static void _SPI_error_callback(void *arg);
static void _SPI_cursor_operation(Portal portal,
- FetchDirection direction, long count,
+ FetchDirection direction, uint64 count,
DestReceiver *dest);
static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
@@ -493,7 +493,7 @@ SPI_inside_nonatomic_context(void)
/* Parse, plan, and execute a query string */
int
-SPI_execute(const char *src, bool read_only, long tcount)
+SPI_execute(const char *src, bool read_only, int64 tcount)
{
_SPI_plan plan;
int res;
@@ -521,7 +521,7 @@ SPI_execute(const char *src, bool read_only, long tcount)
/* Obsolete version of SPI_execute */
int
-SPI_exec(const char *src, long tcount)
+SPI_exec(const char *src, int64 tcount)
{
return SPI_execute(src, false, tcount);
}
@@ -529,7 +529,7 @@ SPI_exec(const char *src, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -555,7 +555,7 @@ SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
/* Obsolete version of SPI_execute_plan */
int
-SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
+SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, int64 tcount)
{
return SPI_execute_plan(plan, Values, Nulls, false, tcount);
}
@@ -563,7 +563,7 @@ SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -599,7 +599,7 @@ int
SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount)
+ bool read_only, bool fire_triggers, int64 tcount)
{
int res;
@@ -633,7 +633,7 @@ int
SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
_SPI_plan plan;
@@ -1530,7 +1530,7 @@ SPI_cursor_find(const char *name)
* Fetch rows in a cursor
*/
void
-SPI_cursor_fetch(Portal portal, bool forward, long count)
+SPI_cursor_fetch(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1545,7 +1545,7 @@ SPI_cursor_fetch(Portal portal, bool forward, long count)
* Move in a cursor
*/
void
-SPI_cursor_move(Portal portal, bool forward, long count)
+SPI_cursor_move(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1559,7 +1559,7 @@ SPI_cursor_move(Portal portal, bool forward, long count)
* Fetch rows in a scrollable cursor
*/
void
-SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal,
direction, count,
@@ -1574,7 +1574,7 @@ SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
* Move in a scrollable cursor
*/
void
-SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_move(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal, direction, count, None_Receiver);
}
@@ -2567,7 +2567,7 @@ _SPI_error_callback(void *arg)
* Do a FETCH or MOVE in a cursor
*/
static void
-_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
+_SPI_cursor_operation(Portal portal, FetchDirection direction, uint64 count,
DestReceiver *dest)
{
uint64 nfetched;
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 35e8f12e62..db73a9f159 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -749,7 +749,7 @@ BufFileTell(BufFile *file, int *fileno, off_t *offset)
* impossible seek is attempted.
*/
int
-BufFileSeekBlock(BufFile *file, long blknum)
+BufFileSeekBlock(BufFile *file, uint64 blknum)
{
return BufFileSeek(file,
(int) (blknum / BUFFILE_SEG_SIZE),
@@ -760,13 +760,11 @@ BufFileSeekBlock(BufFile *file, long blknum)
#ifdef NOT_USED
/*
* BufFileTellBlock --- block-oriented tell
- *
- * Any fractional part of a block in the current seek position is ignored.
*/
-long
+uint64
BufFileTellBlock(BufFile *file)
{
- long blknum;
+ uint64 blknum;
blknum = (file->curOffset + file->pos) / BLCKSZ;
blknum += file->curFile * BUFFILE_SEG_SIZE;
@@ -820,10 +818,10 @@ BufFileSize(BufFile *file)
* begins. Caller should apply this as an offset when working off block
* positions that are in terms of the original BufFile space.
*/
-long
+uint64
BufFileAppend(BufFile *target, BufFile *source)
{
- long startBlock = target->numFiles * BUFFILE_SEG_SIZE;
+ uint64 startBlock = target->numFiles * BUFFILE_SEG_SIZE;
int newNumFiles = target->numFiles + source->numFiles;
int i;
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index 046ca5c6c7..cb00c67303 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -349,7 +349,7 @@ DisownLatch(Latch *latch)
* function returns immediately.
*
* The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
- * is given. Although it is declared as "long", we don't actually support
+ * is given. Although it is declared as "int64", we don't actually support
* timeouts longer than INT_MAX milliseconds. Note that some extra overhead
* is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
*
@@ -362,7 +362,7 @@ DisownLatch(Latch *latch)
* we return all of them in one call, but we will return at least one.
*/
int
-WaitLatch(Latch *latch, int wakeEvents, long timeout,
+WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info)
{
return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout,
@@ -388,7 +388,7 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout,
*/
int
WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
- long timeout, uint32 wait_event_info)
+ int64 timeout, uint32 wait_event_info)
{
int ret = 0;
int rc;
@@ -1155,14 +1155,14 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
* values associated with the registered event.
*/
int
-WaitEventSetWait(WaitEventSet *set, long timeout,
+WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info)
{
int returned_events = 0;
instr_time start_time;
instr_time cur_time;
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
Assert(nevents > 0);
@@ -1247,7 +1247,7 @@ WaitEventSetWait(WaitEventSet *set, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - INSTR_TIME_GET_MILLISEC(cur_time);
if (cur_timeout <= 0)
break;
}
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 2892a573e4..582c63a5e6 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -335,8 +335,8 @@ InitShmemIndex(void)
*/
HTAB *
ShmemInitHash(const char *name, /* table string name for shmem index */
- long init_size, /* initial table size */
- long max_size, /* max size of the table */
+ uint64 init_size, /* initial table size */
+ uint64 max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
int hash_flags) /* info about infoP */
{
diff --git a/src/backend/storage/lmgr/condition_variable.c b/src/backend/storage/lmgr/condition_variable.c
index 37b6a4eecd..43c662aaeb 100644
--- a/src/backend/storage/lmgr/condition_variable.c
+++ b/src/backend/storage/lmgr/condition_variable.c
@@ -129,10 +129,10 @@ ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
* See ConditionVariableSleep() for general usage.
*/
bool
-ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info)
{
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
instr_time start_time;
instr_time cur_time;
@@ -217,7 +217,7 @@ ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - (int64) INSTR_TIME_GET_MILLISEC(cur_time);
/* Have we crossed the timeout threshold? */
if (cur_timeout <= 0)
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 0a6f80963b..9bb9c1fda9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -109,7 +109,7 @@ int PostAuthDelay = 0;
*/
/* max_stack_depth converted to bytes for speed of checking */
-static long max_stack_depth_bytes = 100 * 1024L;
+static uint64 max_stack_depth_bytes = 100 * 1024UL;
/*
* Stack base pointer -- initialized by PostmasterMain and inherited by
@@ -2016,7 +2016,7 @@ exec_bind_message(StringInfo input_message)
* Process an "Execute" message for a portal
*/
static void
-exec_execute_message(const char *portal_name, long max_rows)
+exec_execute_message(const char *portal_name, uint64 max_rows)
{
CommandDest dest;
DestReceiver *receiver;
@@ -2302,7 +2302,7 @@ check_log_duration(char *msec_str, bool was_logged)
if (log_duration || log_min_duration_sample >= 0 ||
log_min_duration_statement >= 0 || xact_is_sampled)
{
- long secs;
+ int64 secs;
int usecs;
int msecs;
bool exceeded_duration;
@@ -3302,12 +3302,12 @@ bool
stack_is_too_deep(void)
{
char stack_top_loc;
- long stack_depth;
+ int64 stack_depth;
/*
* Compute distance from reference point to my local variables
*/
- stack_depth = (long) (stack_base_ptr - &stack_top_loc);
+ stack_depth = (int64) (stack_base_ptr - &stack_top_loc);
/*
* Take abs value, since stacks grow up on some machines, down on others
@@ -3336,7 +3336,7 @@ stack_is_too_deep(void)
* Note we assume that the same max_stack_depth applies to both stacks.
*/
#if defined(__ia64__) || defined(__ia64)
- stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
+ stack_depth = (int64) (ia64_get_bsp() - register_stack_base_ptr);
if (stack_depth > max_stack_depth_bytes &&
register_stack_base_ptr != NULL)
@@ -3350,8 +3350,8 @@ stack_is_too_deep(void)
bool
check_max_stack_depth(int *newval, void **extra, GucSource source)
{
- long newval_bytes = *newval * 1024L;
- long stack_rlimit = get_stack_depth_rlimit();
+ uint64 newval_bytes = *newval * 1024UL;
+ uint64 stack_rlimit = get_stack_depth_rlimit();
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
@@ -3367,7 +3367,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
void
assign_max_stack_depth(int newval, void *extra)
{
- long newval_bytes = newval * 1024L;
+ uint64 newval_bytes = newval * 1024UL;
max_stack_depth_bytes = newval_bytes;
}
@@ -4696,7 +4696,7 @@ static void
log_disconnections(int code, Datum arg)
{
Port *port = MyProcPort;
- long secs;
+ int64 secs;
int usecs;
int msecs;
int hours,
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 0f5801e046..f519ce7def 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -44,7 +44,7 @@ static void ProcessQuery(PlannedStmt *plan,
static void FillPortalStore(Portal portal, bool isTopLevel);
static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
DestReceiver *dest);
-static uint64 PortalRunSelect(Portal portal, bool forward, long count,
+static uint64 PortalRunSelect(Portal portal, bool forward, int64 count,
DestReceiver *dest);
static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
bool isTopLevel, bool setHoldSnapshot,
@@ -55,7 +55,7 @@ static void PortalRunMulti(Portal portal,
char *completionTag);
static uint64 DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
static void DoPortalRewind(Portal portal);
@@ -683,7 +683,7 @@ PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
* suspended due to exhaustion of the count parameter.
*/
bool
-PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
+PortalRun(Portal portal, int64 count, bool isTopLevel, bool run_once,
DestReceiver *dest, DestReceiver *altdest,
char *completionTag)
{
@@ -871,7 +871,7 @@ PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
static uint64
PortalRunSelect(Portal portal,
bool forward,
- long count,
+ int64 count,
DestReceiver *dest)
{
QueryDesc *queryDesc;
@@ -1391,7 +1391,7 @@ PortalRunMulti(Portal portal,
uint64
PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
uint64 result;
@@ -1493,7 +1493,7 @@ PortalRunFetch(Portal portal,
static uint64
DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
bool forward;
@@ -1531,7 +1531,7 @@ DoPortalRunFetch(Portal portal,
* In practice, if the goal is less than halfway back to the
* start, it's better to scan from where we are.
*
- * Also, if current portalPos is outside the range of "long",
+ * Also, if current portalPos is outside the range of "int64",
* do it the hard way to avoid possible overflow of the count
* argument to PortalRunSelect. We must exclude exactly
* LONG_MAX, as well, lest the count look like FETCH_ALL.
@@ -1549,7 +1549,7 @@ DoPortalRunFetch(Portal portal,
}
else
{
- long pos = (long) portal->portalPos;
+ int64 pos = (int64) portal->portalPos;
if (portal->atEnd)
pos++; /* need one extra fetch if off end */
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 0b6c9d5ea8..ddd471c059 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -1655,7 +1655,7 @@ timeofday(PG_FUNCTION_ARGS)
*/
void
TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs)
+ int64 *secs, int *microsecs)
{
TimestampTz diff = stop_time - start_time;
@@ -1666,7 +1666,7 @@ TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
}
else
{
- *secs = (long) (diff / USECS_PER_SEC);
+ *secs = (int64) (diff / USECS_PER_SEC);
*microsecs = (int) (diff % USECS_PER_SEC);
}
}
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index d245e1aa12..6e98b61b2f 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -142,7 +142,7 @@ typedef HASHBUCKET *HASHSEGMENT;
typedef struct
{
slock_t mutex; /* spinlock for this freelist */
- long nentries; /* number of entries in associated buckets */
+ uint64 nentries; /* number of entries in associated buckets */
HASHELEMENT *freeList; /* chain of free elements */
} FreeListData;
@@ -170,8 +170,8 @@ struct HASHHDR
/* These fields can change, but not in a partitioned table */
/* Also, dsize can't change in a shared table, even if unpartitioned */
- long dsize; /* directory size */
- long nsegs; /* number of allocated segments (<= dsize) */
+ uint64 dsize; /* directory size */
+ uint64 nsegs; /* number of allocated segments (<= dsize) */
uint32 max_bucket; /* ID of maximum bucket in use */
uint32 high_mask; /* mask to modulo into entire table */
uint32 low_mask; /* mask to modulo into lower half of table */
@@ -179,10 +179,10 @@ struct HASHHDR
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
- long ffactor; /* target fill factor */
- long max_dsize; /* 'dsize' limit if directory is fixed size */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 num_partitions; /* # partitions (must be power of 2), or 0 */
+ uint64 ffactor; /* target fill factor */
+ uint64 max_dsize; /* 'dsize' limit if directory is fixed size */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
@@ -192,8 +192,8 @@ struct HASHHDR
* Count statistics here. NB: stats code doesn't bother with mutex, so
* counts could be corrupted a bit in a partitioned table.
*/
- long accesses;
- long collisions;
+ uint64 accesses;
+ uint64 collisions;
#endif
};
@@ -224,7 +224,7 @@ struct HTAB
/* We keep local copies of these fixed values to reduce contention */
Size keysize; /* hash key length in bytes */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
};
@@ -245,9 +245,9 @@ struct HTAB
#define MOD(x,y) ((x) & ((y)-1))
#ifdef HASH_STATISTICS
-static long hash_accesses,
- hash_collisions,
- hash_expansions;
+static uint64 hash_accesses,
+ hash_collisions,
+ hash_expansions;
#endif
/*
@@ -261,10 +261,10 @@ static bool expand_table(HTAB *hashp);
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
static void hdefault(HTAB *hashp);
static int choose_nelem_alloc(Size entrysize);
-static bool init_htab(HTAB *hashp, long nelem);
+static bool init_htab(HTAB *hashp, uint64 nelem);
static void hash_corrupted(HTAB *hashp);
-static long next_pow2_long(long num);
-static int next_pow2_int(long num);
+static uint64 next_pow2_uint64(uint64 num);
+static int next_pow2_int(uint64 num);
static void register_seq_scan(HTAB *hashp);
static void deregister_seq_scan(HTAB *hashp);
static bool has_seq_scans(HTAB *hashp);
@@ -314,7 +314,7 @@ string_compare(const char *key1, const char *key2, Size keysize)
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
-hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, uint64 nelem, HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
@@ -634,7 +634,7 @@ choose_nelem_alloc(Size entrysize)
* arrays
*/
static bool
-init_htab(HTAB *hashp, long nelem)
+init_htab(HTAB *hashp, uint64 nelem)
{
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT *segp;
@@ -730,10 +730,10 @@ init_htab(HTAB *hashp, long nelem)
* NB: assumes that all hash structure parameters have default values!
*/
Size
-hash_estimate_size(long num_entries, Size entrysize)
+hash_estimate_size(uint64 num_entries, Size entrysize)
{
Size size;
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries,
nElementAllocs,
@@ -741,9 +741,9 @@ hash_estimate_size(long num_entries, Size entrysize)
elementAllocCnt;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -776,17 +776,17 @@ hash_estimate_size(long num_entries, Size entrysize)
*
* XXX this had better agree with the behavior of init_htab()...
*/
-long
-hash_select_dirsize(long num_entries)
+uint64
+hash_select_dirsize(uint64 num_entries)
{
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -837,8 +837,8 @@ hash_stats(const char *where, HTAB *hashp)
- fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
+ fprintf(stderr, "%s: this HTAB -- accesses " UINT64_FORMAT " collisions " UINT64_FORMAT "\n",
 where, hashp->hctl->accesses, hashp->hctl->collisions);
- fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
- hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
+ fprintf(stderr, "hash_stats: entries " UINT64_FORMAT " keysize " UINT64_FORMAT " maxp %u segmentcount %ld\n",
+ hash_get_num_entries(hashp), (uint64) hashp->hctl->keysize,
 hashp->hctl->max_bucket, hashp->hctl->nsegs);
- fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
+ fprintf(stderr, "%s: total accesses " UINT64_FORMAT " total collisions " UINT64_FORMAT "\n",
 where, hash_accesses, hash_collisions);
@@ -927,8 +927,8 @@ hash_search_with_hash_value(HTAB *hashp,
int freelist_idx = FREELIST_IDX(hctl, hashvalue);
Size keysize;
uint32 bucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -955,7 +955,7 @@ hash_search_with_hash_value(HTAB *hashp,
* order of these tests is to try to check cheaper conditions first.
*/
if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
- hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+ hctl->freeList[0].nentries / (uint64) (hctl->max_bucket + 1) >= hctl->ffactor &&
!has_seq_scans(hashp))
(void) expand_table(hashp);
}
@@ -1123,8 +1123,8 @@ hash_update_hash_key(HTAB *hashp,
Size keysize;
uint32 bucket;
uint32 newbucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -1332,11 +1332,11 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
/*
* hash_get_num_entries -- get the number of entries in a hashtable
*/
-long
+uint64
hash_get_num_entries(HTAB *hashp)
{
int i;
- long sum = hashp->hctl->freeList[0].nentries;
+ uint64 sum = hashp->hctl->freeList[0].nentries;
/*
* We currently don't bother with acquiring the mutexes; it's only
@@ -1392,9 +1392,9 @@ hash_seq_search(HASH_SEQ_STATUS *status)
HTAB *hashp;
HASHHDR *hctl;
uint32 max_bucket;
- long ssize;
- long segment_num;
- long segment_ndx;
+ uint64 ssize;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
uint32 curBucket;
HASHELEMENT *curElem;
@@ -1505,11 +1505,11 @@ expand_table(HTAB *hashp)
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT old_seg,
new_seg;
- long old_bucket,
+ uint64 old_bucket,
new_bucket;
- long new_segnum,
+ uint64 new_segnum,
new_segndx;
- long old_segnum,
+ uint64 old_segnum,
old_segndx;
HASHBUCKET *oldlink,
*newlink;
@@ -1577,7 +1577,7 @@ expand_table(HTAB *hashp)
currElement = nextElement)
{
nextElement = currElement->link;
- if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
+ if ((uint64) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
{
*oldlink = currElement;
oldlink = &currElement->link;
@@ -1601,9 +1601,9 @@ dir_realloc(HTAB *hashp)
{
HASHSEGMENT *p;
HASHSEGMENT *old_p;
- long new_dsize;
- long old_dirsize;
- long new_dirsize;
+ uint64 new_dsize;
+ uint64 old_dirsize;
+ uint64 new_dirsize;
if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
return false;
@@ -1716,10 +1716,10 @@ hash_corrupted(HTAB *hashp)
/* calculate ceil(log base 2) of num */
int
-my_log2(long num)
+my_log2(uint64 num)
{
int i;
- long limit;
+ uint64 limit;
/* guard against too-large input, which would put us into infinite loop */
- if (num > LONG_MAX / 2)
+ if (num > PG_UINT64_MAX / 2)
@@ -1730,9 +1730,9 @@ my_log2(long num)
return i;
}
-/* calculate first power of 2 >= num, bounded to what will fit in a long */
-static long
-next_pow2_long(long num)
+/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
+static uint64
+next_pow2_uint64(uint64 num)
{
/* my_log2's internal range check is sufficient */
- return 1L << my_log2(num);
+ return UINT64CONST(1) << my_log2(num);
@@ -1740,7 +1740,7 @@ next_pow2_long(long num)
/* calculate first power of 2 >= num, bounded to what will fit in an int */
static int
-next_pow2_int(long num)
+next_pow2_int(uint64 num)
{
if (num > INT_MAX / 2)
num = INT_MAX / 2;
diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c
index 361c15614e..05d2fd581c 100644
--- a/src/backend/utils/misc/sampling.c
+++ b/src/backend/utils/misc/sampling.c
@@ -37,7 +37,7 @@
*/
BlockNumber
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize,
- long randseed)
+ uint64 randseed)
{
bs->N = nblocks; /* measured table size */
@@ -230,7 +230,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n)
*----------
*/
void
-sampler_random_init_state(long seed, SamplerRandomState randstate)
+sampler_random_init_state(uint64 seed, SamplerRandomState randstate)
{
randstate[0] = 0x330e; /* same as pg_erand48, but could be anything */
randstate[1] = (unsigned short) seed;
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 4f78b55fba..01396ca230 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -93,9 +93,9 @@
*/
typedef struct TapeBlockTrailer
{
- long prev; /* previous block on this tape, or -1 on first
+ int64 prev; /* previous block on this tape, or -1 on first
* block */
- long next; /* next block on this tape, or # of valid
+ int64 next; /* next block on this tape, or # of valid
* bytes on last block (if < 0) */
} TapeBlockTrailer;
@@ -138,10 +138,10 @@ typedef struct LogicalTape
* When concatenation of worker tape BufFiles is performed, an offset to
* the first block in the unified BufFile space is applied during reads.
*/
- long firstBlockNumber;
- long curBlockNumber;
- long nextBlockNumber;
- long offsetBlockNumber;
+ int64 firstBlockNumber;
+ int64 curBlockNumber;
+ int64 nextBlockNumber;
+ int64 offsetBlockNumber;
/*
* Buffer for current data block(s).
@@ -173,9 +173,9 @@ struct LogicalTapeSet
* blocks that are in unused holes between worker spaces following BufFile
* concatenation.
*/
- long nBlocksAllocated; /* # of blocks allocated */
- long nBlocksWritten; /* # of blocks used in underlying file */
- long nHoleBlocks; /* # of "hole" blocks left */
+ uint64 nBlocksAllocated; /* # of blocks allocated */
+ uint64 nBlocksWritten; /* # of blocks used in underlying file */
+ uint64 nHoleBlocks; /* # of "hole" blocks left */
/*
* We store the numbers of recycled-and-available blocks in freeBlocks[].
@@ -186,19 +186,19 @@ struct LogicalTapeSet
* LogicalTapeSetForgetFreeSpace().
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
- long *freeBlocks; /* resizable array holding minheap */
- long nFreeBlocks; /* # of currently free blocks */
- Size freeBlocksLen; /* current allocated length of freeBlocks[] */
+ uint64 *freeBlocks; /* resizable array holding minheap */
+ int nFreeBlocks; /* # of currently free blocks */
+ int freeBlocksLen; /* current allocated length of freeBlocks[] */
/* The array of logical tapes. */
int nTapes; /* # of logical tapes in set */
LogicalTape tapes[FLEXIBLE_ARRAY_MEMBER]; /* has nTapes nentries */
};
-static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static void ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static long ltsGetFreeBlock(LogicalTapeSet *lts);
-static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum);
+static void ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static void ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static int64 ltsGetFreeBlock(LogicalTapeSet *lts);
+static void ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum);
static void ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset);
static void ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt);
@@ -210,7 +210,7 @@ static void ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt);
* No need for an error return convention; we ereport() on any error.
*/
static void
-ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
/*
* BufFile does not support "holes", so if we're about to write a block
@@ -258,7 +258,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
* module should never attempt to read a block it doesn't know is there.
*/
static void
-ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
BufFileRead(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
@@ -282,7 +282,7 @@ ltsReadFillBuffer(LogicalTapeSet *lts, LogicalTape *lt)
do
{
char *thisbuf = lt->buffer + lt->nbytes;
- long datablocknum = lt->nextBlockNumber;
+ int64 datablocknum = lt->nextBlockNumber;
/* Fetch next block number */
if (datablocknum == -1L)
@@ -344,7 +344,7 @@ parent_offset(unsigned long i)
* Select the lowest currently unused block by taking the first element from
* the freelist min heap.
*/
-static long
+static int64
ltsGetFreeBlock(LogicalTapeSet *lts)
{
- long *heap = lts->freeBlocks;
+ uint64 *heap = lts->freeBlocks;
@@ -400,7 +400,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts)
* Return a block# to the freelist.
*/
static void
-ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
+ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum)
{
- long *heap;
+ uint64 *heap;
unsigned long pos;
@@ -424,8 +424,8 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
return;
lts->freeBlocksLen *= 2;
- lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocks = (uint64 *) repalloc(lts->freeBlocks,
+ lts->freeBlocksLen * sizeof(uint64));
}
heap = lts->freeBlocks;
@@ -460,8 +460,8 @@ ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset)
{
LogicalTape *lt = NULL;
- long tapeblocks = 0L;
- long nphysicalblocks = 0L;
+ int64 tapeblocks = 0L;
+ int64 nphysicalblocks = 0L;
int i;
/* Should have at least one worker tape, plus leader's tape */
@@ -593,7 +593,7 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset,
lts->nHoleBlocks = 0L;
lts->forgetFreeSpace = false;
lts->freeBlocksLen = 32; /* reasonable initial guess */
- lts->freeBlocks = (long *) palloc(lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocks = (uint64 *) palloc(lts->freeBlocksLen * sizeof(uint64));
lts->nFreeBlocks = 0;
lts->nTapes = ntapes;
@@ -719,7 +719,7 @@ LogicalTapeWrite(LogicalTapeSet *lts, int tapenum,
if (lt->pos >= TapeBlockPayloadSize)
{
/* Buffer full, dump it out */
- long nextBlockNumber;
+ int64 nextBlockNumber;
if (!lt->dirty)
{
@@ -1048,7 +1048,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
seekpos = (size_t) lt->pos; /* part within this block */
while (size > seekpos)
{
- long prev = TapeBlockGetTrailer(lt->buffer)->prev;
+ int64 prev = TapeBlockGetTrailer(lt->buffer)->prev;
if (prev == -1L)
{
@@ -1093,7 +1093,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
*/
void
LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
- long blocknum, int offset)
+ int64 blocknum, int offset)
{
LogicalTape *lt;
@@ -1127,7 +1127,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
- long *blocknum, int *offset)
+ int64 *blocknum, int *offset)
{
LogicalTape *lt;
@@ -1149,7 +1149,7 @@ LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
/*
* Obtain total disk space currently used by a LogicalTapeSet, in blocks.
*/
-long
+int64
LogicalTapeSetBlocks(LogicalTapeSet *lts)
{
return lts->nBlocksAllocated - lts->nHoleBlocks;
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 62a342f77c..488f12dc6f 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -37,8 +37,8 @@ static bool still_sending = true; /* feedback still needs to be sent? */
static PGresult *HandleCopyStream(PGconn *conn, StreamCtl *stream,
XLogRecPtr *stoppos);
-static int CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket);
-static int CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+static int CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket);
+static int CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer);
static bool ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf,
int len, XLogRecPtr blockpos, TimestampTz *last_status);
@@ -48,7 +48,7 @@ static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *co
XLogRecPtr blockpos, XLogRecPtr *stoppos);
static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
XLogRecPtr *stoppos);
-static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
+static int64 CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status);
static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
@@ -742,7 +742,7 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream,
{
int r;
TimestampTz now;
- long sleeptime;
+ int64 sleeptime;
/*
* Check if we should continue streaming, or abort at this point.
@@ -858,7 +858,7 @@ error:
* or interrupted by signal or stop_socket input, and -1 on an error.
*/
static int
-CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
+CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket)
{
int ret;
fd_set input_mask;
@@ -920,7 +920,7 @@ CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
* -1 on error. -2 if the server ended the COPY.
*/
static int
-CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer)
{
char *copybuf = NULL;
@@ -1228,12 +1228,12 @@ CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
/*
* Calculate how long send/receive loops should sleep
*/
-static long
+static int64
CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status)
{
TimestampTz status_targettime = 0;
- long sleeptime;
+ int64 sleeptime;
if (standby_message_timeout && still_sending)
status_targettime = last_status +
@@ -1241,7 +1241,7 @@ CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
if (status_targettime > 0)
{
- long secs;
+ int64 secs;
int usecs;
feTimestampDifference(now,
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 06de20ada5..e54c968aca 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -84,24 +84,24 @@ extern PGDLLIMPORT int SPI_result;
extern int SPI_connect(void);
extern int SPI_connect_ext(int options);
extern int SPI_finish(void);
-extern int SPI_execute(const char *src, bool read_only, long tcount);
+extern int SPI_execute(const char *src, bool read_only, int64 tcount);
extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern int SPI_execute_plan_with_paramlist(SPIPlanPtr plan,
ParamListInfo params,
- bool read_only, long tcount);
-extern int SPI_exec(const char *src, long tcount);
+ bool read_only, int64 tcount);
+extern int SPI_exec(const char *src, int64 tcount);
extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- long tcount);
+ int64 tcount);
extern int SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount);
+ bool read_only, bool fire_triggers, int64 tcount);
extern int SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern SPIPlanPtr SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern SPIPlanPtr SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
int cursorOptions);
@@ -151,10 +151,10 @@ extern Portal SPI_cursor_open_with_args(const char *name,
extern Portal SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
ParamListInfo params, bool read_only);
extern Portal SPI_cursor_find(const char *name);
-extern void SPI_cursor_fetch(Portal portal, bool forward, long count);
-extern void SPI_cursor_move(Portal portal, bool forward, long count);
-extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, long count);
-extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, long count);
+extern void SPI_cursor_fetch(Portal portal, bool forward, int64 count);
+extern void SPI_cursor_move(Portal portal, bool forward, int64 count);
+extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, int64 count);
+extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, int64 count);
extern void SPI_cursor_close(Portal portal);
extern int SPI_register_relation(EphemeralNamedRelation enr);
diff --git a/src/include/pgtime.h b/src/include/pgtime.h
index 0fc76d0e60..3d2fdedd89 100644
--- a/src/include/pgtime.h
+++ b/src/include/pgtime.h
@@ -33,7 +33,7 @@ struct pg_tm
int tm_wday;
int tm_yday;
int tm_isdst;
- long int tm_gmtoff;
+ int64 tm_gmtoff;
const char *tm_zone;
};
@@ -48,18 +48,18 @@ typedef struct pg_tzenum pg_tzenum;
extern struct pg_tm *pg_localtime(const pg_time_t *timep, const pg_tz *tz);
extern struct pg_tm *pg_gmtime(const pg_time_t *timep);
extern int pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz);
extern bool pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz);
-extern bool pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff);
+extern bool pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff);
extern const char *pg_get_timezone_name(pg_tz *tz);
extern bool pg_tz_acceptable(pg_tz *tz);
@@ -75,7 +75,7 @@ extern pg_tz *log_timezone;
extern void pg_timezone_initialize(void);
extern pg_tz *pg_tzset(const char *tzname);
-extern pg_tz *pg_tzset_offset(long gmtoffset);
+extern pg_tz *pg_tzset_offset(int64 gmtoffset);
extern pg_tzenum *pg_tzenumerate_start(void);
extern pg_tz *pg_tzenumerate_next(pg_tzenum *dir);
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index d6459327cc..0a51120fc4 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -136,7 +136,7 @@ typedef struct timespec instr_time;
(((double) (t).tv_sec) + ((double) (t).tv_nsec) / 1000000000.0)
#define INSTR_TIME_GET_MILLISEC(t) \
- (((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
+ (((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
#define INSTR_TIME_GET_MICROSEC(t) \
(((uint64) (t).tv_sec * (uint64) 1000000) + (uint64) ((t).tv_nsec / 1000))
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 60433f35b4..7178247396 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -42,9 +42,9 @@ extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
extern size_t BufFileWrite(BufFile *file, void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
-extern int BufFileSeekBlock(BufFile *file, long blknum);
+extern int BufFileSeekBlock(BufFile *file, uint64 blknum);
extern int64 BufFileSize(BufFile *file);
-extern long BufFileAppend(BufFile *target, BufFile *source);
+extern uint64 BufFileAppend(BufFile *target, BufFile *source);
extern BufFile *BufFileCreateShared(SharedFileSet *fileset, const char *name);
extern void BufFileExportShared(BufFile *file);
diff --git a/src/include/storage/condition_variable.h b/src/include/storage/condition_variable.h
index bfe5c89b54..cb245c09c5 100644
--- a/src/include/storage/condition_variable.h
+++ b/src/include/storage/condition_variable.h
@@ -43,7 +43,7 @@ extern void ConditionVariableInit(ConditionVariable *cv);
* the condition variable.
*/
extern void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info);
-extern bool ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+extern bool ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info);
extern void ConditionVariableCancelSleep(void);
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 46ae56cae3..3c013ca860 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -169,13 +169,13 @@ extern int AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd,
Latch *latch, void *user_data);
extern void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch);
-extern int WaitEventSetWait(WaitEventSet *set, long timeout,
+extern int WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info);
-extern int WaitLatch(Latch *latch, int wakeEvents, long timeout,
+extern int WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info);
extern int WaitLatchOrSocket(Latch *latch, int wakeEvents,
- pgsocket sock, long timeout, uint32 wait_event_info);
+ pgsocket sock, int64 timeout, uint32 wait_event_info);
/*
* Unix implementation uses SIGUSR1 for inter-process signaling.
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index 0c1af89206..4b75f33be9 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -39,7 +39,7 @@ extern void *ShmemAllocNoError(Size size);
extern void *ShmemAllocUnlocked(Size size);
extern bool ShmemAddrIsValid(const void *addr);
extern void InitShmemIndex(void);
-extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size,
+extern HTAB *ShmemInitHash(const char *name, uint64 init_size, uint64 max_size,
HASHCTL *infoP, int hash_flags);
extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr);
extern Size add_size(Size s1, Size s2);
diff --git a/src/include/tcop/pquery.h b/src/include/tcop/pquery.h
index 4ad6324e2d..367c31cabb 100644
--- a/src/include/tcop/pquery.h
+++ b/src/include/tcop/pquery.h
@@ -33,13 +33,13 @@ extern void PortalStart(Portal portal, ParamListInfo params,
extern void PortalSetResultFormat(Portal portal, int nFormats,
int16 *formats);
-extern bool PortalRun(Portal portal, long count, bool isTopLevel,
+extern bool PortalRun(Portal portal, int64 count, bool isTopLevel,
bool run_once, DestReceiver *dest, DestReceiver *altdest,
char *completionTag);
extern uint64 PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
#endif /* PQUERY_H */
diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h
index 768b952176..0c75534897 100644
--- a/src/include/utils/dynahash.h
+++ b/src/include/utils/dynahash.h
@@ -14,6 +14,6 @@
#ifndef DYNAHASH_H
#define DYNAHASH_H
-extern int my_log2(long num);
+extern int my_log2(uint64 num);
#endif /* DYNAHASH_H */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index f1deb9beab..dd0cd98962 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -64,11 +64,11 @@ typedef struct HTAB HTAB;
/* Only those fields indicated by hash_flags need be set */
typedef struct HASHCTL
{
- long num_partitions; /* # partitions (must be power of 2) */
- long ssize; /* segment size */
- long dsize; /* (initial) directory size */
- long max_dsize; /* limit to dsize if dir size is limited */
- long ffactor; /* fill factor */
+ uint64 num_partitions; /* # partitions (must be power of 2) */
+ uint64 ssize; /* segment size */
+ uint64 dsize; /* (initial) directory size */
+ uint64 max_dsize; /* limit to dsize if dir size is limited */
+ uint64 ffactor; /* fill factor */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
HashValueFunc hash; /* hash function */
@@ -123,7 +123,7 @@ typedef struct
* string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_BLOBS or
* not. Use HASH_FUNCTION only when you want something other than those.
*/
-extern HTAB *hash_create(const char *tabname, long nelem,
+extern HTAB *hash_create(const char *tabname, uint64 nelem,
HASHCTL *info, int flags);
extern void hash_destroy(HTAB *hashp);
extern void hash_stats(const char *where, HTAB *hashp);
@@ -135,13 +135,13 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
bool *foundPtr);
extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry,
const void *newKeyPtr);
-extern long hash_get_num_entries(HTAB *hashp);
+extern uint64 hash_get_num_entries(HTAB *hashp);
extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
extern void *hash_seq_search(HASH_SEQ_STATUS *status);
extern void hash_seq_term(HASH_SEQ_STATUS *status);
extern void hash_freeze(HTAB *hashp);
-extern Size hash_estimate_size(long num_entries, Size entrysize);
-extern long hash_select_dirsize(long num_entries);
+extern Size hash_estimate_size(uint64 num_entries, Size entrysize);
+extern uint64 hash_select_dirsize(uint64 num_entries);
extern Size hash_get_shared_size(HASHCTL *info, int flags);
extern void AtEOXact_HashTables(bool isCommit);
extern void AtEOSubXact_HashTables(bool isCommit, int nestDepth);
diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h
index 74646846b2..cbcbeb6fd7 100644
--- a/src/include/utils/sampling.h
+++ b/src/include/utils/sampling.h
@@ -19,7 +19,7 @@
/* Random generator for sampling code */
typedef unsigned short SamplerRandomState[3];
-extern void sampler_random_init_state(long seed,
+extern void sampler_random_init_state(uint64 seed,
SamplerRandomState randstate);
extern double sampler_random_fract(SamplerRandomState randstate);
@@ -38,7 +38,7 @@ typedef struct
typedef BlockSamplerData *BlockSampler;
extern BlockNumber BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
- int samplesize, long randseed);
+ int samplesize, uint64 randseed);
extern bool BlockSampler_HasMore(BlockSampler bs);
extern BlockNumber BlockSampler_Next(BlockSampler bs);
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 03a1de569f..78fdf05583 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -71,7 +71,7 @@ extern TimestampTz GetCurrentTimestamp(void);
extern TimestampTz GetSQLCurrentTimestamp(int32 typmod);
extern Timestamp GetSQLLocalTimestamp(int32 typmod);
extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs);
+ int64 *secs, int *microsecs);
extern bool TimestampDifferenceExceeds(TimestampTz start_time,
TimestampTz stop_time,
int msec);
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 5acf604f63..3ce5848247 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -358,7 +358,7 @@ static Datum exec_eval_expr(PLpgSQL_execstate *estate,
Oid *rettype,
int32 *rettypmod);
static int exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP);
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP);
static int exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
Portal portal, bool prefetch_ok);
static ParamListInfo setup_param_list(PLpgSQL_execstate *estate,
@@ -4085,7 +4085,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
PLpgSQL_stmt_execsql *stmt)
{
ParamListInfo paramLI;
- long tcount;
+ int64 tcount;
int rc;
PLpgSQL_expr *expr = stmt->sqlstmt;
int too_many_rows_level = 0;
@@ -4683,7 +4683,7 @@ static int
exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
{
PLpgSQL_var *curvar;
- long how_many = stmt->how_many;
+ int64 how_many = stmt->how_many;
SPITupleTable *tuptab;
Portal portal;
char *curname;
@@ -5831,7 +5831,7 @@ exec_eval_expr(PLpgSQL_execstate *estate,
*/
static int
exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP)
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP)
{
ParamListInfo paramLI;
int rc;
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 69df3306fd..0cea7270cf 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -798,7 +798,7 @@ typedef struct PLpgSQL_stmt_fetch
PLpgSQL_variable *target; /* target (record or row) */
int curvar; /* cursor variable to fetch from */
FetchDirection direction; /* fetch direction */
- long how_many; /* count, if constant (expr is NULL) */
+ int64 how_many; /* count, if constant (expr is NULL) */
PLpgSQL_expr *expr; /* count, if expression */
bool is_move; /* is this a fetch or move? */
bool returns_multiple_rows; /* can return more than one row? */
@@ -1027,7 +1027,7 @@ typedef struct PLpgSQL_function
/* these fields change when the function is used */
struct PLpgSQL_execstate *cur_estate;
- unsigned long use_count;
+ uint64 use_count;
} PLpgSQL_function;
/*
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index 99c1b4f28f..91079f4c99 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -25,7 +25,7 @@
#include "utils/memutils.h"
#include "utils/syscache.h"
-static PyObject *PLy_spi_execute_query(char *query, long limit);
+static PyObject *PLy_spi_execute_query(char *query, uint64 limit);
static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
@@ -158,7 +158,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
char *query;
PyObject *plan;
PyObject *list = NULL;
- long limit = 0;
+ uint64 limit = 0;
- if (PyArg_ParseTuple(args, "s|l", &query, &limit))
+ if (PyArg_ParseTuple(args, "s|K", &query, &limit))
return PLy_spi_execute_query(query, limit);
@@ -174,7 +174,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
}
PyObject *
-PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
+PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit)
{
volatile int nargs;
int i,
@@ -305,7 +305,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
}
static PyObject *
-PLy_spi_execute_query(char *query, long limit)
+PLy_spi_execute_query(char *query, uint64 limit)
{
int rv;
volatile MemoryContext oldcontext;
diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h
index a5e2e60da7..66e8a43aae 100644
--- a/src/pl/plpython/plpy_spi.h
+++ b/src/pl/plpython/plpy_spi.h
@@ -10,7 +10,7 @@
extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args);
extern PyObject *PLy_spi_execute(PyObject *self, PyObject *args);
-extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
+extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit);
typedef struct PLyExceptionEntry
{
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 333f27300a..926c677c41 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -1623,10 +1623,10 @@ increment_overflow_time(pg_time_t *tp, int32 j)
*/
int
pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz)
{
@@ -1771,7 +1771,7 @@ pg_next_dst_boundary(const pg_time_t *timep,
bool
pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz)
{
@@ -1863,7 +1863,7 @@ pg_interpret_timezone_abbrev(const char *abbrev,
* into *gmtoff and return true, else return false.
*/
bool
-pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
+pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
--------------2.24.1--
v5-0002-Spread-bitutils-into-hashing.patchtext/x-diff; charset=us-asciiDownload
From a73cdc21d2d62df32bc087b2b4620e7c7f3f5b24 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Fri, 31 Jan 2020 07:08:48 -0800
Subject: [PATCH v5 2/3] Spread bitutils into hashing
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..29dca21be2 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -502,7 +503,6 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -543,14 +543,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ metap->hashm_bmsize = 1 << pg_leftmost_one_pos32(metap->hashm_bsize);
+ metap->hashm_bmshift = pg_leftmost_one_pos32(metap->hashm_bsize) + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +564,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +651,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 6e98b61b2f..deef317520 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -86,6 +86,7 @@
#include <limits.h>
#include "access/xact.h"
+#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
@@ -1718,16 +1719,11 @@ hash_corrupted(HTAB *hashp)
int
my_log2(uint64 num)
{
- int i;
- uint64 limit;
-
/* guard against too-large input, which would put us into infinite loop */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
+ return ceil_log2_64(num);
}
/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 9fc0696096..298c05e6fe 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..88a9ea5b7f 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -20,19 +20,18 @@ extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
/*
* pg_leftmost_one_pos32
* Returns the position of the most significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_leftmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
- return 31 - __builtin_clz(word);
+ return word ? 31 - __builtin_clz(word) : 0;
#else
int shift = 32 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -49,19 +48,18 @@ static inline int
pg_leftmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return 63 - __builtin_clzl(word);
+ return word ? 63 - __builtin_clzl(word) : 0;
#elif defined(HAVE_LONG_LONG_INT_64)
- return 63 - __builtin_clzll(word);
+ return word ? 63 - __builtin_clzll(word) : 0;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CLZ */
int shift = 64 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -73,19 +71,18 @@ pg_leftmost_one_pos64(uint64 word)
/*
* pg_rightmost_one_pos32
* Returns the position of the least significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_rightmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
- return __builtin_ctz(word);
+ return word ? __builtin_ctz(word) : 32;
#else
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 32;
while ((word & 255) == 0)
{
@@ -105,19 +102,18 @@ static inline int
pg_rightmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return __builtin_ctzl(word);
+ return word ? __builtin_ctzl(word) : 64;
#elif defined(HAVE_LONG_LONG_INT_64)
- return __builtin_ctzll(word);
+ return word ? __builtin_ctzll(word) : 64;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CTZ */
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 64;
while ((word & 255) == 0)
{
@@ -145,4 +141,34 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ Assert(num > 0);
+ return num <= 1 ? 0 : pg_leftmost_one_pos32(num - 1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ Assert(num > 0);
+ return num <= 1 ? 0 : pg_leftmost_one_pos64(num - 1) + 1;
+}
+
+/* Calculate the first power of 2 >= num */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ Assert(num > 0);
+ return num <= 1 ? 1 : ((uint32) 1) << (pg_leftmost_one_pos32(num - 1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ Assert(num > 0);
+ return num <= 1 ? 1 : ((uint64) 1) << (pg_leftmost_one_pos64(num - 1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
v5-0003-Reduced-operations-in-floor_log2.patchtext/x-diff; charset=us-asciiDownload
From f89be8307d945a636a1bb351372513ee4f4a7872 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Fri, 31 Jan 2020 08:32:26 -0800
Subject: [PATCH v5 3/3] Reduced operations in floor_log2
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index d97e60a3ab..12170cb7f2 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -20,6 +20,7 @@
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
+#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@@ -1089,35 +1090,9 @@ calc_distr(const float *p, int n, int m, float rest)
static int
floor_log2(uint32 n)
{
- int logval = 0;
-
if (n == 0)
return -1;
- if (n >= (1 << 16))
- {
- n >>= 16;
- logval += 16;
- }
- if (n >= (1 << 8))
- {
- n >>= 8;
- logval += 8;
- }
- if (n >= (1 << 4))
- {
- n >>= 4;
- logval += 4;
- }
- if (n >= (1 << 2))
- {
- n >>= 2;
- logval += 2;
- }
- if (n >= (1 << 1))
- {
- logval += 1;
- }
- return logval;
+ return pg_leftmost_one_pos32(n);
}
/*
--------------2.24.1--
On Wed, Feb 26, 2020 at 09:12:24AM +0100, David Fetter wrote:
On Fri, Jan 31, 2020 at 04:59:18PM +0100, David Fetter wrote:
On Wed, Jan 15, 2020 at 03:45:12PM -0800, Jesse Zhang wrote:
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:
1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.
While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions).
Done in the expanded patches attached.
These bit-rotted a little, so I've updated them.
05d8449e73694585b59f8b03aaa087f04cc4679a broke this patch set, so fix.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Attachments:
v6-0001-de-long-ify.patchtext/x-diff; charset=us-asciiDownload
From 2f8778f6133be23f6b7c375a39e9940ecc9fb063 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Wed, 29 Jan 2020 02:09:59 -0800
Subject: [PATCH v6 1/3] de-long-ify
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/gist/gistbuildbuffers.c b/src/backend/access/gist/gistbuildbuffers.c
index 4b562d8d3f..482a569814 100644
--- a/src/backend/access/gist/gistbuildbuffers.c
+++ b/src/backend/access/gist/gistbuildbuffers.c
@@ -34,11 +34,11 @@ static void gistPlaceItupToPage(GISTNodeBufferPage *pageBuffer,
IndexTuple item);
static void gistGetItupFromPage(GISTNodeBufferPage *pageBuffer,
IndexTuple *item);
-static long gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
-static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum);
+static uint64 gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
+static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum);
-static void ReadTempFileBlock(BufFile *file, long blknum, void *ptr);
-static void WriteTempFileBlock(BufFile *file, long blknum, void *ptr);
+static void ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
+static void WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr);
/*
@@ -64,7 +64,7 @@ gistInitBuildBuffers(int pagesPerBuffer, int levelStep, int maxLevel)
/* Initialize free page management. */
gfbb->nFreeBlocks = 0;
gfbb->freeBlocksLen = 32;
- gfbb->freeBlocks = (long *) palloc(gfbb->freeBlocksLen * sizeof(long));
+ gfbb->freeBlocks = (int64 *) palloc(gfbb->freeBlocksLen * sizeof(int64));
/*
* Current memory context will be used for all in-memory data structures
@@ -469,7 +469,7 @@ gistPopItupFromNodeBuffer(GISTBuildBuffers *gfbb, GISTNodeBuffer *nodeBuffer,
/*
* Select a currently unused block for writing to.
*/
-static long
+static uint64
gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
{
/*
@@ -487,7 +487,7 @@ gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb)
* Return a block# to the freelist.
*/
static void
-gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
+gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum)
{
int ndx;
@@ -495,9 +495,9 @@ gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum)
if (gfbb->nFreeBlocks >= gfbb->freeBlocksLen)
{
gfbb->freeBlocksLen *= 2;
- gfbb->freeBlocks = (long *) repalloc(gfbb->freeBlocks,
+ gfbb->freeBlocks = (int64 *) repalloc(gfbb->freeBlocks,
gfbb->freeBlocksLen *
- sizeof(long));
+ sizeof(int64));
}
/* Add blocknum to array */
@@ -755,7 +755,7 @@ gistRelocateBuildBuffersOnSplit(GISTBuildBuffers *gfbb, GISTSTATE *giststate,
*/
static void
-ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
+ReadTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
@@ -764,7 +764,7 @@ ReadTempFileBlock(BufFile *file, long blknum, void *ptr)
}
static void
-WriteTempFileBlock(BufFile *file, long blknum, void *ptr)
+WriteTempFileBlock(BufFile *file, uint64 blknum, void *ptr)
{
if (BufFileSeekBlock(file, blknum) != 0)
elog(ERROR, "could not seek temporary file: %m");
diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c
index c46764bf42..4fc478640a 100644
--- a/src/backend/executor/spi.c
+++ b/src/backend/executor/spi.c
@@ -70,7 +70,7 @@ static int _SPI_pquery(QueryDesc *queryDesc, bool fire_triggers, uint64 tcount);
static void _SPI_error_callback(void *arg);
static void _SPI_cursor_operation(Portal portal,
- FetchDirection direction, long count,
+ FetchDirection direction, int64 count,
DestReceiver *dest);
static SPIPlanPtr _SPI_make_plan_non_temp(SPIPlanPtr plan);
@@ -493,7 +493,7 @@ SPI_inside_nonatomic_context(void)
/* Parse, plan, and execute a query string */
int
-SPI_execute(const char *src, bool read_only, long tcount)
+SPI_execute(const char *src, bool read_only, int64 tcount)
{
_SPI_plan plan;
int res;
@@ -521,7 +521,7 @@ SPI_execute(const char *src, bool read_only, long tcount)
/* Obsolete version of SPI_execute */
int
-SPI_exec(const char *src, long tcount)
+SPI_exec(const char *src, int64 tcount)
{
return SPI_execute(src, false, tcount);
}
@@ -529,7 +529,7 @@ SPI_exec(const char *src, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -555,7 +555,7 @@ SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
/* Obsolete version of SPI_execute_plan */
int
-SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
+SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, int64 tcount)
{
return SPI_execute_plan(plan, Values, Nulls, false, tcount);
}
@@ -563,7 +563,7 @@ SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
@@ -599,7 +599,7 @@ int
SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot, Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount)
+ bool read_only, bool fire_triggers, int64 tcount)
{
int res;
@@ -633,7 +633,7 @@ int
SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount)
+ bool read_only, int64 tcount)
{
int res;
_SPI_plan plan;
@@ -1530,7 +1530,7 @@ SPI_cursor_find(const char *name)
* Fetch rows in a cursor
*/
void
-SPI_cursor_fetch(Portal portal, bool forward, long count)
+SPI_cursor_fetch(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1545,7 +1545,7 @@ SPI_cursor_fetch(Portal portal, bool forward, long count)
* Move in a cursor
*/
void
-SPI_cursor_move(Portal portal, bool forward, long count)
+SPI_cursor_move(Portal portal, bool forward, int64 count)
{
_SPI_cursor_operation(portal,
forward ? FETCH_FORWARD : FETCH_BACKWARD, count,
@@ -1559,7 +1559,7 @@ SPI_cursor_move(Portal portal, bool forward, long count)
* Fetch rows in a scrollable cursor
*/
void
-SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal,
direction, count,
@@ -1574,7 +1574,7 @@ SPI_scroll_cursor_fetch(Portal portal, FetchDirection direction, long count)
* Move in a scrollable cursor
*/
void
-SPI_scroll_cursor_move(Portal portal, FetchDirection direction, long count)
+SPI_scroll_cursor_move(Portal portal, FetchDirection direction, int64 count)
{
_SPI_cursor_operation(portal, direction, count, None_Receiver);
}
@@ -2567,7 +2567,7 @@ _SPI_error_callback(void *arg)
* Do a FETCH or MOVE in a cursor
*/
static void
-_SPI_cursor_operation(Portal portal, FetchDirection direction, long count,
+_SPI_cursor_operation(Portal portal, FetchDirection direction, int64 count,
DestReceiver *dest)
{
uint64 nfetched;
diff --git a/src/backend/storage/file/buffile.c b/src/backend/storage/file/buffile.c
index 35e8f12e62..db73a9f159 100644
--- a/src/backend/storage/file/buffile.c
+++ b/src/backend/storage/file/buffile.c
@@ -749,7 +749,7 @@ BufFileTell(BufFile *file, int *fileno, off_t *offset)
* impossible seek is attempted.
*/
int
-BufFileSeekBlock(BufFile *file, long blknum)
+BufFileSeekBlock(BufFile *file, uint64 blknum)
{
return BufFileSeek(file,
(int) (blknum / BUFFILE_SEG_SIZE),
@@ -760,13 +760,11 @@ BufFileSeekBlock(BufFile *file, long blknum)
#ifdef NOT_USED
/*
* BufFileTellBlock --- block-oriented tell
- *
- * Any fractional part of a block in the current seek position is ignored.
*/
-long
+uint64
BufFileTellBlock(BufFile *file)
{
- long blknum;
+ uint64 blknum;
blknum = (file->curOffset + file->pos) / BLCKSZ;
blknum += file->curFile * BUFFILE_SEG_SIZE;
@@ -820,10 +818,10 @@ BufFileSize(BufFile *file)
* begins. Caller should apply this as an offset when working off block
* positions that are in terms of the original BufFile space.
*/
-long
+uint64
BufFileAppend(BufFile *target, BufFile *source)
{
- long startBlock = target->numFiles * BUFFILE_SEG_SIZE;
+ uint64 startBlock = target->numFiles * BUFFILE_SEG_SIZE;
int newNumFiles = target->numFiles + source->numFiles;
int i;
diff --git a/src/backend/storage/ipc/latch.c b/src/backend/storage/ipc/latch.c
index 046ca5c6c7..cb00c67303 100644
--- a/src/backend/storage/ipc/latch.c
+++ b/src/backend/storage/ipc/latch.c
@@ -349,7 +349,7 @@ DisownLatch(Latch *latch)
* function returns immediately.
*
* The "timeout" is given in milliseconds. It must be >= 0 if WL_TIMEOUT flag
- * is given. Although it is declared as "long", we don't actually support
+ * is given. Although it is declared as "int64", we don't actually support
* timeouts longer than INT_MAX milliseconds. Note that some extra overhead
* is incurred when WL_TIMEOUT is given, so avoid using a timeout if possible.
*
@@ -362,7 +362,7 @@ DisownLatch(Latch *latch)
* we return all of them in one call, but we will return at least one.
*/
int
-WaitLatch(Latch *latch, int wakeEvents, long timeout,
+WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info)
{
return WaitLatchOrSocket(latch, wakeEvents, PGINVALID_SOCKET, timeout,
@@ -388,7 +388,7 @@ WaitLatch(Latch *latch, int wakeEvents, long timeout,
*/
int
WaitLatchOrSocket(Latch *latch, int wakeEvents, pgsocket sock,
- long timeout, uint32 wait_event_info)
+ int64 timeout, uint32 wait_event_info)
{
int ret = 0;
int rc;
@@ -1155,14 +1155,14 @@ WaitEventAdjustWin32(WaitEventSet *set, WaitEvent *event)
* values associated with the registered event.
*/
int
-WaitEventSetWait(WaitEventSet *set, long timeout,
+WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info)
{
int returned_events = 0;
instr_time start_time;
instr_time cur_time;
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
Assert(nevents > 0);
@@ -1247,7 +1247,7 @@ WaitEventSetWait(WaitEventSet *set, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - INSTR_TIME_GET_MILLISEC(cur_time);
if (cur_timeout <= 0)
break;
}
diff --git a/src/backend/storage/ipc/shmem.c b/src/backend/storage/ipc/shmem.c
index 2892a573e4..582c63a5e6 100644
--- a/src/backend/storage/ipc/shmem.c
+++ b/src/backend/storage/ipc/shmem.c
@@ -335,8 +335,8 @@ InitShmemIndex(void)
*/
HTAB *
ShmemInitHash(const char *name, /* table string name for shmem index */
- long init_size, /* initial table size */
- long max_size, /* max size of the table */
+ uint64 init_size, /* initial table size */
+ uint64 max_size, /* max size of the table */
HASHCTL *infoP, /* info about key and bucket size */
int hash_flags) /* info about infoP */
{
diff --git a/src/backend/storage/lmgr/condition_variable.c b/src/backend/storage/lmgr/condition_variable.c
index 37b6a4eecd..43c662aaeb 100644
--- a/src/backend/storage/lmgr/condition_variable.c
+++ b/src/backend/storage/lmgr/condition_variable.c
@@ -129,10 +129,10 @@ ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info)
* See ConditionVariableSleep() for general usage.
*/
bool
-ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info)
{
- long cur_timeout = -1;
+ int64 cur_timeout = -1;
instr_time start_time;
instr_time cur_time;
@@ -217,7 +217,7 @@ ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
{
INSTR_TIME_SET_CURRENT(cur_time);
INSTR_TIME_SUBTRACT(cur_time, start_time);
- cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time);
+ cur_timeout = timeout - (int64) INSTR_TIME_GET_MILLISEC(cur_time);
/* Have we crossed the timeout threshold? */
if (cur_timeout <= 0)
diff --git a/src/backend/tcop/postgres.c b/src/backend/tcop/postgres.c
index 0a6f80963b..9bb9c1fda9 100644
--- a/src/backend/tcop/postgres.c
+++ b/src/backend/tcop/postgres.c
@@ -109,7 +109,7 @@ int PostAuthDelay = 0;
*/
/* max_stack_depth converted to bytes for speed of checking */
-static long max_stack_depth_bytes = 100 * 1024L;
+static uint64 max_stack_depth_bytes = 100 * 1024UL;
/*
* Stack base pointer -- initialized by PostmasterMain and inherited by
@@ -2016,7 +2016,7 @@ exec_bind_message(StringInfo input_message)
* Process an "Execute" message for a portal
*/
static void
-exec_execute_message(const char *portal_name, long max_rows)
+exec_execute_message(const char *portal_name, uint64 max_rows)
{
CommandDest dest;
DestReceiver *receiver;
@@ -2302,7 +2302,7 @@ check_log_duration(char *msec_str, bool was_logged)
if (log_duration || log_min_duration_sample >= 0 ||
log_min_duration_statement >= 0 || xact_is_sampled)
{
- long secs;
+ int64 secs;
int usecs;
int msecs;
bool exceeded_duration;
@@ -3302,12 +3302,12 @@ bool
stack_is_too_deep(void)
{
char stack_top_loc;
- long stack_depth;
+ int64 stack_depth;
/*
* Compute distance from reference point to my local variables
*/
- stack_depth = (long) (stack_base_ptr - &stack_top_loc);
+ stack_depth = (int64) (stack_base_ptr - &stack_top_loc);
/*
* Take abs value, since stacks grow up on some machines, down on others
@@ -3336,7 +3336,7 @@ stack_is_too_deep(void)
* Note we assume that the same max_stack_depth applies to both stacks.
*/
#if defined(__ia64__) || defined(__ia64)
- stack_depth = (long) (ia64_get_bsp() - register_stack_base_ptr);
+ stack_depth = (int64) (ia64_get_bsp() - register_stack_base_ptr);
if (stack_depth > max_stack_depth_bytes &&
register_stack_base_ptr != NULL)
@@ -3350,8 +3350,8 @@ stack_is_too_deep(void)
bool
check_max_stack_depth(int *newval, void **extra, GucSource source)
{
- long newval_bytes = *newval * 1024L;
- long stack_rlimit = get_stack_depth_rlimit();
+ uint64 newval_bytes = *newval * 1024UL;
+ uint64 stack_rlimit = get_stack_depth_rlimit();
if (stack_rlimit > 0 && newval_bytes > stack_rlimit - STACK_DEPTH_SLOP)
{
@@ -3367,7 +3367,7 @@ check_max_stack_depth(int *newval, void **extra, GucSource source)
void
assign_max_stack_depth(int newval, void *extra)
{
- long newval_bytes = newval * 1024L;
+ uint64 newval_bytes = newval * 1024UL;
max_stack_depth_bytes = newval_bytes;
}
@@ -4696,7 +4696,7 @@ static void
log_disconnections(int code, Datum arg)
{
Port *port = MyProcPort;
- long secs;
+ int64 secs;
int usecs;
int msecs;
int hours,
diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c
index 0f5801e046..f519ce7def 100644
--- a/src/backend/tcop/pquery.c
+++ b/src/backend/tcop/pquery.c
@@ -44,7 +44,7 @@ static void ProcessQuery(PlannedStmt *plan,
static void FillPortalStore(Portal portal, bool isTopLevel);
static uint64 RunFromStore(Portal portal, ScanDirection direction, uint64 count,
DestReceiver *dest);
-static uint64 PortalRunSelect(Portal portal, bool forward, long count,
+static uint64 PortalRunSelect(Portal portal, bool forward, int64 count,
DestReceiver *dest);
static void PortalRunUtility(Portal portal, PlannedStmt *pstmt,
bool isTopLevel, bool setHoldSnapshot,
@@ -55,7 +55,7 @@ static void PortalRunMulti(Portal portal,
char *completionTag);
static uint64 DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
static void DoPortalRewind(Portal portal);
@@ -683,7 +683,7 @@ PortalSetResultFormat(Portal portal, int nFormats, int16 *formats)
* suspended due to exhaustion of the count parameter.
*/
bool
-PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
+PortalRun(Portal portal, int64 count, bool isTopLevel, bool run_once,
DestReceiver *dest, DestReceiver *altdest,
char *completionTag)
{
@@ -871,7 +871,7 @@ PortalRun(Portal portal, long count, bool isTopLevel, bool run_once,
static uint64
PortalRunSelect(Portal portal,
bool forward,
- long count,
+ int64 count,
DestReceiver *dest)
{
QueryDesc *queryDesc;
@@ -1391,7 +1391,7 @@ PortalRunMulti(Portal portal,
uint64
PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
uint64 result;
@@ -1493,7 +1493,7 @@ PortalRunFetch(Portal portal,
static uint64
DoPortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest)
{
bool forward;
@@ -1531,7 +1531,7 @@ DoPortalRunFetch(Portal portal,
* In practice, if the goal is less than halfway back to the
* start, it's better to scan from where we are.
*
- * Also, if current portalPos is outside the range of "long",
+ * Also, if current portalPos is outside the range of "int64",
* do it the hard way to avoid possible overflow of the count
* argument to PortalRunSelect. We must exclude exactly
* LONG_MAX, as well, lest the count look like FETCH_ALL.
@@ -1549,7 +1549,7 @@ DoPortalRunFetch(Portal portal,
}
else
{
- long pos = (long) portal->portalPos;
+ int64 pos = (int64) portal->portalPos;
if (portal->atEnd)
pos++; /* need one extra fetch if off end */
diff --git a/src/backend/utils/adt/timestamp.c b/src/backend/utils/adt/timestamp.c
index 0b6c9d5ea8..ddd471c059 100644
--- a/src/backend/utils/adt/timestamp.c
+++ b/src/backend/utils/adt/timestamp.c
@@ -1655,7 +1655,7 @@ timeofday(PG_FUNCTION_ARGS)
*/
void
TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs)
+ int64 *secs, int *microsecs)
{
TimestampTz diff = stop_time - start_time;
@@ -1666,7 +1666,7 @@ TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
}
else
{
- *secs = (long) (diff / USECS_PER_SEC);
+ *secs = (int64) (diff / USECS_PER_SEC);
*microsecs = (int) (diff % USECS_PER_SEC);
}
}
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index b5381958e7..7769c79861 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -142,7 +142,7 @@ typedef HASHBUCKET *HASHSEGMENT;
typedef struct
{
slock_t mutex; /* spinlock for this freelist */
- long nentries; /* number of entries in associated buckets */
+ uint64 nentries; /* number of entries in associated buckets */
HASHELEMENT *freeList; /* chain of free elements */
} FreeListData;
@@ -170,8 +170,8 @@ struct HASHHDR
/* These fields can change, but not in a partitioned table */
/* Also, dsize can't change in a shared table, even if unpartitioned */
- long dsize; /* directory size */
- long nsegs; /* number of allocated segments (<= dsize) */
+ uint64 dsize; /* directory size */
+ uint64 nsegs; /* number of allocated segments (<= dsize) */
uint32 max_bucket; /* ID of maximum bucket in use */
uint32 high_mask; /* mask to modulo into entire table */
uint32 low_mask; /* mask to modulo into lower half of table */
@@ -179,10 +179,10 @@ struct HASHHDR
/* These fields are fixed at hashtable creation */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
- long num_partitions; /* # partitions (must be power of 2), or 0 */
- long ffactor; /* target fill factor */
- long max_dsize; /* 'dsize' limit if directory is fixed size */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 num_partitions; /* # partitions (must be power of 2), or 0 */
+ uint64 ffactor; /* target fill factor */
+ uint64 max_dsize; /* 'dsize' limit if directory is fixed size */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
int nelem_alloc; /* number of entries to allocate at once */
@@ -192,8 +192,8 @@ struct HASHHDR
* Count statistics here. NB: stats code doesn't bother with mutex, so
* counts could be corrupted a bit in a partitioned table.
*/
- long accesses;
- long collisions;
+ uint64 accesses;
+ uint64 collisions;
#endif
};
@@ -224,7 +224,7 @@ struct HTAB
/* We keep local copies of these fixed values to reduce contention */
Size keysize; /* hash key length in bytes */
- long ssize; /* segment size --- must be power of 2 */
+ uint64 ssize; /* segment size --- must be power of 2 */
int sshift; /* segment shift = log2(ssize) */
};
@@ -245,9 +245,9 @@ struct HTAB
#define MOD(x,y) ((x) & ((y)-1))
#ifdef HASH_STATISTICS
-static long hash_accesses,
- hash_collisions,
- hash_expansions;
+static uint64 hash_accesses,
+ hash_collisions,
+ hash_expansions;
#endif
/*
@@ -261,10 +261,10 @@ static bool expand_table(HTAB *hashp);
static HASHBUCKET get_hash_entry(HTAB *hashp, int freelist_idx);
static void hdefault(HTAB *hashp);
static int choose_nelem_alloc(Size entrysize);
-static bool init_htab(HTAB *hashp, long nelem);
+static bool init_htab(HTAB *hashp, uint64 nelem);
static void hash_corrupted(HTAB *hashp);
-static long next_pow2_long(long num);
-static int next_pow2_int(long num);
+static uint64 next_pow2_uint64(uint64 num);
+static int next_pow2_int(uint64 num);
static void register_seq_scan(HTAB *hashp);
static void deregister_seq_scan(HTAB *hashp);
static bool has_seq_scans(HTAB *hashp);
@@ -314,7 +314,7 @@ string_compare(const char *key1, const char *key2, Size keysize)
* large nelem will penalize hash_seq_search speed without buying much.
*/
HTAB *
-hash_create(const char *tabname, long nelem, HASHCTL *info, int flags)
+hash_create(const char *tabname, uint64 nelem, HASHCTL *info, int flags)
{
HTAB *hashp;
HASHHDR *hctl;
@@ -634,7 +634,7 @@ choose_nelem_alloc(Size entrysize)
* arrays
*/
static bool
-init_htab(HTAB *hashp, long nelem)
+init_htab(HTAB *hashp, uint64 nelem)
{
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT *segp;
@@ -730,10 +730,10 @@ init_htab(HTAB *hashp, long nelem)
* NB: assumes that all hash structure parameters have default values!
*/
Size
-hash_estimate_size(long num_entries, Size entrysize)
+hash_estimate_size(uint64 num_entries, Size entrysize)
{
Size size;
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries,
nElementAllocs,
@@ -741,9 +741,9 @@ hash_estimate_size(long num_entries, Size entrysize)
elementAllocCnt;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -776,17 +776,17 @@ hash_estimate_size(long num_entries, Size entrysize)
*
* XXX this had better agree with the behavior of init_htab()...
*/
-long
-hash_select_dirsize(long num_entries)
+uint64
+hash_select_dirsize(uint64 num_entries)
{
- long nBuckets,
+ uint64 nBuckets,
nSegments,
nDirEntries;
/* estimate number of buckets wanted */
- nBuckets = next_pow2_long((num_entries - 1) / DEF_FFACTOR + 1);
+ nBuckets = next_pow2_uint64((num_entries - 1) / DEF_FFACTOR + 1);
/* # of segments needed for nBuckets */
- nSegments = next_pow2_long((nBuckets - 1) / DEF_SEGSIZE + 1);
+ nSegments = next_pow2_uint64((nBuckets - 1) / DEF_SEGSIZE + 1);
/* directory entries */
nDirEntries = DEF_DIRSIZE;
while (nDirEntries < nSegments)
@@ -837,8 +837,8 @@ hash_stats(const char *where, HTAB *hashp)
-	fprintf(stderr, "%s: this HTAB -- accesses %ld collisions %ld\n",
+	fprintf(stderr, "%s: this HTAB -- accesses " UINT64_FORMAT " collisions " UINT64_FORMAT "\n",
where, hashp->hctl->accesses, hashp->hctl->collisions);
- fprintf(stderr, "hash_stats: entries %ld keysize %ld maxp %u segmentcount %ld\n",
- hash_get_num_entries(hashp), (long) hashp->hctl->keysize,
+	fprintf(stderr, "hash_stats: entries " UINT64_FORMAT " keysize %zu maxp %u segmentcount " UINT64_FORMAT "\n",
+			hash_get_num_entries(hashp), hashp->hctl->keysize,
hashp->hctl->max_bucket, hashp->hctl->nsegs);
-	fprintf(stderr, "%s: total accesses %ld total collisions %ld\n",
+	fprintf(stderr, "%s: total accesses " UINT64_FORMAT " total collisions " UINT64_FORMAT "\n",
where, hash_accesses, hash_collisions);
@@ -927,8 +927,8 @@ hash_search_with_hash_value(HTAB *hashp,
int freelist_idx = FREELIST_IDX(hctl, hashvalue);
Size keysize;
uint32 bucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -955,7 +955,7 @@ hash_search_with_hash_value(HTAB *hashp,
* order of these tests is to try to check cheaper conditions first.
*/
if (!IS_PARTITIONED(hctl) && !hashp->frozen &&
- hctl->freeList[0].nentries / (long) (hctl->max_bucket + 1) >= hctl->ffactor &&
+ hctl->freeList[0].nentries / (uint64) (hctl->max_bucket + 1) >= hctl->ffactor &&
!has_seq_scans(hashp))
(void) expand_table(hashp);
}
@@ -1123,8 +1123,8 @@ hash_update_hash_key(HTAB *hashp,
Size keysize;
uint32 bucket;
uint32 newbucket;
- long segment_num;
- long segment_ndx;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
HASHBUCKET currBucket;
HASHBUCKET *prevBucketPtr;
@@ -1332,11 +1332,11 @@ get_hash_entry(HTAB *hashp, int freelist_idx)
/*
* hash_get_num_entries -- get the number of entries in a hashtable
*/
-long
+uint64
hash_get_num_entries(HTAB *hashp)
{
int i;
- long sum = hashp->hctl->freeList[0].nentries;
+ uint64 sum = hashp->hctl->freeList[0].nentries;
/*
* We currently don't bother with acquiring the mutexes; it's only
@@ -1392,9 +1392,9 @@ hash_seq_search(HASH_SEQ_STATUS *status)
HTAB *hashp;
HASHHDR *hctl;
uint32 max_bucket;
- long ssize;
- long segment_num;
- long segment_ndx;
+ uint64 ssize;
+ uint64 segment_num;
+ uint64 segment_ndx;
HASHSEGMENT segp;
uint32 curBucket;
HASHELEMENT *curElem;
@@ -1505,11 +1505,11 @@ expand_table(HTAB *hashp)
HASHHDR *hctl = hashp->hctl;
HASHSEGMENT old_seg,
new_seg;
- long old_bucket,
+ uint64 old_bucket,
new_bucket;
- long new_segnum,
+ uint64 new_segnum,
new_segndx;
- long old_segnum,
+ uint64 old_segnum,
old_segndx;
HASHBUCKET *oldlink,
*newlink;
@@ -1577,7 +1577,7 @@ expand_table(HTAB *hashp)
currElement = nextElement)
{
nextElement = currElement->link;
- if ((long) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
+ if ((uint64) calc_bucket(hctl, currElement->hashvalue) == old_bucket)
{
*oldlink = currElement;
oldlink = &currElement->link;
@@ -1601,9 +1601,9 @@ dir_realloc(HTAB *hashp)
{
HASHSEGMENT *p;
HASHSEGMENT *old_p;
- long new_dsize;
- long old_dirsize;
- long new_dirsize;
+ uint64 new_dsize;
+ uint64 old_dirsize;
+ uint64 new_dirsize;
if (hashp->hctl->max_dsize != NO_MAX_DSIZE)
return false;
@@ -1716,10 +1716,10 @@ hash_corrupted(HTAB *hashp)
/* calculate ceil(log base 2) of num */
int
-my_log2(long num)
+my_log2(uint64 num)
{
int i;
- long limit;
+ uint64 limit;
/* guard against too-large input, which would put us into infinite loop */
-	if (num > LONG_MAX / 2)
+	if (num > PG_UINT64_MAX / 2)
@@ -1730,9 +1730,9 @@ my_log2(long num)
return i;
}
-/* calculate first power of 2 >= num, bounded to what will fit in a long */
-static long
-next_pow2_long(long num)
+/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
+static uint64
+next_pow2_uint64(uint64 num)
{
/* my_log2's internal range check is sufficient */
-	return 1L << my_log2(num);
+	return UINT64CONST(1) << my_log2(num);
@@ -1740,7 +1740,7 @@ next_pow2_long(long num)
/* calculate first power of 2 >= num, bounded to what will fit in an int */
static int
-next_pow2_int(long num)
+next_pow2_int(uint64 num)
{
if (num > INT_MAX / 2)
num = INT_MAX / 2;
diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c
index 361c15614e..05d2fd581c 100644
--- a/src/backend/utils/misc/sampling.c
+++ b/src/backend/utils/misc/sampling.c
@@ -37,7 +37,7 @@
*/
BlockNumber
BlockSampler_Init(BlockSampler bs, BlockNumber nblocks, int samplesize,
- long randseed)
+ uint64 randseed)
{
bs->N = nblocks; /* measured table size */
@@ -230,7 +230,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n)
*----------
*/
void
-sampler_random_init_state(long seed, SamplerRandomState randstate)
+sampler_random_init_state(uint64 seed, SamplerRandomState randstate)
{
randstate[0] = 0x330e; /* same as pg_erand48, but could be anything */
randstate[1] = (unsigned short) seed;
diff --git a/src/backend/utils/sort/logtape.c b/src/backend/utils/sort/logtape.c
index 4f78b55fba..01396ca230 100644
--- a/src/backend/utils/sort/logtape.c
+++ b/src/backend/utils/sort/logtape.c
@@ -93,9 +93,9 @@
*/
typedef struct TapeBlockTrailer
{
- long prev; /* previous block on this tape, or -1 on first
+ int64 prev; /* previous block on this tape, or -1 on first
* block */
- long next; /* next block on this tape, or # of valid
+ int64 next; /* next block on this tape, or # of valid
* bytes on last block (if < 0) */
} TapeBlockTrailer;
@@ -138,10 +138,10 @@ typedef struct LogicalTape
* When concatenation of worker tape BufFiles is performed, an offset to
* the first block in the unified BufFile space is applied during reads.
*/
- long firstBlockNumber;
- long curBlockNumber;
- long nextBlockNumber;
- long offsetBlockNumber;
+ int64 firstBlockNumber;
+ int64 curBlockNumber;
+ int64 nextBlockNumber;
+ int64 offsetBlockNumber;
/*
* Buffer for current data block(s).
@@ -173,9 +173,9 @@ struct LogicalTapeSet
* blocks that are in unused holes between worker spaces following BufFile
* concatenation.
*/
- long nBlocksAllocated; /* # of blocks allocated */
- long nBlocksWritten; /* # of blocks used in underlying file */
- long nHoleBlocks; /* # of "hole" blocks left */
+ uint64 nBlocksAllocated; /* # of blocks allocated */
+ uint64 nBlocksWritten; /* # of blocks used in underlying file */
+ uint64 nHoleBlocks; /* # of "hole" blocks left */
/*
* We store the numbers of recycled-and-available blocks in freeBlocks[].
@@ -186,19 +186,19 @@ struct LogicalTapeSet
* LogicalTapeSetForgetFreeSpace().
*/
bool forgetFreeSpace; /* are we remembering free blocks? */
- long *freeBlocks; /* resizable array holding minheap */
- long nFreeBlocks; /* # of currently free blocks */
- Size freeBlocksLen; /* current allocated length of freeBlocks[] */
+ uint64 *freeBlocks; /* resizable array */
+ int nFreeBlocks; /* # of currently free blocks */
+ int freeBlocksLen; /* current allocated length of freeBlocks[] */
/* The array of logical tapes. */
int nTapes; /* # of logical tapes in set */
LogicalTape tapes[FLEXIBLE_ARRAY_MEMBER]; /* has nTapes nentries */
};
-static void ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static void ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer);
-static long ltsGetFreeBlock(LogicalTapeSet *lts);
-static void ltsReleaseBlock(LogicalTapeSet *lts, long blocknum);
+static void ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static void ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer);
+static int64 ltsGetFreeBlock(LogicalTapeSet *lts);
+static void ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum);
static void ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset);
static void ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt);
@@ -210,7 +210,7 @@ static void ltsInitReadBuffer(LogicalTapeSet *lts, LogicalTape *lt);
* No need for an error return convention; we ereport() on any error.
*/
static void
-ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsWriteBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
/*
* BufFile does not support "holes", so if we're about to write a block
@@ -258,7 +258,7 @@ ltsWriteBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
* module should never attempt to read a block it doesn't know is there.
*/
static void
-ltsReadBlock(LogicalTapeSet *lts, long blocknum, void *buffer)
+ltsReadBlock(LogicalTapeSet *lts, int64 blocknum, void *buffer)
{
if (BufFileSeekBlock(lts->pfile, blocknum) != 0 ||
BufFileRead(lts->pfile, buffer, BLCKSZ) != BLCKSZ)
@@ -282,7 +282,7 @@ ltsReadFillBuffer(LogicalTapeSet *lts, LogicalTape *lt)
do
{
char *thisbuf = lt->buffer + lt->nbytes;
- long datablocknum = lt->nextBlockNumber;
+ int64 datablocknum = lt->nextBlockNumber;
/* Fetch next block number */
if (datablocknum == -1L)
@@ -344,7 +344,7 @@ parent_offset(unsigned long i)
* Select the lowest currently unused block by taking the first element from
* the freelist min heap.
*/
-static long
+static int64
ltsGetFreeBlock(LogicalTapeSet *lts)
{
-	long	   *heap = lts->freeBlocks;
+	uint64	   *heap = lts->freeBlocks;
@@ -400,7 +400,7 @@ ltsGetFreeBlock(LogicalTapeSet *lts)
* Return a block# to the freelist.
*/
static void
-ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
+ltsReleaseBlock(LogicalTapeSet *lts, int64 blocknum)
{
-	long	   *heap;
+	uint64	   *heap;
unsigned long pos;
@@ -424,8 +424,8 @@ ltsReleaseBlock(LogicalTapeSet *lts, long blocknum)
return;
lts->freeBlocksLen *= 2;
- lts->freeBlocks = (long *) repalloc(lts->freeBlocks,
- lts->freeBlocksLen * sizeof(long));
+		lts->freeBlocks = (uint64 *) repalloc(lts->freeBlocks,
+											  lts->freeBlocksLen * sizeof(uint64));
}
heap = lts->freeBlocks;
@@ -460,8 +460,8 @@ ltsConcatWorkerTapes(LogicalTapeSet *lts, TapeShare *shared,
SharedFileSet *fileset)
{
LogicalTape *lt = NULL;
- long tapeblocks = 0L;
- long nphysicalblocks = 0L;
+ int64 tapeblocks = 0L;
+ int64 nphysicalblocks = 0L;
int i;
/* Should have at least one worker tape, plus leader's tape */
@@ -593,7 +593,7 @@ LogicalTapeSetCreate(int ntapes, TapeShare *shared, SharedFileSet *fileset,
lts->nHoleBlocks = 0L;
lts->forgetFreeSpace = false;
lts->freeBlocksLen = 32; /* reasonable initial guess */
- lts->freeBlocks = (long *) palloc(lts->freeBlocksLen * sizeof(long));
+ lts->freeBlocks = (uint64 *) palloc(lts->freeBlocksLen * sizeof(uint64));
lts->nFreeBlocks = 0;
lts->nTapes = ntapes;
@@ -719,7 +719,7 @@ LogicalTapeWrite(LogicalTapeSet *lts, int tapenum,
if (lt->pos >= TapeBlockPayloadSize)
{
/* Buffer full, dump it out */
- long nextBlockNumber;
+ int64 nextBlockNumber;
if (!lt->dirty)
{
@@ -1048,7 +1048,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
seekpos = (size_t) lt->pos; /* part within this block */
while (size > seekpos)
{
- long prev = TapeBlockGetTrailer(lt->buffer)->prev;
+ int64 prev = TapeBlockGetTrailer(lt->buffer)->prev;
if (prev == -1L)
{
@@ -1093,7 +1093,7 @@ LogicalTapeBackspace(LogicalTapeSet *lts, int tapenum, size_t size)
*/
void
LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
- long blocknum, int offset)
+ int64 blocknum, int offset)
{
LogicalTape *lt;
@@ -1127,7 +1127,7 @@ LogicalTapeSeek(LogicalTapeSet *lts, int tapenum,
*/
void
LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
- long *blocknum, int *offset)
+ int64 *blocknum, int *offset)
{
LogicalTape *lt;
@@ -1149,7 +1149,7 @@ LogicalTapeTell(LogicalTapeSet *lts, int tapenum,
/*
* Obtain total disk space currently used by a LogicalTapeSet, in blocks.
*/
-long
+int64
LogicalTapeSetBlocks(LogicalTapeSet *lts)
{
return lts->nBlocksAllocated - lts->nHoleBlocks;
diff --git a/src/bin/pg_basebackup/receivelog.c b/src/bin/pg_basebackup/receivelog.c
index 62a342f77c..488f12dc6f 100644
--- a/src/bin/pg_basebackup/receivelog.c
+++ b/src/bin/pg_basebackup/receivelog.c
@@ -37,8 +37,8 @@ static bool still_sending = true; /* feedback still needs to be sent? */
static PGresult *HandleCopyStream(PGconn *conn, StreamCtl *stream,
XLogRecPtr *stoppos);
-static int CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket);
-static int CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+static int CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket);
+static int CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer);
static bool ProcessKeepaliveMsg(PGconn *conn, StreamCtl *stream, char *copybuf,
int len, XLogRecPtr blockpos, TimestampTz *last_status);
@@ -48,7 +48,7 @@ static PGresult *HandleEndOfCopyStream(PGconn *conn, StreamCtl *stream, char *co
XLogRecPtr blockpos, XLogRecPtr *stoppos);
static bool CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
XLogRecPtr *stoppos);
-static long CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
+static int64 CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status);
static bool ReadEndOfStreamingResult(PGresult *res, XLogRecPtr *startpos,
@@ -742,7 +742,7 @@ HandleCopyStream(PGconn *conn, StreamCtl *stream,
{
int r;
TimestampTz now;
- long sleeptime;
+ int64 sleeptime;
/*
* Check if we should continue streaming, or abort at this point.
@@ -858,7 +858,7 @@ error:
* or interrupted by signal or stop_socket input, and -1 on an error.
*/
static int
-CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
+CopyStreamPoll(PGconn *conn, int64 timeout_ms, pgsocket stop_socket)
{
int ret;
fd_set input_mask;
@@ -920,7 +920,7 @@ CopyStreamPoll(PGconn *conn, long timeout_ms, pgsocket stop_socket)
* -1 on error. -2 if the server ended the COPY.
*/
static int
-CopyStreamReceive(PGconn *conn, long timeout, pgsocket stop_socket,
+CopyStreamReceive(PGconn *conn, int64 timeout, pgsocket stop_socket,
char **buffer)
{
char *copybuf = NULL;
@@ -1228,12 +1228,12 @@ CheckCopyStreamStop(PGconn *conn, StreamCtl *stream, XLogRecPtr blockpos,
/*
* Calculate how long send/receive loops should sleep
*/
-static long
+static int64
CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
TimestampTz last_status)
{
TimestampTz status_targettime = 0;
- long sleeptime;
+ int64 sleeptime;
if (standby_message_timeout && still_sending)
status_targettime = last_status +
@@ -1241,7 +1241,7 @@ CalculateCopyStreamSleeptime(TimestampTz now, int standby_message_timeout,
if (status_targettime > 0)
{
- long secs;
+ int64 secs;
int usecs;
feTimestampDifference(now,
diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h
index 06de20ada5..e54c968aca 100644
--- a/src/include/executor/spi.h
+++ b/src/include/executor/spi.h
@@ -84,24 +84,24 @@ extern PGDLLIMPORT int SPI_result;
extern int SPI_connect(void);
extern int SPI_connect_ext(int options);
extern int SPI_finish(void);
-extern int SPI_execute(const char *src, bool read_only, long tcount);
+extern int SPI_execute(const char *src, bool read_only, int64 tcount);
extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern int SPI_execute_plan_with_paramlist(SPIPlanPtr plan,
ParamListInfo params,
- bool read_only, long tcount);
-extern int SPI_exec(const char *src, long tcount);
+ bool read_only, int64 tcount);
+extern int SPI_exec(const char *src, int64 tcount);
extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
- long tcount);
+ int64 tcount);
extern int SPI_execute_snapshot(SPIPlanPtr plan,
Datum *Values, const char *Nulls,
Snapshot snapshot,
Snapshot crosscheck_snapshot,
- bool read_only, bool fire_triggers, long tcount);
+ bool read_only, bool fire_triggers, int64 tcount);
extern int SPI_execute_with_args(const char *src,
int nargs, Oid *argtypes,
Datum *Values, const char *Nulls,
- bool read_only, long tcount);
+ bool read_only, int64 tcount);
extern SPIPlanPtr SPI_prepare(const char *src, int nargs, Oid *argtypes);
extern SPIPlanPtr SPI_prepare_cursor(const char *src, int nargs, Oid *argtypes,
int cursorOptions);
@@ -151,10 +151,10 @@ extern Portal SPI_cursor_open_with_args(const char *name,
extern Portal SPI_cursor_open_with_paramlist(const char *name, SPIPlanPtr plan,
ParamListInfo params, bool read_only);
extern Portal SPI_cursor_find(const char *name);
-extern void SPI_cursor_fetch(Portal portal, bool forward, long count);
-extern void SPI_cursor_move(Portal portal, bool forward, long count);
-extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, long count);
-extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, long count);
+extern void SPI_cursor_fetch(Portal portal, bool forward, int64 count);
+extern void SPI_cursor_move(Portal portal, bool forward, int64 count);
+extern void SPI_scroll_cursor_fetch(Portal, FetchDirection direction, int64 count);
+extern void SPI_scroll_cursor_move(Portal, FetchDirection direction, int64 count);
extern void SPI_cursor_close(Portal portal);
extern int SPI_register_relation(EphemeralNamedRelation enr);
diff --git a/src/include/pgtime.h b/src/include/pgtime.h
index 0fc76d0e60..3d2fdedd89 100644
--- a/src/include/pgtime.h
+++ b/src/include/pgtime.h
@@ -33,7 +33,7 @@ struct pg_tm
int tm_wday;
int tm_yday;
int tm_isdst;
- long int tm_gmtoff;
+ int64 tm_gmtoff;
const char *tm_zone;
};
@@ -48,18 +48,18 @@ typedef struct pg_tzenum pg_tzenum;
extern struct pg_tm *pg_localtime(const pg_time_t *timep, const pg_tz *tz);
extern struct pg_tm *pg_gmtime(const pg_time_t *timep);
extern int pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz);
extern bool pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz);
-extern bool pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff);
+extern bool pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff);
extern const char *pg_get_timezone_name(pg_tz *tz);
extern bool pg_tz_acceptable(pg_tz *tz);
@@ -75,7 +75,7 @@ extern pg_tz *log_timezone;
extern void pg_timezone_initialize(void);
extern pg_tz *pg_tzset(const char *tzname);
-extern pg_tz *pg_tzset_offset(long gmtoffset);
+extern pg_tz *pg_tzset_offset(int64 gmtoffset);
extern pg_tzenum *pg_tzenumerate_start(void);
extern pg_tz *pg_tzenumerate_next(pg_tzenum *dir);
diff --git a/src/include/portability/instr_time.h b/src/include/portability/instr_time.h
index d6459327cc..0a51120fc4 100644
--- a/src/include/portability/instr_time.h
+++ b/src/include/portability/instr_time.h
@@ -136,7 +136,7 @@ typedef struct timespec instr_time;
(((double) (t).tv_sec) + ((double) (t).tv_nsec) / 1000000000.0)
#define INSTR_TIME_GET_MILLISEC(t) \
- (((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
+	(((double) (t).tv_sec * 1000.0) + ((double) (t).tv_nsec) / 1000000.0)
#define INSTR_TIME_GET_MICROSEC(t) \
(((uint64) (t).tv_sec * (uint64) 1000000) + (uint64) ((t).tv_nsec / 1000))
diff --git a/src/include/storage/buffile.h b/src/include/storage/buffile.h
index 60433f35b4..7178247396 100644
--- a/src/include/storage/buffile.h
+++ b/src/include/storage/buffile.h
@@ -42,9 +42,9 @@ extern size_t BufFileRead(BufFile *file, void *ptr, size_t size);
extern size_t BufFileWrite(BufFile *file, void *ptr, size_t size);
extern int BufFileSeek(BufFile *file, int fileno, off_t offset, int whence);
extern void BufFileTell(BufFile *file, int *fileno, off_t *offset);
-extern int BufFileSeekBlock(BufFile *file, long blknum);
+extern int BufFileSeekBlock(BufFile *file, uint64 blknum);
extern int64 BufFileSize(BufFile *file);
-extern long BufFileAppend(BufFile *target, BufFile *source);
+extern uint64 BufFileAppend(BufFile *target, BufFile *source);
extern BufFile *BufFileCreateShared(SharedFileSet *fileset, const char *name);
extern void BufFileExportShared(BufFile *file);
diff --git a/src/include/storage/condition_variable.h b/src/include/storage/condition_variable.h
index bfe5c89b54..cb245c09c5 100644
--- a/src/include/storage/condition_variable.h
+++ b/src/include/storage/condition_variable.h
@@ -43,7 +43,7 @@ extern void ConditionVariableInit(ConditionVariable *cv);
* the condition variable.
*/
extern void ConditionVariableSleep(ConditionVariable *cv, uint32 wait_event_info);
-extern bool ConditionVariableTimedSleep(ConditionVariable *cv, long timeout,
+extern bool ConditionVariableTimedSleep(ConditionVariable *cv, int64 timeout,
uint32 wait_event_info);
extern void ConditionVariableCancelSleep(void);
diff --git a/src/include/storage/latch.h b/src/include/storage/latch.h
index 46ae56cae3..3c013ca860 100644
--- a/src/include/storage/latch.h
+++ b/src/include/storage/latch.h
@@ -169,13 +169,13 @@ extern int AddWaitEventToSet(WaitEventSet *set, uint32 events, pgsocket fd,
Latch *latch, void *user_data);
extern void ModifyWaitEvent(WaitEventSet *set, int pos, uint32 events, Latch *latch);
-extern int WaitEventSetWait(WaitEventSet *set, long timeout,
+extern int WaitEventSetWait(WaitEventSet *set, int64 timeout,
WaitEvent *occurred_events, int nevents,
uint32 wait_event_info);
-extern int WaitLatch(Latch *latch, int wakeEvents, long timeout,
+extern int WaitLatch(Latch *latch, int wakeEvents, int64 timeout,
uint32 wait_event_info);
extern int WaitLatchOrSocket(Latch *latch, int wakeEvents,
- pgsocket sock, long timeout, uint32 wait_event_info);
+ pgsocket sock, int64 timeout, uint32 wait_event_info);
/*
* Unix implementation uses SIGUSR1 for inter-process signaling.
diff --git a/src/include/storage/shmem.h b/src/include/storage/shmem.h
index 0c1af89206..4b75f33be9 100644
--- a/src/include/storage/shmem.h
+++ b/src/include/storage/shmem.h
@@ -39,7 +39,7 @@ extern void *ShmemAllocNoError(Size size);
extern void *ShmemAllocUnlocked(Size size);
extern bool ShmemAddrIsValid(const void *addr);
extern void InitShmemIndex(void);
-extern HTAB *ShmemInitHash(const char *name, long init_size, long max_size,
+extern HTAB *ShmemInitHash(const char *name, uint64 init_size, uint64 max_size,
HASHCTL *infoP, int hash_flags);
extern void *ShmemInitStruct(const char *name, Size size, bool *foundPtr);
extern Size add_size(Size s1, Size s2);
diff --git a/src/include/tcop/pquery.h b/src/include/tcop/pquery.h
index 4ad6324e2d..367c31cabb 100644
--- a/src/include/tcop/pquery.h
+++ b/src/include/tcop/pquery.h
@@ -33,13 +33,13 @@ extern void PortalStart(Portal portal, ParamListInfo params,
extern void PortalSetResultFormat(Portal portal, int nFormats,
int16 *formats);
-extern bool PortalRun(Portal portal, long count, bool isTopLevel,
+extern bool PortalRun(Portal portal, int64 count, bool isTopLevel,
bool run_once, DestReceiver *dest, DestReceiver *altdest,
char *completionTag);
extern uint64 PortalRunFetch(Portal portal,
FetchDirection fdirection,
- long count,
+ int64 count,
DestReceiver *dest);
#endif /* PQUERY_H */
diff --git a/src/include/utils/dynahash.h b/src/include/utils/dynahash.h
index 768b952176..0c75534897 100644
--- a/src/include/utils/dynahash.h
+++ b/src/include/utils/dynahash.h
@@ -14,6 +14,6 @@
#ifndef DYNAHASH_H
#define DYNAHASH_H
-extern int my_log2(long num);
+extern int my_log2(uint64 num);
#endif /* DYNAHASH_H */
diff --git a/src/include/utils/hsearch.h b/src/include/utils/hsearch.h
index f1deb9beab..dd0cd98962 100644
--- a/src/include/utils/hsearch.h
+++ b/src/include/utils/hsearch.h
@@ -64,11 +64,11 @@ typedef struct HTAB HTAB;
/* Only those fields indicated by hash_flags need be set */
typedef struct HASHCTL
{
- long num_partitions; /* # partitions (must be power of 2) */
- long ssize; /* segment size */
- long dsize; /* (initial) directory size */
- long max_dsize; /* limit to dsize if dir size is limited */
- long ffactor; /* fill factor */
+ uint64 num_partitions; /* # partitions (must be power of 2) */
+ uint64 ssize; /* segment size */
+ uint64 dsize; /* (initial) directory size */
+ uint64 max_dsize; /* limit to dsize if dir size is limited */
+ uint64 ffactor; /* fill factor */
Size keysize; /* hash key length in bytes */
Size entrysize; /* total user element size in bytes */
HashValueFunc hash; /* hash function */
@@ -123,7 +123,7 @@ typedef struct
* string_hash, tag_hash, uint32_hash, or oid_hash. Just set HASH_BLOBS or
* not. Use HASH_FUNCTION only when you want something other than those.
*/
-extern HTAB *hash_create(const char *tabname, long nelem,
+extern HTAB *hash_create(const char *tabname, uint64 nelem,
HASHCTL *info, int flags);
extern void hash_destroy(HTAB *hashp);
extern void hash_stats(const char *where, HTAB *hashp);
@@ -135,13 +135,13 @@ extern void *hash_search_with_hash_value(HTAB *hashp, const void *keyPtr,
bool *foundPtr);
extern bool hash_update_hash_key(HTAB *hashp, void *existingEntry,
const void *newKeyPtr);
-extern long hash_get_num_entries(HTAB *hashp);
+extern uint64 hash_get_num_entries(HTAB *hashp);
extern void hash_seq_init(HASH_SEQ_STATUS *status, HTAB *hashp);
extern void *hash_seq_search(HASH_SEQ_STATUS *status);
extern void hash_seq_term(HASH_SEQ_STATUS *status);
extern void hash_freeze(HTAB *hashp);
-extern Size hash_estimate_size(long num_entries, Size entrysize);
-extern long hash_select_dirsize(long num_entries);
+extern Size hash_estimate_size(uint64 num_entries, Size entrysize);
+extern uint64 hash_select_dirsize(uint64 num_entries);
extern Size hash_get_shared_size(HASHCTL *info, int flags);
extern void AtEOXact_HashTables(bool isCommit);
extern void AtEOSubXact_HashTables(bool isCommit, int nestDepth);
diff --git a/src/include/utils/sampling.h b/src/include/utils/sampling.h
index 74646846b2..cbcbeb6fd7 100644
--- a/src/include/utils/sampling.h
+++ b/src/include/utils/sampling.h
@@ -19,7 +19,7 @@
/* Random generator for sampling code */
typedef unsigned short SamplerRandomState[3];
-extern void sampler_random_init_state(long seed,
+extern void sampler_random_init_state(uint64 seed,
SamplerRandomState randstate);
extern double sampler_random_fract(SamplerRandomState randstate);
@@ -38,7 +38,7 @@ typedef struct
typedef BlockSamplerData *BlockSampler;
extern BlockNumber BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
- int samplesize, long randseed);
+ int samplesize, uint64 randseed);
extern bool BlockSampler_HasMore(BlockSampler bs);
extern BlockNumber BlockSampler_Next(BlockSampler bs);
diff --git a/src/include/utils/timestamp.h b/src/include/utils/timestamp.h
index 03a1de569f..78fdf05583 100644
--- a/src/include/utils/timestamp.h
+++ b/src/include/utils/timestamp.h
@@ -71,7 +71,7 @@ extern TimestampTz GetCurrentTimestamp(void);
extern TimestampTz GetSQLCurrentTimestamp(int32 typmod);
extern Timestamp GetSQLLocalTimestamp(int32 typmod);
extern void TimestampDifference(TimestampTz start_time, TimestampTz stop_time,
- long *secs, int *microsecs);
+ int64 *secs, int *microsecs);
extern bool TimestampDifferenceExceeds(TimestampTz start_time,
TimestampTz stop_time,
int msec);
diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c
index 5acf604f63..3ce5848247 100644
--- a/src/pl/plpgsql/src/pl_exec.c
+++ b/src/pl/plpgsql/src/pl_exec.c
@@ -358,7 +358,7 @@ static Datum exec_eval_expr(PLpgSQL_execstate *estate,
Oid *rettype,
int32 *rettypmod);
static int exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP);
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP);
static int exec_for_query(PLpgSQL_execstate *estate, PLpgSQL_stmt_forq *stmt,
Portal portal, bool prefetch_ok);
static ParamListInfo setup_param_list(PLpgSQL_execstate *estate,
@@ -4085,7 +4085,7 @@ exec_stmt_execsql(PLpgSQL_execstate *estate,
PLpgSQL_stmt_execsql *stmt)
{
ParamListInfo paramLI;
- long tcount;
+ int64 tcount;
int rc;
PLpgSQL_expr *expr = stmt->sqlstmt;
int too_many_rows_level = 0;
@@ -4683,7 +4683,7 @@ static int
exec_stmt_fetch(PLpgSQL_execstate *estate, PLpgSQL_stmt_fetch *stmt)
{
PLpgSQL_var *curvar;
- long how_many = stmt->how_many;
+ int64 how_many = stmt->how_many;
SPITupleTable *tuptab;
Portal portal;
char *curname;
@@ -5831,7 +5831,7 @@ exec_eval_expr(PLpgSQL_execstate *estate,
*/
static int
exec_run_select(PLpgSQL_execstate *estate,
- PLpgSQL_expr *expr, long maxtuples, Portal *portalP)
+ PLpgSQL_expr *expr, uint64 maxtuples, Portal *portalP)
{
ParamListInfo paramLI;
int rc;
diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h
index 69df3306fd..0cea7270cf 100644
--- a/src/pl/plpgsql/src/plpgsql.h
+++ b/src/pl/plpgsql/src/plpgsql.h
@@ -798,7 +798,7 @@ typedef struct PLpgSQL_stmt_fetch
PLpgSQL_variable *target; /* target (record or row) */
int curvar; /* cursor variable to fetch from */
FetchDirection direction; /* fetch direction */
- long how_many; /* count, if constant (expr is NULL) */
+ int64 how_many; /* count, if constant (expr is NULL) */
PLpgSQL_expr *expr; /* count, if expression */
bool is_move; /* is this a fetch or move? */
bool returns_multiple_rows; /* can return more than one row? */
@@ -1027,7 +1027,7 @@ typedef struct PLpgSQL_function
/* these fields change when the function is used */
struct PLpgSQL_execstate *cur_estate;
- unsigned long use_count;
+ uint64 use_count;
} PLpgSQL_function;
/*
diff --git a/src/pl/plpython/plpy_spi.c b/src/pl/plpython/plpy_spi.c
index 99c1b4f28f..91079f4c99 100644
--- a/src/pl/plpython/plpy_spi.c
+++ b/src/pl/plpython/plpy_spi.c
@@ -25,7 +25,7 @@
#include "utils/memutils.h"
#include "utils/syscache.h"
-static PyObject *PLy_spi_execute_query(char *query, long limit);
+static PyObject *PLy_spi_execute_query(char *query, uint64 limit);
static PyObject *PLy_spi_execute_fetch_result(SPITupleTable *tuptable,
uint64 rows, int status);
static void PLy_spi_exception_set(PyObject *excclass, ErrorData *edata);
@@ -158,7 +158,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
char *query;
PyObject *plan;
PyObject *list = NULL;
- long limit = 0;
+ uint64 limit = 0;
if (PyArg_ParseTuple(args, "s|l", &query, &limit))
return PLy_spi_execute_query(query, limit);
@@ -174,7 +174,7 @@ PLy_spi_execute(PyObject *self, PyObject *args)
}
PyObject *
-PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
+PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit)
{
volatile int nargs;
int i,
@@ -305,7 +305,7 @@ PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit)
}
static PyObject *
-PLy_spi_execute_query(char *query, long limit)
+PLy_spi_execute_query(char *query, uint64 limit)
{
int rv;
volatile MemoryContext oldcontext;
diff --git a/src/pl/plpython/plpy_spi.h b/src/pl/plpython/plpy_spi.h
index a5e2e60da7..66e8a43aae 100644
--- a/src/pl/plpython/plpy_spi.h
+++ b/src/pl/plpython/plpy_spi.h
@@ -10,7 +10,7 @@
extern PyObject *PLy_spi_prepare(PyObject *self, PyObject *args);
extern PyObject *PLy_spi_execute(PyObject *self, PyObject *args);
-extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, long limit);
+extern PyObject *PLy_spi_execute_plan(PyObject *ob, PyObject *list, uint64 limit);
typedef struct PLyExceptionEntry
{
diff --git a/src/timezone/localtime.c b/src/timezone/localtime.c
index 333f27300a..926c677c41 100644
--- a/src/timezone/localtime.c
+++ b/src/timezone/localtime.c
@@ -1623,10 +1623,10 @@ increment_overflow_time(pg_time_t *tp, int32 j)
*/
int
pg_next_dst_boundary(const pg_time_t *timep,
- long int *before_gmtoff,
+ int64 *before_gmtoff,
int *before_isdst,
pg_time_t *boundary,
- long int *after_gmtoff,
+ int64 *after_gmtoff,
int *after_isdst,
const pg_tz *tz)
{
@@ -1771,7 +1771,7 @@ pg_next_dst_boundary(const pg_time_t *timep,
bool
pg_interpret_timezone_abbrev(const char *abbrev,
const pg_time_t *timep,
- long int *gmtoff,
+ int64 *gmtoff,
int *isdst,
const pg_tz *tz)
{
@@ -1863,7 +1863,7 @@ pg_interpret_timezone_abbrev(const char *abbrev,
* into *gmtoff and return true, else return false.
*/
bool
-pg_get_timezone_offset(const pg_tz *tz, long int *gmtoff)
+pg_get_timezone_offset(const pg_tz *tz, int64 *gmtoff)
{
/*
* The zone could have more than one ttinfo, if it's historically used
--------------2.24.1--
v6-0002-Spread-bitutils-into-hashing.patch (text/x-diff; charset=us-ascii) — Download
From c51fe40fcb6841a888fe3f5311845486180b92d1 Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Fri, 31 Jan 2020 07:08:48 -0800
Subject: [PATCH v6 2/3] Spread bitutils into hashing
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..29dca21be2 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -30,6 +30,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
@@ -502,7 +503,6 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -543,14 +543,8 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ metap->hashm_bmsize = 1 << pg_leftmost_one_pos32(metap->hashm_bsize);
+ metap->hashm_bmshift = pg_leftmost_one_pos32(metap->hashm_bsize) + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +564,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = next_power_of_2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -657,13 +651,12 @@ restart_expand:
* Can't split anymore if maxbucket has reached its maximum possible
* value.
*
- * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
- * the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
- * _hash_alloc_buckets() would fail, but if we supported buckets smaller
- * than a disk block then this would be an independent constraint.
+ * Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because the
+ * calculation maxbucket+1 mustn't overflow). Currently we restrict to half
+ * that because of insufficient space in hashm_spares[]. It's moot anyway
+ * because an index with 2^32 buckets would certainly overflow BlockNumber
+ * and hence _hash_alloc_buckets() would fail, but if we supported buckets
+ * smaller than a disk block then this would be an independent constraint.
*
* If you change this, see also the maximum initial number of buckets in
* _hash_init().
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..322379788c 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -27,6 +27,7 @@
#include "access/hash.h"
#include "commands/progress.h"
+#include "port/pg_bitutils.h"
#include "miscadmin.h"
#include "pgstat.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = next_power_of_2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..738572ca40 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -159,7 +145,7 @@ _hash_spareindex(uint32 num_bucket)
uint32 splitpoint_group;
uint32 splitpoint_phases;
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index 7769c79861..2409a413d2 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -87,6 +87,7 @@
#include "access/xact.h"
#include "common/hashfn.h"
+#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
@@ -1718,16 +1719,11 @@ hash_corrupted(HTAB *hashp)
int
my_log2(uint64 num)
{
- int i;
- uint64 limit;
-
/* guard against too-large input, which would put us into infinite loop */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
+ return ceil_log2_64(num);
}
/* calculate first power of 2 >= num, bounded to what will fit in a uint64 */
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 2707e1924b..b6407813e7 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -450,7 +450,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..1a35a054d8 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = next_power_of_2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == next_power_of_2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..88a9ea5b7f 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -20,19 +20,18 @@ extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
/*
* pg_leftmost_one_pos32
* Returns the position of the most significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_leftmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
- return 31 - __builtin_clz(word);
+ return word ? 31 - __builtin_clz(word) : 0;
#else
int shift = 32 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -49,19 +48,18 @@ static inline int
pg_leftmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CLZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return 63 - __builtin_clzl(word);
+ return word ? 63 - __builtin_clzl(word) : 0;
#elif defined(HAVE_LONG_LONG_INT_64)
- return 63 - __builtin_clzll(word);
+ return word ? 63 - __builtin_clzll(word) : 0;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CLZ */
int shift = 64 - 8;
- Assert(word != 0);
+ if (word == 0)
+ return 0;
while ((word >> shift) == 0)
shift -= 8;
@@ -73,19 +71,18 @@ pg_leftmost_one_pos64(uint64 word)
/*
* pg_rightmost_one_pos32
* Returns the position of the least significant set bit in "word",
- * measured from the least significant bit. word must not be 0.
+ * measured from the least significant bit.
*/
static inline int
pg_rightmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
- return __builtin_ctz(word);
+ return word ? __builtin_ctz(word) : 32;
#else
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 32;
while ((word & 255) == 0)
{
@@ -105,19 +102,18 @@ static inline int
pg_rightmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CTZ
- Assert(word != 0);
-
#if defined(HAVE_LONG_INT_64)
- return __builtin_ctzl(word);
+ return word ? __builtin_ctzl(word) : 64;
#elif defined(HAVE_LONG_LONG_INT_64)
- return __builtin_ctzll(word);
+ return word ? __builtin_ctzll(word) : 64;
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CTZ */
int result = 0;
- Assert(word != 0);
+ if (word == 0)
+ return 64;
while ((word & 255) == 0)
{
@@ -145,4 +141,34 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+/* ceil(lg2(num)) */
+static inline uint32
+ceil_log2_32(uint32 num)
+{
+ Assert(num > 0);
+ return pg_leftmost_one_pos32(num-1) + 1;
+}
+
+static inline uint64
+ceil_log2_64(uint64 num)
+{
+ Assert(num > 0);
+ return pg_leftmost_one_pos64(num-1) + 1;
+}
+
+/* Calculate the first power of 2 >= num */
+static inline uint32
+next_power_of_2_32(uint32 num)
+{
+ Assert(num > 0);
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+}
+
+static inline uint64
+next_power_of_2_64(uint64 num)
+{
+ Assert(num > 0);
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+}
+
#endif /* PG_BITUTILS_H */
--------------2.24.1--
v6-0003-Reduced-operations-in-floor_log2.patch (text/x-diff; charset=us-ascii) — Download
From b7eb194c473ca399a8e8afcd279bb3480fe9dacf Mon Sep 17 00:00:00 2001
From: David Fetter <david@fetter.org>
Date: Fri, 31 Jan 2020 08:32:26 -0800
Subject: [PATCH v6 3/3] Reduced operations in floor_log2
To: hackers
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="------------2.24.1"
This is a multi-part message in MIME format.
--------------2.24.1
Content-Type: text/plain; charset=UTF-8; format=fixed
Content-Transfer-Encoding: 8bit
diff --git a/src/backend/utils/adt/array_selfuncs.c b/src/backend/utils/adt/array_selfuncs.c
index d97e60a3ab..12170cb7f2 100644
--- a/src/backend/utils/adt/array_selfuncs.c
+++ b/src/backend/utils/adt/array_selfuncs.c
@@ -20,6 +20,7 @@
#include "catalog/pg_collation.h"
#include "catalog/pg_operator.h"
#include "catalog/pg_statistic.h"
+#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/builtins.h"
#include "utils/lsyscache.h"
@@ -1089,35 +1090,9 @@ calc_distr(const float *p, int n, int m, float rest)
static int
floor_log2(uint32 n)
{
- int logval = 0;
-
if (n == 0)
return -1;
- if (n >= (1 << 16))
- {
- n >>= 16;
- logval += 16;
- }
- if (n >= (1 << 8))
- {
- n >>= 8;
- logval += 8;
- }
- if (n >= (1 << 4))
- {
- n >>= 4;
- logval += 4;
- }
- if (n >= (1 << 2))
- {
- n >>= 2;
- logval += 2;
- }
- if (n >= (1 << 1))
- {
- logval += 1;
- }
- return logval;
+ return 1 << pg_leftmost_one_pos32(n);
}
/*
--------------2.24.1--
On Thu, Feb 27, 2020 at 1:56 PM David Fetter <david@fetter.org> wrote:
[v6 set]
Hi David,
In 0002, the pg_bitutils functions have a test (input > 0), and the
new callers ceil_log2_* and next_power_of_2_* have asserts. That seems
backward to me. I imagine some callers of bitutils will already know
the value > 0, and it's probably good to keep that branch out of the
lowest level functions. What do you think?
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Thu, Feb 27, 2020 at 02:41:49PM +0800, John Naylor wrote:
On Thu, Feb 27, 2020 at 1:56 PM David Fetter <david@fetter.org> wrote:
[v6 set]
Hi David,
In 0002, the pg_bitutils functions have a test (input > 0), and the
new callers ceil_log2_* and next_power_of_2_* have asserts. That seems
backward to me.
To me, too, now that you mention it. My thinking was a little fuzzed
by trying to accommodate platforms with intrinsics where clz is
defined for 0 inputs.
I imagine some callers of bitutils will already know the value > 0,
and it's probably good to keep that branch out of the lowest level
functions. What do you think?
I don't know quite how smart compilers and CPUs are these days, so
it's unclear to me how often that branch would actually happen.
Anyhow, I'll get a revised patch set out later today.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Hi David,
On Wed, Feb 26, 2020 at 9:56 PM David Fetter <david@fetter.org> wrote:
On Wed, Feb 26, 2020 at 09:12:24AM +0100, David Fetter wrote:
On Fri, Jan 31, 2020 at 04:59:18PM +0100, David Fetter wrote:
On Wed, Jan 15, 2020 at 03:45:12PM -0800, Jesse Zhang wrote:
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils: 1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions). Done in the expanded patches attached.
I see that you've found use of it in dynahash, thanks!
The math in the new (from v4 to v6) patch is wrong: it yields
ceil_log2(1) = 1 or next_power_of_2(1) = 2. I can see that you lifted
the restriction of "num greater than one" for ceil_log2() in this patch
set, but it's now _more_ problematic to base those functions on
pg_leftmost_one_pos().
I'm not comfortable with your changes to pg_leftmost_one_pos() to remove
the restriction on word being non-zero. Specifically
pg_leftmost_one_pos() is made to return 0 on 0 input. While none of its
current callers (in HEAD) is harmed, this introduces muddy semantics:
1. pg_leftmost_one_pos is semantically undefined on 0 input: scanning
for a set bit in a zero word won't find it anywhere.
2. we can _try_ generalizing it to accommodate ceil_log2 by
extrapolating based on the invariant that BSR + LZCNT = 31 (or 63). In
that case, the extrapolation yields -1 for pg_leftmost_one_pos(0).
I'm not convinced that others on the list will be comfortable with the
generalization suggested in 2 above.
I've quickly put together a PoC patch on top of yours, which
re-implements ceil_log2 using LZCNT coupled with a CPUID check.
Thoughts?
Cheers,
Jesse
Attachments:
0001-Use-LZCOUNT-when-possible.patch (text/x-patch; charset=US-ASCII; name=0001-Use-LZCOUNT-when-possible.patch) — Download
From 0e4392d77b6132a508b7da14871cc99066a9d114 Mon Sep 17 00:00:00 2001
From: Jesse Zhang <sbjesse@gmail.com>
Date: Fri, 28 Feb 2020 16:22:04 -0800
Subject: [PATCH] Use LZCOUNT when possible
This patch reverts the changes to pg_leftmost_one and friends (which is
really bit-scan-reverse, and is legitimately undefined one zero) and
reworks ceil_log2 and friends to use a count-leading-zeros operation
that is well-defined on zero.
---
src/include/port/pg_bitutils.h | 47 ++++++++++++---------
src/port/pg_bitutils.c | 77 ++++++++++++++++++++++++++++++++++
2 files changed, 104 insertions(+), 20 deletions(-)
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 88a9ea5b7fb..b4d5724ee1d 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -20,18 +20,19 @@ extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
/*
* pg_leftmost_one_pos32
* Returns the position of the most significant set bit in "word",
- * measured from the least significant bit.
+ * measured from the least significant bit. word must not be 0.
*/
static inline int
pg_leftmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CLZ
- return word ? 31 - __builtin_clz(word) : 0;
+ Assert(word != 0);
+
+ return 31 - __builtin_clz(word);
#else
int shift = 32 - 8;
- if (word == 0)
- return 0;
+ Assert(word != 0);
while ((word >> shift) == 0)
shift -= 8;
@@ -48,18 +49,19 @@ static inline int
pg_leftmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CLZ
+ Assert(word != 0);
+
#if defined(HAVE_LONG_INT_64)
- return word ? 63 - __builtin_clzl(word) : 0;
+ return 63 - __builtin_clzl(word);
#elif defined(HAVE_LONG_LONG_INT_64)
- return word ? 63 - __builtin_clzll(word) : 0;
+ return 63 - __builtin_clzll(word);
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CLZ */
int shift = 64 - 8;
- if (word == 0)
- return 0;
+ Assert(word != 0);
while ((word >> shift) == 0)
shift -= 8;
@@ -71,18 +73,19 @@ pg_leftmost_one_pos64(uint64 word)
/*
* pg_rightmost_one_pos32
* Returns the position of the least significant set bit in "word",
- * measured from the least significant bit.
+ * measured from the least significant bit. word must not be 0.
*/
static inline int
pg_rightmost_one_pos32(uint32 word)
{
#ifdef HAVE__BUILTIN_CTZ
- return word ? __builtin_ctz(word) : 32;
+ Assert(word != 0);
+
+ return __builtin_ctz(word);
#else
int result = 0;
- if (word == 0)
- return 32;
+ Assert(word != 0);
while ((word & 255) == 0)
{
@@ -102,18 +105,19 @@ static inline int
pg_rightmost_one_pos64(uint64 word)
{
#ifdef HAVE__BUILTIN_CTZ
+ Assert(word != 0);
+
#if defined(HAVE_LONG_INT_64)
- return word ? __builtin_ctzl(word) : 64;
+ return __builtin_ctzl(word);
#elif defined(HAVE_LONG_LONG_INT_64)
- return word ? __builtin_ctzll(word) : 64;
+ return __builtin_ctzll(word);
#else
#error must have a working 64-bit integer datatype
#endif
#else /* !HAVE__BUILTIN_CTZ */
int result = 0;
- if (word == 0)
- return 64;
+ Assert(word != 0);
while ((word & 255) == 0)
{
@@ -141,19 +145,22 @@ pg_rotate_right32(uint32 word, int n)
return (word >> n) | (word << (sizeof(word) * BITS_PER_BYTE - n));
}
+extern int (*pg_count_leading_zeros_32)(uint32);
+extern int (*pg_count_leading_zeros_64)(uint64);
+
/* ceil(lg2(num)) */
static inline uint32
ceil_log2_32(uint32 num)
{
Assert(num > 0);
- return pg_leftmost_one_pos32(num-1) + 1;
+ return 8 * sizeof(num) - pg_count_leading_zeros_32(num-1);
}
static inline uint64
ceil_log2_64(uint64 num)
{
Assert(num > 0);
- return pg_leftmost_one_pos64(num-1) + 1;
+ return 8 * sizeof(num) - pg_count_leading_zeros_64(num-1);
}
/* Calculate the first power of 2 >= num */
@@ -161,14 +168,14 @@ static inline uint32
next_power_of_2_32(uint32 num)
{
Assert(num > 0);
- return ((uint32) 1) << (pg_leftmost_one_pos32(num-1) + 1);
+ return ((uint32) 1) << ceil_log2_32(num);
}
static inline uint64
next_power_of_2_64(uint64 num)
{
Assert(num > 0);
- return ((uint64) 1) << (pg_leftmost_one_pos64(num-1) + 1);
+ return ((uint64) 1) << ceil_log2_64(num);
}
#endif /* PG_BITUTILS_H */
diff --git a/src/port/pg_bitutils.c b/src/port/pg_bitutils.c
index 392fbd33845..2732953a466 100644
--- a/src/port/pg_bitutils.c
+++ b/src/port/pg_bitutils.c
@@ -319,3 +319,80 @@ pg_popcount(const char *buf, int bytes)
return popcnt;
}
+
+static bool pg_lzcnt_available(void)
+{
+ unsigned int exx[4] = {0, 0, 0, 0};
+
+#if defined(HAVE__GET_CPUID)
+ __get_cpuid(0x80000001, &exx[0], &exx[1], &exx[2], &exx[3]);
+#elif defined(HAVE__CPUID)
+ __cpuid(exx, 0x80000001);
+#else
+#error cpuid instruction not available
+#endif
+
+ return (exx[2] & bit_ABM) != 0; /* ABM / LZCNT */
+}
+
+static int
+__attribute__((target("lzcnt")))
+pg_fast_count_leading_zeros_32(uint32 num)
+{
+ return __builtin_clz(num);
+}
+
+static int
+pg_slow_count_leading_zeros_32(uint32 num)
+{
+ if (num == 0)
+ return 8 * sizeof(num);
+ return __builtin_clz(num);
+}
+
+
+static int
+__attribute__((target("lzcnt")))
+pg_fast_count_leading_zeros_64(uint64 num)
+{
+ return __builtin_clzll(num);
+}
+
+static int
+pg_slow_count_leading_zeros_64(uint64 num)
+{
+ if (num == 0)
+ return 8 * sizeof(num);
+ return __builtin_clzll(num);
+}
+
+static int pg_count_leading_zeros_32_choose(uint32 num)
+{
+ if (pg_lzcnt_available())
+ {
+ pg_count_leading_zeros_64 = pg_fast_count_leading_zeros_64;
+ pg_count_leading_zeros_32 = pg_fast_count_leading_zeros_32;
+ return pg_count_leading_zeros_32(num);
+ }
+ pg_count_leading_zeros_64 = pg_slow_count_leading_zeros_64;
+ pg_count_leading_zeros_32 = pg_slow_count_leading_zeros_32;
+
+ return pg_slow_count_leading_zeros_32(num);
+}
+
+static int pg_count_leading_zeros_64_choose(uint64 num)
+{
+ if (pg_lzcnt_available())
+ {
+ pg_count_leading_zeros_64 = pg_fast_count_leading_zeros_64;
+ pg_count_leading_zeros_32 = pg_fast_count_leading_zeros_32;
+ return pg_count_leading_zeros_64(num);
+ }
+ pg_count_leading_zeros_64 = pg_slow_count_leading_zeros_64;
+ pg_count_leading_zeros_32 = pg_slow_count_leading_zeros_32;
+
+ return pg_slow_count_leading_zeros_64(num);
+}
+
+int (*pg_count_leading_zeros_32)(uint32) = pg_count_leading_zeros_32_choose;
+int (*pg_count_leading_zeros_64)(uint64) = pg_count_leading_zeros_64_choose;
--
2.25.0
On Thu, Feb 27, 2020 at 1:56 PM David Fetter <david@fetter.org> wrote:
[v6 patch set]
Here I'm only looking at 0001. It needs rebasing, but it's trivial to
see what it does. I noticed in some places, you've replaced "long"
with uint64, but many are int64. I started making a list, but it got
too long, and I had to stop and ask: Is there a reason to change from
signed to unsigned for any of the ones that aren't directly related to
hashing code? Is there some larger pattern I'm missing?
-static long gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
-static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, long blocknum);
+static uint64 gistBuffersGetFreeBlock(GISTBuildBuffers *gfbb);
+static void gistBuffersReleaseBlock(GISTBuildBuffers *gfbb, uint64 blocknum);
I believe these should actually use BlockNumber, if these refer to
relation blocks as opposed to temp file blocks (I haven't read the
code).
-exec_execute_message(const char *portal_name, long max_rows)
+exec_execute_message(const char *portal_name, uint64 max_rows)
The only call site of this function uses an int32, which gets its
value from pq_getmsgint, which returns uint32.
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Tue, Mar 3, 2020 at 4:46 AM Jesse Zhang <sbjesse@gmail.com> wrote:
The math in the new (from v4 to v6) patch is wrong: it yields
ceil_log2(1) = 1 or next_power_of_2(1) = 2.
I think you're right.
I can see that you lifted
the restriction of "num greater than one" for ceil_log2() in this patch
set, but it's now _more_ problematic to base those functions on
pg_leftmost_one_pos().
I'm not comfortable with your changes to pg_leftmost_one_pos() to remove
the restriction on word being non-zero. Specifically
pg_leftmost_one_pos() is made to return 0 on 0 input. While none of its
current callers (in HEAD) is harmed, this introduces muddy semantics:

1. pg_leftmost_one_pos is semantically undefined on 0 input: scanning
for a set bit in a zero word won't find it anywhere.
Right.
I've quickly put together a PoC patch on top of yours, which
re-implements ceil_log2 using LZCNT coupled with a CPUID check.
Thoughts?
This patch seems to be making an assumption that an indirect function
call is faster than taking a branch (in inlined code) that the CPU
will almost always predict correctly. It would be nice to have some
numbers to compare. (against pg_count_leading_zeros_* using the "slow"
versions but statically inlined).
Stylistically, "8 * sizeof(num)" is a bit overly formal, since the
hard-coded number we want is in the name of the function.
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Mon, Mar 02, 2020 at 12:45:21PM -0800, Jesse Zhang wrote:
Hi David,
On Wed, Feb 26, 2020 at 9:56 PM David Fetter <david@fetter.org> wrote:
On Wed, Feb 26, 2020 at 09:12:24AM +0100, David Fetter wrote:
On Fri, Jan 31, 2020 at 04:59:18PM +0100, David Fetter wrote:
On Wed, Jan 15, 2020 at 03:45:12PM -0800, Jesse Zhang wrote:
On Tue, Jan 14, 2020 at 2:09 PM David Fetter <david@fetter.org> wrote:
The changes in hash AM and SIMPLEHASH do look like a net positive
improvement. My biggest cringe might be in pg_bitutils:

1. Is ceil_log2_64 dead code?
Let's call it nascent code. I suspect there are places it could go, if
I look for them. Also, it seemed silly to have one without the other.

While not absolutely required, I'd like us to find at least one
place and start using it. (Clang also nags at me when we have
unused functions).Done in the expanded patches attached.
I see that you've found use of it in dynahash, thanks!
The math in the new (from v4 to v6) patch is wrong: it yields
ceil_log2(1) = 1 or next_power_of_2(1) = 2. I can see that you lifted
the restriction of "num greater than one" for ceil_log2() in this patch
set, but it's now _more_ problematic to base those functions on
pg_leftmost_one_pos().I'm not comfortable with your changes to pg_leftmost_one_pos() to remove
the restriction on word being non-zero. Specifically
pg_leftmost_one_pos() is made to return 0 on 0 input. While none of its
current callers (in HEAD) is harmed, this introduces muddy semantics:1. pg_leftmost_one_pos is semantically undefined on 0 input: scanning
for a set bit in a zero word won't find it anywhere.

2. we can _try_ generalizing it to accommodate ceil_log2 by
extrapolating based on the invariant that BSR + LZCNT = 31 (or 63). In
that case, the extrapolation yields -1 for pg_leftmost_one_pos(0).I'm not convinced that others on the list will be comfortable with the
generalization suggested in 2 above.I've quickly put together a PoC patch on top of yours, which
re-implements ceil_log2 using LZCNT coupled with a CPUID check.
Thoughts?
Per discussion on IRC with Andrew (RhodiumToad) Gierth:
The runtime detection means there's always an indirect call overhead
and no way to inline. This is counter to what using compiler
intrinsics is supposed to do.
It's better to rely on the compiler, because:
(a) The compiler often knows whether the value can or can't be 0 and
can therefore skip a conditional jump.
(b) If you're targeting a recent microarchitecture, the compiler can
just use the right instruction.
(c) Even if the conditional branch is left in, it's not a big overhead.
Best,
David.
--
David Fetter <david(at)fetter(dot)org> http://fetter.org/
Phone: +1 415 235 3778
Remember to vote!
Consider donating to Postgres: http://www.postgresql.org/about/donate
Hi John,
Oops this email has been sitting in my outbox for 3 days...
On Wed, Mar 4, 2020 at 1:46 AM John Naylor <john.naylor@2ndquadrant.com> wrote:
On Tue, Mar 3, 2020 at 4:46 AM Jesse Zhang <sbjesse@gmail.com> wrote:
I've quickly put together a PoC patch on top of yours, which
re-implements ceil_log2 using LZCNT coupled with a CPUID check.
Thoughts?This patch seems to be making an assumption that an indirect function
call is faster than taking a branch (in inlined code) that the CPU
will almost always predict correctly. It would be nice to have some
numbers to compare. (against pg_count_leading_zeros_* using the "slow"
versions but statically inlined).
Ah, how could I forget that... I ran a quick benchmark on my laptop, and
indeed, even though the GCC-generated code takes a hit on zero input
(Clang generates slightly different code that gives indistinguishable
runtime for zero and non-zero inputs), the inlined code (the function
input in my benchmark is never a constant literal so the branch does get
exercised at runtime) is still more than twice as fast as the function
call.
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_pfunc/0 1.57 ns 1.56 ns 447127265
BM_pfunc/1 1.56 ns 1.56 ns 449618696
BM_pfunc/8 1.57 ns 1.57 ns 443013856
BM_pfunc/64 1.57 ns 1.57 ns 448784369
BM_slow/0 0.602 ns 0.600 ns 1000000000
BM_slow/1 0.391 ns 0.390 ns 1000000000
BM_slow/8 0.392 ns 0.391 ns 1000000000
BM_slow/64 0.391 ns 0.390 ns 1000000000
BM_fast/0 1.47 ns 1.46 ns 477513921
BM_fast/1 1.47 ns 1.46 ns 473992040
BM_fast/8 1.46 ns 1.46 ns 474895755
BM_fast/64 1.47 ns 1.46 ns 477215268
For your amusement, I've attached the meat of the benchmark. To build
the code you can grab the repository at
https://github.com/d/glowing-chainsaw/tree/pfunc
Stylistically, "8 * sizeof(num)" is a bit overly formal, since the
hard-coded number we want is in the name of the function.
Oh yeah, overly generic code is indicative of the remnants of my C++
brain, will fix.
Cheers,
Jesse
Hi David,
On Sun, Mar 8, 2020 at 11:34 AM David Fetter <david@fetter.org> wrote:
On Mon, Mar 02, 2020 at 12:45:21PM -0800, Jesse Zhang wrote:
Hi David,
Per discussion on IRC with Andrew (RhodiumToad) Gierth:
The runtime detection means there's always an indirect call overhead
and no way to inline. This is counter to what using compiler
intrinsics is supposed to do.It's better to rely on the compiler, because:
(a) The compiler often knows whether the value can or can't be 0 and
can therefore skip a conditional jump.
Yes, the compiler would know to eliminate the branch if the inlined
function is called with a literal argument, or it infers an invariant
from the context (like nesting inside a conditional block, or a previous
conditional "noreturn" path).
(b) If you're targeting a recent microarchitecture, the compiler can
just use the right instruction.
I might be more conservative than you are on (b). The thought of
building a binary that cannot run "somewhere" where the compiler
supports by default still mortifies me.
(c) Even if the conditional branch is left in, it's not a big overhead.
I 100% agree with (c), see benchmarking results upthread.
Cheers,
Jesse
On Sat, 29 Feb 2020 at 04:13, David Fetter <david@fetter.org> wrote:
On Thu, Feb 27, 2020 at 02:41:49PM +0800, John Naylor wrote:
In 0002, the pg_bitutils functions have a test (input > 0), and the
new callers ceil_log2_* and next_power_of_2_* have asserts. That seems
backward to me.To me, too, now that you mention it. My thinking was a little fuzzed
by trying to accommodate platforms with intrinsics where clz is
defined for 0 inputs.
Wouldn't it be better just to leave the existing definitions of the
pg_leftmost_one_pos* function alone? It seems to me you're hacking
away at those just so you can support passing 1 to the new functions,
and that's giving you trouble now because you're doing num-1 to handle
the case where the number is already a power of 2. Which is
troublesome because 1-1 is 0, which you're trying to code around.
Isn't it better just to put in a run-time check for numbers that are
already a power of 2 and then get rid of the num - 1? Something like:
/*
* pg_nextpow2_32
* Returns the next highest power of 2 of 'num', or 'num', if
it's already a
* power of 2. 'num' mustn't be 0 or be above UINT_MAX / 2.
*/
static inline uint32
pg_nextpow2_32(uint32 num)
{
Assert(num > 0 && num <= UINT_MAX / 2);
/* use some bitmasking tricks to see if only 1 bit is on */
return (num & (num - 1)) == 0 ? num : ((uint32) 1) <<
(pg_leftmost_one_pos32(num) + 1);
}
I think you'll also want to mention the issue about numbers greater
than UINT_MAX / 2, as I've done above and also align your naming
conversion to what else is in that file.
I don't think Jesse's proposed solution is that great due to the
additional function call overhead for pg_count_leading_zeros_32(). The
(num & (num - 1)) == 0 I imagine will perform better, but I didn't
test it.
Also, wondering if you've looked at any of the other places where we
do "*= 2;" or "<<= 1;" inside a loop? There's quite a number that look
like candidates for using the new function.
On Thu, Mar 12, 2020 at 7:42 AM David Rowley <dgrowleyml@gmail.com> wrote:
I don't think Jesse's proposed solution is that great due to the
additional function call overhead for pg_count_leading_zeros_32(). The
(num & (num - 1)) == 0 I imagine will perform better, but I didn't
test it.
Right, I believe we've all landed on the same page about that. I see
two ways of doing next_power_of_2_32 without an indirect function
call, and leaving pg_leftmost_one_pos32 the same as it is now. I
haven't measured either yet (or tested for that matter):
static inline uint32
next_power_of_2_32(uint32 num)
{
Assert(num > 0 && num <= UINT_MAX / 2);
/* use some bitmasking tricks to see if only 1 bit is on */
    if ((num & (num - 1)) == 0)
        return num;
    return ((uint32) 1) << (pg_leftmost_one_pos32(num) + 1);
}
OR
{
Assert(num > 0 && num <= UINT_MAX / 2);
return ((uint32) 1) << ceil_log2_32(num);
}
static inline uint32
ceil_log2_32(uint32 num)
{
Assert(num > 0);
if (num == 1)
return 0;
return pg_leftmost_one_pos32(num-1) + 1;
}
One naming thing I noticed: the name "next power of two" implies to me
num *= 2 for a power of two, not the same as the input. The latter
behavior is better called "ceil power of 2".
Also, wondering if you've looked at any of the other places where we
do "*= 2;" or "<<= 1;" inside a loop? There's quite a number that look
like candidates for using the new function.
A brief look shows a few functions where this is done in a tight loop:
nodes/list.c:new_list
LWLockRegisterTranche
ensure_record_cache_typmod_slot_exists
pqCheckOutBufferSpace
ExecChooseHashTableSize
ExecHashBuildSkewHash
choose_nelem_alloc
init_htab
hash_estimate_size
hash_select_dirsize
AllocSetAlloc
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Thu, 12 Mar 2020 at 22:59, John Naylor <john.naylor@2ndquadrant.com> wrote:
On Thu, Mar 12, 2020 at 7:42 AM David Rowley <dgrowleyml@gmail.com> wrote:
I don't think Jesse's proposed solution is that great due to the
additional function call overhead for pg_count_leading_zeros_32(). The
(num & (num - 1)) == 0 I imagine will perform better, but I didn't
test it.Right, I believe we've all landed on the same page about that. I see
two ways of doing next_power_of_2_32 without an indirect function
call, and leaving pg_leftmost_one_pos32 the same as it is now. I
haven't measured either yet (or tested for that matter):
I've attached an updated patch. It includes the modifications
mentioned above to pre-check for a power of 2 number with the bit
masking hack mentioned above. I also renamed the functions to be more
aligned to the other functions in pg_bitutils.h I'm not convinced
pg_ceil_log2_* needs the word "ceil" in there.
I dropped the part of the patch that was changing longs to ints of a
known size. I went on and did some additional conversion in the 0003
patch. There are more laying around the code base, but I ended up
finding a bit to fix up than i had thought I would. e.g. various
places that repalloc() inside a loop that is multiplying the
allocation size by 2 each time. The repalloc should be done at the
end, not during the loop. I thought I might come back to those some
time in the future.
Is anyone able to have a look at this?
David
Attachments:
v7-0001-Add-functions-to-calculate-the-next-power-of-2.patchtext/x-patch; charset=US-ASCII; name=v7-0001-Add-functions-to-calculate-the-next-power-of-2.patchDownload
From b69f994a8f284b7931f5383a8de323a2986d86e5 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 11:02:47 +1200
Subject: [PATCH v7 1/3] Add functions to calculate the next power of 2
There are many areas in the code where we need to determine the next
highest power of 2 of a given number. We tend to always do that in an
ad-hoc way each time, generally with some tight for loop which performs a
bitshift left once per loop and goes until it finds a number above the
given number.
Here we add two generic functions which make use of the existing
pg_leftmost_one_pos* functions which, when available will allow us to
calculate the next power of 2 without any looping.
Here we don't add any code which uses these new functions. That will be
done in followup commits.
---
src/include/port/pg_bitutils.h | 72 ++++++++++++++++++++++++++++++++++
1 file changed, 72 insertions(+)
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..4ca92f076d 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -129,6 +129,78 @@ pg_rightmost_one_pos64(uint64 word)
#endif /* HAVE__BUILTIN_CTZ */
}
+/*
+ * pg_nextpower2_32
+ * Returns the next highest power of 2 of 'num', or 'num', if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT32_MAX / 2 + 1.
+ */
+static inline uint32
+pg_nextpower2_32(uint32 num)
+{
+ Assert(num > 0 && num <= PG_UINT32_MAX / 2 + 1);
+
+ /*
+ * A power 2 number has only 1 bit set. Subtracting 1 from such a number
+ * will turn on all previous bits resulting in no common bits being set
+ * between num and num-1.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already power 2 */
+
+ /* set only the bit one position above the current leftmost one bit */
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num) + 1);
+}
+
+/*
+ * pg_nextpower2_64
+ * Returns the next highest power of 2 of 'num', or 'num', if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT64_MAX / 2 + 1.
+ */
+static inline uint64
+pg_nextpower2_64(uint64 num)
+{
+ Assert(num > 0 && num <= PG_UINT64_MAX / 2 + 1);
+
+ /*
+ * A power 2 number has only 1 bit set. Subtracting 1 from such a number
+ * will turn on all previous bits resulting in no common bits being set
+ * between num and num-1.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already power 2 */
+
+ /* set only the bit one position above the current leftmost one bit */
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num) + 1);
+}
+
+/*
+ * pg_ceil_log2_32
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint32
+pg_ceil_log2_32(uint32 num)
+{
+ /*
+ * num < 2 covers both 1 (ceil(log2(1)) == 0) and 0; the guard also
+ * keeps num - 1 below from wrapping around to UINT32_MAX when num == 0.
+ */
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos32(num - 1) + 1;
+}
+
+/*
+ * pg_ceil_log2_64
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint64
+pg_ceil_log2_64(uint64 num)
+{
+ /*
+ * num < 2 covers both 1 (ceil(log2(1)) == 0) and 0; the guard also
+ * keeps num - 1 below from wrapping around to UINT64_MAX when num == 0.
+ */
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos64(num - 1) + 1;
+}
+
/* Count the number of one-bits in a uint32 or uint64 */
extern int (*pg_popcount32) (uint32 word);
extern int (*pg_popcount64) (uint64 word);
--
2.25.1
v7-0003-Modify-additional-power-2-calculations-to-use-new.patchtext/x-patch; charset=US-ASCII; name=v7-0003-Modify-additional-power-2-calculations-to-use-new.patchDownload
From 63cc6c0d302bf4c0f6136ae237ce1b7acfa47db6 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 22:50:27 +1200
Subject: [PATCH v7 3/3] Modify additional power 2 calculations to use new
helper functions
2nd pass of modifying various places which obtain the next power
of 2 of a number and make them use the new functions added in
pg_bitutils instead.
---
src/backend/access/gin/ginfast.c | 12 +++---------
src/backend/executor/nodeHash.c | 8 ++------
src/backend/nodes/list.c | 15 +++++++--------
src/backend/statistics/mvdistinct.c | 10 +---------
src/backend/utils/adt/arrayfuncs.c | 9 +++------
5 files changed, 16 insertions(+), 38 deletions(-)
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 11d7ec067a..2e41b34d8d 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -25,6 +25,7 @@
#include "catalog/pg_am.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
+#include "port/pg_bitutils.h"
#include "postmaster/autovacuum.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
@@ -503,10 +504,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
* initially. Make it a power of 2 to avoid wasting memory when
* resizing (since palloc likes powers of 2).
*/
- collector->lentuples = 16;
- while (collector->lentuples < nentries)
- collector->lentuples *= 2;
-
+ collector->lentuples = pg_nextpower2_32(Max(16, nentries));
collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
}
else if (collector->lentuples < collector->ntuples + nentries)
@@ -516,11 +514,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
* overflow, though we could get to a value that exceeds
* MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
*/
- do
- {
- collector->lentuples *= 2;
- } while (collector->lentuples < collector->ntuples + nentries);
-
+ collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
collector->tuples = (IndexTuple *) repalloc(collector->tuples,
sizeof(IndexTuple) * collector->lentuples);
}
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b6d5084908..c881dc1de8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -831,9 +831,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
dbatch = Min(dbatch, max_pointers);
minbatch = (int) dbatch;
- nbatch = 2;
- while (nbatch < minbatch)
- nbatch <<= 1;
+ nbatch = pg_nextpower2_32(Max(2, minbatch));
}
Assert(nbuckets > 0);
@@ -2272,9 +2270,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
* MaxAllocSize/sizeof(void *)/8, but that is not currently possible
* since we limit pg_statistic entries to much less than that.
*/
- nbuckets = 2;
- while (nbuckets <= mcvsToUse)
- nbuckets <<= 1;
+ nbuckets = pg_nextpower2_32(mcvsToUse + 1);
/* use two more bits just to help avoid collisions */
nbuckets <<= 2;
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index bd0c58cd81..80fa8c84e4 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -18,6 +18,7 @@
#include "postgres.h"
#include "nodes/pg_list.h"
+#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
@@ -119,9 +120,7 @@ new_list(NodeTag type, int min_size)
* that's more than twice the size of an existing list, so the size limits
* within palloc will ensure that we don't overflow here.
*/
- max_size = 8; /* semi-arbitrary small power of 2 */
- while (max_size < min_size + LIST_HEADER_OVERHEAD)
- max_size *= 2;
+ max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD));
max_size -= LIST_HEADER_OVERHEAD;
#else
@@ -160,12 +159,12 @@ enlarge_list(List *list, int min_size)
/*
* As above, we prefer power-of-two total allocations; but here we need
- * not account for list header overhead. The existing max length might
- * not be a power of 2, so don't rely on that.
+ * not account for list header overhead.
*/
- new_max_len = 16; /* semi-arbitrary small power of 2 */
- while (new_max_len < min_size)
- new_max_len *= 2;
+
+ /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */
+ new_max_len = pg_nextpower2_32(Max(16, min_size));
+
#else
/* As above, don't allocate anything extra */
new_max_len = min_size;
diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c
index 977d6f3e2e..4b86f0ab2d 100644
--- a/src/backend/statistics/mvdistinct.c
+++ b/src/backend/statistics/mvdistinct.c
@@ -576,15 +576,7 @@ n_choose_k(int n, int k)
static int
num_combinations(int n)
{
- int k;
- int ncombs = 1;
-
- for (k = 1; k <= n; k++)
- ncombs *= 2;
-
- ncombs -= (n + 1);
-
- return ncombs;
+ return (1 << n) - (n + 1);
}
/*
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 7a4a5aaa86..11987c8f3b 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -24,6 +24,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/optimizer.h"
+#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/arrayaccess.h"
#include "utils/builtins.h"
@@ -5313,9 +5314,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
memcpy(&astate->lbs[1], lbs, ndims * sizeof(int));
/* Allocate at least enough data space for this item */
- astate->abytes = 1024;
- while (astate->abytes <= ndatabytes)
- astate->abytes *= 2;
+ astate->abytes = pg_nextpower2_32(Max(1024, ndatabytes + 1));
astate->data = (char *) palloc(astate->abytes);
}
else
@@ -5362,9 +5361,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
* First input with nulls; we must retrospectively handle any
* previous inputs by marking all their items non-null.
*/
- astate->aitems = 256;
- while (astate->aitems <= newnitems)
- astate->aitems *= 2;
+ astate->aitems = pg_nextpower2_32(Max(256, newnitems + 1));
astate->nullbitmap = (bits8 *) palloc((astate->aitems + 7) / 8);
array_bitmap_copy(astate->nullbitmap, 0,
NULL, 0,
--
2.25.1
v7-0002-Modify-various-power-2-calculations-to-use-new-he.patchtext/x-patch; charset=US-ASCII; name=v7-0002-Modify-various-power-2-calculations-to-use-new-he.patchDownload
From 79a1d94225dae859a5eb0c39062e6cb924664413 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 17:56:54 +1200
Subject: [PATCH v7 2/3] Modify various power 2 calculations to use new helper
functions
First pass of modifying various places which obtain the next power of 2 of
a number and make them use the new functions added in pg_bitutils instead.
---
src/backend/access/hash/hashpage.c | 24 +++++++++++-------------
src/backend/access/hash/hashsort.c | 3 ++-
src/backend/access/hash/hashutil.c | 4 ++--
src/backend/utils/hash/dynahash.c | 14 +++++++-------
src/include/lib/simplehash.h | 27 ++++-----------------------
src/tools/msvc/Mkvcbuild.pm | 2 +-
6 files changed, 27 insertions(+), 47 deletions(-)
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..a664ecf494 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -31,6 +31,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "miscadmin.h"
+#include "port/pg_bitutils.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/smgr.h"
@@ -502,7 +503,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
+ uint32 lshift;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -542,15 +543,12 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_nmaps = 0;
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
+
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
+ Assert(lshift > 0);
+ metap->hashm_bmsize = 1 << lshift;
+ metap->hashm_bmshift = lshift + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +568,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -659,9 +657,9 @@ restart_expand:
*
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
+ * to half that to prevent failure of pg_ceil_log2_32() and insufficient
+ * space in hashm_spares[]. It's moot anyway because an index with 2^32
+ * buckets would certainly overflow BlockNumber and hence
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
* than a disk block then this would be an independent constraint.
*
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..2c7b5857b5 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -29,6 +29,7 @@
#include "commands/progress.h"
#include "miscadmin.h"
#include "pgstat.h"
+#include "port/pg_bitutils.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = pg_nextpower2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..9770d17ff4 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -158,8 +159,7 @@ _hash_spareindex(uint32 num_bucket)
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
-
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index b5381958e7..5b4b9e487f 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -87,6 +87,7 @@
#include "access/xact.h"
#include "common/hashfn.h"
+#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
@@ -1718,16 +1719,15 @@ hash_corrupted(HTAB *hashp)
int
my_log2(long num)
{
- int i;
- long limit;
-
- /* guard against too-large input, which would put us into infinite loop */
+ /* guard against too-large input, which would be invalid for pg_ceil_log2_*() */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
+#if SIZEOF_LONG < 8
+ return pg_ceil_log2_32(num);
+#else
+ return pg_ceil_log2_64(num);
+#endif
}
/* calculate first power of 2 >= num, bounded to what will fit in a long */
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..8cb03cda6c 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = pg_nextpower2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == pg_nextpower2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/tools/msvc/Mkvcbuild.pm b/src/tools/msvc/Mkvcbuild.pm
index 72a21dbd41..fd7d4021a5 100644
--- a/src/tools/msvc/Mkvcbuild.pm
+++ b/src/tools/msvc/Mkvcbuild.pm
@@ -81,7 +81,7 @@ my $frontend_extrasource = {
};
my @frontend_excludes = (
'pgevent', 'pg_basebackup', 'pg_rewind', 'pg_dump',
- 'pg_waldump', 'scripts');
+ 'pg_validatebackup', 'pg_waldump', 'scripts');
sub mkvcbuild
{
--
2.25.1
On Tue, Apr 7, 2020 at 7:41 PM David Rowley <dgrowleyml@gmail.com> wrote:
I've attached an updated patch. It includes the modifications
mentioned above to pre-check for a power of 2 number with the bit
masking hack mentioned above. I also renamed the functions to be more
aligned to the other functions in pg_bitutils.h I'm not convinced
pg_ceil_log2_* needs the word "ceil" in there.

I dropped the part of the patch that was changing longs to ints of a
known size. I went on and did some additional conversion in the 0003
patch. There are more laying around the code base, but I ended up
finding a bit more to fix up than I had thought I would, e.g. various
places that repalloc() inside a loop that is multiplying the
allocation size by 2 each time. The repalloc should be done at the
end, not during the loop. I thought I might come back to those some
time in the future.

Is anyone able to have a look at this?
David
Hi David,
Overall looks good to me. Just a couple things I see:
It seems _hash_log2 is still in the tree, but has no callers?
- max_size = 8; /* semi-arbitrary small power of 2 */
- while (max_size < min_size + LIST_HEADER_OVERHEAD)
- max_size *= 2;
+ max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD));
Minor nit: We might want to keep the comment that the number is
"semi-arbitrary" here as well.
- 'pg_waldump', 'scripts');
+ 'pg_validatebackup', 'pg_waldump', 'scripts');
This seems like a separate concern?
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
Hi John,
Thanks for having a look at this.
On Wed, 8 Apr 2020 at 00:16, John Naylor <john.naylor@2ndquadrant.com> wrote:
Overall looks good to me. Just a couple things I see:
It seems _hash_log2 is still in the tree, but has no callers?
Yeah, I left it in there since it was an external function. Perhaps
we could rip it out and write something in the commit message that it
should be replaced with the newer functions. Thinking of extension
authors here.
- max_size = 8; /* semi-arbitrary small power of 2 */
- while (max_size < min_size + LIST_HEADER_OVERHEAD)
- max_size *= 2;
+ max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD));

Minor nit: We might want to keep the comment that the number is
"semi-arbitrary" here as well.
I had dropped that as the 8 part was mentioned in the comment above:
"The minimum allocation is 8 ListCell units". I can put it back, I had
just thought it was overkill.
- 'pg_waldump', 'scripts');
+ 'pg_validatebackup', 'pg_waldump', 'scripts');

This seems like a separate concern?
That's required due to the #include "lib/simplehash.h" in
pg_validatebackup.c. I have to say, I didn't really take the time to
understand all the Perl code there, but without that change, I was
getting a link error when testing on Windows, and after I added
pg_validatebackup to that array, it worked.
David
On Tue, Apr 7, 2020 at 8:26 PM David Rowley <dgrowleyml@gmail.com> wrote:
Hi John,
Thanks for having a look at this.
On Wed, 8 Apr 2020 at 00:16, John Naylor <john.naylor@2ndquadrant.com> wrote:
Overall looks good to me. Just a couple things I see:
It seems _hash_log2 is still in the tree, but has no callers?
Yeah, I left it in there since it was an external function. Perhaps
we could rip it out and write something in the commit message that it
should be replaced with the newer functions. Thinking of extension
authors here.
I'm not the best judge of where to draw the line for extensions, but
this function does have a name beginning with an underscore, which to
me is a red flag that it's internal in nature.
Minor nit: We might want to keep the comment that the number is
"semi-arbitrary" here as well.

I had dropped that as the 8 part was mentioned in the comment above:
"The minimum allocation is 8 ListCell units". I can put it back, I had
just thought it was overkill.
Oh I see now, nevermind.
- 'pg_waldump', 'scripts');
+ 'pg_validatebackup', 'pg_waldump', 'scripts');

This seems like a separate concern?
That's required due to the #include "lib/simplehash.h" in
pg_validatebackup.c. I have to say, I didn't really take the time to
understand all the Perl code there, but without that change, I was
getting a link error when testing on Windows, and after I added
pg_validatebackup to that array, it worked.
Hmm. Does pg_bitutils.h need something like this?
#ifndef FRONTEND
extern PGDLLIMPORT const uint8 pg_leftmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_rightmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
#else
extern const uint8 pg_leftmost_one_pos[256];
extern const uint8 pg_rightmost_one_pos[256];
extern const uint8 pg_number_of_ones[256];
#endif
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services
On Wed, 8 Apr 2020 at 01:16, John Naylor <john.naylor@2ndquadrant.com> wrote:
On Tue, Apr 7, 2020 at 8:26 PM David Rowley <dgrowleyml@gmail.com> wrote:
Hi John,
Thanks for having a look at this.
On Wed, 8 Apr 2020 at 00:16, John Naylor <john.naylor@2ndquadrant.com> wrote:
Overall looks good to me. Just a couple things I see:
It seems _hash_log2 is still in the tree, but has no callers?
Yeah, I left it in there since it was an external function. Perhaps
we could rip it out and write something in the commit message that it
should be replaced with the newer functions. Thinking of extension
authors here.

I'm not the best judge of where to draw the line for extensions, but
this function does have a name beginning with an underscore, which to
me is a red flag that it's internal in nature.
OK. I've removed that function now and stuck a note in the commit
message to mention an alternative.
Hmm. Does pg_bitutils.h need something like this?
#ifndef FRONTEND
extern PGDLLIMPORT const uint8 pg_leftmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_rightmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
#else
extern const uint8 pg_leftmost_one_pos[256];
extern const uint8 pg_rightmost_one_pos[256];
extern const uint8 pg_number_of_ones[256];
#endif
Yeah, looking at keywords.h, we hit this before in c2d1eea9e75. Your
proposed fix works and is the same as in keywords.h, so I've gone with
that.
I've attached v8 of the patchset.
David
Attachments:
v8-0001-Add-functions-to-calculate-the-next-power-of-2.patchapplication/octet-stream; name=v8-0001-Add-functions-to-calculate-the-next-power-of-2.patchDownload
From 45016b7eec158e9851139fdedeed9ae78d8dc018 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 11:02:47 +1200
Subject: [PATCH v8 1/3] Add functions to calculate the next power of 2
There are many areas in the code where we need to determine the next
highest power of 2 of a given number. We tend to always do that in an
ad-hoc way each time, generally with some tight for loop which performs a
bitshift left once per loop and goes until it finds a number above the
given number.
Here we add two generic functions which make use of the existing
pg_leftmost_one_pos* functions which, when available will allow us to
calculate the next power of 2 without any looping.
Here we don't add any code which uses these new functions. That will be
done in followup commits.
---
src/include/port/pg_bitutils.h | 72 ++++++++++++++++++++++++++++++++++
1 file changed, 72 insertions(+)
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 498e532308..4ca92f076d 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -129,6 +129,78 @@ pg_rightmost_one_pos64(uint64 word)
#endif /* HAVE__BUILTIN_CTZ */
}
+/*
+ * pg_nextpower2_32
+ * Returns the next highest power of 2 of 'num', or 'num', if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT32_MAX / 2 + 1.
+ */
+static inline uint32
+pg_nextpower2_32(uint32 num)
+{
+ Assert(num > 0 && num <= PG_UINT32_MAX / 2 + 1);
+
+ /*
+ * A power 2 number has only 1 bit set. Subtracting 1 from such a number
+ * will turn on all previous bits resulting in no common bits being set
+ * between num and num-1.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already power 2 */
+
+ return ((uint32) 1) << (pg_leftmost_one_pos32(num) + 1);
+}
+
+/*
+ * pg_nextpower2_64
+ * Returns the next highest power of 2 of 'num', or 'num', if it's
+ * already a power of 2.
+ *
+ * 'num' mustn't be 0 or be above PG_UINT64_MAX / 2 + 1.
+ */
+static inline uint64
+pg_nextpower2_64(uint64 num)
+{
+ Assert(num > 0 && num <= PG_UINT64_MAX / 2 + 1);
+
+ /*
+ * A power 2 number has only 1 bit set. Subtracting 1 from such a number
+ * will turn on all previous bits resulting in no common bits being set
+ * between num and num-1.
+ */
+ if ((num & (num - 1)) == 0)
+ return num; /* already power 2 */
+
+ return ((uint64) 1) << (pg_leftmost_one_pos64(num) + 1);
+}
+
+/*
+ * pg_ceil_log2_32
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint32
+pg_ceil_log2_32(uint32 num)
+{
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos32(num - 1) + 1;
+}
+
+/*
+ * pg_ceil_log2_64
+ * Returns equivalent of ceil(log2(num))
+ */
+static inline uint64
+pg_ceil_log2_64(uint64 num)
+{
+ if (num < 2)
+ return 0;
+ else
+ return pg_leftmost_one_pos64(num - 1) + 1;
+}
+
/* Count the number of one-bits in a uint32 or uint64 */
extern int (*pg_popcount32) (uint32 word);
extern int (*pg_popcount64) (uint64 word);
--
2.25.1
v8-0002-Modify-various-power-2-calculations-to-use-new-he.patchapplication/octet-stream; name=v8-0002-Modify-various-power-2-calculations-to-use-new-he.patchDownload
From f59f8c832ff32b1c4b1813ecbbfdd9eacf551d11 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 17:56:54 +1200
Subject: [PATCH v8 2/3] Modify various power 2 calculations to use new helper
functions
First pass of modifying various places which obtain the next power of 2 of
a number and make them use the new functions added in pg_bitutils.h
instead.
This also removes the _hash_log2() function. There are no longer any
callers in core. Other users can swap their _hash_log2(n) call to make use
of pg_ceil_log2_32(n).
---
src/backend/access/hash/hashpage.c | 24 +++++++++++-------------
src/backend/access/hash/hashsort.c | 3 ++-
src/backend/access/hash/hashutil.c | 19 ++-----------------
src/backend/utils/hash/dynahash.c | 14 +++++++-------
src/include/access/hash.h | 1 -
src/include/lib/simplehash.h | 27 ++++-----------------------
src/include/port/pg_bitutils.h | 6 ++++++
7 files changed, 32 insertions(+), 62 deletions(-)
diff --git a/src/backend/access/hash/hashpage.c b/src/backend/access/hash/hashpage.c
index 55d85644a4..a664ecf494 100644
--- a/src/backend/access/hash/hashpage.c
+++ b/src/backend/access/hash/hashpage.c
@@ -31,6 +31,7 @@
#include "access/hash.h"
#include "access/hash_xlog.h"
#include "miscadmin.h"
+#include "port/pg_bitutils.h"
#include "storage/lmgr.h"
#include "storage/predicate.h"
#include "storage/smgr.h"
@@ -502,7 +503,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
double dnumbuckets;
uint32 num_buckets;
uint32 spare_index;
- uint32 i;
+ uint32 lshift;
/*
* Choose the number of initial bucket pages to match the fill factor
@@ -542,15 +543,12 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
metap->hashm_nmaps = 0;
metap->hashm_ffactor = ffactor;
metap->hashm_bsize = HashGetMaxBitmapSize(page);
+
/* find largest bitmap array size that will fit in page size */
- for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
- {
- if ((1 << i) <= metap->hashm_bsize)
- break;
- }
- Assert(i > 0);
- metap->hashm_bmsize = 1 << i;
- metap->hashm_bmshift = i + BYTE_TO_BIT;
+ lshift = pg_leftmost_one_pos32(metap->hashm_bsize);
+ Assert(lshift > 0);
+ metap->hashm_bmsize = 1 << lshift;
+ metap->hashm_bmshift = lshift + BYTE_TO_BIT;
Assert((1 << BMPG_SHIFT(metap)) == (BMPG_MASK(metap) + 1));
/*
@@ -570,7 +568,7 @@ _hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
* Set highmask as next immediate ((2 ^ x) - 1), which should be
* sufficient to cover num_buckets.
*/
- metap->hashm_highmask = (1 << (_hash_log2(num_buckets + 1))) - 1;
+ metap->hashm_highmask = pg_nextpower2_32(num_buckets + 1) - 1;
metap->hashm_lowmask = (metap->hashm_highmask >> 1);
MemSet(metap->hashm_spares, 0, sizeof(metap->hashm_spares));
@@ -659,9 +657,9 @@ restart_expand:
*
* Ideally we'd allow bucket numbers up to UINT_MAX-1 (no higher because
* the calculation maxbucket+1 mustn't overflow). Currently we restrict
- * to half that because of overflow looping in _hash_log2() and
- * insufficient space in hashm_spares[]. It's moot anyway because an
- * index with 2^32 buckets would certainly overflow BlockNumber and hence
+ * to half that to prevent failure of pg_ceil_log2_32() and insufficient
+ * space in hashm_spares[]. It's moot anyway because an index with 2^32
+ * buckets would certainly overflow BlockNumber and hence
* _hash_alloc_buckets() would fail, but if we supported buckets smaller
* than a disk block then this would be an independent constraint.
*
diff --git a/src/backend/access/hash/hashsort.c b/src/backend/access/hash/hashsort.c
index 9cb41d62e7..2c7b5857b5 100644
--- a/src/backend/access/hash/hashsort.c
+++ b/src/backend/access/hash/hashsort.c
@@ -29,6 +29,7 @@
#include "commands/progress.h"
#include "miscadmin.h"
#include "pgstat.h"
+#include "port/pg_bitutils.h"
#include "utils/tuplesort.h"
@@ -69,7 +70,7 @@ _h_spoolinit(Relation heap, Relation index, uint32 num_buckets)
* NOTE : This hash mask calculation should be in sync with similar
* calculation in _hash_init_metabuffer.
*/
- hspool->high_mask = (((uint32) 1) << _hash_log2(num_buckets + 1)) - 1;
+ hspool->high_mask = pg_nextpower2_32(num_buckets + 1) - 1;
hspool->low_mask = (hspool->high_mask >> 1);
hspool->max_buckets = num_buckets - 1;
diff --git a/src/backend/access/hash/hashutil.c b/src/backend/access/hash/hashutil.c
index 9efc8016bc..b23d19474e 100644
--- a/src/backend/access/hash/hashutil.c
+++ b/src/backend/access/hash/hashutil.c
@@ -17,6 +17,7 @@
#include "access/hash.h"
#include "access/reloptions.h"
#include "access/relscan.h"
+#include "port/pg_bitutils.h"
#include "storage/buf_internals.h"
#include "utils/lsyscache.h"
#include "utils/rel.h"
@@ -134,21 +135,6 @@ _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
return bucket;
}
-/*
- * _hash_log2 -- returns ceil(lg2(num))
- */
-uint32
-_hash_log2(uint32 num)
-{
- uint32 i,
- limit;
-
- limit = 1;
- for (i = 0; limit < num; limit <<= 1, i++)
- ;
- return i;
-}
-
/*
* _hash_spareindex -- returns spare index / global splitpoint phase of the
* bucket
@@ -158,8 +144,7 @@ _hash_spareindex(uint32 num_bucket)
{
uint32 splitpoint_group;
uint32 splitpoint_phases;
-
- splitpoint_group = _hash_log2(num_bucket);
+ splitpoint_group = pg_ceil_log2_32(num_bucket);
if (splitpoint_group < HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE)
return splitpoint_group;
diff --git a/src/backend/utils/hash/dynahash.c b/src/backend/utils/hash/dynahash.c
index b5381958e7..5b4b9e487f 100644
--- a/src/backend/utils/hash/dynahash.c
+++ b/src/backend/utils/hash/dynahash.c
@@ -87,6 +87,7 @@
#include "access/xact.h"
#include "common/hashfn.h"
+#include "port/pg_bitutils.h"
#include "storage/shmem.h"
#include "storage/spin.h"
#include "utils/dynahash.h"
@@ -1718,16 +1719,15 @@ hash_corrupted(HTAB *hashp)
int
my_log2(long num)
{
- int i;
- long limit;
-
- /* guard against too-large input, which would put us into infinite loop */
+ /* guard against too-large input, which would be invalid for pg_ceil_log2_*() */
if (num > LONG_MAX / 2)
num = LONG_MAX / 2;
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
+#if SIZEOF_LONG < 8
+ return pg_ceil_log2_32(num);
+#else
+ return pg_ceil_log2_64(num);
+#endif
}
/* calculate first power of 2 >= num, bounded to what will fit in a long */
diff --git a/src/include/access/hash.h b/src/include/access/hash.h
index 8cda938cbe..94b643cc77 100644
--- a/src/include/access/hash.h
+++ b/src/include/access/hash.h
@@ -451,7 +451,6 @@ extern uint32 _hash_datum2hashkey(Relation rel, Datum key);
extern uint32 _hash_datum2hashkey_type(Relation rel, Datum key, Oid keytype);
extern Bucket _hash_hashkey2bucket(uint32 hashkey, uint32 maxbucket,
uint32 highmask, uint32 lowmask);
-extern uint32 _hash_log2(uint32 num);
extern uint32 _hash_spareindex(uint32 num_bucket);
extern uint32 _hash_get_totalbuckets(uint32 splitpoint_phase);
extern void _hash_checkpage(Relation rel, Buffer buf, int flags);
diff --git a/src/include/lib/simplehash.h b/src/include/lib/simplehash.h
index 5a6783f653..8cb03cda6c 100644
--- a/src/include/lib/simplehash.h
+++ b/src/include/lib/simplehash.h
@@ -57,6 +57,8 @@
* backwards, unless they're empty or already at their optimal position.
*/
+#include "port/pg_bitutils.h"
+
/* helpers */
#define SH_MAKE_PREFIX(a) CppConcat(a,_)
#define SH_MAKE_NAME(name) SH_MAKE_NAME_(SH_MAKE_PREFIX(SH_PREFIX),name)
@@ -215,27 +217,6 @@ SH_SCOPE void SH_STAT(SH_TYPE * tb);
#ifndef SIMPLEHASH_H
#define SIMPLEHASH_H
-/* FIXME: can we move these to a central location? */
-
-/* calculate ceil(log base 2) of num */
-static inline uint64
-sh_log2(uint64 num)
-{
- int i;
- uint64 limit;
-
- for (i = 0, limit = 1; limit < num; i++, limit <<= 1)
- ;
- return i;
-}
-
-/* calculate first power of 2 >= num */
-static inline uint64
-sh_pow2(uint64 num)
-{
- return ((uint64) 1) << sh_log2(num);
-}
-
#ifdef FRONTEND
#define sh_error(...) pg_log_error(__VA_ARGS__)
#define sh_log(...) pg_log_info(__VA_ARGS__)
@@ -259,7 +240,7 @@ SH_COMPUTE_PARAMETERS(SH_TYPE * tb, uint32 newsize)
size = Max(newsize, 2);
/* round up size to the next power of 2, that's how bucketing works */
- size = sh_pow2(size);
+ size = pg_nextpower2_64(size);
Assert(size <= SH_MAX_SIZE);
/*
@@ -434,7 +415,7 @@ SH_GROW(SH_TYPE * tb, uint32 newsize)
uint32 startelem = 0;
uint32 copyelem;
- Assert(oldsize == sh_pow2(oldsize));
+ Assert(oldsize == pg_nextpower2_64(oldsize));
Assert(oldsize != SH_MAX_SIZE);
Assert(oldsize < newsize);
diff --git a/src/include/port/pg_bitutils.h b/src/include/port/pg_bitutils.h
index 4ca92f076d..887e782911 100644
--- a/src/include/port/pg_bitutils.h
+++ b/src/include/port/pg_bitutils.h
@@ -13,9 +13,15 @@
#ifndef PG_BITUTILS_H
#define PG_BITUTILS_H
+#ifndef FRONTEND
extern PGDLLIMPORT const uint8 pg_leftmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_rightmost_one_pos[256];
extern PGDLLIMPORT const uint8 pg_number_of_ones[256];
+#else
+extern const uint8 pg_leftmost_one_pos[256];
+extern const uint8 pg_rightmost_one_pos[256];
+extern const uint8 pg_number_of_ones[256];
+#endif
/*
* pg_leftmost_one_pos32
--
2.25.1
v8-0003-Modify-additional-power-2-calculations-to-use-new.patchapplication/octet-stream; name=v8-0003-Modify-additional-power-2-calculations-to-use-new.patchDownload
From 4925bba810c02699bcc40d6379073e54f52566d1 Mon Sep 17 00:00:00 2001
From: "dgrowley@gmail.com" <dgrowley@gmail.com>
Date: Tue, 7 Apr 2020 22:50:27 +1200
Subject: [PATCH v8 3/3] Modify additional power 2 calculations to use new
helper functions
2nd pass of modifying various places which obtain the next power
of 2 of a number and make them use the new functions added in
pg_bitutils instead.
In passing, also modify num_combinations(). This can be implemented
using simple bitshifting rather than looping.
---
src/backend/access/gin/ginfast.c | 12 +++---------
src/backend/executor/nodeHash.c | 8 ++------
src/backend/nodes/list.c | 15 +++++++--------
src/backend/statistics/mvdistinct.c | 10 +---------
src/backend/utils/adt/arrayfuncs.c | 9 +++------
5 files changed, 16 insertions(+), 38 deletions(-)
diff --git a/src/backend/access/gin/ginfast.c b/src/backend/access/gin/ginfast.c
index 11d7ec067a..2e41b34d8d 100644
--- a/src/backend/access/gin/ginfast.c
+++ b/src/backend/access/gin/ginfast.c
@@ -25,6 +25,7 @@
#include "catalog/pg_am.h"
#include "commands/vacuum.h"
#include "miscadmin.h"
+#include "port/pg_bitutils.h"
#include "postmaster/autovacuum.h"
#include "storage/indexfsm.h"
#include "storage/lmgr.h"
@@ -503,10 +504,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
* initially. Make it a power of 2 to avoid wasting memory when
* resizing (since palloc likes powers of 2).
*/
- collector->lentuples = 16;
- while (collector->lentuples < nentries)
- collector->lentuples *= 2;
-
+ collector->lentuples = pg_nextpower2_32(Max(16, nentries));
collector->tuples = (IndexTuple *) palloc(sizeof(IndexTuple) * collector->lentuples);
}
else if (collector->lentuples < collector->ntuples + nentries)
@@ -516,11 +514,7 @@ ginHeapTupleFastCollect(GinState *ginstate,
* overflow, though we could get to a value that exceeds
* MaxAllocSize/sizeof(IndexTuple), causing an error in repalloc.
*/
- do
- {
- collector->lentuples *= 2;
- } while (collector->lentuples < collector->ntuples + nentries);
-
+ collector->lentuples = pg_nextpower2_32(collector->ntuples + nentries);
collector->tuples = (IndexTuple *) repalloc(collector->tuples,
sizeof(IndexTuple) * collector->lentuples);
}
diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c
index b6d5084908..c881dc1de8 100644
--- a/src/backend/executor/nodeHash.c
+++ b/src/backend/executor/nodeHash.c
@@ -831,9 +831,7 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew,
dbatch = ceil(inner_rel_bytes / (hash_table_bytes - bucket_bytes));
dbatch = Min(dbatch, max_pointers);
minbatch = (int) dbatch;
- nbatch = 2;
- while (nbatch < minbatch)
- nbatch <<= 1;
+ nbatch = pg_nextpower2_32(Max(2, minbatch));
}
Assert(nbuckets > 0);
@@ -2272,9 +2270,7 @@ ExecHashBuildSkewHash(HashJoinTable hashtable, Hash *node, int mcvsToUse)
* MaxAllocSize/sizeof(void *)/8, but that is not currently possible
* since we limit pg_statistic entries to much less than that.
*/
- nbuckets = 2;
- while (nbuckets <= mcvsToUse)
- nbuckets <<= 1;
+ nbuckets = pg_nextpower2_32(mcvsToUse + 1);
/* use two more bits just to help avoid collisions */
nbuckets <<= 2;
diff --git a/src/backend/nodes/list.c b/src/backend/nodes/list.c
index bd0c58cd81..80fa8c84e4 100644
--- a/src/backend/nodes/list.c
+++ b/src/backend/nodes/list.c
@@ -18,6 +18,7 @@
#include "postgres.h"
#include "nodes/pg_list.h"
+#include "port/pg_bitutils.h"
#include "utils/memdebug.h"
#include "utils/memutils.h"
@@ -119,9 +120,7 @@ new_list(NodeTag type, int min_size)
* that's more than twice the size of an existing list, so the size limits
* within palloc will ensure that we don't overflow here.
*/
- max_size = 8; /* semi-arbitrary small power of 2 */
- while (max_size < min_size + LIST_HEADER_OVERHEAD)
- max_size *= 2;
+ max_size = pg_nextpower2_32(Max(8, min_size + LIST_HEADER_OVERHEAD));
max_size -= LIST_HEADER_OVERHEAD;
#else
@@ -160,12 +159,12 @@ enlarge_list(List *list, int min_size)
/*
* As above, we prefer power-of-two total allocations; but here we need
- * not account for list header overhead. The existing max length might
- * not be a power of 2, so don't rely on that.
+ * not account for list header overhead.
*/
- new_max_len = 16; /* semi-arbitrary small power of 2 */
- while (new_max_len < min_size)
- new_max_len *= 2;
+
+ /* clamp the minimum value to 16, a semi-arbitrary small power of 2 */
+ new_max_len = pg_nextpower2_32(Max(16, min_size));
+
#else
/* As above, don't allocate anything extra */
new_max_len = min_size;
diff --git a/src/backend/statistics/mvdistinct.c b/src/backend/statistics/mvdistinct.c
index 977d6f3e2e..4b86f0ab2d 100644
--- a/src/backend/statistics/mvdistinct.c
+++ b/src/backend/statistics/mvdistinct.c
@@ -576,15 +576,7 @@ n_choose_k(int n, int k)
static int
num_combinations(int n)
{
- int k;
- int ncombs = 1;
-
- for (k = 1; k <= n; k++)
- ncombs *= 2;
-
- ncombs -= (n + 1);
-
- return ncombs;
+ return (1 << n) - (n + 1);
}
/*
diff --git a/src/backend/utils/adt/arrayfuncs.c b/src/backend/utils/adt/arrayfuncs.c
index 7a4a5aaa86..11987c8f3b 100644
--- a/src/backend/utils/adt/arrayfuncs.c
+++ b/src/backend/utils/adt/arrayfuncs.c
@@ -24,6 +24,7 @@
#include "nodes/nodeFuncs.h"
#include "nodes/supportnodes.h"
#include "optimizer/optimizer.h"
+#include "port/pg_bitutils.h"
#include "utils/array.h"
#include "utils/arrayaccess.h"
#include "utils/builtins.h"
@@ -5313,9 +5314,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
memcpy(&astate->lbs[1], lbs, ndims * sizeof(int));
/* Allocate at least enough data space for this item */
- astate->abytes = 1024;
- while (astate->abytes <= ndatabytes)
- astate->abytes *= 2;
+ astate->abytes = pg_nextpower2_32(Max(1024, ndatabytes + 1));
astate->data = (char *) palloc(astate->abytes);
}
else
@@ -5362,9 +5361,7 @@ accumArrayResultArr(ArrayBuildStateArr *astate,
* First input with nulls; we must retrospectively handle any
* previous inputs by marking all their items non-null.
*/
- astate->aitems = 256;
- while (astate->aitems <= newnitems)
- astate->aitems *= 2;
+ astate->aitems = pg_nextpower2_32(Max(256, newnitems + 1));
astate->nullbitmap = (bits8 *) palloc((astate->aitems + 7) / 8);
array_bitmap_copy(astate->nullbitmap, 0,
NULL, 0,
--
2.25.1
On Wed, Apr 8, 2020 at 9:04 AM David Rowley <dgrowleyml@gmail.com> wrote:
[v8]
Looks good to me, marked RFC.
--
John Naylor https://www.2ndQuadrant.com/
PostgreSQL Development, 24x7 Support, Remote DBA, Training & Services