PostgreSQL Source Code git master
nbtutils.c
Go to the documentation of this file.
1/*-------------------------------------------------------------------------
2 *
3 * nbtutils.c
4 * Utility code for Postgres btree implementation.
5 *
6 * Portions Copyright (c) 1996-2025, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
8 *
9 *
10 * IDENTIFICATION
11 * src/backend/access/nbtree/nbtutils.c
12 *
13 *-------------------------------------------------------------------------
14 */
15
16#include "postgres.h"
17
18#include <time.h>
19
20#include "access/nbtree.h"
21#include "access/reloptions.h"
22#include "access/relscan.h"
23#include "commands/progress.h"
24#include "miscadmin.h"
25#include "utils/datum.h"
26#include "utils/lsyscache.h"
27#include "utils/rel.h"
28
29
30#define LOOK_AHEAD_REQUIRED_RECHECKS 3
31#define LOOK_AHEAD_DEFAULT_DISTANCE 5
32#define NSKIPADVANCES_THRESHOLD 3
33
34static inline int32 _bt_compare_array_skey(FmgrInfo *orderproc,
35 Datum tupdatum, bool tupnull,
36 Datum arrdatum, ScanKey cur);
37static void _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir,
38 Datum tupdatum, bool tupnull,
40 int32 *set_elem_result);
41static void _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array,
42 int32 set_elem_result, Datum tupdatum, bool tupnull);
43static void _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
44static void _bt_array_set_low_or_high(Relation rel, ScanKey skey,
45 BTArrayKeyInfo *array, bool low_not_high);
46static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
47static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array);
49 bool *skip_array_set);
51 IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
52 bool readpagetup, int sktrig, bool *scanBehind);
54 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
55 int sktrig, bool sktrig_required);
56#ifdef USE_ASSERT_CHECKING
57static bool _bt_verify_keys_with_arraykeys(IndexScanDesc scan);
58#endif
60 IndexTuple finaltup);
62 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
63 bool advancenonrequired, bool forcenonrequired,
64 bool *continuescan, int *ikey);
65static bool _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult);
66static bool _bt_check_rowcompare(ScanKey header,
67 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
68 ScanDirection dir, bool forcenonrequired, bool *continuescan);
70 int tupnatts, TupleDesc tupdesc);
71static int _bt_keep_natts(Relation rel, IndexTuple lastleft,
72 IndexTuple firstright, BTScanInsert itup_key);
73
74
75/*
76 * _bt_mkscankey
77 * Build an insertion scan key that contains comparison data from itup
78 * as well as comparator routines appropriate to the key datatypes.
79 *
80 * The result is intended for use with _bt_compare() and _bt_truncate().
81 * Callers that don't need to fill out the insertion scankey arguments
82 * (e.g. they use an ad-hoc comparison routine, or only need a scankey
83 * for _bt_truncate()) can pass a NULL index tuple. The scankey will
84 * be initialized as if an "all truncated" pivot tuple was passed
85 * instead.
86 *
87 * Note that we may occasionally have to share lock the metapage to
88 * determine whether or not the keys in the index are expected to be
89 * unique (i.e. if this is a "heapkeyspace" index). We assume a
90 * heapkeyspace index when caller passes a NULL tuple, allowing index
91 * build callers to avoid accessing the non-existent metapage. We
92 * also assume that the index is _not_ allequalimage when a NULL tuple
93 * is passed; CREATE INDEX callers call _bt_allequalimage() to set the
94 * field themselves.
95 */
98{
100 ScanKey skey;
101 TupleDesc itupdesc;
102 int indnkeyatts;
103 int16 *indoption;
104 int tupnatts;
105 int i;
106
107 itupdesc = RelationGetDescr(rel);
108 indnkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
109 indoption = rel->rd_indoption;
110 tupnatts = itup ? BTreeTupleGetNAtts(itup, rel) : 0;
111
113
114 /*
115 * We'll execute search using scan key constructed on key columns.
116 * Truncated attributes and non-key attributes are omitted from the final
117 * scan key.
118 */
119 key = palloc(offsetof(BTScanInsertData, scankeys) +
120 sizeof(ScanKeyData) * indnkeyatts);
121 if (itup)
122 _bt_metaversion(rel, &key->heapkeyspace, &key->allequalimage);
123 else
124 {
125 /* Utility statement callers can set these fields themselves */
126 key->heapkeyspace = true;
127 key->allequalimage = false;
128 }
129 key->anynullkeys = false; /* initial assumption */
130 key->nextkey = false; /* usual case, required by btinsert */
131 key->backward = false; /* usual case, required by btinsert */
132 key->keysz = Min(indnkeyatts, tupnatts);
133 key->scantid = key->heapkeyspace && itup ?
134 BTreeTupleGetHeapTID(itup) : NULL;
135 skey = key->scankeys;
136 for (i = 0; i < indnkeyatts; i++)
137 {
138 FmgrInfo *procinfo;
139 Datum arg;
140 bool null;
141 int flags;
142
143 /*
144 * We can use the cached (default) support procs since no cross-type
145 * comparison can be needed.
146 */
147 procinfo = index_getprocinfo(rel, i + 1, BTORDER_PROC);
148
149 /*
150 * Key arguments built from truncated attributes (or when caller
151 * provides no tuple) are defensively represented as NULL values. They
152 * should never be used.
153 */
154 if (i < tupnatts)
155 arg = index_getattr(itup, i + 1, itupdesc, &null);
156 else
157 {
158 arg = (Datum) 0;
159 null = true;
160 }
161 flags = (null ? SK_ISNULL : 0) | (indoption[i] << SK_BT_INDOPTION_SHIFT);
163 flags,
164 (AttrNumber) (i + 1),
167 rel->rd_indcollation[i],
168 procinfo,
169 arg);
170 /* Record if any key attribute is NULL (or truncated) */
171 if (null)
172 key->anynullkeys = true;
173 }
174
175 /*
176 * In NULLS NOT DISTINCT mode, we pretend that there are no null keys, so
177 * that full uniqueness check is done.
178 */
179 if (rel->rd_index->indnullsnotdistinct)
180 key->anynullkeys = false;
181
182 return key;
183}
184
185/*
186 * free a retracement stack made by _bt_search.
187 */
188void
190{
191 BTStack ostack;
192
193 while (stack != NULL)
194 {
195 ostack = stack;
196 stack = stack->bts_parent;
197 pfree(ostack);
198 }
199}
200
201/*
202 * _bt_compare_array_skey() -- apply array comparison function
203 *
204 * Compares caller's tuple attribute value to a scan key/array element.
205 * Helper function used during binary searches of SK_SEARCHARRAY arrays.
206 *
207 * This routine returns:
208 * <0 if tupdatum < arrdatum;
209 * 0 if tupdatum == arrdatum;
210 * >0 if tupdatum > arrdatum.
211 *
212 * This is essentially the same interface as _bt_compare: both functions
213 * compare the value that they're searching for to a binary search pivot.
214 * However, unlike _bt_compare, this function's "tuple argument" comes first,
215 * while its "array/scankey argument" comes second.
216*/
217static inline int32
219 Datum tupdatum, bool tupnull,
220 Datum arrdatum, ScanKey cur)
221{
222 int32 result = 0;
223
224 Assert(cur->sk_strategy == BTEqualStrategyNumber);
225 Assert(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL)));
226
227 if (tupnull) /* NULL tupdatum */
228 {
229 if (cur->sk_flags & SK_ISNULL)
230 result = 0; /* NULL "=" NULL */
231 else if (cur->sk_flags & SK_BT_NULLS_FIRST)
232 result = -1; /* NULL "<" NOT_NULL */
233 else
234 result = 1; /* NULL ">" NOT_NULL */
235 }
236 else if (cur->sk_flags & SK_ISNULL) /* NOT_NULL tupdatum, NULL arrdatum */
237 {
238 if (cur->sk_flags & SK_BT_NULLS_FIRST)
239 result = 1; /* NOT_NULL ">" NULL */
240 else
241 result = -1; /* NOT_NULL "<" NULL */
242 }
243 else
244 {
245 /*
246 * Like _bt_compare, we need to be careful of cross-type comparisons,
247 * so the left value has to be the value that came from an index tuple
248 */
249 result = DatumGetInt32(FunctionCall2Coll(orderproc, cur->sk_collation,
250 tupdatum, arrdatum));
251
252 /*
253 * We flip the sign by following the obvious rule: flip whenever the
254 * column is a DESC column.
255 *
256 * _bt_compare does it the wrong way around (flip when *ASC*) in order
257 * to compensate for passing its orderproc arguments backwards. We
258 * don't need to play these games because we find it natural to pass
259 * tupdatum as the left value (and arrdatum as the right value).
260 */
261 if (cur->sk_flags & SK_BT_DESC)
262 INVERT_COMPARE_RESULT(result);
263 }
264
265 return result;
266}
267
268/*
269 * _bt_binsrch_array_skey() -- Binary search for next matching array key
270 *
271 * Returns an index to the first array element >= caller's tupdatum argument.
272 * This convention is more natural for forwards scan callers, but that can't
273 * really matter to backwards scan callers. Both callers require handling for
274 * the case where the match we return is < tupdatum, and symmetric handling
275 * for the case where our best match is > tupdatum.
276 *
277 * Also sets *set_elem_result to the result _bt_compare_array_skey returned
278 * when we used it to compare the matching array element to tupdatum/tupnull.
279 *
280 * cur_elem_trig indicates if array advancement was triggered by this array's
281 * scan key, and that the array is for a required scan key. We can apply this
282 * information to find the next matching array element in the current scan
283 * direction using far fewer comparisons (fewer on average, compared to naive
284 * binary search). This scheme takes advantage of an important property of
285 * required arrays: required arrays always advance in lockstep with the index
286 * scan's progress through the index's key space.
287 */
288int
290 bool cur_elem_trig, ScanDirection dir,
291 Datum tupdatum, bool tupnull,
292 BTArrayKeyInfo *array, ScanKey cur,
293 int32 *set_elem_result)
294{
295 int low_elem = 0,
296 mid_elem = -1,
297 high_elem = array->num_elems - 1,
298 result = 0;
299 Datum arrdatum;
300
301 Assert(cur->sk_flags & SK_SEARCHARRAY);
302 Assert(!(cur->sk_flags & SK_BT_SKIP));
303 Assert(!(cur->sk_flags & SK_ISNULL)); /* SAOP arrays never have NULLs */
304 Assert(cur->sk_strategy == BTEqualStrategyNumber);
305
306 if (cur_elem_trig)
307 {
309 Assert(cur->sk_flags & SK_BT_REQFWD);
310
311 /*
312 * When the scan key that triggered array advancement is a required
313 * array scan key, it is now certain that the current array element
314 * (plus all prior elements relative to the current scan direction)
315 * cannot possibly be at or ahead of the corresponding tuple value.
316 * (_bt_checkkeys must have called _bt_tuple_before_array_skeys, which
317 * makes sure this is true as a condition of advancing the arrays.)
318 *
319 * This makes it safe to exclude array elements up to and including
320 * the former-current array element from our search.
321 *
322 * Separately, when array advancement was triggered by a required scan
323 * key, the array element immediately after the former-current element
324 * is often either an exact tupdatum match, or a "close by" near-match
325 * (a near-match tupdatum is one whose key space falls _between_ the
326 * former-current and new-current array elements). We'll detect both
327 * cases via an optimistic comparison of the new search lower bound
328 * (or new search upper bound in the case of backwards scans).
329 */
330 if (ScanDirectionIsForward(dir))
331 {
332 low_elem = array->cur_elem + 1; /* old cur_elem exhausted */
333
334 /* Compare prospective new cur_elem (also the new lower bound) */
335 if (high_elem >= low_elem)
336 {
337 arrdatum = array->elem_values[low_elem];
338 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
339 arrdatum, cur);
340
341 if (result <= 0)
342 {
343 /* Optimistic comparison optimization worked out */
344 *set_elem_result = result;
345 return low_elem;
346 }
347 mid_elem = low_elem;
348 low_elem++; /* this cur_elem exhausted, too */
349 }
350
351 if (high_elem < low_elem)
352 {
353 /* Caller needs to perform "beyond end" array advancement */
354 *set_elem_result = 1;
355 return high_elem;
356 }
357 }
358 else
359 {
360 high_elem = array->cur_elem - 1; /* old cur_elem exhausted */
361
362 /* Compare prospective new cur_elem (also the new upper bound) */
363 if (high_elem >= low_elem)
364 {
365 arrdatum = array->elem_values[high_elem];
366 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
367 arrdatum, cur);
368
369 if (result >= 0)
370 {
371 /* Optimistic comparison optimization worked out */
372 *set_elem_result = result;
373 return high_elem;
374 }
375 mid_elem = high_elem;
376 high_elem--; /* this cur_elem exhausted, too */
377 }
378
379 if (high_elem < low_elem)
380 {
381 /* Caller needs to perform "beyond end" array advancement */
382 *set_elem_result = -1;
383 return low_elem;
384 }
385 }
386 }
387
388 while (high_elem > low_elem)
389 {
390 mid_elem = low_elem + ((high_elem - low_elem) / 2);
391 arrdatum = array->elem_values[mid_elem];
392
393 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
394 arrdatum, cur);
395
396 if (result == 0)
397 {
398 /*
399 * It's safe to quit as soon as we see an equal array element.
400 * This often saves an extra comparison or two...
401 */
402 low_elem = mid_elem;
403 break;
404 }
405
406 if (result > 0)
407 low_elem = mid_elem + 1;
408 else
409 high_elem = mid_elem;
410 }
411
412 /*
413 * ...but our caller also cares about how its searched-for tuple datum
414 * compares to the low_elem datum. Must always set *set_elem_result with
415 * the result of that comparison specifically.
416 */
417 if (low_elem != mid_elem)
418 result = _bt_compare_array_skey(orderproc, tupdatum, tupnull,
419 array->elem_values[low_elem], cur);
420
421 *set_elem_result = result;
422
423 return low_elem;
424}
425
426/*
427 * _bt_binsrch_skiparray_skey() -- "Binary search" within a skip array
428 *
429 * Does not return an index into the array, since skip arrays don't really
430 * contain elements (they generate their array elements procedurally instead).
431 * Our interface matches that of _bt_binsrch_array_skey in every other way.
432 *
433 * Sets *set_elem_result just like _bt_binsrch_array_skey would with a true
434 * array. The value 0 indicates that tupdatum/tupnull is within the range of
435 * the skip array. We return -1 when tupdatum/tupnull is lower that any value
436 * within the range of the array, and 1 when it is higher than every value.
437 * Caller should pass *set_elem_result to _bt_skiparray_set_element to advance
438 * the array.
439 *
440 * cur_elem_trig indicates if array advancement was triggered by this array's
441 * scan key. We use this to optimize-away comparisons that are known by our
442 * caller to be unnecessary from context, just like _bt_binsrch_array_skey.
443 */
444static void
446 Datum tupdatum, bool tupnull,
447 BTArrayKeyInfo *array, ScanKey cur,
448 int32 *set_elem_result)
449{
450 Assert(cur->sk_flags & SK_BT_SKIP);
451 Assert(cur->sk_flags & SK_SEARCHARRAY);
452 Assert(cur->sk_flags & SK_BT_REQFWD);
453 Assert(array->num_elems == -1);
455
456 if (array->null_elem)
457 {
458 Assert(!array->low_compare && !array->high_compare);
459
460 *set_elem_result = 0;
461 return;
462 }
463
464 if (tupnull) /* NULL tupdatum */
465 {
466 if (cur->sk_flags & SK_BT_NULLS_FIRST)
467 *set_elem_result = -1; /* NULL "<" NOT_NULL */
468 else
469 *set_elem_result = 1; /* NULL ">" NOT_NULL */
470 return;
471 }
472
473 /*
474 * Array inequalities determine whether tupdatum is within the range of
475 * caller's skip array
476 */
477 *set_elem_result = 0;
478 if (ScanDirectionIsForward(dir))
479 {
480 /*
481 * Evaluate low_compare first (unless cur_elem_trig tells us that it
482 * cannot possibly fail to be satisfied), then evaluate high_compare
483 */
484 if (!cur_elem_trig && array->low_compare &&
487 tupdatum,
488 array->low_compare->sk_argument)))
489 *set_elem_result = -1;
490 else if (array->high_compare &&
493 tupdatum,
494 array->high_compare->sk_argument)))
495 *set_elem_result = 1;
496 }
497 else
498 {
499 /*
500 * Evaluate high_compare first (unless cur_elem_trig tells us that it
501 * cannot possibly fail to be satisfied), then evaluate low_compare
502 */
503 if (!cur_elem_trig && array->high_compare &&
506 tupdatum,
507 array->high_compare->sk_argument)))
508 *set_elem_result = 1;
509 else if (array->low_compare &&
512 tupdatum,
513 array->low_compare->sk_argument)))
514 *set_elem_result = -1;
515 }
516
517 /*
518 * Assert that any keys that were assumed to be satisfied already (due to
519 * caller passing cur_elem_trig=true) really are satisfied as expected
520 */
521#ifdef USE_ASSERT_CHECKING
522 if (cur_elem_trig)
523 {
524 if (ScanDirectionIsForward(dir) && array->low_compare)
527 tupdatum,
528 array->low_compare->sk_argument)));
529
530 if (ScanDirectionIsBackward(dir) && array->high_compare)
533 tupdatum,
534 array->high_compare->sk_argument)));
535 }
536#endif
537}
538
539/*
540 * _bt_skiparray_set_element() -- Set skip array scan key's sk_argument
541 *
542 * Caller passes set_elem_result returned by _bt_binsrch_skiparray_skey for
543 * caller's tupdatum/tupnull.
544 *
545 * We copy tupdatum/tupnull into skey's sk_argument iff set_elem_result == 0.
546 * Otherwise, we set skey to either the lowest or highest value that's within
547 * the range of caller's skip array (whichever is the best available match to
548 * tupdatum/tupnull that is still within the range of the skip array according
549 * to _bt_binsrch_skiparray_skey/set_elem_result).
550 */
551static void
553 int32 set_elem_result, Datum tupdatum, bool tupnull)
554{
555 Assert(skey->sk_flags & SK_BT_SKIP);
557
558 if (set_elem_result)
559 {
560 /* tupdatum/tupnull is out of the range of the skip array */
561 Assert(!array->null_elem);
562
563 _bt_array_set_low_or_high(rel, skey, array, set_elem_result < 0);
564 return;
565 }
566
567 /* Advance skip array to tupdatum (or tupnull) value */
568 if (unlikely(tupnull))
569 {
570 _bt_skiparray_set_isnull(rel, skey, array);
571 return;
572 }
573
574 /* Free memory previously allocated for sk_argument if needed */
575 if (!array->attbyval && skey->sk_argument)
577
578 /* tupdatum becomes new sk_argument/new current element */
579 skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
582 skey->sk_argument = datumCopy(tupdatum, array->attbyval, array->attlen);
583}
584
585/*
586 * _bt_skiparray_set_isnull() -- set skip array scan key to NULL
587 */
588static void
590{
591 Assert(skey->sk_flags & SK_BT_SKIP);
593 Assert(array->null_elem && !array->low_compare && !array->high_compare);
594
595 /* Free memory previously allocated for sk_argument if needed */
596 if (!array->attbyval && skey->sk_argument)
598
599 /* NULL becomes new sk_argument/new current element */
600 skey->sk_argument = (Datum) 0;
601 skey->sk_flags &= ~(SK_BT_MINVAL | SK_BT_MAXVAL |
603 skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
604}
605
606/*
607 * _bt_start_array_keys() -- Initialize array keys at start of a scan
608 *
609 * Set up the cur_elem counters and fill in the first sk_argument value for
610 * each array scankey.
611 */
612void
614{
615 Relation rel = scan->indexRelation;
616 BTScanOpaque so = (BTScanOpaque) scan->opaque;
617
618 Assert(so->numArrayKeys);
619 Assert(so->qual_ok);
620
621 for (int i = 0; i < so->numArrayKeys; i++)
622 {
623 BTArrayKeyInfo *array = &so->arrayKeys[i];
624 ScanKey skey = &so->keyData[array->scan_key];
625
627
628 _bt_array_set_low_or_high(rel, skey, array,
630 }
631 so->scanBehind = so->oppositeDirCheck = false; /* reset */
632}
633
634/*
635 * _bt_array_set_low_or_high() -- Set array scan key to lowest/highest element
636 *
637 * Caller also passes associated scan key, which will have its argument set to
638 * the lowest/highest array value in passing.
639 */
640static void
642 bool low_not_high)
643{
645
646 if (array->num_elems != -1)
647 {
648 /* set low or high element for SAOP array */
649 int set_elem = 0;
650
651 Assert(!(skey->sk_flags & SK_BT_SKIP));
652
653 if (!low_not_high)
654 set_elem = array->num_elems - 1;
655
656 /*
657 * Just copy over array datum (only skip arrays require freeing and
658 * allocating memory for sk_argument)
659 */
660 array->cur_elem = set_elem;
661 skey->sk_argument = array->elem_values[set_elem];
662
663 return;
664 }
665
666 /* set low or high element for skip array */
667 Assert(skey->sk_flags & SK_BT_SKIP);
668 Assert(array->num_elems == -1);
669
670 /* Free memory previously allocated for sk_argument if needed */
671 if (!array->attbyval && skey->sk_argument)
673
674 /* Reset flags */
675 skey->sk_argument = (Datum) 0;
676 skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL |
679
680 if (array->null_elem &&
681 (low_not_high == ((skey->sk_flags & SK_BT_NULLS_FIRST) != 0)))
682 {
683 /* Requested element (either lowest or highest) has the value NULL */
684 skey->sk_flags |= (SK_SEARCHNULL | SK_ISNULL);
685 }
686 else if (low_not_high)
687 {
688 /* Setting array to lowest element (according to low_compare) */
689 skey->sk_flags |= SK_BT_MINVAL;
690 }
691 else
692 {
693 /* Setting array to highest element (according to high_compare) */
694 skey->sk_flags |= SK_BT_MAXVAL;
695 }
696}
697
698/*
699 * _bt_array_decrement() -- decrement array scan key's sk_argument
700 *
701 * Return value indicates whether caller's array was successfully decremented.
702 * Cannot decrement an array whose current element is already the first one.
703 */
704static bool
706{
707 bool uflow = false;
708 Datum dec_sk_argument;
709
712
713 /* SAOP array? */
714 if (array->num_elems != -1)
715 {
717 if (array->cur_elem > 0)
718 {
719 /*
720 * Just decrement current element, and assign its datum to skey
721 * (only skip arrays need us to free existing sk_argument memory)
722 */
723 array->cur_elem--;
724 skey->sk_argument = array->elem_values[array->cur_elem];
725
726 /* Successfully decremented array */
727 return true;
728 }
729
730 /* Cannot decrement to before first array element */
731 return false;
732 }
733
734 /* Nope, this is a skip array */
735 Assert(skey->sk_flags & SK_BT_SKIP);
736
737 /*
738 * The sentinel value that represents the minimum value within the range
739 * of a skip array (often just -inf) is never decrementable
740 */
741 if (skey->sk_flags & SK_BT_MINVAL)
742 return false;
743
744 /*
745 * When the current array element is NULL, and the lowest sorting value in
746 * the index is also NULL, we cannot decrement before first array element
747 */
748 if ((skey->sk_flags & SK_ISNULL) && (skey->sk_flags & SK_BT_NULLS_FIRST))
749 return false;
750
751 /*
752 * Opclasses without skip support "decrement" the scan key's current
753 * element by setting the PRIOR flag. The true prior value is determined
754 * by repositioning to the last index tuple < existing sk_argument/current
755 * array element. Note that this works in the usual way when the scan key
756 * is already marked ISNULL (i.e. when the current element is NULL).
757 */
758 if (!array->sksup)
759 {
760 /* Successfully "decremented" array */
761 skey->sk_flags |= SK_BT_PRIOR;
762 return true;
763 }
764
765 /*
766 * Opclasses with skip support directly decrement sk_argument
767 */
768 if (skey->sk_flags & SK_ISNULL)
769 {
771
772 /*
773 * Existing sk_argument/array element is NULL (for an IS NULL qual).
774 *
775 * "Decrement" from NULL to the high_elem value provided by opclass
776 * skip support routine.
777 */
778 skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
779 skey->sk_argument = datumCopy(array->sksup->high_elem,
780 array->attbyval, array->attlen);
781 return true;
782 }
783
784 /*
785 * Ask opclass support routine to provide decremented copy of existing
786 * non-NULL sk_argument
787 */
788 dec_sk_argument = array->sksup->decrement(rel, skey->sk_argument, &uflow);
789 if (unlikely(uflow))
790 {
791 /* dec_sk_argument has undefined value (so no pfree) */
792 if (array->null_elem && (skey->sk_flags & SK_BT_NULLS_FIRST))
793 {
794 _bt_skiparray_set_isnull(rel, skey, array);
795
796 /* Successfully "decremented" array to NULL */
797 return true;
798 }
799
800 /* Cannot decrement to before first array element */
801 return false;
802 }
803
804 /*
805 * Successfully decremented sk_argument to a non-NULL value. Make sure
806 * that the decremented value is still within the range of the array.
807 */
808 if (array->low_compare &&
811 dec_sk_argument,
812 array->low_compare->sk_argument)))
813 {
814 /* Keep existing sk_argument after all */
815 if (!array->attbyval)
816 pfree(DatumGetPointer(dec_sk_argument));
817
818 /* Cannot decrement to before first array element */
819 return false;
820 }
821
822 /* Accept value returned by opclass decrement callback */
823 if (!array->attbyval && skey->sk_argument)
825 skey->sk_argument = dec_sk_argument;
826
827 /* Successfully decremented array */
828 return true;
829}
830
831/*
832 * _bt_array_increment() -- increment array scan key's sk_argument
833 *
834 * Return value indicates whether caller's array was successfully incremented.
835 * Cannot increment an array whose current element is already the final one.
836 */
837static bool
839{
840 bool oflow = false;
841 Datum inc_sk_argument;
842
845
846 /* SAOP array? */
847 if (array->num_elems != -1)
848 {
850 if (array->cur_elem < array->num_elems - 1)
851 {
852 /*
853 * Just increment current element, and assign its datum to skey
854 * (only skip arrays need us to free existing sk_argument memory)
855 */
856 array->cur_elem++;
857 skey->sk_argument = array->elem_values[array->cur_elem];
858
859 /* Successfully incremented array */
860 return true;
861 }
862
863 /* Cannot increment past final array element */
864 return false;
865 }
866
867 /* Nope, this is a skip array */
868 Assert(skey->sk_flags & SK_BT_SKIP);
869
870 /*
871 * The sentinel value that represents the maximum value within the range
872 * of a skip array (often just +inf) is never incrementable
873 */
874 if (skey->sk_flags & SK_BT_MAXVAL)
875 return false;
876
877 /*
878 * When the current array element is NULL, and the highest sorting value
879 * in the index is also NULL, we cannot increment past the final element
880 */
881 if ((skey->sk_flags & SK_ISNULL) && !(skey->sk_flags & SK_BT_NULLS_FIRST))
882 return false;
883
884 /*
885 * Opclasses without skip support "increment" the scan key's current
886 * element by setting the NEXT flag. The true next value is determined by
887 * repositioning to the first index tuple > existing sk_argument/current
888 * array element. Note that this works in the usual way when the scan key
889 * is already marked ISNULL (i.e. when the current element is NULL).
890 */
891 if (!array->sksup)
892 {
893 /* Successfully "incremented" array */
894 skey->sk_flags |= SK_BT_NEXT;
895 return true;
896 }
897
898 /*
899 * Opclasses with skip support directly increment sk_argument
900 */
901 if (skey->sk_flags & SK_ISNULL)
902 {
904
905 /*
906 * Existing sk_argument/array element is NULL (for an IS NULL qual).
907 *
908 * "Increment" from NULL to the low_elem value provided by opclass
909 * skip support routine.
910 */
911 skey->sk_flags &= ~(SK_SEARCHNULL | SK_ISNULL);
912 skey->sk_argument = datumCopy(array->sksup->low_elem,
913 array->attbyval, array->attlen);
914 return true;
915 }
916
917 /*
918 * Ask opclass support routine to provide incremented copy of existing
919 * non-NULL sk_argument
920 */
921 inc_sk_argument = array->sksup->increment(rel, skey->sk_argument, &oflow);
922 if (unlikely(oflow))
923 {
924 /* inc_sk_argument has undefined value (so no pfree) */
925 if (array->null_elem && !(skey->sk_flags & SK_BT_NULLS_FIRST))
926 {
927 _bt_skiparray_set_isnull(rel, skey, array);
928
929 /* Successfully "incremented" array to NULL */
930 return true;
931 }
932
933 /* Cannot increment past final array element */
934 return false;
935 }
936
937 /*
938 * Successfully incremented sk_argument to a non-NULL value. Make sure
939 * that the incremented value is still within the range of the array.
940 */
941 if (array->high_compare &&
944 inc_sk_argument,
945 array->high_compare->sk_argument)))
946 {
947 /* Keep existing sk_argument after all */
948 if (!array->attbyval)
949 pfree(DatumGetPointer(inc_sk_argument));
950
951 /* Cannot increment past final array element */
952 return false;
953 }
954
955 /* Accept value returned by opclass increment callback */
956 if (!array->attbyval && skey->sk_argument)
958 skey->sk_argument = inc_sk_argument;
959
960 /* Successfully incremented array */
961 return true;
962}
963
964/*
965 * _bt_advance_array_keys_increment() -- Advance to next set of array elements
966 *
967 * Advances the array keys by a single increment in the current scan
968 * direction. When there are multiple array keys this can roll over from the
969 * lowest order array to higher order arrays.
970 *
971 * Returns true if there is another set of values to consider, false if not.
972 * On true result, the scankeys are initialized with the next set of values.
973 * On false result, the scankeys stay the same, and the array keys are not
974 * advanced (every array remains at its final element for scan direction).
975 */
976static bool
978 bool *skip_array_set)
979{
980 Relation rel = scan->indexRelation;
981 BTScanOpaque so = (BTScanOpaque) scan->opaque;
982
983 /*
984 * We must advance the last array key most quickly, since it will
985 * correspond to the lowest-order index column among the available
986 * qualifications
987 */
988 for (int i = so->numArrayKeys - 1; i >= 0; i--)
989 {
990 BTArrayKeyInfo *array = &so->arrayKeys[i];
991 ScanKey skey = &so->keyData[array->scan_key];
992
993 if (array->num_elems == -1)
994 *skip_array_set = true;
995
996 if (ScanDirectionIsForward(dir))
997 {
998 if (_bt_array_increment(rel, skey, array))
999 return true;
1000 }
1001 else
1002 {
1003 if (_bt_array_decrement(rel, skey, array))
1004 return true;
1005 }
1006
1007 /*
1008 * Couldn't increment (or decrement) array. Handle array roll over.
1009 *
1010 * Start over at the array's lowest sorting value (or its highest
1011 * value, for backward scans)...
1012 */
1013 _bt_array_set_low_or_high(rel, skey, array,
1015
1016 /* ...then increment (or decrement) next most significant array */
1017 }
1018
1019 /*
1020 * The array keys are now exhausted.
1021 *
1022 * Restore the array keys to the state they were in immediately before we
1023 * were called. This ensures that the arrays only ever ratchet in the
1024 * current scan direction.
1025 *
1026 * Without this, scans could overlook matching tuples when the scan
1027 * direction gets reversed just before btgettuple runs out of items to
1028 * return, but just after _bt_readpage prepares all the items from the
1029 * scan's final page in so->currPos. When we're on the final page it is
1030 * typical for so->currPos to get invalidated once btgettuple finally
1031 * returns false, which'll effectively invalidate the scan's array keys.
1032 * That hasn't happened yet, though -- and in general it may never happen.
1033 */
1034 _bt_start_array_keys(scan, -dir);
1035
1036 return false;
1037}
1038
1039/*
1040 * _bt_tuple_before_array_skeys() -- too early to advance required arrays?
1041 *
1042 * We always compare the tuple using the current array keys (which we assume
1043 * are already set in so->keyData[]). readpagetup indicates if tuple is the
1044 * scan's current _bt_readpage-wise tuple.
1045 *
1046 * readpagetup callers must only call here when _bt_check_compare already set
1047 * continuescan=false. We help these callers deal with _bt_check_compare's
1048 * inability to distinguish between the < and > cases (it uses equality
1049 * operator scan keys, whereas we use 3-way ORDER procs). These callers pass
1050 * a _bt_check_compare-set sktrig value that indicates which scan key
1051 * triggered the call (!readpagetup callers just pass us sktrig=0 instead).
1052 * This information allows us to avoid wastefully checking earlier scan keys
1053 * that were already deemed to have been satisfied inside _bt_check_compare.
1054 *
1055 * Returns false when caller's tuple is >= the current required equality scan
1056 * keys (or <=, in the case of backwards scans). This happens to readpagetup
1057 * callers when the scan has reached the point of needing its array keys
1058 * advanced; caller will need to advance required and non-required arrays at
1059 * scan key offsets >= sktrig, plus scan keys < sktrig iff sktrig rolls over.
1060 * (When we return false to readpagetup callers, tuple can only be == current
1061 * required equality scan keys when caller's sktrig indicates that the arrays
1062 * need to be advanced due to an unsatisfied required inequality key trigger.)
1063 *
1064 * Returns true when caller passes a tuple that is < the current set of
1065 * equality keys for the most significant non-equal required scan key/column
1066 * (or > the keys, during backwards scans). This happens to readpagetup
1067 * callers when tuple is still before the start of matches for the scan's
1068 * required equality strategy scan keys. (sktrig can't have indicated that an
1069 * inequality strategy scan key wasn't satisfied in _bt_check_compare when we
1070 * return true. In fact, we automatically return false when passed such an
1071 * inequality sktrig by readpagetup callers -- _bt_check_compare's initial
1072 * continuescan=false doesn't really need to be confirmed here by us.)
1073 *
1074 * !readpagetup callers optionally pass us *scanBehind, which tracks whether
1075 * any missing truncated attributes might have affected array advancement
1076 * (compared to what would happen if it was shown the first non-pivot tuple on
1077 * the page to the right of caller's finaltup/high key tuple instead). It's
1078 * only possible that we'll set *scanBehind to true when caller passes us a
1079 * pivot tuple (with truncated -inf attributes) that we return false for.
1080 */
1081static bool
1083 IndexTuple tuple, TupleDesc tupdesc, int tupnatts,
1084 bool readpagetup, int sktrig, bool *scanBehind)
1085{
1086 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1087
1088 Assert(so->numArrayKeys);
1089 Assert(so->numberOfKeys);
1090 Assert(sktrig == 0 || readpagetup);
1091 Assert(!readpagetup || scanBehind == NULL);
1092
1093 if (scanBehind)
1094 *scanBehind = false;
1095
1096 for (int ikey = sktrig; ikey < so->numberOfKeys; ikey++)
1097 {
1098 ScanKey cur = so->keyData + ikey;
1099 Datum tupdatum;
1100 bool tupnull;
1101 int32 result;
1102
1103 /* readpagetup calls require one ORDER proc comparison (at most) */
1104 Assert(!readpagetup || ikey == sktrig);
1105
1106 /*
1107 * Once we reach a non-required scan key, we're completely done.
1108 *
1109 * Note: we deliberately don't consider the scan direction here.
1110 * _bt_advance_array_keys caller requires that we track *scanBehind
1111 * without concern for scan direction.
1112 */
1113 if ((cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) == 0)
1114 {
1115 Assert(!readpagetup);
1116 Assert(ikey > sktrig || ikey == 0);
1117 return false;
1118 }
1119
1120 if (cur->sk_attno > tupnatts)
1121 {
1122 Assert(!readpagetup);
1123
1124 /*
1125 * When we reach a high key's truncated attribute, assume that the
1126 * tuple attribute's value is >= the scan's equality constraint
1127 * scan keys (but set *scanBehind to let interested callers know
1128 * that a truncated attribute might have affected our answer).
1129 */
1130 if (scanBehind)
1131 *scanBehind = true;
1132
1133 return false;
1134 }
1135
1136 /*
1137 * Deal with inequality strategy scan keys that _bt_check_compare set
1138 * continuescan=false for
1139 */
1140 if (cur->sk_strategy != BTEqualStrategyNumber)
1141 {
1142 /*
1143 * When _bt_check_compare indicated that a required inequality
1144 * scan key wasn't satisfied, there's no need to verify anything;
1145 * caller always calls _bt_advance_array_keys with this sktrig.
1146 */
1147 if (readpagetup)
1148 return false;
1149
1150 /*
1151 * Otherwise we can't give up, since we must check all required
1152 * scan keys (required in either direction) in order to correctly
1153 * track *scanBehind for caller
1154 */
1155 continue;
1156 }
1157
1158 tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1159
1160 if (likely(!(cur->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL))))
1161 {
1162 /* Scankey has a valid/comparable sk_argument value */
1163 result = _bt_compare_array_skey(&so->orderProcs[ikey],
1164 tupdatum, tupnull,
1165 cur->sk_argument, cur);
1166
1167 if (result == 0)
1168 {
1169 /*
1170 * Interpret result in a way that takes NEXT/PRIOR into
1171 * account
1172 */
1173 if (cur->sk_flags & SK_BT_NEXT)
1174 result = -1;
1175 else if (cur->sk_flags & SK_BT_PRIOR)
1176 result = 1;
1177
1178 Assert(result == 0 || (cur->sk_flags & SK_BT_SKIP));
1179 }
1180 }
1181 else
1182 {
1183 BTArrayKeyInfo *array = NULL;
1184
1185 /*
1186 * Current array element/array = scan key value is a sentinel
1187 * value that represents the lowest (or highest) possible value
1188 * that's still within the range of the array.
1189 *
1190 * Like _bt_first, we only see MINVAL keys during forwards scans
1191 * (and similarly only see MAXVAL keys during backwards scans).
1192 * Even if the scan's direction changes, we'll stop at some higher
1193 * order key before we can ever reach any MAXVAL (or MINVAL) keys.
1194 * (However, unlike _bt_first we _can_ get to keys marked either
1195 * NEXT or PRIOR, regardless of the scan's current direction.)
1196 */
1198 !(cur->sk_flags & SK_BT_MAXVAL) :
1199 !(cur->sk_flags & SK_BT_MINVAL));
1200
1201 /*
1202 * There are no valid sk_argument values in MINVAL/MAXVAL keys.
1203 * Check if tupdatum is within the range of skip array instead.
1204 */
1205 for (int arrayidx = 0; arrayidx < so->numArrayKeys; arrayidx++)
1206 {
1207 array = &so->arrayKeys[arrayidx];
1208 if (array->scan_key == ikey)
1209 break;
1210 }
1211
1212 _bt_binsrch_skiparray_skey(false, dir, tupdatum, tupnull,
1213 array, cur, &result);
1214
1215 if (result == 0)
1216 {
1217 /*
1218 * tupdatum satisfies both low_compare and high_compare, so
1219 * it's time to advance the array keys.
1220 *
1221 * Note: It's possible that the skip array will "advance" from
1222 * its MINVAL (or MAXVAL) representation to an alternative,
1223 * logically equivalent representation of the same value: a
1224 * representation where the = key gets a valid datum in its
1225 * sk_argument. This is only possible when low_compare uses
1226 * the >= strategy (or high_compare uses the <= strategy).
1227 */
1228 return false;
1229 }
1230 }
1231
1232 /*
1233 * Does this comparison indicate that caller must _not_ advance the
1234 * scan's arrays just yet?
1235 */
1236 if ((ScanDirectionIsForward(dir) && result < 0) ||
1237 (ScanDirectionIsBackward(dir) && result > 0))
1238 return true;
1239
1240 /*
1241 * Does this comparison indicate that caller should now advance the
1242 * scan's arrays? (Must be if we get here during a readpagetup call.)
1243 */
1244 if (readpagetup || result != 0)
1245 {
1246 Assert(result != 0);
1247 return false;
1248 }
1249
1250 /*
1251 * Inconclusive -- need to check later scan keys, too.
1252 *
1253 * This must be a finaltup precheck, or a call made from an assertion.
1254 */
1255 Assert(result == 0);
1256 }
1257
1258 Assert(!readpagetup);
1259
1260 return false;
1261}
1262
1263/*
1264 * _bt_start_prim_scan() -- start scheduled primitive index scan?
1265 *
1266 * Returns true if _bt_checkkeys scheduled another primitive index scan, just
1267 * as the last one ended. Otherwise returns false, indicating that the array
1268 * keys are now fully exhausted.
1269 *
1270 * Only call here during scans with one or more equality type array scan keys,
1271 * after _bt_first or _bt_next return false.
1272 */
1273bool
1275{
1276 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1277
1278 Assert(so->numArrayKeys);
1279
1280 so->scanBehind = so->oppositeDirCheck = false; /* reset */
1281
1282 /*
1283 * Array keys are advanced within _bt_checkkeys when the scan reaches the
1284 * leaf level (more precisely, they're advanced when the scan reaches the
1285 * end of each distinct set of array elements). This process avoids
1286 * repeat access to leaf pages (across multiple primitive index scans) by
1287 * advancing the scan's array keys when it allows the primitive index scan
1288 * to find nearby matching tuples (or when it eliminates ranges of array
1289 * key space that can't possibly be satisfied by any index tuple).
1290 *
1291 * _bt_checkkeys sets a simple flag variable to schedule another primitive
1292 * index scan. The flag tells us what to do.
1293 *
1294 * We cannot rely on _bt_first always reaching _bt_checkkeys. There are
1295 * various cases where that won't happen. For example, if the index is
1296 * completely empty, then _bt_first won't call _bt_readpage/_bt_checkkeys.
1297 * We also don't expect a call to _bt_checkkeys during searches for a
1298 * non-existent value that happens to be lower/higher than any existing
1299 * value in the index.
1300 *
1301 * We don't require special handling for these cases -- we don't need to
1302 * be explicitly instructed to _not_ perform another primitive index scan.
1303 * It's up to code under the control of _bt_first to always set the flag
1304 * when another primitive index scan will be required.
1305 *
1306 * This works correctly, even with the tricky cases listed above, which
1307 * all involve access to leaf pages "near the boundaries of the key space"
1308 * (whether it's from a leftmost/rightmost page, or an imaginary empty
1309 * leaf root page). If _bt_checkkeys cannot be reached by a primitive
1310 * index scan for one set of array keys, then it also won't be reached for
1311 * any later set ("later" in terms of the direction that we scan the index
1312 * and advance the arrays). The array keys won't have advanced in these
1313 * cases, but that's the correct behavior (even _bt_advance_array_keys
1314 * won't always advance the arrays at the point they become "exhausted").
1315 */
1316 if (so->needPrimScan)
1317 {
1318 /*
1319 * Flag was set -- must call _bt_first again, which will reset the
1320 * scan's needPrimScan flag
1321 */
1322 return true;
1323 }
1324
1325 /* The top-level index scan ran out of tuples in this scan direction */
1326 if (scan->parallel_scan != NULL)
1327 _bt_parallel_done(scan);
1328
1329 return false;
1330}
1331
1332/*
1333 * _bt_advance_array_keys() -- Advance array elements using a tuple
1334 *
1335 * The scan always gets a new qual as a consequence of calling here (except
1336 * when we determine that the top-level scan has run out of matching tuples).
1337 * All later _bt_check_compare calls also use the same new qual that was first
1338 * used here (at least until the next call here advances the keys once again).
1339 * It's convenient to structure _bt_check_compare rechecks of caller's tuple
1340 * (using the new qual) as one the steps of advancing the scan's array keys,
1341 * so this function works as a wrapper around _bt_check_compare.
1342 *
1343 * Like _bt_check_compare, we'll set pstate.continuescan on behalf of the
1344 * caller, and return a boolean indicating if caller's tuple satisfies the
1345 * scan's new qual. But unlike _bt_check_compare, we set so->needPrimScan
1346 * when we set continuescan=false, indicating if a new primitive index scan
1347 * has been scheduled (otherwise, the top-level scan has run out of tuples in
1348 * the current scan direction).
1349 *
1350 * Caller must use _bt_tuple_before_array_skeys to determine if the current
1351 * place in the scan is >= the current array keys _before_ calling here.
1352 * We're responsible for ensuring that caller's tuple is <= the newly advanced
1353 * required array keys once we return. We try to find an exact match, but
1354 * failing that we'll advance the array keys to whatever set of array elements
1355 * comes next in the key space for the current scan direction. Required array
1356 * keys "ratchet forwards" (or backwards). They can only advance as the scan
1357 * itself advances through the index/key space.
1358 *
1359 * (The rules are the same for backwards scans, except that the operators are
1360 * flipped: just replace the precondition's >= operator with a <=, and the
1361 * postcondition's <= operator with a >=. In other words, just swap the
1362 * precondition with the postcondition.)
1363 *
1364 * We also deal with "advancing" non-required arrays here (or arrays that are
1365 * treated as non-required for the duration of a _bt_readpage call). Callers
1366 * whose sktrig scan key is non-required specify sktrig_required=false. These
1367 * calls are the only exception to the general rule about always advancing the
1368 * required array keys (the scan may not even have a required array). These
1369 * callers should just pass a NULL pstate (since there is never any question
1370 * of stopping the scan). No call to _bt_tuple_before_array_skeys is required
1371 * ahead of these calls (it's already clear that any required scan keys must
1372 * be satisfied by caller's tuple).
1373 *
1374 * Note that we deal with non-array required equality strategy scan keys as
1375 * degenerate single element arrays here. Obviously, they can never really
1376 * advance in the way that real arrays can, but they must still affect how we
1377 * advance real array scan keys (exactly like true array equality scan keys).
1378 * We have to keep around a 3-way ORDER proc for these (using the "=" operator
1379 * won't do), since in general whether the tuple is < or > _any_ unsatisfied
1380 * required equality key influences how the scan's real arrays must advance.
1381 *
1382 * Note also that we may sometimes need to advance the array keys when the
1383 * existing required array keys (and other required equality keys) are already
1384 * an exact match for every corresponding value from caller's tuple. We must
1385 * do this for inequalities that _bt_check_compare set continuescan=false for.
1386 * They'll advance the array keys here, just like any other scan key that
1387 * _bt_check_compare stops on. (This can even happen _after_ we advance the
1388 * array keys, in which case we'll advance the array keys a second time. That
1389 * way _bt_checkkeys caller always has its required arrays advance to the
1390 * maximum possible extent that its tuple will allow.)
1391 */
1392static bool
1394 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
1395 int sktrig, bool sktrig_required)
1396{
1397 BTScanOpaque so = (BTScanOpaque) scan->opaque;
1398 Relation rel = scan->indexRelation;
1399 ScanDirection dir = so->currPos.dir;
1400 int arrayidx = 0;
1401 bool beyond_end_advance = false,
1402 skip_array_advanced = false,
1403 has_required_opposite_direction_only = false,
1404 all_required_satisfied = true,
1405 all_satisfied = true;
1406
1407 Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
1408 Assert(_bt_verify_keys_with_arraykeys(scan));
1409
1410 if (sktrig_required)
1411 {
1412 /*
1413 * Precondition array state assertion
1414 */
1415 Assert(!_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc,
1416 tupnatts, false, 0, NULL));
1417
1418 /*
1419 * Once we return we'll have a new set of required array keys, so
1420 * reset state used by "look ahead" optimization
1421 */
1422 pstate->rechecks = 0;
1423 pstate->targetdistance = 0;
1424 }
1425 else if (sktrig < so->numberOfKeys - 1 &&
1426 !(so->keyData[so->numberOfKeys - 1].sk_flags & SK_SEARCHARRAY))
1427 {
1428 int least_sign_ikey = so->numberOfKeys - 1;
1429 bool continuescan;
1430
1431 /*
1432 * Optimization: perform a precheck of the least significant key
1433 * during !sktrig_required calls when it isn't already our sktrig
1434 * (provided the precheck key is not itself an array).
1435 *
1436 * When the precheck works out we'll avoid an expensive binary search
1437 * of sktrig's array (plus any other arrays before least_sign_ikey).
1438 */
1439 Assert(so->keyData[sktrig].sk_flags & SK_SEARCHARRAY);
1440 if (!_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
1441 false, &continuescan,
1442 &least_sign_ikey))
1443 return false;
1444 }
1445
1446 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
1447 {
1448 ScanKey cur = so->keyData + ikey;
1449 BTArrayKeyInfo *array = NULL;
1450 Datum tupdatum;
1451 bool required = false,
1452 tupnull;
1453 int32 result;
1454 int set_elem = 0;
1455
1456 if (cur->sk_strategy == BTEqualStrategyNumber)
1457 {
1458 /* Manage array state */
1459 if (cur->sk_flags & SK_SEARCHARRAY)
1460 {
1461 array = &so->arrayKeys[arrayidx++];
1462 Assert(array->scan_key == ikey);
1463 }
1464 }
1465 else
1466 {
1467 /*
1468 * Are any inequalities required in the opposite direction only
1469 * present here?
1470 */
1471 if (((ScanDirectionIsForward(dir) &&
1472 (cur->sk_flags & (SK_BT_REQBKWD))) ||
1474 (cur->sk_flags & (SK_BT_REQFWD)))))
1475 has_required_opposite_direction_only = true;
1476 }
1477
1478 /* Optimization: skip over known-satisfied scan keys */
1479 if (ikey < sktrig)
1480 continue;
1481
1482 if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
1483 {
1484 required = true;
1485
1486 if (cur->sk_attno > tupnatts)
1487 {
1488 /* Set this just like _bt_tuple_before_array_skeys */
1489 Assert(sktrig < ikey);
1490 so->scanBehind = true;
1491 }
1492 }
1493
1494 /*
1495 * Handle a required non-array scan key that the initial call to
1496 * _bt_check_compare indicated triggered array advancement, if any.
1497 *
1498 * The non-array scan key's strategy will be <, <=, or = during a
1499 * forwards scan (or any one of =, >=, or > during a backwards scan).
1500 * It follows that the corresponding tuple attribute's value must now
1501 * be either > or >= the scan key value (for backwards scans it must
1502 * be either < or <= that value).
1503 *
1504 * If this is a required equality strategy scan key, this is just an
1505 * optimization; _bt_tuple_before_array_skeys already confirmed that
1506 * this scan key places us ahead of caller's tuple. There's no need
1507 * to repeat that work now. (The same underlying principle also gets
1508 * applied by the cur_elem_trig optimization used to speed up searches
1509 * for the next array element.)
1510 *
1511 * If this is a required inequality strategy scan key, we _must_ rely
1512 * on _bt_check_compare like this; we aren't capable of directly
1513 * evaluating required inequality strategy scan keys here, on our own.
1514 */
1515 if (ikey == sktrig && !array)
1516 {
1517 Assert(sktrig_required && required && all_required_satisfied);
1518
1519 /* Use "beyond end" advancement. See below for an explanation. */
1520 beyond_end_advance = true;
1521 all_satisfied = all_required_satisfied = false;
1522
1523 continue;
1524 }
1525
1526 /*
1527 * Nothing more for us to do with an inequality strategy scan key that
1528 * wasn't the one that _bt_check_compare stopped on, though.
1529 *
1530 * Note: if our later call to _bt_check_compare (to recheck caller's
1531 * tuple) sets continuescan=false due to finding this same inequality
1532 * unsatisfied (possible when it's required in the scan direction),
1533 * we'll deal with it via a recursive "second pass" call.
1534 */
1535 else if (cur->sk_strategy != BTEqualStrategyNumber)
1536 continue;
1537
1538 /*
1539 * Nothing for us to do with an equality strategy scan key that isn't
1540 * marked required, either -- unless it's a non-required array
1541 */
1542 else if (!required && !array)
1543 continue;
1544
1545 /*
1546 * Here we perform steps for all array scan keys after a required
1547 * array scan key whose binary search triggered "beyond end of array
1548 * element" array advancement due to encountering a tuple attribute
1549 * value > the closest matching array key (or < for backwards scans).
1550 */
1551 if (beyond_end_advance)
1552 {
1553 if (array)
1554 _bt_array_set_low_or_high(rel, cur, array,
1556
1557 continue;
1558 }
1559
1560 /*
1561 * Here we perform steps for all array scan keys after a required
1562 * array scan key whose tuple attribute was < the closest matching
1563 * array key when we dealt with it (or > for backwards scans).
1564 *
1565 * This earlier required array key already puts us ahead of caller's
1566 * tuple in the key space (for the current scan direction). We must
1567 * make sure that subsequent lower-order array keys do not put us too
1568 * far ahead (ahead of tuples that have yet to be seen by our caller).
1569 * For example, when a tuple "(a, b) = (42, 5)" advances the array
1570 * keys on "a" from 40 to 45, we must also set "b" to whatever the
1571 * first array element for "b" is. It would be wrong to allow "b" to
1572 * be set based on the tuple value.
1573 *
1574 * Perform the same steps with truncated high key attributes. You can
1575 * think of this as a "binary search" for the element closest to the
1576 * value -inf. Again, the arrays must never get ahead of the scan.
1577 */
1578 if (!all_required_satisfied || cur->sk_attno > tupnatts)
1579 {
1580 if (array)
1581 _bt_array_set_low_or_high(rel, cur, array,
1583
1584 continue;
1585 }
1586
1587 /*
1588 * Search in scankey's array for the corresponding tuple attribute
1589 * value from caller's tuple
1590 */
1591 tupdatum = index_getattr(tuple, cur->sk_attno, tupdesc, &tupnull);
1592
1593 if (array)
1594 {
1595 bool cur_elem_trig = (sktrig_required && ikey == sktrig);
1596
1597 /*
1598 * "Binary search" by checking if tupdatum/tupnull are within the
1599 * range of the skip array
1600 */
1601 if (array->num_elems == -1)
1602 _bt_binsrch_skiparray_skey(cur_elem_trig, dir,
1603 tupdatum, tupnull, array, cur,
1604 &result);
1605
1606 /*
1607 * Binary search for the closest match from the SAOP array
1608 */
1609 else
1610 set_elem = _bt_binsrch_array_skey(&so->orderProcs[ikey],
1611 cur_elem_trig, dir,
1612 tupdatum, tupnull, array, cur,
1613 &result);
1614 }
1615 else
1616 {
1618
1619 /*
1620 * This is a required non-array equality strategy scan key, which
1621 * we'll treat as a degenerate single element array.
1622 *
1623 * This scan key's imaginary "array" can't really advance, but it
1624 * can still roll over like any other array. (Actually, this is
1625 * no different to real single value arrays, which never advance
1626 * without rolling over -- they can never truly advance, either.)
1627 */
1628 result = _bt_compare_array_skey(&so->orderProcs[ikey],
1629 tupdatum, tupnull,
1630 cur->sk_argument, cur);
1631 }
1632
1633 /*
1634 * Consider "beyond end of array element" array advancement.
1635 *
1636 * When the tuple attribute value is > the closest matching array key
1637 * (or < in the backwards scan case), we need to ratchet this array
1638 * forward (backward) by one increment, so that caller's tuple ends up
1639 * being < final array value instead (or > final array value instead).
1640 * This process has to work for all of the arrays, not just this one:
1641 * it must "carry" to higher-order arrays when the set_elem that we
1642 * just found happens to be the final one for the scan's direction.
1643 * Incrementing (decrementing) set_elem itself isn't good enough.
1644 *
1645 * Our approach is to provisionally use set_elem as if it was an exact
1646 * match now, then set each later/less significant array to whatever
1647 * its final element is. Once outside the loop we'll then "increment
1648 * this array's set_elem" by calling _bt_advance_array_keys_increment.
1649 * That way the process rolls over to higher order arrays as needed.
1650 *
1651 * Under this scheme any required arrays only ever ratchet forwards
1652 * (or backwards), and always do so to the maximum possible extent
1653 * that we can know will be safe without seeing the scan's next tuple.
1654 * We don't need any special handling for required scan keys that lack
1655 * a real array to advance, nor for redundant scan keys that couldn't
1656 * be eliminated by _bt_preprocess_keys. It won't matter if some of
1657 * our "true" array scan keys (or even all of them) are non-required.
1658 */
1659 if (sktrig_required && required &&
1660 ((ScanDirectionIsForward(dir) && result > 0) ||
1661 (ScanDirectionIsBackward(dir) && result < 0)))
1662 beyond_end_advance = true;
1663
1664 Assert(all_required_satisfied && all_satisfied);
1665 if (result != 0)
1666 {
1667 /*
1668 * Track whether caller's tuple satisfies our new post-advancement
1669 * qual, for required scan keys, as well as for the entire set of
1670 * interesting scan keys (all required scan keys plus non-required
1671 * array scan keys are considered interesting.)
1672 */
1673 all_satisfied = false;
1674 if (sktrig_required && required)
1675 all_required_satisfied = false;
1676 else
1677 {
1678 /*
1679 * There's no need to advance the arrays using the best
1680 * available match for a non-required array. Give up now.
1681 * (Though note that sktrig_required calls still have to do
1682 * all the usual post-advancement steps, including the recheck
1683 * call to _bt_check_compare.)
1684 */
1685 break;
1686 }
1687 }
1688
1689 /* Advance array keys, even when we don't have an exact match */
1690 if (array)
1691 {
1692 if (array->num_elems == -1)
1693 {
1694 /* Skip array's new element is tupdatum (or MINVAL/MAXVAL) */
1695 _bt_skiparray_set_element(rel, cur, array, result,
1696 tupdatum, tupnull);
1697 skip_array_advanced = true;
1698 }
1699 else if (array->cur_elem != set_elem)
1700 {
1701 /* SAOP array's new element is set_elem datum */
1702 array->cur_elem = set_elem;
1703 cur->sk_argument = array->elem_values[set_elem];
1704 }
1705 }
1706 }
1707
1708 /*
1709 * Advance the array keys incrementally whenever "beyond end of array
1710 * element" array advancement happens, so that advancement will carry to
1711 * higher-order arrays (might exhaust all the scan's arrays instead, which
1712 * ends the top-level scan).
1713 */
1714 if (beyond_end_advance &&
1715 !_bt_advance_array_keys_increment(scan, dir, &skip_array_advanced))
1716 goto end_toplevel_scan;
1717
1718 Assert(_bt_verify_keys_with_arraykeys(scan));
1719
1720 /*
1721 * Maintain a page-level count of the number of times the scan's array
1722 * keys advanced in a way that affected at least one skip array
1723 */
1724 if (sktrig_required && skip_array_advanced)
1725 pstate->nskipadvances++;
1726
1727 /*
1728 * Does tuple now satisfy our new qual? Recheck with _bt_check_compare.
1729 *
1730 * Calls triggered by an unsatisfied required scan key, whose tuple now
1731 * satisfies all required scan keys, but not all nonrequired array keys,
1732 * will still require a recheck call to _bt_check_compare. They'll still
1733 * need its "second pass" handling of required inequality scan keys.
1734 * (Might have missed a still-unsatisfied required inequality scan key
1735 * that caller didn't detect as the sktrig scan key during its initial
1736 * _bt_check_compare call that used the old/original qual.)
1737 *
1738 * Calls triggered by an unsatisfied nonrequired array scan key never need
1739 * "second pass" handling of required inequalities (nor any other handling
1740 * of any required scan key). All that matters is whether caller's tuple
1741 * satisfies the new qual, so it's safe to just skip the _bt_check_compare
1742 * recheck when we've already determined that it can only return 'false'.
1743 *
1744 * Note: In practice most scan keys are marked required by preprocessing,
1745 * if necessary by generating a preceding skip array. We nevertheless
1746 * often handle array keys marked required as if they were nonrequired.
1747 * This behavior is requested by our _bt_check_compare caller, though only
1748 * when it is passed "forcenonrequired=true" by _bt_checkkeys.
1749 */
1750 if ((sktrig_required && all_required_satisfied) ||
1751 (!sktrig_required && all_satisfied))
1752 {
1753 int nsktrig = sktrig + 1;
1754 bool continuescan;
1755
1756 Assert(all_required_satisfied);
1757
1758 /* Recheck _bt_check_compare on behalf of caller */
1759 if (_bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
1760 !sktrig_required, &continuescan,
1761 &nsktrig) &&
1762 !so->scanBehind)
1763 {
1764 /* This tuple satisfies the new qual */
1765 Assert(all_satisfied && continuescan);
1766
1767 if (pstate)
1768 pstate->continuescan = true;
1769
1770 return true;
1771 }
1772
1773 /*
1774 * Consider "second pass" handling of required inequalities.
1775 *
1776 * It's possible that our _bt_check_compare call indicated that the
1777 * scan should end due to some unsatisfied inequality that wasn't
1778 * initially recognized as such by us. Handle this by calling
1779 * ourselves recursively, this time indicating that the trigger is the
1780 * inequality that we missed first time around (and using a set of
1781 * required array/equality keys that are now exact matches for tuple).
1782 *
1783 * We make a strong, general guarantee that every _bt_checkkeys call
1784 * here will advance the array keys to the maximum possible extent
1785 * that we can know to be safe based on caller's tuple alone. If we
1786 * didn't perform this step, then that guarantee wouldn't quite hold.
1787 */
1788 if (unlikely(!continuescan))
1789 {
1790 bool satisfied PG_USED_FOR_ASSERTS_ONLY;
1791
1792 Assert(sktrig_required);
1794
1795 /*
1796 * The tuple must use "beyond end" advancement during the
1797 * recursive call, so we cannot possibly end up back here when
1798 * recursing. We'll consume a small, fixed amount of stack space.
1799 */
1800 Assert(!beyond_end_advance);
1801
1802 /* Advance the array keys a second time using same tuple */
1803 satisfied = _bt_advance_array_keys(scan, pstate, tuple, tupnatts,
1804 tupdesc, nsktrig, true);
1805
1806 /* This tuple doesn't satisfy the inequality */
1807 Assert(!satisfied);
1808 return false;
1809 }
1810
1811 /*
1812 * Some non-required scan key (from new qual) still not satisfied.
1813 *
1814 * All scan keys required in the current scan direction must still be
1815 * satisfied, though, so we can trust all_required_satisfied below.
1816 */
1817 }
1818
1819 /*
1820 * When we were called just to deal with "advancing" non-required arrays,
1821 * this is as far as we can go (cannot stop the scan for these callers)
1822 */
1823 if (!sktrig_required)
1824 {
1825 /* Caller's tuple doesn't match any qual */
1826 return false;
1827 }
1828
1829 /*
1830 * Postcondition array state assertion (for still-unsatisfied tuples).
1831 *
1832 * By here we have established that the scan's required arrays (scan must
1833 * have at least one required array) advanced, without becoming exhausted.
1834 *
1835 * Caller's tuple is now < the newly advanced array keys (or > when this
1836 * is a backwards scan), except in the case where we only got this far due
1837 * to an unsatisfied non-required scan key. Verify that with an assert.
1838 *
1839 * Note: we don't just quit at this point when all required scan keys were
1840 * found to be satisfied because we need to consider edge-cases involving
1841 * scan keys required in the opposite direction only; those aren't tracked
1842 * by all_required_satisfied.
1843 */
1844 Assert(_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts,
1845 false, 0, NULL) ==
1846 !all_required_satisfied);
1847
1848 /*
1849 * We generally permit primitive index scans to continue onto the next
1850 * sibling page when the page's finaltup satisfies all required scan keys
1851 * at the point where we're between pages.
1852 *
1853 * If caller's tuple is also the page's finaltup, and we see that required
1854 * scan keys still aren't satisfied, start a new primitive index scan.
1855 */
1856 if (!all_required_satisfied && pstate->finaltup == tuple)
1857 goto new_prim_scan;
1858
1859 /*
1860 * Proactively check finaltup (don't wait until finaltup is reached by the
1861 * scan) when it might well turn out to not be satisfied later on.
1862 *
1863 * Note: if so->scanBehind hasn't already been set for finaltup by us,
1864 * it'll be set during this call to _bt_tuple_before_array_skeys. Either
1865 * way, it'll be set correctly (for the whole page) after this point.
1866 */
1867 if (!all_required_satisfied && pstate->finaltup &&
1868 _bt_tuple_before_array_skeys(scan, dir, pstate->finaltup, tupdesc,
1869 BTreeTupleGetNAtts(pstate->finaltup, rel),
1870 false, 0, &so->scanBehind))
1871 goto new_prim_scan;
1872
1873 /*
1874 * When we encounter a truncated finaltup high key attribute, we're
1875 * optimistic about the chances of its corresponding required scan key
1876 * being satisfied when we go on to recheck it against tuples from this
1877 * page's right sibling leaf page. We consider truncated attributes to be
1878 * satisfied by required scan keys, which allows the primitive index scan
1879 * to continue to the next leaf page. We must set so->scanBehind to true
1880 * to remember that the last page's finaltup had "satisfied" required scan
1881 * keys for one or more truncated attribute values (scan keys required in
1882 * _either_ scan direction).
1883 *
1884 * There is a chance that _bt_readpage (which checks so->scanBehind) will
1885 * find that even the sibling leaf page's finaltup is < the new array
1886 * keys. When that happens, our optimistic policy will have incurred a
1887 * single extra leaf page access that could have been avoided.
1888 *
1889 * A pessimistic policy would give backward scans a gratuitous advantage
1890 * over forward scans. We'd punish forward scans for applying more
1891 * accurate information from the high key, rather than just using the
1892 * final non-pivot tuple as finaltup, in the style of backward scans.
1893 * Being pessimistic would also give some scans with non-required arrays a
1894 * perverse advantage over similar scans that use required arrays instead.
1895 *
1896 * This is similar to our scan-level heuristics, below. They also set
1897 * scanBehind to speculatively continue the primscan onto the next page.
1898 */
1899 if (so->scanBehind)
1900 {
1901 /* Truncated high key -- _bt_scanbehind_checkkeys recheck scheduled */
1902 }
1903
1904 /*
1905 * Handle inequalities marked required in the opposite scan direction.
1906 * They can also signal that we should start a new primitive index scan.
1907 *
1908 * It's possible that the scan is now positioned where "matching" tuples
1909 * begin, and that caller's tuple satisfies all scan keys required in the
1910 * current scan direction. But if caller's tuple still doesn't satisfy
1911 * other scan keys that are required in the opposite scan direction only
1912 * (e.g., a required >= strategy scan key when scan direction is forward),
1913 * it's still possible that there are many leaf pages before the page that
1914 * _bt_first could skip straight to. Groveling through all those pages
1915 * will always give correct answers, but it can be very inefficient. We
1916 * must avoid needlessly scanning extra pages.
1917 *
1918 * Separately, it's possible that _bt_check_compare set continuescan=false
1919 * for a scan key that's required in the opposite direction only. This is
1920 * a special case, that happens only when _bt_check_compare sees that the
1921 * inequality encountered a NULL value. This signals the end of non-NULL
1922 * values in the current scan direction, which is reason enough to end the
1923 * (primitive) scan. If this happens at the start of a large group of
1924 * NULL values, then we shouldn't expect to be called again until after
1925 * the scan has already read indefinitely-many leaf pages full of tuples
1926 * with NULL suffix values. (_bt_first is expected to skip over the group
1927 * of NULLs by applying a similar "deduce NOT NULL" rule of its own, which
1928 * involves consing up an explicit SK_SEARCHNOTNULL key.)
1929 *
1930 * Apply a test against finaltup to detect and recover from the problem:
1931 * if even finaltup doesn't satisfy such an inequality, we just skip by
1932 * starting a new primitive index scan. When we skip, we know for sure
1933 * that all of the tuples on the current page following caller's tuple are
1934 * also before the _bt_first-wise start of tuples for our new qual. That
1935 * at least suggests many more skippable pages beyond the current page.
1936 * (when so->scanBehind and so->oppositeDirCheck are set, this'll happen
1937 * when we test the next page's finaltup/high key instead.)
1938 */
1939 else if (has_required_opposite_direction_only && pstate->finaltup &&
1940 unlikely(!_bt_oppodir_checkkeys(scan, dir, pstate->finaltup)))
1941 goto new_prim_scan;
1942
1943continue_scan:
1944
1945 /*
1946 * Stick with the ongoing primitive index scan for now.
1947 *
1948 * It's possible that later tuples will also turn out to have values that
1949 * are still < the now-current array keys (or > the current array keys).
1950 * Our caller will handle this by performing what amounts to a linear
1951 * search of the page, implemented by calling _bt_check_compare and then
1952 * _bt_tuple_before_array_skeys for each tuple.
1953 *
1954 * This approach has various advantages over a binary search of the page.
1955 * Repeated binary searches of the page (one binary search for every array
1956 * advancement) won't outperform a continuous linear search. While there
1957 * are workloads that a naive linear search won't handle well, our caller
1958 * has a "look ahead" fallback mechanism to deal with that problem.
1959 */
1960 pstate->continuescan = true; /* Override _bt_check_compare */
1961 so->needPrimScan = false; /* _bt_readpage has more tuples to check */
1962
1963 if (so->scanBehind)
1964 {
1965 /*
1966 * Remember if recheck needs to call _bt_oppodir_checkkeys for next
1967 * page's finaltup (see above comments about "Handle inequalities
1968 * marked required in the opposite scan direction" for why).
1969 */
1970 so->oppositeDirCheck = has_required_opposite_direction_only;
1971
1972 /*
1973 * skip by setting "look ahead" mechanism's offnum for forwards scans
1974 * (backwards scans check scanBehind flag directly instead)
1975 */
1976 if (ScanDirectionIsForward(dir))
1977 pstate->skip = pstate->maxoff + 1;
1978 }
1979
1980 /* Caller's tuple doesn't match the new qual */
1981 return false;
1982
1983new_prim_scan:
1984
1985 Assert(pstate->finaltup); /* not on rightmost/leftmost page */
1986
1987 /*
1988 * Looks like another primitive index scan is required. But consider
1989 * continuing the current primscan based on scan-level heuristics.
1990 *
1991 * Continue the ongoing primitive scan (and schedule a recheck for when
1992 * the scan arrives on the next sibling leaf page) when it has already
1993 * read at least one leaf page before the one we're reading now. This
1994 * makes primscan scheduling more efficient when scanning subsets of an
1995 * index with many distinct attribute values matching many array elements.
1996 * It encourages fewer, larger primitive scans where that makes sense.
1997 * This will in turn encourage _bt_readpage to apply the pstate.startikey
1998 * optimization more often.
1999 *
2000 * Also continue the ongoing primitive index scan when it is still on the
2001 * first page if there have been more than NSKIPADVANCES_THRESHOLD calls
2002 * here that each advanced at least one of the scan's skip arrays
2003 * (deliberately ignore advancements that only affected SAOP arrays here).
2004 * A page that cycles through this many skip array elements is quite
2005 * likely to neighbor similar pages, that we'll also need to read.
2006 *
2007 * Note: These heuristics aren't as aggressive as you might think. We're
2008 * conservative about allowing a primitive scan to step from the first
2009 * leaf page it reads to the page's sibling page (we only allow it on
2010 * first pages whose finaltup strongly suggests that it'll work out, as
2011 * well as first pages that have a large number of skip array advances).
2012 * Clearing this first page finaltup hurdle is a strong signal in itself.
2013 *
2014 * Note: The NSKIPADVANCES_THRESHOLD heuristic exists only to avoid
2015 * pathological cases. Specifically, cases where a skip scan should just
2016 * behave like a traditional full index scan, but ends up "skipping" again
2017 * and again, descending to the prior leaf page's direct sibling leaf page
2018 * each time. This misbehavior would otherwise be possible during scans
2019 * that never quite manage to "clear the first page finaltup hurdle".
2020 */
2021 if (!pstate->firstpage || pstate->nskipadvances > NSKIPADVANCES_THRESHOLD)
2022 {
2023 /* Schedule a recheck once on the next (or previous) page */
2024 so->scanBehind = true;
2025
2026 /* Continue the current primitive scan after all */
2027 goto continue_scan;
2028 }
2029
2030 /*
2031 * End this primitive index scan, but schedule another.
2032 *
2033 * Note: We make a soft assumption that the current scan direction will
2034 * also be used within _bt_next, when it is asked to step off this page.
2035 * It is up to _bt_next to cancel this scheduled primitive index scan
2036 * whenever it steps to a page in the direction opposite currPos.dir.
2037 */
2038 pstate->continuescan = false; /* Tell _bt_readpage we're done... */
2039 so->needPrimScan = true; /* ...but call _bt_first again */
2040
2041 if (scan->parallel_scan)
2043
2044 /* Caller's tuple doesn't match the new qual */
2045 return false;
2046
2047end_toplevel_scan:
2048
2049 /*
2050 * End the current primitive index scan, but don't schedule another.
2051 *
2052 * This ends the entire top-level scan in the current scan direction.
2053 *
2054 * Note: The scan's arrays (including any non-required arrays) are now in
2055 * their final positions for the current scan direction. If the scan
2056 * direction happens to change, then the arrays will already be in their
2057 * first positions for what will then be the current scan direction.
2058 */
2059 pstate->continuescan = false; /* Tell _bt_readpage we're done... */
2060 so->needPrimScan = false; /* ...and don't call _bt_first again */
2061
2062 /* Caller's tuple doesn't match any qual */
2063 return false;
2064}
2065
2066#ifdef USE_ASSERT_CHECKING
2067/*
2068 * Verify that the scan's "so->keyData[]" scan keys are in agreement with
2069 * its array key state
2070 */
2071static bool
2072_bt_verify_keys_with_arraykeys(IndexScanDesc scan)
2073{
2074 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2075 int last_sk_attno = InvalidAttrNumber,
2076 arrayidx = 0;
2077 bool nonrequiredseen = false;
2078
2079 if (!so->qual_ok)
2080 return false;
2081
2082 for (int ikey = 0; ikey < so->numberOfKeys; ikey++)
2083 {
2084 ScanKey cur = so->keyData + ikey;
2085 BTArrayKeyInfo *array;
2086
2087 if (cur->sk_strategy != BTEqualStrategyNumber ||
2088 !(cur->sk_flags & SK_SEARCHARRAY))
2089 continue;
2090
2091 array = &so->arrayKeys[arrayidx++];
2092 if (array->scan_key != ikey)
2093 return false;
2094
2095 if (array->num_elems == 0 || array->num_elems < -1)
2096 return false;
2097
2098 if (array->num_elems != -1 &&
2099 cur->sk_argument != array->elem_values[array->cur_elem])
2100 return false;
2101 if (cur->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD))
2102 {
2103 if (last_sk_attno > cur->sk_attno)
2104 return false;
2105 if (nonrequiredseen)
2106 return false;
2107 }
2108 else
2109 nonrequiredseen = true;
2110
2111 last_sk_attno = cur->sk_attno;
2112 }
2113
2114 if (arrayidx != so->numArrayKeys)
2115 return false;
2116
2117 return true;
2118}
2119#endif
2120
2121/*
2122 * Test whether an indextuple satisfies all the scankey conditions.
2123 *
2124 * Return true if so, false if not. If the tuple fails to pass the qual,
2125 * we also determine whether there's any need to continue the scan beyond
2126 * this tuple, and set pstate.continuescan accordingly. See comments for
2127 * _bt_preprocess_keys() about how this is done.
2128 *
2129 * Forward scan callers can pass a high key tuple in the hopes of having
2130 * us set *continuescan to false, and avoiding an unnecessary visit to
2131 * the page to the right.
2132 *
2133 * Advances the scan's array keys when necessary for arrayKeys=true callers.
2134 * Scans without any array keys must always pass arrayKeys=false.
2135 *
2136 * Also stops and starts primitive index scans for arrayKeys=true callers.
2137 * Scans with array keys are required to set up page state that helps us with
2138 * this. The page's finaltup tuple (the page high key for a forward scan, or
2139 * the page's first non-pivot tuple for a backward scan) must be set in
2140 * pstate.finaltup ahead of the first call here for the page. Set this to
2141 * NULL for rightmost page (or the leftmost page for backwards scans).
2142 *
2143 * scan: index scan descriptor (containing a search-type scankey)
2144 * pstate: page level input and output parameters
2145 * arrayKeys: should we advance the scan's array keys if necessary?
2146 * tuple: index tuple to test
2147 * tupnatts: number of attributes in tupnatts (high key may be truncated)
2148 */
2149bool
2150_bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys,
2151 IndexTuple tuple, int tupnatts)
2152{
2153 TupleDesc tupdesc = RelationGetDescr(scan->indexRelation);
2154 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2155 ScanDirection dir = so->currPos.dir;
2156 int ikey = pstate->startikey;
2157 bool res;
2158
2159 Assert(BTreeTupleGetNAtts(tuple, scan->indexRelation) == tupnatts);
2160 Assert(!so->needPrimScan && !so->scanBehind && !so->oppositeDirCheck);
2161 Assert(arrayKeys || so->numArrayKeys == 0);
2162
2163 res = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, arrayKeys,
2164 pstate->forcenonrequired, &pstate->continuescan,
2165 &ikey);
2166
2167 /*
2168 * If _bt_check_compare relied on the pstate.startikey optimization, call
2169 * again (in assert-enabled builds) to verify it didn't affect our answer.
2170 *
2171 * Note: we can't do this when !pstate.forcenonrequired, since any arrays
2172 * before pstate.startikey won't have advanced on this page at all.
2173 */
2174 Assert(!pstate->forcenonrequired || arrayKeys);
2175#ifdef USE_ASSERT_CHECKING
2176 if (pstate->startikey > 0 && !pstate->forcenonrequired)
2177 {
2178 bool dres,
2179 dcontinuescan;
2180 int dikey = 0;
2181
2182 /* Pass arrayKeys=false to avoid array side-effects */
2183 dres = _bt_check_compare(scan, dir, tuple, tupnatts, tupdesc, false,
2184 pstate->forcenonrequired, &dcontinuescan,
2185 &dikey);
2186 Assert(res == dres);
2187 Assert(pstate->continuescan == dcontinuescan);
2188
2189 /*
2190 * Should also get the same ikey result. We need a slightly weaker
2191 * assertion during arrayKeys calls, since they might be using an
2192 * array that couldn't be marked required during preprocessing.
2193 */
2194 Assert(arrayKeys || ikey == dikey);
2195 Assert(ikey <= dikey);
2196 }
2197#endif
2198
2199 /*
2200 * Only one _bt_check_compare call is required in the common case where
2201 * there are no equality strategy array scan keys. Otherwise we can only
2202 * accept _bt_check_compare's answer unreservedly when it didn't set
2203 * pstate.continuescan=false.
2204 */
2205 if (!arrayKeys || pstate->continuescan)
2206 return res;
2207
2208 /*
2209 * _bt_check_compare call set continuescan=false in the presence of
2210 * equality type array keys. This could mean that the tuple is just past
2211 * the end of matches for the current array keys.
2212 *
2213 * It's also possible that the scan is still _before_ the _start_ of
2214 * tuples matching the current set of array keys. Check for that first.
2215 */
2216 Assert(!pstate->forcenonrequired);
2217 if (_bt_tuple_before_array_skeys(scan, dir, tuple, tupdesc, tupnatts, true,
2218 ikey, NULL))
2219 {
2220 /* Override _bt_check_compare, continue primitive scan */
2221 pstate->continuescan = true;
2222
2223 /*
2224 * We will end up here repeatedly given a group of tuples > the
2225 * previous array keys and < the now-current keys (for a backwards
2226 * scan it's just the same, though the operators swap positions).
2227 *
2228 * We must avoid allowing this linear search process to scan very many
2229 * tuples from well before the start of tuples matching the current
2230 * array keys (or from well before the point where we'll once again
2231 * have to advance the scan's array keys).
2232 *
2233 * We keep the overhead under control by speculatively "looking ahead"
2234 * to later still-unscanned items from this same leaf page. We'll
2235 * only attempt this once the number of tuples that the linear search
2236 * process has examined starts to get out of hand.
2237 */
2238 pstate->rechecks++;
2240 {
2241 /* See if we should skip ahead within the current leaf page */
2242 _bt_checkkeys_look_ahead(scan, pstate, tupnatts, tupdesc);
2243
2244 /*
2245 * Might have set pstate.skip to a later page offset. When that
2246 * happens then _bt_readpage caller will inexpensively skip ahead
2247 * to a later tuple from the same page (the one just after the
2248 * tuple we successfully "looked ahead" to).
2249 */
2250 }
2251
2252 /* This indextuple doesn't match the current qual, in any case */
2253 return false;
2254 }
2255
2256 /*
2257 * Caller's tuple is >= the current set of array keys and other equality
2258 * constraint scan keys (or <= if this is a backwards scan). It's now
2259 * clear that we _must_ advance any required array keys in lockstep with
2260 * the scan.
2261 */
2262 return _bt_advance_array_keys(scan, pstate, tuple, tupnatts, tupdesc,
2263 ikey, true);
2264}
2265
2266/*
2267 * Test whether caller's finaltup tuple is still before the start of matches
2268 * for the current array keys.
2269 *
2270 * Called at the start of reading a page during a scan with array keys, though
2271 * only when the so->scanBehind flag was set on the scan's prior page.
2272 *
2273 * Returns false if the tuple is still before the start of matches. When that
2274 * happens, caller should cut its losses and start a new primitive index scan.
2275 * Otherwise returns true.
2276 */
2277bool
2279 IndexTuple finaltup)
2280{
2281 Relation rel = scan->indexRelation;
2282 TupleDesc tupdesc = RelationGetDescr(rel);
2283 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2284 int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2285 bool scanBehind;
2286
2287 Assert(so->numArrayKeys);
2288
2289 if (_bt_tuple_before_array_skeys(scan, dir, finaltup, tupdesc,
2290 nfinaltupatts, false, 0, &scanBehind))
2291 return false;
2292
2293 /*
2294 * If scanBehind was set, all of the untruncated attribute values from
2295 * finaltup that correspond to an array match the array's current element,
2296 * but there are other keys associated with truncated suffix attributes.
2297 * Array advancement must have incremented the scan's arrays on the
2298 * previous page, resulting in a set of array keys that happen to be an
2299 * exact match for the current page high key's untruncated prefix values.
2300 *
2301 * This page definitely doesn't contain tuples that the scan will need to
2302 * return. The next page may or may not contain relevant tuples. Handle
2303 * this by cutting our losses and starting a new primscan.
2304 */
2305 if (scanBehind)
2306 return false;
2307
2308 if (!so->oppositeDirCheck)
2309 return true;
2310
2311 return _bt_oppodir_checkkeys(scan, dir, finaltup);
2312}
2313
2314/*
2315 * Test whether an indextuple fails to satisfy an inequality required in the
2316 * opposite direction only.
2317 *
2318 * Caller's finaltup tuple is the page high key (for forwards scans), or the
2319 * first non-pivot tuple (for backwards scans). Called during scans with
2320 * required array keys and required opposite-direction inequalities.
2321 *
2322 * Returns false if an inequality scan key required in the opposite direction
2323 * only isn't satisfied (and any earlier required scan keys are satisfied).
2324 * Otherwise returns true.
2325 *
2326 * An unsatisfied inequality required in the opposite direction only might
2327 * well enable skipping over many leaf pages, provided another _bt_first call
2328 * takes place. This type of unsatisfied inequality won't usually cause
2329 * _bt_checkkeys to stop the scan to consider array advancement/starting a new
2330 * primitive index scan.
2331 */
2332static bool
2334 IndexTuple finaltup)
2335{
2336 Relation rel = scan->indexRelation;
2337 TupleDesc tupdesc = RelationGetDescr(rel);
2338 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2339 int nfinaltupatts = BTreeTupleGetNAtts(finaltup, rel);
2340 bool continuescan;
2341 ScanDirection flipped = -dir;
2342 int ikey = 0;
2343
2344 Assert(so->numArrayKeys);
2345
2346 _bt_check_compare(scan, flipped, finaltup, nfinaltupatts, tupdesc, false,
2347 false, &continuescan,
2348 &ikey);
2349
2350 if (!continuescan && so->keyData[ikey].sk_strategy != BTEqualStrategyNumber)
2351 return false;
2352
2353 return true;
2354}
2355
2356/*
2357 * Determines an offset to the first scan key (an so->keyData[]-wise offset)
2358 * that is _not_ guaranteed to be satisfied by every tuple from pstate.page,
2359 * which is set in pstate.startikey for _bt_checkkeys calls for the page.
2360 * This allows caller to save cycles on comparisons of a prefix of keys while
2361 * reading pstate.page.
2362 *
2363 * Also determines if later calls to _bt_checkkeys (for pstate.page) should be
2364 * forced to treat all required scan keys >= pstate.startikey as nonrequired
2365 * (that is, if they're to be treated as if any SK_BT_REQFWD/SK_BT_REQBKWD
2366 * markings that were set by preprocessing were not set at all, for the
2367 * duration of _bt_checkkeys calls prior to the call for pstate.finaltup).
2368 * This is indicated to caller by setting pstate.forcenonrequired.
2369 *
2370 * Call here at the start of reading a leaf page beyond the first one for the
2371 * primitive index scan. We consider all non-pivot tuples, so it doesn't make
2372 * sense to call here when only a subset of those tuples can ever be read.
2373 * This is also a good idea on performance grounds; not calling here when on
2374 * the first page (first for the current primitive scan) avoids wasting cycles
2375 * during selective point queries. They typically don't stand to gain as much
2376 * when we can set pstate.startikey, and are likely to notice the overhead of
2377 * calling here. (Also, allowing pstate.forcenonrequired to be set on a
2378 * primscan's first page would mislead _bt_advance_array_keys, which expects
2379 * pstate.nskipadvances to be representative of every first page's key space.)
2380 *
2381 * Caller must call _bt_start_array_keys and reset startikey/forcenonrequired
2382 * ahead of the finaltup _bt_checkkeys call when we set forcenonrequired=true.
2383 * This will give _bt_checkkeys the opportunity to call _bt_advance_array_keys
2384 * with sktrig_required=true, restoring the invariant that the scan's required
2385 * arrays always track the scan's progress through the index's key space.
2386 * Caller won't need to do this on the rightmost/leftmost page in the index
2387 * (where pstate.finaltup isn't ever set), since forcenonrequired will never
2388 * be set here in the first place.
2389 */
2390void
2392{
2393 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2394 Relation rel = scan->indexRelation;
2395 TupleDesc tupdesc = RelationGetDescr(rel);
2396 ItemId iid;
2397 IndexTuple firsttup,
2398 lasttup;
2399 int startikey = 0,
2400 arrayidx = 0,
2401 firstchangingattnum;
2402 bool start_past_saop_eq = false;
2403
2404 Assert(!so->scanBehind);
2405 Assert(pstate->minoff < pstate->maxoff);
2406 Assert(!pstate->firstpage);
2407 Assert(pstate->startikey == 0);
2408 Assert(!so->numArrayKeys || pstate->finaltup ||
2409 P_RIGHTMOST(BTPageGetOpaque(pstate->page)) ||
2410 P_LEFTMOST(BTPageGetOpaque(pstate->page)));
2411
2412 if (so->numberOfKeys == 0)
2413 return;
2414
2415 /* minoff is an offset to the lowest non-pivot tuple on the page */
2416 iid = PageGetItemId(pstate->page, pstate->minoff);
2417 firsttup = (IndexTuple) PageGetItem(pstate->page, iid);
2418
2419 /* maxoff is an offset to the highest non-pivot tuple on the page */
2420 iid = PageGetItemId(pstate->page, pstate->maxoff);
2421 lasttup = (IndexTuple) PageGetItem(pstate->page, iid);
2422
2423 /* Determine the first attribute whose values change on caller's page */
2424 firstchangingattnum = _bt_keep_natts_fast(rel, firsttup, lasttup);
2425
2426 for (; startikey < so->numberOfKeys; startikey++)
2427 {
2428 ScanKey key = so->keyData + startikey;
2429 BTArrayKeyInfo *array;
2430 Datum firstdatum,
2431 lastdatum;
2432 bool firstnull,
2433 lastnull;
2434 int32 result;
2435
2436 /*
2437 * Determine if it's safe to set pstate.startikey to an offset to a
2438 * key that comes after this key, by examining this key
2439 */
2440 if (key->sk_flags & SK_ROW_HEADER)
2441 {
2442 /* RowCompare inequality (header key) */
2443 ScanKey subkey = (ScanKey) DatumGetPointer(key->sk_argument);
2444 bool satisfied = false;
2445
2446 for (;;)
2447 {
2448 int cmpresult;
2449 bool firstsatisfies = false;
2450
2451 if (subkey->sk_attno > firstchangingattnum) /* >, not >= */
2452 break; /* unsafe, preceding attr has multiple
2453 * distinct values */
2454
2455 if (subkey->sk_flags & SK_ISNULL)
2456 break; /* unsafe, unsatisfiable NULL subkey arg */
2457
2458 firstdatum = index_getattr(firsttup, subkey->sk_attno,
2459 tupdesc, &firstnull);
2460 lastdatum = index_getattr(lasttup, subkey->sk_attno,
2461 tupdesc, &lastnull);
2462
2463 if (firstnull || lastnull)
2464 break; /* unsafe, NULL value won't satisfy subkey */
2465
2466 /*
2467 * Compare the first tuple's datum for this row compare member
2468 */
2469 cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2470 subkey->sk_collation,
2471 firstdatum,
2472 subkey->sk_argument));
2473 if (subkey->sk_flags & SK_BT_DESC)
2474 INVERT_COMPARE_RESULT(cmpresult);
2475
2476 if (cmpresult != 0 || (subkey->sk_flags & SK_ROW_END))
2477 {
2478 firstsatisfies = _bt_rowcompare_cmpresult(subkey,
2479 cmpresult);
2480 if (!firstsatisfies)
2481 {
2482 /* Unsafe, firstdatum does not satisfy subkey */
2483 break;
2484 }
2485 }
2486
2487 /*
2488 * Compare the last tuple's datum for this row compare member
2489 */
2490 cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
2491 subkey->sk_collation,
2492 lastdatum,
2493 subkey->sk_argument));
2494 if (subkey->sk_flags & SK_BT_DESC)
2495 INVERT_COMPARE_RESULT(cmpresult);
2496
2497 if (cmpresult != 0 || (subkey->sk_flags & SK_ROW_END))
2498 {
2499 if (!firstsatisfies)
2500 {
2501 /*
2502 * It's only safe to set startikey beyond the row
2503 * compare header key when both firsttup and lasttup
2504 * satisfy the key as a whole based on the same
2505 * deciding subkey/attribute. That can't happen now.
2506 */
2507 break; /* unsafe */
2508 }
2509
2510 satisfied = _bt_rowcompare_cmpresult(subkey, cmpresult);
2511 break; /* safe iff 'satisfied' is true */
2512 }
2513
2514 /* Move on to next row member/subkey */
2515 if (subkey->sk_flags & SK_ROW_END)
2516 break; /* defensive */
2517 subkey++;
2518
2519 /*
2520 * We deliberately don't check if the next subkey has the same
2521 * strategy as this iteration's subkey (which happens when
2522 * subkeys for both ASC and DESC columns are used together),
2523 * nor if any subkey is marked required. This is safe because
2524 * in general all prior index attributes must have only one
2525 * distinct value (across all of the tuples on the page) in
2526 * order for us to even consider any subkey's attribute.
2527 */
2528 }
2529
2530 if (satisfied)
2531 {
2532 /* Safe, row compare satisfied by every tuple on page */
2533 continue;
2534 }
2535
2536 break; /* unsafe */
2537 }
2538 if (key->sk_strategy != BTEqualStrategyNumber)
2539 {
2540 /*
2541 * Scalar inequality key.
2542 *
2543 * It's definitely safe for _bt_checkkeys to avoid assessing this
2544 * inequality when the page's first and last non-pivot tuples both
2545 * satisfy the inequality (since the same must also be true of all
2546 * the tuples in between these two).
2547 *
2548 * Unlike the "=" case, it doesn't matter if this attribute has
2549 * more than one distinct value (though it _is_ necessary for any
2550 * and all _prior_ attributes to contain no more than one distinct
2551 * value amongst all of the tuples from pstate.page).
2552 */
2553 if (key->sk_attno > firstchangingattnum) /* >, not >= */
2554 break; /* unsafe, preceding attr has multiple
2555 * distinct values */
2556
2557 firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2558 lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2559
2560 if (key->sk_flags & SK_ISNULL)
2561 {
2562 /* IS NOT NULL key */
2563 Assert(key->sk_flags & SK_SEARCHNOTNULL);
2564
2565 if (firstnull || lastnull)
2566 break; /* unsafe */
2567
2568 /* Safe, IS NOT NULL key satisfied by every tuple */
2569 continue;
2570 }
2571
2572 /* Test firsttup */
2573 if (firstnull ||
2575 key->sk_collation, firstdatum,
2576 key->sk_argument)))
2577 break; /* unsafe */
2578
2579 /* Test lasttup */
2580 if (lastnull ||
2582 key->sk_collation, lastdatum,
2583 key->sk_argument)))
2584 break; /* unsafe */
2585
2586 /* Safe, scalar inequality satisfied by every tuple */
2587 continue;
2588 }
2589
2590 /* Some = key (could be a scalar = key, could be an array = key) */
2591 Assert(key->sk_strategy == BTEqualStrategyNumber);
2592
2593 if (!(key->sk_flags & SK_SEARCHARRAY))
2594 {
2595 /*
2596 * Scalar = key (possibly an IS NULL key).
2597 *
2598 * It is unsafe to set pstate.startikey to an ikey beyond this
2599 * key, unless the = key is satisfied by every possible tuple on
2600 * the page (possible only when attribute has just one distinct
2601 * value among all tuples on the page).
2602 */
2603 if (key->sk_attno >= firstchangingattnum)
2604 break; /* unsafe, multiple distinct attr values */
2605
2606 firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2607 &firstnull);
2608 if (key->sk_flags & SK_ISNULL)
2609 {
2610 /* IS NULL key */
2611 Assert(key->sk_flags & SK_SEARCHNULL);
2612
2613 if (!firstnull)
2614 break; /* unsafe */
2615
2616 /* Safe, IS NULL key satisfied by every tuple */
2617 continue;
2618 }
2619 if (firstnull ||
2621 key->sk_collation, firstdatum,
2622 key->sk_argument)))
2623 break; /* unsafe */
2624
2625 /* Safe, scalar = key satisfied by every tuple */
2626 continue;
2627 }
2628
2629 /* = array key (could be a SAOP array, could be a skip array) */
2630 array = &so->arrayKeys[arrayidx++];
2631 Assert(array->scan_key == startikey);
2632 if (array->num_elems != -1)
2633 {
2634 /*
2635 * SAOP array = key.
2636 *
2637 * Handle this like we handle scalar = keys (though binary search
2638 * for a matching element, to avoid relying on key's sk_argument).
2639 */
2640 if (key->sk_attno >= firstchangingattnum)
2641 break; /* unsafe, multiple distinct attr values */
2642
2643 firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc,
2644 &firstnull);
2645 _bt_binsrch_array_skey(&so->orderProcs[startikey],
2647 firstdatum, firstnull, array, key,
2648 &result);
2649 if (result != 0)
2650 break; /* unsafe */
2651
2652 /* Safe, SAOP = key satisfied by every tuple */
2653 start_past_saop_eq = true;
2654 continue;
2655 }
2656
2657 /*
2658 * Skip array = key
2659 */
2660 Assert(key->sk_flags & SK_BT_SKIP);
2661 if (array->null_elem)
2662 {
2663 /*
2664 * Non-range skip array = key.
2665 *
2666 * Safe, non-range skip array "satisfied" by every tuple on page
2667 * (safe even when "key->sk_attno > firstchangingattnum").
2668 */
2669 continue;
2670 }
2671
2672 /*
2673 * Range skip array = key.
2674 *
2675 * Handle this like we handle scalar inequality keys (but avoid using
2676 * key's sk_argument directly, as in the SAOP array case).
2677 */
2678 if (key->sk_attno > firstchangingattnum) /* >, not >= */
2679 break; /* unsafe, preceding attr has multiple
2680 * distinct values */
2681
2682 firstdatum = index_getattr(firsttup, key->sk_attno, tupdesc, &firstnull);
2683 lastdatum = index_getattr(lasttup, key->sk_attno, tupdesc, &lastnull);
2684
2685 /* Test firsttup */
2687 firstdatum, firstnull, array, key,
2688 &result);
2689 if (result != 0)
2690 break; /* unsafe */
2691
2692 /* Test lasttup */
2694 lastdatum, lastnull, array, key,
2695 &result);
2696 if (result != 0)
2697 break; /* unsafe */
2698
2699 /* Safe, range skip array satisfied by every tuple on page */
2700 }
2701
2702 /*
2703 * Use of forcenonrequired is typically undesirable, since it'll force
2704 * _bt_readpage caller to read every tuple on the page -- even though, in
2705 * general, it might well be possible to end the scan on an earlier tuple.
2706 * However, caller must use forcenonrequired when start_past_saop_eq=true,
2707 * since the usual required array behavior might fail to roll over to the
2708 * SAOP array.
2709 *
2710 * We always prefer forcenonrequired=true during scans with skip arrays
2711 * (except on the first page of each primitive index scan), though -- even
2712 * when "startikey == 0". That way, _bt_advance_array_keys's low-order
2713 * key precheck optimization can always be used (unless on the first page
2714 * of the scan). It seems slightly preferable to check more tuples when
2715 * that allows us to do significantly less skip array maintenance.
2716 */
2717 pstate->forcenonrequired = (start_past_saop_eq || so->skipScan);
2718 pstate->startikey = startikey;
2719
2720 /*
2721 * _bt_readpage caller is required to call _bt_checkkeys against page's
2722 * finaltup with forcenonrequired=false whenever we initially set
2723 * forcenonrequired=true. That way the scan's arrays will reliably track
2724 * its progress through the index's key space.
2725 *
2726 * We don't expect this when _bt_readpage caller has no finaltup due to
2727 * its page being the rightmost (or the leftmost, during backwards scans).
2728 * When we see that _bt_readpage has no finaltup, back out of everything.
2729 */
2730 Assert(!pstate->forcenonrequired || so->numArrayKeys);
2731 if (pstate->forcenonrequired && !pstate->finaltup)
2732 {
2733 pstate->forcenonrequired = false;
2734 pstate->startikey = 0;
2735 }
2736}
2737
2738/*
2739 * Test whether an indextuple satisfies current scan condition.
2740 *
2741 * Return true if so, false if not. If not, also sets *continuescan to false
2742 * when it's also not possible for any later tuples to pass the current qual
2743 * (with the scan's current set of array keys, in the current scan direction),
2744 * in addition to setting *ikey to the so->keyData[] subscript/offset for the
2745 * unsatisfied scan key (needed when caller must consider advancing the scan's
2746 * array keys).
2747 *
2748 * This is a subroutine for _bt_checkkeys. We provisionally assume that
2749 * reaching the end of the current set of required keys (in particular the
2750 * current required array keys) ends the ongoing (primitive) index scan.
2751 * Callers without array keys should just end the scan right away when they
2752 * find that continuescan has been set to false here by us. Things are more
2753 * complicated for callers with array keys.
2754 *
2755 * Callers with array keys must first consider advancing the arrays when
2756 * continuescan has been set to false here by us. They must then consider if
2757 * it really does make sense to end the current (primitive) index scan, in
2758 * light of everything that is known at that point. (In general when we set
2759 * continuescan=false for these callers it must be treated as provisional.)
2760 *
2761 * We deal with advancing unsatisfied non-required arrays directly, though.
2762 * This is safe, since by definition non-required keys can't end the scan.
2763 * This is just how we determine if non-required arrays are just unsatisfied
2764 * by the current array key, or if they're truly unsatisfied (that is, if
2765 * they're unsatisfied by every possible array key).
2766 *
2767 * Pass advancenonrequired=false to avoid all array related side effects.
2768 * This allows _bt_advance_array_keys caller to avoid infinite recursion.
2769 *
2770 * Pass forcenonrequired=true to instruct us to treat all keys as nonrequired.
2771 * This is used to make it safe to temporarily stop properly maintaining the
2772 * scan's required arrays. _bt_checkkeys caller (_bt_readpage, actually)
2773 * determines a prefix of keys that must satisfy every possible corresponding
2774 * index attribute value from its page, which is passed to us via *ikey arg
2775 * (this is the first key that might be unsatisfied by tuples on the page).
2776 * Obviously, we won't maintain any array keys from before *ikey, so it's
2777 * quite possible for such arrays to "fall behind" the index's keyspace.
2778 * Caller will need to "catch up" by passing forcenonrequired=true (alongside
2779 * an *ikey=0) once the page's finaltup is reached.
2780 *
2781 * Note: it's safe to pass an *ikey > 0 with forcenonrequired=false, but only
2782 * when caller determines that it won't affect array maintenance.
2783 */
2784static bool
2786 IndexTuple tuple, int tupnatts, TupleDesc tupdesc,
2787 bool advancenonrequired, bool forcenonrequired,
2788 bool *continuescan, int *ikey)
2789{
2790 BTScanOpaque so = (BTScanOpaque) scan->opaque;
2791
2792 *continuescan = true; /* default assumption */
2793
2794 for (; *ikey < so->numberOfKeys; (*ikey)++)
2795 {
2796 ScanKey key = so->keyData + *ikey;
2797 Datum datum;
2798 bool isNull;
2799 bool requiredSameDir = false,
2800 requiredOppositeDirOnly = false;
2801
2802 /*
2803 * Check if the key is required in the current scan direction, in the
2804 * opposite scan direction _only_, or in neither direction (except
2805 * when we're forced to treat all scan keys as nonrequired)
2806 */
2807 if (forcenonrequired)
2808 {
2809 /* treating scan's keys as non-required */
2810 }
2811 else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsForward(dir)) ||
2812 ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsBackward(dir)))
2813 requiredSameDir = true;
2814 else if (((key->sk_flags & SK_BT_REQFWD) && ScanDirectionIsBackward(dir)) ||
2815 ((key->sk_flags & SK_BT_REQBKWD) && ScanDirectionIsForward(dir)))
2816 requiredOppositeDirOnly = true;
2817
2818 if (key->sk_attno > tupnatts)
2819 {
2820 /*
2821 * This attribute is truncated (must be high key). The value for
2822 * this attribute in the first non-pivot tuple on the page to the
2823 * right could be any possible value. Assume that truncated
2824 * attribute passes the qual.
2825 */
2826 Assert(BTreeTupleIsPivot(tuple));
2827 continue;
2828 }
2829
2830 /*
2831 * A skip array scan key uses one of several sentinel values. We just
2832 * fall back on _bt_tuple_before_array_skeys when we see such a value.
2833 */
2834 if (key->sk_flags & (SK_BT_MINVAL | SK_BT_MAXVAL |
2836 {
2837 Assert(key->sk_flags & SK_SEARCHARRAY);
2838 Assert(key->sk_flags & SK_BT_SKIP);
2839 Assert(requiredSameDir || forcenonrequired);
2840
2841 /*
2842 * Cannot fall back on _bt_tuple_before_array_skeys when we're
2843 * treating the scan's keys as nonrequired, though. Just handle
2844 * this like any other non-required equality-type array key.
2845 */
2846 if (forcenonrequired)
2847 return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2848 tupdesc, *ikey, false);
2849
2850 *continuescan = false;
2851 return false;
2852 }
2853
2854 /* row-comparison keys need special processing */
2855 if (key->sk_flags & SK_ROW_HEADER)
2856 {
2857 if (_bt_check_rowcompare(key, tuple, tupnatts, tupdesc, dir,
2858 forcenonrequired, continuescan))
2859 continue;
2860 return false;
2861 }
2862
2863 datum = index_getattr(tuple,
2864 key->sk_attno,
2865 tupdesc,
2866 &isNull);
2867
2868 if (key->sk_flags & SK_ISNULL)
2869 {
2870 /* Handle IS NULL/NOT NULL tests */
2871 if (key->sk_flags & SK_SEARCHNULL)
2872 {
2873 if (isNull)
2874 continue; /* tuple satisfies this qual */
2875 }
2876 else
2877 {
2878 Assert(key->sk_flags & SK_SEARCHNOTNULL);
2879 Assert(!(key->sk_flags & SK_BT_SKIP));
2880 if (!isNull)
2881 continue; /* tuple satisfies this qual */
2882 }
2883
2884 /*
2885 * Tuple fails this qual. If it's a required qual for the current
2886 * scan direction, then we can conclude no further tuples will
2887 * pass, either.
2888 */
2889 if (requiredSameDir)
2890 *continuescan = false;
2891 else if (unlikely(key->sk_flags & SK_BT_SKIP))
2892 {
2893 /*
2894 * If we're treating scan keys as nonrequired, and encounter a
2895 * skip array scan key whose current element is NULL, then it
2896 * must be a non-range skip array. It must be satisfied, so
2897 * there's no need to call _bt_advance_array_keys to check.
2898 */
2899 Assert(forcenonrequired && *ikey > 0);
2900 continue;
2901 }
2902
2903 /*
2904 * This indextuple doesn't match the qual.
2905 */
2906 return false;
2907 }
2908
2909 if (isNull)
2910 {
2911 /*
2912 * Scalar scan key isn't satisfied by NULL tuple value.
2913 *
2914 * If we're treating scan keys as nonrequired, and key is for a
2915 * skip array, then we must attempt to advance the array to NULL
2916 * (if we're successful then the tuple might match the qual).
2917 */
2918 if (unlikely(forcenonrequired && key->sk_flags & SK_BT_SKIP))
2919 return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2920 tupdesc, *ikey, false);
2921
2922 if (key->sk_flags & SK_BT_NULLS_FIRST)
2923 {
2924 /*
2925 * Since NULLs are sorted before non-NULLs, we know we have
2926 * reached the lower limit of the range of values for this
2927 * index attr. On a backward scan, we can stop if this qual
2928 * is one of the "must match" subset. We can stop regardless
2929 * of whether the qual is > or <, so long as it's required,
2930 * because it's not possible for any future tuples to pass. On
2931 * a forward scan, however, we must keep going, because we may
2932 * have initially positioned to the start of the index.
2933 * (_bt_advance_array_keys also relies on this behavior during
2934 * forward scans.)
2935 */
2936 if ((requiredSameDir || requiredOppositeDirOnly) &&
2938 *continuescan = false;
2939 }
2940 else
2941 {
2942 /*
2943 * Since NULLs are sorted after non-NULLs, we know we have
2944 * reached the upper limit of the range of values for this
2945 * index attr. On a forward scan, we can stop if this qual is
2946 * one of the "must match" subset. We can stop regardless of
2947 * whether the qual is > or <, so long as it's required,
2948 * because it's not possible for any future tuples to pass. On
2949 * a backward scan, however, we must keep going, because we
2950 * may have initially positioned to the end of the index.
2951 * (_bt_advance_array_keys also relies on this behavior during
2952 * backward scans.)
2953 */
2954 if ((requiredSameDir || requiredOppositeDirOnly) &&
2956 *continuescan = false;
2957 }
2958
2959 /*
2960 * This indextuple doesn't match the qual.
2961 */
2962 return false;
2963 }
2964
2965 if (!DatumGetBool(FunctionCall2Coll(&key->sk_func, key->sk_collation,
2966 datum, key->sk_argument)))
2967 {
2968 /*
2969 * Tuple fails this qual. If it's a required qual for the current
2970 * scan direction, then we can conclude no further tuples will
2971 * pass, either.
2972 */
2973 if (requiredSameDir)
2974 *continuescan = false;
2975
2976 /*
2977 * If this is a non-required equality-type array key, the tuple
2978 * needs to be checked against every possible array key. Handle
2979 * this by "advancing" the scan key's array to a matching value
2980 * (if we're successful then the tuple might match the qual).
2981 */
2982 else if (advancenonrequired &&
2983 key->sk_strategy == BTEqualStrategyNumber &&
2984 (key->sk_flags & SK_SEARCHARRAY))
2985 return _bt_advance_array_keys(scan, NULL, tuple, tupnatts,
2986 tupdesc, *ikey, false);
2987
2988 /*
2989 * This indextuple doesn't match the qual.
2990 */
2991 return false;
2992 }
2993 }
2994
2995 /* If we get here, the tuple passes all index quals. */
2996 return true;
2997}
2998
2999/*
3000 * Call here when a row compare member returns a non-zero result, or with the
3001 * result for the final ROW_END row compare member (no matter the cmpresult).
3002 *
3003 * cmpresult indicates the overall result of the row comparison (must already
3004 * be commuted for DESC subkeys), and subkey is the deciding row member.
3005 */
3006static bool
3007_bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult)
3008{
3009 bool satisfied;
3010
3011 Assert(subkey->sk_flags & SK_ROW_MEMBER);
3012
3013 switch (subkey->sk_strategy)
3014 {
3016 satisfied = (cmpresult < 0);
3017 break;
3019 satisfied = (cmpresult <= 0);
3020 break;
3022 satisfied = (cmpresult >= 0);
3023 break;
3025 satisfied = (cmpresult > 0);
3026 break;
3027 default:
3028 /* EQ and NE cases aren't allowed here */
3029 elog(ERROR, "unexpected strategy number %d", subkey->sk_strategy);
3030 satisfied = false; /* keep compiler quiet */
3031 break;
3032 }
3033
3034 return satisfied;
3035}
3036
3037/*
3038 * Test whether an indextuple satisfies a row-comparison scan condition.
3039 *
3040 * Return true if so, false if not. If not, also clear *continuescan if
3041 * it's not possible for any future tuples in the current scan direction
3042 * to pass the qual.
3043 *
3044 * This is a subroutine for _bt_checkkeys/_bt_check_compare. Caller passes us
3045 * a row compare header key taken from so->keyData[].
3046 *
3047 * Row value comparisons can be described in terms of logical expansions that
3048 * use only scalar operators. Consider the following example row comparison:
3049 *
3050 * "(a, b, c) > (7, 'bar', 62)"
3051 *
3052 * This can be evaluated as:
3053 *
3054 * "(a = 7 AND b = 'bar' AND c > 62) OR (a = 7 AND b > 'bar') OR (a > 7)".
3055 *
3056 * Notice that this condition is satisfied by _all_ rows that satisfy "a > 7",
3057 * and by a subset of all rows that satisfy "a >= 7" (possibly all such rows).
3058 * It _can't_ be satisfied by other rows (where "a < 7" or where "a IS NULL").
3059 * A row comparison header key can therefore often be treated as if it was a
3060 * simple scalar inequality on the row compare's most significant column.
3061 * (For example, _bt_advance_array_keys and most preprocessing routines treat
3062 * row compares like any other same-strategy inequality on the same column.)
3063 *
3064 * Things get more complicated for our row compare given a row where "a = 7".
3065 * Note that a row compare isn't necessarily satisfied by _every_ tuple that
3066 * appears between the first and last satisfied tuple returned by the scan,
3067 * due to the way that its lower-order subkeys are only conditionally applied.
3068 * A forwards scan that uses our example qual might initially return a tuple
3069 * "(a, b, c) = (7, 'zebra', 54)". But it won't subsequently return a tuple
3070 * "(a, b, c) = (7, NULL, 1)" located to the right of the first matching tuple
3071 * (assume that "b" was declared NULLS LAST here). The scan will only return
3072 * additional matches upon reaching tuples where "a > 7". If you rereview our
3073 * example row comparison's logical expansion, you'll understand why this is.
3074 * (Here we assume that all subkeys could be marked required, guaranteeing
3075 * that row comparison order matches index order. This is the common case.)
3076 *
3077 * Note that a row comparison header key behaves _exactly_ the same as a
3078 * similar scalar inequality key on the row's most significant column once the
3079 * scan reaches the point where it no longer needs to evaluate lower-order
3080 * subkeys (or before the point where it starts needing to evaluate them).
3081 * For example, once a forwards scan that uses our example qual reaches the
3082 * first tuple "a > 7", we'll behave in just the same way as our caller would
3083 * behave with a similar scalar inequality "a > 7" for the remainder of the
3084 * scan (assuming that the scan never changes direction/never goes backwards).
3085 * We'll even set continuescan=false according to exactly the same rules as
3086 * the ones our caller applies with simple scalar inequalities, including the
3087 * rules it applies when NULL tuple values don't satisfy an inequality qual.
3088 */
3089static bool
3090_bt_check_rowcompare(ScanKey header, IndexTuple tuple, int tupnatts,
3091 TupleDesc tupdesc, ScanDirection dir,
3092 bool forcenonrequired, bool *continuescan)
3093{
3094 ScanKey subkey = (ScanKey) DatumGetPointer(header->sk_argument);
3095 int32 cmpresult = 0;
3096 bool result;
3097
3098 /* First subkey should be same as the header says */
3099 Assert(header->sk_flags & SK_ROW_HEADER);
3100 Assert(subkey->sk_attno == header->sk_attno);
3101 Assert(subkey->sk_strategy == header->sk_strategy);
3102
3103 /* Loop over columns of the row condition */
3104 for (;;)
3105 {
3106 Datum datum;
3107 bool isNull;
3108
3109 Assert(subkey->sk_flags & SK_ROW_MEMBER);
3110
3111 /* When a NULL row member is compared, the row never matches */
3112 if (subkey->sk_flags & SK_ISNULL)
3113 {
3114 /*
3115 * Unlike the simple-scankey case, this isn't a disallowed case
3116 * (except when it's the first row element that has the NULL arg).
3117 * But it can never match. If all the earlier row comparison
3118 * columns are required for the scan direction, we can stop the
3119 * scan, because there can't be another tuple that will succeed.
3120 */
3121 Assert(subkey != (ScanKey) DatumGetPointer(header->sk_argument));
3122 subkey--;
3123 if (forcenonrequired)
3124 {
3125 /* treating scan's keys as non-required */
3126 }
3127 else if ((subkey->sk_flags & SK_BT_REQFWD) &&
3129 *continuescan = false;
3130 else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
3132 *continuescan = false;
3133 return false;
3134 }
3135
3136 if (subkey->sk_attno > tupnatts)
3137 {
3138 /*
3139 * This attribute is truncated (must be high key). The value for
3140 * this attribute in the first non-pivot tuple on the page to the
3141 * right could be any possible value. Assume that truncated
3142 * attribute passes the qual.
3143 */
3144 Assert(BTreeTupleIsPivot(tuple));
3145 return true;
3146 }
3147
3148 datum = index_getattr(tuple,
3149 subkey->sk_attno,
3150 tupdesc,
3151 &isNull);
3152
3153 if (isNull)
3154 {
3155 int reqflags;
3156
3157 if (forcenonrequired)
3158 {
3159 /* treating scan's keys as non-required */
3160 }
3161 else if (subkey->sk_flags & SK_BT_NULLS_FIRST)
3162 {
3163 /*
3164 * Since NULLs are sorted before non-NULLs, we know we have
3165 * reached the lower limit of the range of values for this
3166 * index attr. On a backward scan, we can stop if this qual
3167 * is one of the "must match" subset. However, on a forwards
3168 * scan, we must keep going, because we may have initially
3169 * positioned to the start of the index.
3170 *
3171 * All required NULLS FIRST > row members can use NULL tuple
3172 * values to end backwards scans, just like with other values.
3173 * A qual "WHERE (a, b, c) > (9, 42, 'foo')" can terminate a
3174 * backwards scan upon reaching the index's rightmost "a = 9"
3175 * tuple whose "b" column contains a NULL (if not sooner).
3176 * Since "b" is NULLS FIRST, we can treat its NULLs as "<" 42.
3177 */
3178 reqflags = SK_BT_REQBKWD;
3179
3180 /*
3181 * When a most significant required NULLS FIRST < row compare
3182 * member sees NULL tuple values during a backwards scan, it
3183 * signals the end of matches for the whole row compare/scan.
3184 * A qual "WHERE (a, b, c) < (9, 42, 'foo')" will terminate a
3185 * backwards scan upon reaching the rightmost tuple whose "a"
3186 * column has a NULL. The "a" NULL value is "<" 9, and yet
3187 * our < row compare will still end the scan. (This isn't
3188 * safe with later/lower-order row members. Notice that it
3189 * can only happen with an "a" NULL some time after the scan
3190 * completely stops needing to use its "b" and "c" members.)
3191 */
3192 if (subkey == (ScanKey) DatumGetPointer(header->sk_argument))
3193 reqflags |= SK_BT_REQFWD; /* safe, first row member */
3194
3195 if ((subkey->sk_flags & reqflags) &&
3197 *continuescan = false;
3198 }
3199 else
3200 {
3201 /*
3202 * Since NULLs are sorted after non-NULLs, we know we have
3203 * reached the upper limit of the range of values for this
3204 * index attr. On a forward scan, we can stop if this qual is
3205 * one of the "must match" subset. However, on a backward
3206 * scan, we must keep going, because we may have initially
3207 * positioned to the end of the index.
3208 *
3209 * All required NULLS LAST < row members can use NULL tuple
3210 * values to end forwards scans, just like with other values.
3211 * A qual "WHERE (a, b, c) < (9, 42, 'foo')" can terminate a
3212 * forwards scan upon reaching the index's leftmost "a = 9"
3213 * tuple whose "b" column contains a NULL (if not sooner).
3214 * Since "b" is NULLS LAST, we can treat its NULLs as ">" 42.
3215 */
3216 reqflags = SK_BT_REQFWD;
3217
3218 /*
3219 * When a most significant required NULLS LAST > row compare
3220 * member sees NULL tuple values during a forwards scan, it
3221 * signals the end of matches for the whole row compare/scan.
3222 * A qual "WHERE (a, b, c) > (9, 42, 'foo')" will terminate a
3223 * forwards scan upon reaching the leftmost tuple whose "a"
3224 * column has a NULL. The "a" NULL value is ">" 9, and yet
3225 * our > row compare will end the scan. (This isn't safe with
3226 * later/lower-order row members. Notice that it can only
3227 * happen with an "a" NULL some time after the scan completely
3228 * stops needing to use its "b" and "c" members.)
3229 */
3230 if (subkey == (ScanKey) DatumGetPointer(header->sk_argument))
3231 reqflags |= SK_BT_REQBKWD; /* safe, first row member */
3232
3233 if ((subkey->sk_flags & reqflags) &&
3235 *continuescan = false;
3236 }
3237
3238 /*
3239 * In any case, this indextuple doesn't match the qual.
3240 */
3241 return false;
3242 }
3243
3244 /* Perform the test --- three-way comparison not bool operator */
3245 cmpresult = DatumGetInt32(FunctionCall2Coll(&subkey->sk_func,
3246 subkey->sk_collation,
3247 datum,
3248 subkey->sk_argument));
3249
3250 if (subkey->sk_flags & SK_BT_DESC)
3251 INVERT_COMPARE_RESULT(cmpresult);
3252
3253 /* Done comparing if unequal, else advance to next column */
3254 if (cmpresult != 0)
3255 break;
3256
3257 if (subkey->sk_flags & SK_ROW_END)
3258 break;
3259 subkey++;
3260 }
3261
3262 /* Final subkey/column determines if row compare is satisfied */
3263 result = _bt_rowcompare_cmpresult(subkey, cmpresult);
3264
3265 if (!result && !forcenonrequired)
3266 {
3267 /*
3268 * Tuple fails this qual. If it's a required qual for the current
3269 * scan direction, then we can conclude no further tuples will pass,
3270 * either. Note we have to look at the deciding column, not
3271 * necessarily the first or last column of the row condition.
3272 */
3273 if ((subkey->sk_flags & SK_BT_REQFWD) &&
3275 *continuescan = false;
3276 else if ((subkey->sk_flags & SK_BT_REQBKWD) &&
3278 *continuescan = false;
3279 }
3280
3281 return result;
3282}
3283
3284/*
3285 * Determine if a scan with array keys should skip over uninteresting tuples.
3286 *
3287 * This is a subroutine for _bt_checkkeys. Called when _bt_readpage's linear
3288 * search process (started after it finishes reading an initial group of
3289 * matching tuples, used to locate the start of the next group of tuples
3290 * matching the next set of required array keys) has already scanned an
3291 * excessive number of tuples whose key space is "between arrays".
3292 *
3293 * When we perform look ahead successfully, we'll sets pstate.skip, which
3294 * instructs _bt_readpage to skip ahead to that tuple next (could be past the
3295 * end of the scan's leaf page). Pages where the optimization is effective
3296 * will generally still need to skip several times. Each call here performs
3297 * only a single "look ahead" comparison of a later tuple, whose distance from
3298 * the current tuple's offset number is determined by applying heuristics.
3299 */
3300static void
3302 int tupnatts, TupleDesc tupdesc)
3303{
3304 BTScanOpaque so = (BTScanOpaque) scan->opaque;
3305 ScanDirection dir = so->currPos.dir;
3306 OffsetNumber aheadoffnum;
3307 IndexTuple ahead;
3308
3309 Assert(!pstate->forcenonrequired);
3310
3311 /* Avoid looking ahead when comparing the page high key */
3312 if (pstate->offnum < pstate->minoff)
3313 return;
3314
3315 /*
3316 * Don't look ahead when there aren't enough tuples remaining on the page
3317 * (in the current scan direction) for it to be worth our while
3318 */
3319 if (ScanDirectionIsForward(dir) &&
3320 pstate->offnum >= pstate->maxoff - LOOK_AHEAD_DEFAULT_DISTANCE)
3321 return;
3322 else if (ScanDirectionIsBackward(dir) &&
3323 pstate->offnum <= pstate->minoff + LOOK_AHEAD_DEFAULT_DISTANCE)
3324 return;
3325
3326 /*
3327 * The look ahead distance starts small, and ramps up as each call here
3328 * allows _bt_readpage to skip over more tuples
3329 */
3330 if (!pstate->targetdistance)
3332 else if (pstate->targetdistance < MaxIndexTuplesPerPage / 2)
3333 pstate->targetdistance *= 2;
3334
3335 /* Don't read past the end (or before the start) of the page, though */
3336 if (ScanDirectionIsForward(dir))
3337 aheadoffnum = Min((int) pstate->maxoff,
3338 (int) pstate->offnum + pstate->targetdistance);
3339 else
3340 aheadoffnum = Max((int) pstate->minoff,
3341 (int) pstate->offnum - pstate->targetdistance);
3342
3343 ahead = (IndexTuple) PageGetItem(pstate->page,
3344 PageGetItemId(pstate->page, aheadoffnum));
3345 if (_bt_tuple_before_array_skeys(scan, dir, ahead, tupdesc, tupnatts,
3346 false, 0, NULL))
3347 {
3348 /*
3349 * Success -- instruct _bt_readpage to skip ahead to very next tuple
3350 * after the one we determined was still before the current array keys
3351 */
3352 if (ScanDirectionIsForward(dir))
3353 pstate->skip = aheadoffnum + 1;
3354 else
3355 pstate->skip = aheadoffnum - 1;
3356 }
3357 else
3358 {
3359 /*
3360 * Failure -- "ahead" tuple is too far ahead (we were too aggressive).
3361 *
3362 * Reset the number of rechecks, and aggressively reduce the target
3363 * distance (we're much more aggressive here than we were when the
3364 * distance was initially ramped up).
3365 */
3366 pstate->rechecks = 0;
3367 pstate->targetdistance = Max(pstate->targetdistance / 8, 1);
3368 }
3369}
3370
3371/*
3372 * _bt_killitems - set LP_DEAD state for items an indexscan caller has
3373 * told us were killed
3374 *
3375 * scan->opaque, referenced locally through so, contains information about the
3376 * current page and killed tuples thereon (generally, this should only be
3377 * called if so->numKilled > 0).
3378 *
3379 * Caller should not have a lock on the so->currPos page, but must hold a
3380 * buffer pin when !so->dropPin. When we return, it still won't be locked.
3381 * It'll continue to hold whatever pins were held before calling here.
3382 *
3383 * We match items by heap TID before assuming they are the right ones to set
3384 * LP_DEAD. If the scan is one that holds a buffer pin on the target page
3385 * continuously from initially reading the items until applying this function
3386 * (if it is a !so->dropPin scan), VACUUM cannot have deleted any items on the
3387 * page, so the page's TIDs can't have been recycled by now. There's no risk
3388 * that we'll confuse a new index tuple that happens to use a recycled TID
3389 * with a now-removed tuple with the same TID (that used to be on this same
3390 * page). We can't rely on that during scans that drop buffer pins eagerly
3391 * (so->dropPin scans), though, so we must condition setting LP_DEAD bits on
3392 * the page LSN having not changed since back when _bt_readpage saw the page.
3393 * We totally give up on setting LP_DEAD bits when the page LSN changed.
3394 *
3395 * We give up much less often during !so->dropPin scans, but it still happens.
3396 * We cope with cases where items have moved right due to insertions. If an
3397 * item has moved off the current page due to a split, we'll fail to find it
3398 * and just give up on it.
3399 */
3400void
3402{
3403 Relation rel = scan->indexRelation;
3404 BTScanOpaque so = (BTScanOpaque) scan->opaque;
3405 Page page;
3406 BTPageOpaque opaque;
3407 OffsetNumber minoff;
3408 OffsetNumber maxoff;
3409 int numKilled = so->numKilled;
3410 bool killedsomething = false;
3411 Buffer buf;
3412
3413 Assert(numKilled > 0);
3415 Assert(scan->heapRelation != NULL); /* can't be a bitmap index scan */
3416
3417 /* Always invalidate so->killedItems[] before leaving so->currPos */
3418 so->numKilled = 0;
3419
3420 if (!so->dropPin)
3421 {
3422 /*
3423 * We have held the pin on this page since we read the index tuples,
3424 * so all we need to do is lock it. The pin will have prevented
3425 * concurrent VACUUMs from recycling any of the TIDs on the page.
3426 */
3428 buf = so->currPos.buf;
3429 _bt_lockbuf(rel, buf, BT_READ);
3430 }
3431 else
3432 {
3433 XLogRecPtr latestlsn;
3434
3437 buf = _bt_getbuf(rel, so->currPos.currPage, BT_READ);
3438
3439 latestlsn = BufferGetLSNAtomic(buf);
3440 Assert(so->currPos.lsn <= latestlsn);
3441 if (so->currPos.lsn != latestlsn)
3442 {
3443 /* Modified, give up on hinting */
3444 _bt_relbuf(rel, buf);
3445 return;
3446 }
3447
3448 /* Unmodified, hinting is safe */
3449 }
3450
3451 page = BufferGetPage(buf);
3452 opaque = BTPageGetOpaque(page);
3453 minoff = P_FIRSTDATAKEY(opaque);
3454 maxoff = PageGetMaxOffsetNumber(page);
3455
3456 for (int i = 0; i < numKilled; i++)
3457 {
3458 int itemIndex = so->killedItems[i];
3459 BTScanPosItem *kitem = &so->currPos.items[itemIndex];
3460 OffsetNumber offnum = kitem->indexOffset;
3461
3462 Assert(itemIndex >= so->currPos.firstItem &&
3463 itemIndex <= so->currPos.lastItem);
3464 if (offnum < minoff)
3465 continue; /* pure paranoia */
3466 while (offnum <= maxoff)
3467 {
3468 ItemId iid = PageGetItemId(page, offnum);
3469 IndexTuple ituple = (IndexTuple) PageGetItem(page, iid);
3470 bool killtuple = false;
3471
3472 if (BTreeTupleIsPosting(ituple))
3473 {
3474 int pi = i + 1;
3475 int nposting = BTreeTupleGetNPosting(ituple);
3476 int j;
3477
3478 /*
3479 * We rely on the convention that heap TIDs in the scanpos
3480 * items array are stored in ascending heap TID order for a
3481 * group of TIDs that originally came from a posting list
3482 * tuple. This convention even applies during backwards
3483 * scans, where returning the TIDs in descending order might
3484 * seem more natural. This is about effectiveness, not
3485 * correctness.
3486 *
3487 * Note that the page may have been modified in almost any way
3488 * since we first read it (in the !so->dropPin case), so it's
3489 * possible that this posting list tuple wasn't a posting list
3490 * tuple when we first encountered its heap TIDs.
3491 */
3492 for (j = 0; j < nposting; j++)
3493 {
3494 ItemPointer item = BTreeTupleGetPostingN(ituple, j);
3495
3496 if (!ItemPointerEquals(item, &kitem->heapTid))
3497 break; /* out of posting list loop */
3498
3499 /*
3500 * kitem must have matching offnum when heap TIDs match,
3501 * though only in the common case where the page can't
3502 * have been concurrently modified
3503 */
3504 Assert(kitem->indexOffset == offnum || !so->dropPin);
3505
3506 /*
3507 * Read-ahead to later kitems here.
3508 *
3509 * We rely on the assumption that not advancing kitem here
3510 * will prevent us from considering the posting list tuple
3511 * fully dead by not matching its next heap TID in next
3512 * loop iteration.
3513 *
3514 * If, on the other hand, this is the final heap TID in
3515 * the posting list tuple, then tuple gets killed
3516 * regardless (i.e. we handle the case where the last
3517 * kitem is also the last heap TID in the last index tuple
3518 * correctly -- posting tuple still gets killed).
3519 */
3520 if (pi < numKilled)
3521 kitem = &so->currPos.items[so->killedItems[pi++]];
3522 }
3523
3524 /*
3525 * Don't bother advancing the outermost loop's int iterator to
3526 * avoid processing killed items that relate to the same
3527 * offnum/posting list tuple. This micro-optimization hardly
3528 * seems worth it. (Further iterations of the outermost loop
3529 * will fail to match on this same posting list's first heap
3530 * TID instead, so we'll advance to the next offnum/index
3531 * tuple pretty quickly.)
3532 */
3533 if (j == nposting)
3534 killtuple = true;
3535 }
3536 else if (ItemPointerEquals(&ituple->t_tid, &kitem->heapTid))
3537 killtuple = true;
3538
3539 /*
3540 * Mark index item as dead, if it isn't already. Since this
3541 * happens while holding a buffer lock possibly in shared mode,
3542 * it's possible that multiple processes attempt to do this
3543 * simultaneously, leading to multiple full-page images being sent
3544 * to WAL (if wal_log_hints or data checksums are enabled), which
3545 * is undesirable.
3546 */
3547 if (killtuple && !ItemIdIsDead(iid))
3548 {
3549 /* found the item/all posting list items */
3550 ItemIdMarkDead(iid);
3551 killedsomething = true;
3552 break; /* out of inner search loop */
3553 }
3554 offnum = OffsetNumberNext(offnum);
3555 }
3556 }
3557
3558 /*
3559 * Since this can be redone later if needed, mark as dirty hint.
3560 *
3561 * Whenever we mark anything LP_DEAD, we also set the page's
3562 * BTP_HAS_GARBAGE flag, which is likewise just a hint. (Note that we
3563 * only rely on the page-level flag in !heapkeyspace indexes.)
3564 */
3565 if (killedsomething)
3566 {
3567 opaque->btpo_flags |= BTP_HAS_GARBAGE;
3568 MarkBufferDirtyHint(buf, true);
3569 }
3570
3571 if (!so->dropPin)
3572 _bt_unlockbuf(rel, buf);
3573 else
3574 _bt_relbuf(rel, buf);
3575}
3576
3577
3578/*
3579 * The following routines manage a shared-memory area in which we track
3580 * assignment of "vacuum cycle IDs" to currently-active btree vacuuming
3581 * operations. There is a single counter which increments each time we
3582 * start a vacuum to assign it a cycle ID. Since multiple vacuums could
3583 * be active concurrently, we have to track the cycle ID for each active
3584 * vacuum; this requires at most MaxBackends entries (usually far fewer).
3585 * We assume at most one vacuum can be active for a given index.
3586 *
3587 * Access to the shared memory area is controlled by BtreeVacuumLock.
3588 * In principle we could use a separate lmgr locktag for each index,
3589 * but a single LWLock is much cheaper, and given the short time that
3590 * the lock is ever held, the concurrency hit should be minimal.
3591 */
3592
3593typedef struct BTOneVacInfo
3594{
3595 LockRelId relid; /* global identifier of an index */
3596 BTCycleId cycleid; /* cycle ID for its active VACUUM */
3598
3599typedef struct BTVacInfo
3600{
3601 BTCycleId cycle_ctr; /* cycle ID most recently assigned */
3602 int num_vacuums; /* number of currently active VACUUMs */
3603 int max_vacuums; /* allocated length of vacuums[] array */
3606
3608
3609
3610/*
3611 * _bt_vacuum_cycleid --- get the active vacuum cycle ID for an index,
3612 * or zero if there is no active VACUUM
3613 *
3614 * Note: for correct interlocking, the caller must already hold pin and
3615 * exclusive lock on each buffer it will store the cycle ID into. This
3616 * ensures that even if a VACUUM starts immediately afterwards, it cannot
3617 * process those pages until the page split is complete.
3618 */
3621{
3622 BTCycleId result = 0;
3623 int i;
3624
3625 /* Share lock is enough since this is a read-only operation */
3626 LWLockAcquire(BtreeVacuumLock, LW_SHARED);
3627
3628 for (i = 0; i < btvacinfo->num_vacuums; i++)
3629 {
3630 BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3631
3632 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3633 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3634 {
3635 result = vac->cycleid;
3636 break;
3637 }
3638 }
3639
3640 LWLockRelease(BtreeVacuumLock);
3641 return result;
3642}
3643
3644/*
3645 * _bt_start_vacuum --- assign a cycle ID to a just-starting VACUUM operation
3646 *
3647 * Note: the caller must guarantee that it will eventually call
3648 * _bt_end_vacuum, else we'll permanently leak an array slot. To ensure
3649 * that this happens even in elog(FATAL) scenarios, the appropriate coding
3650 * is not just a PG_TRY, but
3651 * PG_ENSURE_ERROR_CLEANUP(_bt_end_vacuum_callback, PointerGetDatum(rel))
3652 */
3655{
3656 BTCycleId result;
3657 int i;
3658 BTOneVacInfo *vac;
3659
3660 LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3661
3662 /*
3663 * Assign the next cycle ID, being careful to avoid zero as well as the
3664 * reserved high values.
3665 */
3666 result = ++(btvacinfo->cycle_ctr);
3667 if (result == 0 || result > MAX_BT_CYCLE_ID)
3668 result = btvacinfo->cycle_ctr = 1;
3669
3670 /* Let's just make sure there's no entry already for this index */
3671 for (i = 0; i < btvacinfo->num_vacuums; i++)
3672 {
3673 vac = &btvacinfo->vacuums[i];
3674 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3675 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3676 {
3677 /*
3678 * Unlike most places in the backend, we have to explicitly
3679 * release our LWLock before throwing an error. This is because
3680 * we expect _bt_end_vacuum() to be called before transaction
3681 * abort cleanup can run to release LWLocks.
3682 */
3683 LWLockRelease(BtreeVacuumLock);
3684 elog(ERROR, "multiple active vacuums for index \"%s\"",
3686 }
3687 }
3688
3689 /* OK, add an entry */
3691 {
3692 LWLockRelease(BtreeVacuumLock);
3693 elog(ERROR, "out of btvacinfo slots");
3694 }
3696 vac->relid = rel->rd_lockInfo.lockRelId;
3697 vac->cycleid = result;
3699
3700 LWLockRelease(BtreeVacuumLock);
3701 return result;
3702}
3703
3704/*
3705 * _bt_end_vacuum --- mark a btree VACUUM operation as done
3706 *
3707 * Note: this is deliberately coded not to complain if no entry is found;
3708 * this allows the caller to put PG_TRY around the start_vacuum operation.
3709 */
3710void
3712{
3713 int i;
3714
3715 LWLockAcquire(BtreeVacuumLock, LW_EXCLUSIVE);
3716
3717 /* Find the array entry */
3718 for (i = 0; i < btvacinfo->num_vacuums; i++)
3719 {
3720 BTOneVacInfo *vac = &btvacinfo->vacuums[i];
3721
3722 if (vac->relid.relId == rel->rd_lockInfo.lockRelId.relId &&
3723 vac->relid.dbId == rel->rd_lockInfo.lockRelId.dbId)
3724 {
3725 /* Remove it by shifting down the last entry */
3726 *vac = btvacinfo->vacuums[btvacinfo->num_vacuums - 1];
3728 break;
3729 }
3730 }
3731
3732 LWLockRelease(BtreeVacuumLock);
3733}
3734
3735/*
3736 * _bt_end_vacuum wrapped as an on_shmem_exit callback function
3737 */
3738void
3740{
3742}
3743
3744/*
3745 * BTreeShmemSize --- report amount of shared memory space needed
3746 */
3747Size
3749{
3750 Size size;
3751
3752 size = offsetof(BTVacInfo, vacuums);
3753 size = add_size(size, mul_size(MaxBackends, sizeof(BTOneVacInfo)));
3754 return size;
3755}
3756
3757/*
3758 * BTreeShmemInit --- initialize this module's shared memory
3759 */
3760void
3762{
3763 bool found;
3764
3765 btvacinfo = (BTVacInfo *) ShmemInitStruct("BTree Vacuum State",
3767 &found);
3768
3769 if (!IsUnderPostmaster)
3770 {
3771 /* Initialize shared memory area */
3772 Assert(!found);
3773
3774 /*
3775 * It doesn't really matter what the cycle counter starts at, but
3776 * having it always start the same doesn't seem good. Seed with
3777 * low-order bits of time() instead.
3778 */
3779 btvacinfo->cycle_ctr = (BTCycleId) time(NULL);
3780
3783 }
3784 else
3785 Assert(found);
3786}
3787
3788bytea *
3789btoptions(Datum reloptions, bool validate)
3790{
3791 static const relopt_parse_elt tab[] = {
3792 {"fillfactor", RELOPT_TYPE_INT, offsetof(BTOptions, fillfactor)},
3793 {"vacuum_cleanup_index_scale_factor", RELOPT_TYPE_REAL,
3794 offsetof(BTOptions, vacuum_cleanup_index_scale_factor)},
3795 {"deduplicate_items", RELOPT_TYPE_BOOL,
3796 offsetof(BTOptions, deduplicate_items)}
3797 };
3798
3799 return (bytea *) build_reloptions(reloptions, validate,
3801 sizeof(BTOptions),
3802 tab, lengthof(tab));
3803}
3804
3805/*
3806 * btproperty() -- Check boolean properties of indexes.
3807 *
3808 * This is optional, but handling AMPROP_RETURNABLE here saves opening the rel
3809 * to call btcanreturn.
3810 */
3811bool
3812btproperty(Oid index_oid, int attno,
3813 IndexAMProperty prop, const char *propname,
3814 bool *res, bool *isnull)
3815{
3816 switch (prop)
3817 {
3818 case AMPROP_RETURNABLE:
3819 /* answer only for columns, not AM or whole index */
3820 if (attno == 0)
3821 return false;
3822 /* otherwise, btree can always return data */
3823 *res = true;
3824 return true;
3825
3826 default:
3827 return false; /* punt to generic code */
3828 }
3829}
3830
3831/*
3832 * btbuildphasename() -- Return name of index build phase.
3833 */
3834char *
3836{
3837 switch (phasenum)
3838 {
3840 return "initializing";
3842 return "scanning table";
3844 return "sorting live tuples";
3846 return "sorting dead tuples";
3848 return "loading tuples in tree";
3849 default:
3850 return NULL;
3851 }
3852}
3853
3854/*
3855 * _bt_truncate() -- create tuple without unneeded suffix attributes.
3856 *
3857 * Returns truncated pivot index tuple allocated in caller's memory context,
3858 * with key attributes copied from caller's firstright argument. If rel is
3859 * an INCLUDE index, non-key attributes will definitely be truncated away,
3860 * since they're not part of the key space. More aggressive suffix
3861 * truncation can take place when it's clear that the returned tuple does not
3862 * need one or more suffix key attributes. We only need to keep firstright
3863 * attributes up to and including the first non-lastleft-equal attribute.
3864 * Caller's insertion scankey is used to compare the tuples; the scankey's
3865 * argument values are not considered here.
3866 *
3867 * Note that returned tuple's t_tid offset will hold the number of attributes
3868 * present, so the original item pointer offset is not represented. Caller
3869 * should only change truncated tuple's downlink. Note also that truncated
3870 * key attributes are treated as containing "minus infinity" values by
3871 * _bt_compare().
3872 *
3873 * In the worst case (when a heap TID must be appended to distinguish lastleft
3874 * from firstright), the size of the returned tuple is the size of firstright
3875 * plus the size of an additional MAXALIGN()'d item pointer. This guarantee
3876 * is important, since callers need to stay under the 1/3 of a page
3877 * restriction on tuple size. If this routine is ever taught to truncate
3878 * within an attribute/datum, it will need to avoid returning an enlarged
3879 * tuple to caller when truncation + TOAST compression ends up enlarging the
3880 * final datum.
3881 */
3883_bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright,
3884 BTScanInsert itup_key)
3885{
3886 TupleDesc itupdesc = RelationGetDescr(rel);
3888 int keepnatts;
3889 IndexTuple pivot;
3890 IndexTuple tidpivot;
3891 ItemPointer pivotheaptid;
3892 Size newsize;
3893
3894 /*
3895 * We should only ever truncate non-pivot tuples from leaf pages. It's
3896 * never okay to truncate when splitting an internal page.
3897 */
3898 Assert(!BTreeTupleIsPivot(lastleft) && !BTreeTupleIsPivot(firstright));
3899
3900 /* Determine how many attributes must be kept in truncated tuple */
3901 keepnatts = _bt_keep_natts(rel, lastleft, firstright, itup_key);
3902
3903#ifdef DEBUG_NO_TRUNCATE
3904 /* Force truncation to be ineffective for testing purposes */
3905 keepnatts = nkeyatts + 1;
3906#endif
3907
3908 pivot = index_truncate_tuple(itupdesc, firstright,
3909 Min(keepnatts, nkeyatts));
3910
3911 if (BTreeTupleIsPosting(pivot))
3912 {
3913 /*
3914 * index_truncate_tuple() just returns a straight copy of firstright
3915 * when it has no attributes to truncate. When that happens, we may
3916 * need to truncate away a posting list here instead.
3917 */
3918 Assert(keepnatts == nkeyatts || keepnatts == nkeyatts + 1);
3920 pivot->t_info &= ~INDEX_SIZE_MASK;
3921 pivot->t_info |= MAXALIGN(BTreeTupleGetPostingOffset(firstright));
3922 }
3923
3924 /*
3925 * If there is a distinguishing key attribute within pivot tuple, we're
3926 * done
3927 */
3928 if (keepnatts <= nkeyatts)
3929 {
3930 BTreeTupleSetNAtts(pivot, keepnatts, false);
3931 return pivot;
3932 }
3933
3934 /*
3935 * We have to store a heap TID in the new pivot tuple, since no non-TID
3936 * key attribute value in firstright distinguishes the right side of the
3937 * split from the left side. nbtree conceptualizes this case as an
3938 * inability to truncate away any key attributes, since heap TID is
3939 * treated as just another key attribute (despite lacking a pg_attribute
3940 * entry).
3941 *
3942 * Use enlarged space that holds a copy of pivot. We need the extra space
3943 * to store a heap TID at the end (using the special pivot tuple
3944 * representation). Note that the original pivot already has firstright's
3945 * possible posting list/non-key attribute values removed at this point.
3946 */
3947 newsize = MAXALIGN(IndexTupleSize(pivot)) + MAXALIGN(sizeof(ItemPointerData));
3948 tidpivot = palloc0(newsize);
3949 memcpy(tidpivot, pivot, MAXALIGN(IndexTupleSize(pivot)));
3950 /* Cannot leak memory here */
3951 pfree(pivot);
3952
3953 /*
3954 * Store all of firstright's key attribute values plus a tiebreaker heap
3955 * TID value in enlarged pivot tuple
3956 */
3957 tidpivot->t_info &= ~INDEX_SIZE_MASK;
3958 tidpivot->t_info |= newsize;
3959 BTreeTupleSetNAtts(tidpivot, nkeyatts, true);
3960 pivotheaptid = BTreeTupleGetHeapTID(tidpivot);
3961
3962 /*
3963 * Lehman & Yao use lastleft as the leaf high key in all cases, but don't
3964 * consider suffix truncation. It seems like a good idea to follow that
3965 * example in cases where no truncation takes place -- use lastleft's heap
3966 * TID. (This is also the closest value to negative infinity that's
3967 * legally usable.)
3968 */
3969 ItemPointerCopy(BTreeTupleGetMaxHeapTID(lastleft), pivotheaptid);
3970
3971 /*
3972 * We're done. Assert() that heap TID invariants hold before returning.
3973 *
3974 * Lehman and Yao require that the downlink to the right page, which is to
3975 * be inserted into the parent page in the second phase of a page split be
3976 * a strict lower bound on items on the right page, and a non-strict upper
3977 * bound for items on the left page. Assert that heap TIDs follow these
3978 * invariants, since a heap TID value is apparently needed as a
3979 * tiebreaker.
3980 */
3981#ifndef DEBUG_NO_TRUNCATE
3983 BTreeTupleGetHeapTID(firstright)) < 0);
3984 Assert(ItemPointerCompare(pivotheaptid,
3985 BTreeTupleGetHeapTID(lastleft)) >= 0);
3986 Assert(ItemPointerCompare(pivotheaptid,
3987 BTreeTupleGetHeapTID(firstright)) < 0);
3988#else
3989
3990 /*
3991 * Those invariants aren't guaranteed to hold for lastleft + firstright
3992 * heap TID attribute values when they're considered here only because
3993 * DEBUG_NO_TRUNCATE is defined (a heap TID is probably not actually
3994 * needed as a tiebreaker). DEBUG_NO_TRUNCATE must therefore use a heap
3995 * TID value that always works as a strict lower bound for items to the
3996 * right. In particular, it must avoid using firstright's leading key
3997 * attribute values along with lastleft's heap TID value when lastleft's
3998 * TID happens to be greater than firstright's TID.
3999 */
4000 ItemPointerCopy(BTreeTupleGetHeapTID(firstright), pivotheaptid);
4001
4002 /*
4003 * Pivot heap TID should never be fully equal to firstright. Note that
4004 * the pivot heap TID will still end up equal to lastleft's heap TID when
4005 * that's the only usable value.
4006 */
4007 ItemPointerSetOffsetNumber(pivotheaptid,
4009 Assert(ItemPointerCompare(pivotheaptid,
4010 BTreeTupleGetHeapTID(firstright)) < 0);
4011#endif
4012
4013 return tidpivot;
4014}
4015
4016/*
4017 * _bt_keep_natts - how many key attributes to keep when truncating.
4018 *
4019 * Caller provides two tuples that enclose a split point. Caller's insertion
4020 * scankey is used to compare the tuples; the scankey's argument values are
4021 * not considered here.
4022 *
4023 * This can return a number of attributes that is one greater than the
4024 * number of key attributes for the index relation. This indicates that the
4025 * caller must use a heap TID as a unique-ifier in new pivot tuple.
4026 */
4027static int
4029 BTScanInsert itup_key)
4030{
4031 int nkeyatts = IndexRelationGetNumberOfKeyAttributes(rel);
4032 TupleDesc itupdesc = RelationGetDescr(rel);
4033 int keepnatts;
4034 ScanKey scankey;
4035
4036 /*
4037 * _bt_compare() treats truncated key attributes as having the value minus
4038 * infinity, which would break searches within !heapkeyspace indexes. We
4039 * must still truncate away non-key attribute values, though.
4040 */
4041 if (!itup_key->heapkeyspace)
4042 return nkeyatts;
4043
4044 scankey = itup_key->scankeys;
4045 keepnatts = 1;
4046 for (int attnum = 1; attnum <= nkeyatts; attnum++, scankey++)
4047 {
4048 Datum datum1,
4049 datum2;
4050 bool isNull1,
4051 isNull2;
4052
4053 datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
4054 datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
4055
4056 if (isNull1 != isNull2)
4057 break;
4058
4059 if (!isNull1 &&
4061 scankey->sk_collation,
4062 datum1,
4063 datum2)) != 0)
4064 break;
4065
4066 keepnatts++;
4067 }
4068
4069 /*
4070 * Assert that _bt_keep_natts_fast() agrees with us in passing. This is
4071 * expected in an allequalimage index.
4072 */
4073 Assert(!itup_key->allequalimage ||
4074 keepnatts == _bt_keep_natts_fast(rel, lastleft, firstright));
4075
4076 return keepnatts;
4077}
4078
4079/*
4080 * _bt_keep_natts_fast - fast bitwise variant of _bt_keep_natts.
4081 *
4082 * This is exported so that a candidate split point can have its effect on
4083 * suffix truncation inexpensively evaluated ahead of time when finding a
4084 * split location. A naive bitwise approach to datum comparisons is used to
4085 * save cycles.
4086 *
4087 * The approach taken here usually provides the same answer as _bt_keep_natts
4088 * will (for the same pair of tuples from a heapkeyspace index), since the
4089 * majority of btree opclasses can never indicate that two datums are equal
4090 * unless they're bitwise equal after detoasting. When an index only has
4091 * "equal image" columns, routine is guaranteed to give the same result as
4092 * _bt_keep_natts would.
4093 *
4094 * Callers can rely on the fact that attributes considered equal here are
4095 * definitely also equal according to _bt_keep_natts, even when the index uses
4096 * an opclass or collation that is not "allequalimage"/deduplication-safe.
4097 * This weaker guarantee is good enough for nbtsplitloc.c caller, since false
4098 * negatives generally only have the effect of making leaf page splits use a
4099 * more balanced split point.
4100 */
4101int
4103{
4104 TupleDesc itupdesc = RelationGetDescr(rel);
4106 int keepnatts;
4107
4108 keepnatts = 1;
4109 for (int attnum = 1; attnum <= keysz; attnum++)
4110 {
4111 Datum datum1,
4112 datum2;
4113 bool isNull1,
4114 isNull2;
4115 CompactAttribute *att;
4116
4117 datum1 = index_getattr(lastleft, attnum, itupdesc, &isNull1);
4118 datum2 = index_getattr(firstright, attnum, itupdesc, &isNull2);
4119 att = TupleDescCompactAttr(itupdesc, attnum - 1);
4120
4121 if (isNull1 != isNull2)
4122 break;
4123
4124 if (!isNull1 &&
4125 !datum_image_eq(datum1, datum2, att->attbyval, att->attlen))
4126 break;
4127
4128 keepnatts++;
4129 }
4130
4131 return keepnatts;
4132}
4133
4134/*
4135 * _bt_check_natts() -- Verify tuple has expected number of attributes.
4136 *
4137 * Returns value indicating if the expected number of attributes were found
4138 * for a particular offset on page. This can be used as a general purpose
4139 * sanity check.
4140 *
4141 * Testing a tuple directly with BTreeTupleGetNAtts() should generally be
4142 * preferred to calling here. That's usually more convenient, and is always
4143 * more explicit. Call here instead when offnum's tuple may be a negative
4144 * infinity tuple that uses the pre-v11 on-disk representation, or when a low
4145 * context check is appropriate. This routine is as strict as possible about
4146 * what is expected on each version of btree.
4147 */
4148bool
4149_bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
4150{
4153 BTPageOpaque opaque = BTPageGetOpaque(page);
4154 IndexTuple itup;
4155 int tupnatts;
4156
4157 /*
4158 * We cannot reliably test a deleted or half-dead page, since they have
4159 * dummy high keys
4160 */
4161 if (P_IGNORE(opaque))
4162 return true;
4163
4164 Assert(offnum >= FirstOffsetNumber &&
4165 offnum <= PageGetMaxOffsetNumber(page));
4166
4167 itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
4168 tupnatts = BTreeTupleGetNAtts(itup, rel);
4169
4170 /* !heapkeyspace indexes do not support deduplication */
4171 if (!heapkeyspace && BTreeTupleIsPosting(itup))
4172 return false;
4173
4174 /* Posting list tuples should never have "pivot heap TID" bit set */
4175 if (BTreeTupleIsPosting(itup) &&
4178 return false;
4179
4180 /* INCLUDE indexes do not support deduplication */
4181 if (natts != nkeyatts && BTreeTupleIsPosting(itup))
4182 return false;
4183
4184 if (P_ISLEAF(opaque))
4185 {
4186 if (offnum >= P_FIRSTDATAKEY(opaque))
4187 {
4188 /*
4189 * Non-pivot tuple should never be explicitly marked as a pivot
4190 * tuple
4191 */
4192 if (BTreeTupleIsPivot(itup))
4193 return false;
4194
4195 /*
4196 * Leaf tuples that are not the page high key (non-pivot tuples)
4197 * should never be truncated. (Note that tupnatts must have been
4198 * inferred, even with a posting list tuple, because only pivot
4199 * tuples store tupnatts directly.)
4200 */
4201 return tupnatts == natts;
4202 }
4203 else
4204 {
4205 /*
4206 * Rightmost page doesn't contain a page high key, so tuple was
4207 * checked above as ordinary leaf tuple
4208 */
4209 Assert(!P_RIGHTMOST(opaque));
4210
4211 /*
4212 * !heapkeyspace high key tuple contains only key attributes. Note
4213 * that tupnatts will only have been explicitly represented in
4214 * !heapkeyspace indexes that happen to have non-key attributes.
4215 */
4216 if (!heapkeyspace)
4217 return tupnatts == nkeyatts;
4218
4219 /* Use generic heapkeyspace pivot tuple handling */
4220 }
4221 }
4222 else /* !P_ISLEAF(opaque) */
4223 {
4224 if (offnum == P_FIRSTDATAKEY(opaque))
4225 {
4226 /*
4227 * The first tuple on any internal page (possibly the first after
4228 * its high key) is its negative infinity tuple. Negative
4229 * infinity tuples are always truncated to zero attributes. They
4230 * are a particular kind of pivot tuple.
4231 */
4232 if (heapkeyspace)
4233 return tupnatts == 0;
4234
4235 /*
4236 * The number of attributes won't be explicitly represented if the
4237 * negative infinity tuple was generated during a page split that
4238 * occurred with a version of Postgres before v11. There must be
4239 * a problem when there is an explicit representation that is
4240 * non-zero, or when there is no explicit representation and the
4241 * tuple is evidently not a pre-pg_upgrade tuple.
4242 *
4243 * Prior to v11, downlinks always had P_HIKEY as their offset.
4244 * Accept that as an alternative indication of a valid
4245 * !heapkeyspace negative infinity tuple.
4246 */
4247 return tupnatts == 0 ||
4249 }
4250 else
4251 {
4252 /*
4253 * !heapkeyspace downlink tuple with separator key contains only
4254 * key attributes. Note that tupnatts will only have been
4255 * explicitly represented in !heapkeyspace indexes that happen to
4256 * have non-key attributes.
4257 */
4258 if (!heapkeyspace)
4259 return tupnatts == nkeyatts;
4260
4261 /* Use generic heapkeyspace pivot tuple handling */
4262 }
4263 }
4264
4265 /* Handle heapkeyspace pivot tuples (excluding minus infinity items) */
4266 Assert(heapkeyspace);
4267
4268 /*
4269 * Explicit representation of the number of attributes is mandatory with
4270 * heapkeyspace index pivot tuples, regardless of whether or not there are
4271 * non-key attributes.
4272 */
4273 if (!BTreeTupleIsPivot(itup))
4274 return false;
4275
4276 /* Pivot tuple should not use posting list representation (redundant) */
4277 if (BTreeTupleIsPosting(itup))
4278 return false;
4279
4280 /*
4281 * Heap TID is a tiebreaker key attribute, so it cannot be untruncated
4282 * when any other key attribute is truncated
4283 */
4284 if (BTreeTupleGetHeapTID(itup) != NULL && tupnatts != nkeyatts)
4285 return false;
4286
4287 /*
4288 * Pivot tuple must have at least one untruncated key attribute (minus
4289 * infinity pivot tuples are the only exception). Pivot tuples can never
4290 * represent that there is a value present for a key attribute that
4291 * exceeds pg_index.indnkeyatts for the index.
4292 */
4293 return tupnatts > 0 && tupnatts <= nkeyatts;
4294}
4295
4296/*
4297 *
4298 * _bt_check_third_page() -- check whether tuple fits on a btree page at all.
4299 *
4300 * We actually need to be able to fit three items on every page, so restrict
4301 * any one item to 1/3 the per-page available space. Note that itemsz should
4302 * not include the ItemId overhead.
4303 *
4304 * It might be useful to apply TOAST methods rather than throw an error here.
4305 * Using out of line storage would break assumptions made by suffix truncation
4306 * and by contrib/amcheck, though.
4307 */
4308void
4309_bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
4310 Page page, IndexTuple newtup)
4311{
4312 Size itemsz;
4313 BTPageOpaque opaque;
4314
4315 itemsz = MAXALIGN(IndexTupleSize(newtup));
4316
4317 /* Double check item size against limit */
4318 if (itemsz <= BTMaxItemSize)
4319 return;
4320
4321 /*
4322 * Tuple is probably too large to fit on page, but it's possible that the
4323 * index uses version 2 or version 3, or that page is an internal page, in
4324 * which case a slightly higher limit applies.
4325 */
4326 if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
4327 return;
4328
4329 /*
4330 * Internal page insertions cannot fail here, because that would mean that
4331 * an earlier leaf level insertion that should have failed didn't
4332 */
4333 opaque = BTPageGetOpaque(page);
4334 if (!P_ISLEAF(opaque))
4335 elog(ERROR, "cannot insert oversized tuple of size %zu on internal page of index \"%s\"",
4336 itemsz, RelationGetRelationName(rel));
4337
4338 ereport(ERROR,
4339 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
4340 errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
4341 itemsz,
4342 needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
4343 needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
4345 errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
4349 errhint("Values larger than 1/3 of a buffer page cannot be indexed.\n"
4350 "Consider a function index of an MD5 hash of the value, "
4351 "or use full text indexing."),
4353}
4354
4355/*
4356 * Are all attributes in rel "equality is image equality" attributes?
4357 *
4358 * We use each attribute's BTEQUALIMAGE_PROC opclass procedure. If any
4359 * opclass either lacks a BTEQUALIMAGE_PROC procedure or returns false, we
4360 * return false; otherwise we return true.
4361 *
4362 * Returned boolean value is stored in index metapage during index builds.
4363 * Deduplication can only be used when we return true.
4364 */
4365bool
4366_bt_allequalimage(Relation rel, bool debugmessage)
4367{
4368 bool allequalimage = true;
4369
4370 /* INCLUDE indexes can never support deduplication */
4373 return false;
4374
4375 for (int i = 0; i < IndexRelationGetNumberOfKeyAttributes(rel); i++)
4376 {
4377 Oid opfamily = rel->rd_opfamily[i];
4378 Oid opcintype = rel->rd_opcintype[i];
4379 Oid collation = rel->rd_indcollation[i];
4380 Oid equalimageproc;
4381
4382 equalimageproc = get_opfamily_proc(opfamily, opcintype, opcintype,
4384
4385 /*
4386 * If there is no BTEQUALIMAGE_PROC then deduplication is assumed to
4387 * be unsafe. Otherwise, actually call proc and see what it says.
4388 */
4389 if (!OidIsValid(equalimageproc) ||
4390 !DatumGetBool(OidFunctionCall1Coll(equalimageproc, collation,
4391 ObjectIdGetDatum(opcintype))))
4392 {
4393 allequalimage = false;
4394 break;
4395 }
4396 }
4397
4398 if (debugmessage)
4399 {
4400 if (allequalimage)
4401 elog(DEBUG1, "index \"%s\" can safely use deduplication",
4403 else
4404 elog(DEBUG1, "index \"%s\" cannot use deduplication",
4406 }
4407
4408 return allequalimage;
4409}
IndexAMProperty
Definition: amapi.h:39
@ AMPROP_RETURNABLE
Definition: amapi.h:47
int16 AttrNumber
Definition: attnum.h:21
#define InvalidAttrNumber
Definition: attnum.h:23
static bool validate(Port *port, const char *auth)
Definition: auth-oauth.c:638
int Buffer
Definition: buf.h:23
XLogRecPtr BufferGetLSNAtomic(Buffer buffer)
Definition: bufmgr.c:4499
void MarkBufferDirtyHint(Buffer buffer, bool buffer_std)
Definition: bufmgr.c:5430
static Page BufferGetPage(Buffer buffer)
Definition: bufmgr.h:425
static void * PageGetItem(const PageData *page, const ItemIdData *itemId)
Definition: bufpage.h:353
static ItemId PageGetItemId(Page page, OffsetNumber offsetNumber)
Definition: bufpage.h:243
PageData * Page
Definition: bufpage.h:81
static OffsetNumber PageGetMaxOffsetNumber(const PageData *page)
Definition: bufpage.h:371
#define Min(x, y)
Definition: c.h:1008
#define INVERT_COMPARE_RESULT(var)
Definition: c.h:1110
#define likely(x)
Definition: c.h:406
#define MAXALIGN(LEN)
Definition: c.h:815
#define PG_USED_FOR_ASSERTS_ONLY
Definition: c.h:228
#define Max(x, y)
Definition: c.h:1002
int64_t int64
Definition: c.h:540
#define FLEXIBLE_ARRAY_MEMBER
Definition: c.h:475
int16_t int16
Definition: c.h:538
int32_t int32
Definition: c.h:539
#define unlikely(x)
Definition: c.h:407
#define lengthof(array)
Definition: c.h:792
#define OidIsValid(objectId)
Definition: c.h:779
size_t Size
Definition: c.h:615
Datum datumCopy(Datum value, bool typByVal, int typLen)
Definition: datum.c:132
bool datum_image_eq(Datum value1, Datum value2, bool typByVal, int typLen)
Definition: datum.c:266
struct cursor * cur
Definition: ecpg.c:29
int errdetail(const char *fmt,...)
Definition: elog.c:1216
int errhint(const char *fmt,...)
Definition: elog.c:1330
int errcode(int sqlerrcode)
Definition: elog.c:863
int errmsg(const char *fmt,...)
Definition: elog.c:1080
#define DEBUG1
Definition: elog.h:30
#define ERROR
Definition: elog.h:39
#define elog(elevel,...)
Definition: elog.h:226
#define ereport(elevel,...)
Definition: elog.h:150
Datum FunctionCall2Coll(FmgrInfo *flinfo, Oid collation, Datum arg1, Datum arg2)
Definition: fmgr.c:1150
Datum OidFunctionCall1Coll(Oid functionId, Oid collation, Datum arg1)
Definition: fmgr.c:1412
bool IsUnderPostmaster
Definition: globals.c:120
int MaxBackends
Definition: globals.c:146
Assert(PointerIsAligned(start, uint64))
for(;;)
FmgrInfo * index_getprocinfo(Relation irel, AttrNumber attnum, uint16 procnum)
Definition: indexam.c:917
IndexTuple index_truncate_tuple(TupleDesc sourceDescriptor, IndexTuple source, int leavenatts)
Definition: indextuple.c:576
int j
Definition: isn.c:78
int i
Definition: isn.c:77
if(TABLE==NULL||TABLE_index==NULL)
Definition: isn.c:81
#define ItemIdMarkDead(itemId)
Definition: itemid.h:179
#define ItemIdIsDead(itemId)
Definition: itemid.h:113
int32 ItemPointerCompare(const ItemPointerData *arg1, const ItemPointerData *arg2)
Definition: itemptr.c:51
bool ItemPointerEquals(const ItemPointerData *pointer1, const ItemPointerData *pointer2)
Definition: itemptr.c:35
static void ItemPointerSetOffsetNumber(ItemPointerData *pointer, OffsetNumber offsetNumber)
Definition: itemptr.h:158
static OffsetNumber ItemPointerGetOffsetNumber(const ItemPointerData *pointer)
Definition: itemptr.h:124
static OffsetNumber ItemPointerGetOffsetNumberNoCheck(const ItemPointerData *pointer)
Definition: itemptr.h:114
static BlockNumber ItemPointerGetBlockNumber(const ItemPointerData *pointer)
Definition: itemptr.h:103
static void ItemPointerCopy(const ItemPointerData *fromPointer, ItemPointerData *toPointer)
Definition: itemptr.h:172
IndexTupleData * IndexTuple
Definition: itup.h:53
static Datum index_getattr(IndexTuple tup, int attnum, TupleDesc tupleDesc, bool *isnull)
Definition: itup.h:131
static Size IndexTupleSize(const IndexTupleData *itup)
Definition: itup.h:71
#define MaxIndexTuplesPerPage
Definition: itup.h:181
Oid get_opfamily_proc(Oid opfamily, Oid lefttype, Oid righttype, int16 procnum)
Definition: lsyscache.c:889
bool LWLockAcquire(LWLock *lock, LWLockMode mode)
Definition: lwlock.c:1174
void LWLockRelease(LWLock *lock)
Definition: lwlock.c:1894
@ LW_SHARED
Definition: lwlock.h:113
@ LW_EXCLUSIVE
Definition: lwlock.h:112
void pfree(void *pointer)
Definition: mcxt.c:1594
void * palloc0(Size size)
Definition: mcxt.c:1395
void * palloc(Size size)
Definition: mcxt.c:1365
void _bt_relbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1023
void _bt_metaversion(Relation rel, bool *heapkeyspace, bool *allequalimage)
Definition: nbtpage.c:739
Buffer _bt_getbuf(Relation rel, BlockNumber blkno, int access)
Definition: nbtpage.c:845
void _bt_unlockbuf(Relation rel, Buffer buf)
Definition: nbtpage.c:1070
void _bt_lockbuf(Relation rel, Buffer buf, int access)
Definition: nbtpage.c:1039
void _bt_parallel_primscan_schedule(IndexScanDesc scan, BlockNumber curr_page)
Definition: nbtree.c:1031
void _bt_parallel_done(IndexScanDesc scan)
Definition: nbtree.c:981
#define BTScanPosIsPinned(scanpos)
Definition: nbtree.h:1004
#define BT_PIVOT_HEAP_TID_ATTR
Definition: nbtree.h:466
static uint16 BTreeTupleGetNPosting(IndexTuple posting)
Definition: nbtree.h:519
static bool BTreeTupleIsPivot(IndexTuple itup)
Definition: nbtree.h:481
#define P_ISLEAF(opaque)
Definition: nbtree.h:221
#define SK_BT_SKIP
Definition: nbtree.h:1137
#define P_HIKEY
Definition: nbtree.h:368
#define PROGRESS_BTREE_PHASE_PERFORMSORT_2
Definition: nbtree.h:1179
#define PROGRESS_BTREE_PHASE_LEAF_LOAD
Definition: nbtree.h:1180
#define BTP_HAS_GARBAGE
Definition: nbtree.h:83
#define BTEQUALIMAGE_PROC
Definition: nbtree.h:720
#define BTORDER_PROC
Definition: nbtree.h:717
#define P_LEFTMOST(opaque)
Definition: nbtree.h:219
#define BTPageGetOpaque(page)
Definition: nbtree.h:74
#define SK_BT_PRIOR
Definition: nbtree.h:1143
#define SK_BT_NEXT
Definition: nbtree.h:1142
#define BTREE_VERSION
Definition: nbtree.h:151
#define BTScanPosIsValid(scanpos)
Definition: nbtree.h:1021
#define PROGRESS_BTREE_PHASE_INDEXBUILD_TABLESCAN
Definition: nbtree.h:1177
#define SK_BT_INDOPTION_SHIFT
Definition: nbtree.h:1146
#define P_FIRSTDATAKEY(opaque)
Definition: nbtree.h:370
#define MAX_BT_CYCLE_ID
Definition: nbtree.h:94
#define PROGRESS_BTREE_PHASE_PERFORMSORT_1
Definition: nbtree.h:1178
uint16 BTCycleId
Definition: nbtree.h:30
static uint32 BTreeTupleGetPostingOffset(IndexTuple posting)
Definition: nbtree.h:530
#define SK_BT_REQBKWD
Definition: nbtree.h:1136
#define P_RIGHTMOST(opaque)
Definition: nbtree.h:220
#define SK_BT_NULLS_FIRST
Definition: nbtree.h:1148
static ItemPointer BTreeTupleGetPostingN(IndexTuple posting, int n)
Definition: nbtree.h:545
#define SK_BT_MAXVAL
Definition: nbtree.h:1141
#define BT_READ
Definition: nbtree.h:730
#define SK_BT_REQFWD
Definition: nbtree.h:1135
#define SK_BT_DESC
Definition: nbtree.h:1147
#define P_IGNORE(opaque)
Definition: nbtree.h:226
static ItemPointer BTreeTupleGetMaxHeapTID(IndexTuple itup)
Definition: nbtree.h:665
static bool BTreeTupleIsPosting(IndexTuple itup)
Definition: nbtree.h:493
#define BTREE_NOVAC_VERSION
Definition: nbtree.h:153
#define BTMaxItemSizeNoHeapTid
Definition: nbtree.h:170
static ItemPointer BTreeTupleGetHeapTID(IndexTuple itup)
Definition: nbtree.h:639
static void BTreeTupleSetNAtts(IndexTuple itup, uint16 nkeyatts, bool heaptid)
Definition: nbtree.h:596
#define BTMaxItemSize
Definition: nbtree.h:165
#define BTreeTupleGetNAtts(itup, rel)
Definition: nbtree.h:578
#define SK_BT_MINVAL
Definition: nbtree.h:1140
BTScanOpaqueData * BTScanOpaque
Definition: nbtree.h:1097
void _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace, Page page, IndexTuple newtup)
Definition: nbtutils.c:4309
bool _bt_scanbehind_checkkeys(IndexScanDesc scan, ScanDirection dir, IndexTuple finaltup)
Definition: nbtutils.c:2278
void _bt_end_vacuum(Relation rel)
Definition: nbtutils.c:3711
static void _bt_binsrch_skiparray_skey(bool cur_elem_trig, ScanDirection dir, Datum tupdatum, bool tupnull, BTArrayKeyInfo *array, ScanKey cur, int32 *set_elem_result)
Definition: nbtutils.c:445
static void _bt_array_set_low_or_high(Relation rel, ScanKey skey, BTArrayKeyInfo *array, bool low_not_high)
Definition: nbtutils.c:641
bool _bt_checkkeys(IndexScanDesc scan, BTReadPageState *pstate, bool arrayKeys, IndexTuple tuple, int tupnatts)
Definition: nbtutils.c:2150
static void _bt_skiparray_set_element(Relation rel, ScanKey skey, BTArrayKeyInfo *array, int32 set_elem_result, Datum tupdatum, bool tupnull)
Definition: nbtutils.c:552
void _bt_end_vacuum_callback(int code, Datum arg)
Definition: nbtutils.c:3739
#define NSKIPADVANCES_THRESHOLD
Definition: nbtutils.c:32
int _bt_binsrch_array_skey(FmgrInfo *orderproc, bool cur_elem_trig, ScanDirection dir, Datum tupdatum, bool tupnull, BTArrayKeyInfo *array, ScanKey cur, int32 *set_elem_result)
Definition: nbtutils.c:289
void _bt_freestack(BTStack stack)
Definition: nbtutils.c:189
void BTreeShmemInit(void)
Definition: nbtutils.c:3761
struct BTVacInfo BTVacInfo
BTCycleId _bt_vacuum_cycleid(Relation rel)
Definition: nbtutils.c:3620
BTScanInsert _bt_mkscankey(Relation rel, IndexTuple itup)
Definition: nbtutils.c:97
void _bt_killitems(IndexScanDesc scan)
Definition: nbtutils.c:3401
static bool _bt_advance_array_keys(IndexScanDesc scan, BTReadPageState *pstate, IndexTuple tuple, int tupnatts, TupleDesc tupdesc, int sktrig, bool sktrig_required)
Definition: nbtutils.c:1393
bool _bt_start_prim_scan(IndexScanDesc scan, ScanDirection dir)
Definition: nbtutils.c:1274
bool _bt_check_natts(Relation rel, bool heapkeyspace, Page page, OffsetNumber offnum)
Definition: nbtutils.c:4149
IndexTuple _bt_truncate(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itup_key)
Definition: nbtutils.c:3883
#define LOOK_AHEAD_REQUIRED_RECHECKS
Definition: nbtutils.c:30
static bool _bt_rowcompare_cmpresult(ScanKey subkey, int cmpresult)
Definition: nbtutils.c:3007
static bool _bt_oppodir_checkkeys(IndexScanDesc scan, ScanDirection dir, IndexTuple finaltup)
Definition: nbtutils.c:2333
int _bt_keep_natts_fast(Relation rel, IndexTuple lastleft, IndexTuple firstright)
Definition: nbtutils.c:4102
#define LOOK_AHEAD_DEFAULT_DISTANCE
Definition: nbtutils.c:31
static bool _bt_check_rowcompare(ScanKey header, IndexTuple tuple, int tupnatts, TupleDesc tupdesc, ScanDirection dir, bool forcenonrequired, bool *continuescan)
Definition: nbtutils.c:3090
static BTVacInfo * btvacinfo
Definition: nbtutils.c:3607
static bool _bt_check_compare(IndexScanDesc scan, ScanDirection dir, IndexTuple tuple, int tupnatts, TupleDesc tupdesc, bool advancenonrequired, bool forcenonrequired, bool *continuescan, int *ikey)
Definition: nbtutils.c:2785
void _bt_set_startikey(IndexScanDesc scan, BTReadPageState *pstate)
Definition: nbtutils.c:2391
static bool _bt_array_decrement(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
Definition: nbtutils.c:705
char * btbuildphasename(int64 phasenum)
Definition: nbtutils.c:3835
static bool _bt_tuple_before_array_skeys(IndexScanDesc scan, ScanDirection dir, IndexTuple tuple, TupleDesc tupdesc, int tupnatts, bool readpagetup, int sktrig, bool *scanBehind)
Definition: nbtutils.c:1082
bytea * btoptions(Datum reloptions, bool validate)
Definition: nbtutils.c:3789
Size BTreeShmemSize(void)
Definition: nbtutils.c:3748
static int _bt_keep_natts(Relation rel, IndexTuple lastleft, IndexTuple firstright, BTScanInsert itup_key)
Definition: nbtutils.c:4028
bool btproperty(Oid index_oid, int attno, IndexAMProperty prop, const char *propname, bool *res, bool *isnull)
Definition: nbtutils.c:3812
static void _bt_skiparray_set_isnull(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
Definition: nbtutils.c:589
bool _bt_allequalimage(Relation rel, bool debugmessage)
Definition: nbtutils.c:4366
static bool _bt_advance_array_keys_increment(IndexScanDesc scan, ScanDirection dir, bool *skip_array_set)
Definition: nbtutils.c:977
static void _bt_checkkeys_look_ahead(IndexScanDesc scan, BTReadPageState *pstate, int tupnatts, TupleDesc tupdesc)
Definition: nbtutils.c:3301
static int32 _bt_compare_array_skey(FmgrInfo *orderproc, Datum tupdatum, bool tupnull, Datum arrdatum, ScanKey cur)
Definition: nbtutils.c:218
struct BTOneVacInfo BTOneVacInfo
void _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
Definition: nbtutils.c:613
BTCycleId _bt_start_vacuum(Relation rel)
Definition: nbtutils.c:3654
static bool _bt_array_increment(Relation rel, ScanKey skey, BTArrayKeyInfo *array)
Definition: nbtutils.c:838
#define OffsetNumberNext(offsetNumber)
Definition: off.h:52
uint16 OffsetNumber
Definition: off.h:24
#define FirstOffsetNumber
Definition: off.h:27
#define OffsetNumberPrev(offsetNumber)
Definition: off.h:54
int16 attnum
Definition: pg_attribute.h:74
void * arg
static char * buf
Definition: pg_test_fsync.c:72
static int fillfactor
Definition: pgbench.c:188
static bool DatumGetBool(Datum X)
Definition: postgres.h:100
static Datum ObjectIdGetDatum(Oid X)
Definition: postgres.h:262
uint64_t Datum
Definition: postgres.h:70
static Pointer DatumGetPointer(Datum X)
Definition: postgres.h:322
static int32 DatumGetInt32(Datum X)
Definition: postgres.h:212
#define InvalidOid
Definition: postgres_ext.h:37
unsigned int Oid
Definition: postgres_ext.h:32
#define PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE
Definition: progress.h:109
#define RelationGetDescr(relation)
Definition: rel.h:541
#define RelationGetRelationName(relation)
Definition: rel.h:549
#define RelationNeedsWAL(relation)
Definition: rel.h:638
#define IndexRelationGetNumberOfAttributes(relation)
Definition: rel.h:527
#define IndexRelationGetNumberOfKeyAttributes(relation)
Definition: rel.h:534
int errtableconstraint(Relation rel, const char *conname)
Definition: relcache.c:6103
void * build_reloptions(Datum reloptions, bool validate, relopt_kind kind, Size relopt_struct_size, const relopt_parse_elt *relopt_elems, int num_relopt_elems)
Definition: reloptions.c:1954
@ RELOPT_KIND_BTREE
Definition: reloptions.h:44
@ RELOPT_TYPE_INT
Definition: reloptions.h:32
@ RELOPT_TYPE_BOOL
Definition: reloptions.h:31
@ RELOPT_TYPE_REAL
Definition: reloptions.h:33
void ScanKeyEntryInitializeWithInfo(ScanKey entry, int flags, AttrNumber attributeNumber, StrategyNumber strategy, Oid subtype, Oid collation, FmgrInfo *finfo, Datum argument)
Definition: scankey.c:101
#define ScanDirectionIsForward(direction)
Definition: sdir.h:64
#define ScanDirectionIsBackward(direction)
Definition: sdir.h:50
#define ScanDirectionIsNoMovement(direction)
Definition: sdir.h:57
ScanDirection
Definition: sdir.h:25
@ NoMovementScanDirection
Definition: sdir.h:27
@ ForwardScanDirection
Definition: sdir.h:28
Size add_size(Size s1, Size s2)
Definition: shmem.c:494
Size mul_size(Size s1, Size s2)
Definition: shmem.c:511
void * ShmemInitStruct(const char *name, Size size, bool *foundPtr)
Definition: shmem.c:388
#define SK_ROW_HEADER
Definition: skey.h:117
#define SK_SEARCHARRAY
Definition: skey.h:120
#define SK_ROW_MEMBER
Definition: skey.h:118
#define SK_SEARCHNOTNULL
Definition: skey.h:122
#define SK_SEARCHNULL
Definition: skey.h:121
#define SK_ROW_END
Definition: skey.h:119
ScanKeyData * ScanKey
Definition: skey.h:75
#define SK_ISNULL
Definition: skey.h:115
#define BTGreaterStrategyNumber
Definition: stratnum.h:33
#define InvalidStrategy
Definition: stratnum.h:24
#define BTLessStrategyNumber
Definition: stratnum.h:29
#define BTEqualStrategyNumber
Definition: stratnum.h:31
#define BTLessEqualStrategyNumber
Definition: stratnum.h:30
#define BTGreaterEqualStrategyNumber
Definition: stratnum.h:32
bool attbyval
Definition: nbtree.h:1046
Datum * elem_values
Definition: nbtree.h:1041
ScanKey high_compare
Definition: nbtree.h:1050
ScanKey low_compare
Definition: nbtree.h:1049
SkipSupport sksup
Definition: nbtree.h:1048
int16 attlen
Definition: nbtree.h:1045
bool null_elem
Definition: nbtree.h:1047
BTCycleId cycleid
Definition: nbtutils.c:3596
LockRelId relid
Definition: nbtutils.c:3595
bool forcenonrequired
Definition: nbtree.h:1110
bool continuescan
Definition: nbtree.h:1118
bool firstpage
Definition: nbtree.h:1109
IndexTuple finaltup
Definition: nbtree.h:1107
OffsetNumber minoff
Definition: nbtree.h:1105
int16 targetdistance
Definition: nbtree.h:1125
int16 nskipadvances
Definition: nbtree.h:1126
OffsetNumber offnum
Definition: nbtree.h:1114
int16 rechecks
Definition: nbtree.h:1124
OffsetNumber skip
Definition: nbtree.h:1117
OffsetNumber maxoff
Definition: nbtree.h:1106
bool allequalimage
Definition: nbtree.h:798
bool heapkeyspace
Definition: nbtree.h:797
ScanKeyData scankeys[INDEX_MAX_KEYS]
Definition: nbtree.h:804
bool needPrimScan
Definition: nbtree.h:1063
BTArrayKeyInfo * arrayKeys
Definition: nbtree.h:1066
FmgrInfo * orderProcs
Definition: nbtree.h:1067
BTScanPosData currPos
Definition: nbtree.h:1093
int * killedItems
Definition: nbtree.h:1071
bool oppositeDirCheck
Definition: nbtree.h:1065
ScanKey keyData
Definition: nbtree.h:1058
Buffer buf
Definition: nbtree.h:964
BlockNumber currPage
Definition: nbtree.h:967
int firstItem
Definition: nbtree.h:995
BTScanPosItem items[MaxTIDsPerBTreePage]
Definition: nbtree.h:999
ScanDirection dir
Definition: nbtree.h:973
XLogRecPtr lsn
Definition: nbtree.h:970
ItemPointerData heapTid
Definition: nbtree.h:957
OffsetNumber indexOffset
Definition: nbtree.h:958
struct BTStackData * bts_parent
Definition: nbtree.h:747
BTCycleId cycle_ctr
Definition: nbtutils.c:3601
int num_vacuums
Definition: nbtutils.c:3602
BTOneVacInfo vacuums[FLEXIBLE_ARRAY_MEMBER]
Definition: nbtutils.c:3604
int max_vacuums
Definition: nbtutils.c:3603
int16 attlen
Definition: tupdesc.h:71
Definition: fmgr.h:57
struct ParallelIndexScanDescData * parallel_scan
Definition: relscan.h:191
Relation indexRelation
Definition: relscan.h:137
Relation heapRelation
Definition: relscan.h:136
ItemPointerData t_tid
Definition: itup.h:37
unsigned short t_info
Definition: itup.h:49
LockRelId lockRelId
Definition: rel.h:46
Definition: rel.h:39
Oid relId
Definition: rel.h:40
Oid dbId
Definition: rel.h:41
LockInfoData rd_lockInfo
Definition: rel.h:114
Oid * rd_opcintype
Definition: rel.h:208
int16 * rd_indoption
Definition: rel.h:211
Form_pg_index rd_index
Definition: rel.h:192
Oid * rd_opfamily
Definition: rel.h:207
Oid * rd_indcollation
Definition: rel.h:217
int sk_flags
Definition: skey.h:66
Datum sk_argument
Definition: skey.h:72
FmgrInfo sk_func
Definition: skey.h:71
Oid sk_collation
Definition: skey.h:70
StrategyNumber sk_strategy
Definition: skey.h:68
AttrNumber sk_attno
Definition: skey.h:67
SkipSupportIncDec decrement
Definition: skipsupport.h:91
SkipSupportIncDec increment
Definition: skipsupport.h:92
Definition: c.h:697
static CompactAttribute * TupleDescCompactAttr(TupleDesc tupdesc, int i)
Definition: tupdesc.h:175
uint64 XLogRecPtr
Definition: xlogdefs.h:21