/*-------------------------------------------------------------------------
 *
 * hash.c
 *	  Implementation of Margo Seltzer's Hashing package for postgres.
 *
 * Portions Copyright (c) 1996-2008, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $PostgreSQL: pgsql/src/backend/access/hash/hash.c,v 1.97 2007/11/15 21:14:32 momjian Exp $
 *
 * NOTES
 *	  This file contains only the public interface routines.
 *
 *-------------------------------------------------------------------------
 */

#include "postgres.h"

#include "access/genam.h"
#include "access/hash.h"
#include "catalog/index.h"
#include "commands/vacuum.h"

/* Working state for hashbuild and its callback */
typedef struct
{
    double      indtuples;      /* # of index tuples inserted so far */
} HashBuildState;

static void hashbuildCallback(Relation index,
                  HeapTuple htup,
                  Datum *values,
                  bool *isnull,
                  bool tupleIsAlive,
                  void *state);

/*
 *	hashbuild() -- build a new hash index.
 */
Datum
hashbuild(PG_FUNCTION_ARGS)
{
    Relation    heap = (Relation) PG_GETARG_POINTER(0);
    Relation    index = (Relation) PG_GETARG_POINTER(1);
    IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
    IndexBuildResult *result;
    double      reltuples;
    HashBuildState buildstate;

    /*
     * We expect to be called exactly once for any index relation. If that's
     * not the case, we have big trouble.
     */
    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /* initialize the hash index metadata page */
    _hash_metapinit(index);

    /* build the index */
    buildstate.indtuples = 0;

    /* do the heap scan */
    reltuples = IndexBuildHeapScan(heap, index, indexInfo,
                                   hashbuildCallback, (void *) &buildstate);

    /*
     * Return statistics
     */
    result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));

    result->heap_tuples = reltuples;
    result->index_tuples = buildstate.indtuples;

    PG_RETURN_POINTER(result);
}
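
/*
 * Example (illustrative only): hashbuild is reached through the hash AM's
 * "ambuild" entry point when a hash index is created, e.g.
 *
 *		CREATE INDEX test_hash_idx ON test USING hash (id);
 *
 * The table and index names above are hypothetical.
 */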

/*
 * Per-tuple callback from IndexBuildHeapScan
 */
static void
hashbuildCallback(Relation index,
                  HeapTuple htup,
                  Datum *values,
                  bool *isnull,
                  bool tupleIsAlive,
                  void *state)
{
    HashBuildState *buildstate = (HashBuildState *) state;
    IndexTuple  itup;

    /* form an index tuple and point it at the heap tuple */
    itup = index_form_tuple(RelationGetDescr(index), values, isnull);
    itup->t_tid = htup->t_self;

    /* Hash indexes don't index nulls, see notes in hashinsert */
    if (IndexTupleHasNulls(itup))
    {
        pfree(itup);
        return;
    }

    _hash_doinsert(index, itup);

    buildstate->indtuples += 1;

    pfree(itup);
}

/*
 *	hashinsert() -- insert an index tuple into a hash table.
 *
 *	Hash on the index tuple's key, find the appropriate location
 *	for the new tuple, and put it there.
 */
Datum
hashinsert(PG_FUNCTION_ARGS)
{
    Relation    rel = (Relation) PG_GETARG_POINTER(0);
    Datum      *values = (Datum *) PG_GETARG_POINTER(1);
    bool       *isnull = (bool *) PG_GETARG_POINTER(2);
    ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);

#ifdef NOT_USED
    Relation    heapRel = (Relation) PG_GETARG_POINTER(4);
    bool        checkUnique = PG_GETARG_BOOL(5);
#endif
    IndexTuple  itup;

    /* generate an index tuple */
    itup = index_form_tuple(RelationGetDescr(rel), values, isnull);
    itup->t_tid = *ht_ctid;

    /*
     * If the single index key is null, we don't insert it into the index.
     * Hash tables support scans on '='. SQL's three-valued logic says that
     * A = B yields null if either A or B is null, so no qualification used
     * in an index scan could ever return true on a null attribute. It also
     * means that indices can't be used by ISNULL or NOTNULL scans, but
     * that's an artifact of the strategy map architecture chosen in 1986,
     * not of the way nulls are handled here.
     */
    if (IndexTupleHasNulls(itup))
    {
        pfree(itup);
        PG_RETURN_BOOL(false);
    }

    _hash_doinsert(rel, itup);

    pfree(itup);

    PG_RETURN_BOOL(true);
}
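
/*
 * Example (illustrative only): because hash indexes support only the '='
 * strategy, a scan such as
 *
 *		SELECT * FROM test WHERE id = 42;
 *
 * can use the index, while a range predicate (e.g. id < 42) cannot. Table
 * and column names are hypothetical.
 */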

/*
 *	hashgettuple() -- Get the next tuple in the scan.
 */
Datum
hashgettuple(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;
    Page        page;
    OffsetNumber offnum;
    bool        res;

    /*
     * We hold pin but not lock on current buffer while outside the hash AM.
     * Reacquire the read lock here.
     */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);

    /*
     * If we've already initialized this scan, we can just advance it in the
     * appropriate direction. If we haven't done so yet, we call a routine to
     * get the first item in the scan.
     */
    if (ItemPointerIsValid(&(so->hashso_curpos)))
    {
        /*
         * Check to see if we should kill the previously-fetched tuple.
         */
        if (scan->kill_prior_tuple)
        {
            /*
             * Yes, so mark it by setting the LP_DEAD state in the item flags.
             */
            offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
            page = BufferGetPage(so->hashso_curbuf);
            ItemIdMarkDead(PageGetItemId(page, offnum));

            /*
             * Since this can be redone later if needed, it's treated the same
             * as a commit-hint-bit status update for heap tuples: we mark the
             * buffer dirty but don't make a WAL log entry.
             */
            SetBufferCommitInfoNeedsSave(so->hashso_curbuf);
        }

        /*
         * Now continue the scan.
         */
        res = _hash_next(scan, dir);
    }
    else
        res = _hash_first(scan, dir);

    /*
     * Skip killed tuples if asked to.
     */
    if (scan->ignore_killed_tuples)
    {
        while (res)
        {
            offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
            page = BufferGetPage(so->hashso_curbuf);
            if (!ItemIdIsDead(PageGetItemId(page, offnum)))
                break;
            res = _hash_next(scan, dir);
        }
    }

    /* Release read lock on current buffer, but keep it pinned */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);

    PG_RETURN_BOOL(res);
}
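
/*
 * Illustrative caller pattern (a sketch, not actual executor code): the
 * executor drives this entry point through index_getnext(), roughly
 *
 *		scan = index_beginscan(heap, index, snapshot, nkeys, keys);
 *		while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
 *			... process tuple ...
 *		index_endscan(scan);
 *
 * index_getnext() in turn calls amgettuple (this function) once per tuple,
 * which is why the pin-without-lock protocol above matters between calls.
 */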

/*
 *	hashgetmulti() -- get multiple tuples at once
 *
 * This is a somewhat generic implementation: it avoids lock reacquisition
 * overhead, but there's no smarts about picking especially good stopping
 * points such as index page boundaries.
 */
Datum
hashgetmulti(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ItemPointer tids = (ItemPointer) PG_GETARG_POINTER(1);
    int32       max_tids = PG_GETARG_INT32(2);
    int32      *returned_tids = (int32 *) PG_GETARG_POINTER(3);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;
    bool        res = true;
    int32       ntids = 0;

    /*
     * We hold pin but not lock on current buffer while outside the hash AM.
     * Reacquire the read lock here.
     */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);

    while (ntids < max_tids)
    {
        /*
         * Start scan, or advance to next tuple.
         */
        if (ItemPointerIsValid(&(so->hashso_curpos)))
            res = _hash_next(scan, ForwardScanDirection);
        else
            res = _hash_first(scan, ForwardScanDirection);

        /*
         * Skip killed tuples if asked to.
         */
        if (scan->ignore_killed_tuples)
        {
            while (res)
            {
                Page        page;
                OffsetNumber offnum;

                offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
                page = BufferGetPage(so->hashso_curbuf);
                if (!ItemIdIsDead(PageGetItemId(page, offnum)))
                    break;
                res = _hash_next(scan, ForwardScanDirection);
            }
        }

        if (!res)
            break;
        /* Save tuple ID, and continue scanning */
        tids[ntids] = scan->xs_ctup.t_self;
        ntids++;
    }

    /* Release read lock on current buffer, but keep it pinned */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);

    *returned_tids = ntids;
    PG_RETURN_BOOL(res);
}
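
/*
 * Usage note (a sketch, assuming the amgetmulti API of this era): bitmap
 * index scans call this entry point repeatedly, adding each batch of TIDs
 * to a bitmap until it returns false, e.g.
 *
 *		while (index_getmulti(scan, tids, MAX_TIDS, &ntids))
 *			... add tids[0..ntids-1] to the bitmap ...
 *		... add the final batch of ntids TIDs as well ...
 *
 * MAX_TIDS here is a hypothetical batch size chosen by the caller.
 */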

/*
 *	hashbeginscan() -- start a scan on a hash index
 */
Datum
hashbeginscan(PG_FUNCTION_ARGS)
{
    Relation    rel = (Relation) PG_GETARG_POINTER(0);
    int         keysz = PG_GETARG_INT32(1);
    ScanKey     scankey = (ScanKey) PG_GETARG_POINTER(2);
    IndexScanDesc scan;
    HashScanOpaque so;

    scan = RelationGetIndexScan(rel, keysz, scankey);
    so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
    so->hashso_bucket_valid = false;
    so->hashso_bucket_blkno = 0;
    so->hashso_curbuf = so->hashso_mrkbuf = InvalidBuffer;
    /* set positions invalid (this will cause _hash_first call) */
    ItemPointerSetInvalid(&(so->hashso_curpos));
    ItemPointerSetInvalid(&(so->hashso_mrkpos));

    scan->opaque = so;

    /* register scan in case we change pages it's using */
    _hash_regscan(scan);

    PG_RETURN_POINTER(scan);
}
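
/*
 * Scan lifecycle note: callers invoke the AM entry points in the order
 * beginscan, then rescan (possibly repeatedly, to supply or replace scan
 * keys), then gettuple/getmulti, then endscan. In this era of the code,
 * RelationGetIndexScan itself triggers a rescan before hashbeginscan has
 * installed its opaque state, which is why hashrescan below must cope with
 * scan->opaque still being NULL.
 */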

/*
 *	hashrescan() -- rescan an index relation
 */
Datum
hashrescan(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    ScanKey     scankey = (ScanKey) PG_GETARG_POINTER(1);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;

    /* if we are called from beginscan, so is still NULL */
    if (so)
    {
        /* release any pins we still hold */
        if (BufferIsValid(so->hashso_curbuf))
            _hash_dropbuf(rel, so->hashso_curbuf);
        so->hashso_curbuf = InvalidBuffer;

        if (BufferIsValid(so->hashso_mrkbuf))
            _hash_dropbuf(rel, so->hashso_mrkbuf);
        so->hashso_mrkbuf = InvalidBuffer;

        /* release lock on bucket, too */
        if (so->hashso_bucket_blkno)
            _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);
        so->hashso_bucket_blkno = 0;

        /* set positions invalid (this will cause _hash_first call) */
        ItemPointerSetInvalid(&(so->hashso_curpos));
        ItemPointerSetInvalid(&(so->hashso_mrkpos));
    }

    /* Update scan key, if a new one is given */
    if (scankey && scan->numberOfKeys > 0)
    {
        memmove(scan->keyData,
                scankey,
                scan->numberOfKeys * sizeof(ScanKeyData));
        if (so)
            so->hashso_bucket_valid = false;
    }

    PG_RETURN_VOID();
}

/*
 *	hashendscan() -- close down a scan
 */
Datum
hashendscan(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;

    /* don't need scan registered anymore */
    _hash_dropscan(scan);

    /* release any pins we still hold */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_dropbuf(rel, so->hashso_curbuf);
    so->hashso_curbuf = InvalidBuffer;

    if (BufferIsValid(so->hashso_mrkbuf))
        _hash_dropbuf(rel, so->hashso_mrkbuf);
    so->hashso_mrkbuf = InvalidBuffer;

    /* release lock on bucket, too */
    if (so->hashso_bucket_blkno)
        _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);
    so->hashso_bucket_blkno = 0;

    pfree(so);
    scan->opaque = NULL;

    PG_RETURN_VOID();
}

/*
 *	hashmarkpos() -- save current scan position
 */
Datum
hashmarkpos(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;

    /* release pin on old marked data, if any */
    if (BufferIsValid(so->hashso_mrkbuf))
        _hash_dropbuf(rel, so->hashso_mrkbuf);
    so->hashso_mrkbuf = InvalidBuffer;
    ItemPointerSetInvalid(&(so->hashso_mrkpos));

    /* bump pin count on current buffer and copy to marked buffer */
    if (ItemPointerIsValid(&(so->hashso_curpos)))
    {
        IncrBufferRefCount(so->hashso_curbuf);
        so->hashso_mrkbuf = so->hashso_curbuf;
        so->hashso_mrkpos = so->hashso_curpos;
    }

    PG_RETURN_VOID();
}

/*
 *	hashrestrpos() -- restore scan to last saved position
 */
Datum
hashrestrpos(PG_FUNCTION_ARGS)
{
    IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
    HashScanOpaque so = (HashScanOpaque) scan->opaque;
    Relation    rel = scan->indexRelation;

    /* release pin on current data, if any */
    if (BufferIsValid(so->hashso_curbuf))
        _hash_dropbuf(rel, so->hashso_curbuf);
    so->hashso_curbuf = InvalidBuffer;
    ItemPointerSetInvalid(&(so->hashso_curpos));

    /* bump pin count on marked buffer and copy to current buffer */
    if (ItemPointerIsValid(&(so->hashso_mrkpos)))
    {
        IncrBufferRefCount(so->hashso_mrkbuf);
        so->hashso_curbuf = so->hashso_mrkbuf;
        so->hashso_curpos = so->hashso_mrkpos;
    }

    PG_RETURN_VOID();
}
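
/*
 * Usage note: hashmarkpos/hashrestrpos implement the ammarkpos/amrestrpos
 * entry points, which let a caller remember one scan position and later back
 * the scan up to it. The pin-count bump via IncrBufferRefCount ensures the
 * marked page stays in shared buffers until the mark is dropped or replaced.
 */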

/*
 * Bulk deletion of all index entries pointing to a set of heap tuples.
 * The set of target tuples is specified via a callback routine that tells
 * whether any given heap tuple (identified by ItemPointer) is being deleted.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
Datum
hashbulkdelete(PG_FUNCTION_ARGS)
{
    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
    IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
    void       *callback_state = (void *) PG_GETARG_POINTER(3);
    Relation    rel = info->index;
    double      tuples_removed;
    double      num_index_tuples;
    double      orig_ntuples;
    Bucket      orig_maxbucket;
    Bucket      cur_maxbucket;
    Bucket      cur_bucket;
    Buffer      metabuf;
    HashMetaPage metap;
    HashMetaPageData local_metapage;

    tuples_removed = 0;
    num_index_tuples = 0;

    /*
     * Read the metapage to fetch original bucket and tuple counts. Also, we
     * keep a copy of the last-seen metapage so that we can use its
     * hashm_spares[] values to compute bucket page addresses. This is a bit
     * hokey but perfectly safe, since the interesting entries in the spares
     * array cannot change under us; and it beats rereading the metapage for
     * each bucket.
     */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
    metap = (HashMetaPage) BufferGetPage(metabuf);
    orig_maxbucket = metap->hashm_maxbucket;
    orig_ntuples = metap->hashm_ntuples;
    memcpy(&local_metapage, metap, sizeof(local_metapage));
    _hash_relbuf(rel, metabuf);

    /* Scan the buckets that we know exist */
    cur_bucket = 0;
    cur_maxbucket = orig_maxbucket;

loop_top:
    while (cur_bucket <= cur_maxbucket)
    {
        BlockNumber bucket_blkno;
        BlockNumber blkno;
        bool        bucket_dirty = false;

        /* Get address of bucket's start page */
        bucket_blkno = BUCKET_TO_BLKNO(&local_metapage, cur_bucket);

        /* Exclusive-lock the bucket so we can shrink it */
        _hash_getlock(rel, bucket_blkno, HASH_EXCLUSIVE);

        /* Shouldn't have any active scans locally, either */
        if (_hash_has_active_scan(rel, cur_bucket))
            elog(ERROR, "hash index has active scan during VACUUM");

        /* Scan each page in bucket */
        blkno = bucket_blkno;
        while (BlockNumberIsValid(blkno))
        {
            Buffer      buf;
            Page        page;
            HashPageOpaque opaque;
            OffsetNumber offno;
            OffsetNumber maxoffno;
            bool        page_dirty = false;

            vacuum_delay_point();

            buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
                                             LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
                                             info->strategy);
            page = BufferGetPage(buf);
            opaque = (HashPageOpaque) PageGetSpecialPointer(page);
            Assert(opaque->hasho_bucket == cur_bucket);

            /* Scan each tuple in page */
            offno = FirstOffsetNumber;
            maxoffno = PageGetMaxOffsetNumber(page);
            while (offno <= maxoffno)
            {
                IndexTuple  itup;
                ItemPointer htup;

                itup = (IndexTuple) PageGetItem(page,
                                                PageGetItemId(page, offno));
                htup = &(itup->t_tid);
                if (callback(htup, callback_state))
                {
                    /* delete the item from the page */
                    PageIndexTupleDelete(page, offno);
                    bucket_dirty = page_dirty = true;

                    /* don't increment offno, instead decrement maxoffno */
                    maxoffno = OffsetNumberPrev(maxoffno);

                    tuples_removed += 1;
                }
                else
                {
                    offno = OffsetNumberNext(offno);

                    num_index_tuples += 1;
                }
            }

            /*
             * Write page if needed, advance to next page.
             */
            blkno = opaque->hasho_nextblkno;

            if (page_dirty)
                _hash_wrtbuf(rel, buf);
            else
                _hash_relbuf(rel, buf);
        }

        /* If we deleted anything, try to compact free space */
        if (bucket_dirty)
            _hash_squeezebucket(rel, cur_bucket, bucket_blkno,
                                info->strategy);

        /* Release bucket lock */
        _hash_droplock(rel, bucket_blkno, HASH_EXCLUSIVE);

        /* Advance to next bucket */
        cur_bucket++;
    }

    /* Write-lock metapage and check for split since we started */
    metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
    metap = (HashMetaPage) BufferGetPage(metabuf);

    if (cur_maxbucket != metap->hashm_maxbucket)
    {
        /* There's been a split, so process the additional bucket(s) */
        cur_maxbucket = metap->hashm_maxbucket;
        memcpy(&local_metapage, metap, sizeof(local_metapage));
        _hash_relbuf(rel, metabuf);
        goto loop_top;
    }

    /* Okay, we're really done.  Update tuple count in metapage. */

    if (orig_maxbucket == metap->hashm_maxbucket &&
        orig_ntuples == metap->hashm_ntuples)
    {
        /*
         * No one has split or inserted anything since start of scan, so
         * believe our count as gospel.
         */
        metap->hashm_ntuples = num_index_tuples;
    }
    else
    {
        /*
         * Otherwise, our count is untrustworthy since we may have
         * double-scanned tuples in split buckets. Proceed by dead-reckoning.
         */
        if (metap->hashm_ntuples > tuples_removed)
            metap->hashm_ntuples -= tuples_removed;
        else
            metap->hashm_ntuples = 0;
        num_index_tuples = metap->hashm_ntuples;
    }

    _hash_wrtbuf(rel, metabuf);

    /* return statistics */
    if (stats == NULL)
        stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
    stats->num_index_tuples = num_index_tuples;
    stats->tuples_removed += tuples_removed;
    /* hashvacuumcleanup will fill in num_pages */

    PG_RETURN_POINTER(stats);
}
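
/*
 * Illustrative trigger (table name hypothetical): hashbulkdelete is reached
 * through the ambulkdelete entry point when VACUUM finds dead tuples to
 * remove from the index, e.g.
 *
 *		VACUUM test;
 *
 * hashvacuumcleanup below then runs once at the end of the VACUUM.
 */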

/*
 * Post-VACUUM cleanup.
 *
 * Result: a palloc'd struct containing statistical info for VACUUM displays.
 */
Datum
hashvacuumcleanup(PG_FUNCTION_ARGS)
{
    IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
    IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
    Relation    rel = info->index;
    BlockNumber num_pages;

    /* If hashbulkdelete wasn't called, return NULL signifying no change */
    if (stats == NULL)
        PG_RETURN_POINTER(NULL);

    /* update statistics */
    num_pages = RelationGetNumberOfBlocks(rel);
    stats->num_pages = num_pages;

    PG_RETURN_POINTER(stats);
}


void
hash_redo(XLogRecPtr lsn, XLogRecord *record)
{
    elog(PANIC, "hash_redo: unimplemented");
}
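
/*
 * Note: hash indexes are not WAL-logged in this version of PostgreSQL, so no
 * hash WAL records should ever exist for hash_redo to replay (hence the
 * PANIC above), and hash_desc below has nothing to print.
 */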

void
hash_desc(StringInfo buf, uint8 xl_info, char *rec)
{
}
|