@@ -400,7 +400,8 @@ static int hash_choose_num_partitions(uint64 input_groups,
                                        double hashentrysize,
                                        int used_bits,
                                        int *log2_npartittions);
-static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash);
+static AggStatePerGroup lookup_hash_entry(AggState *aggstate, uint32 hash,
+                                          bool *in_hash_table);
 static void lookup_hash_entries(AggState *aggstate);
 static TupleTableSlot *agg_retrieve_direct(AggState *aggstate);
 static void agg_fill_hash_table(AggState *aggstate);
@@ -1968,10 +1969,11 @@ hash_choose_num_partitions(uint64 input_groups, double hashentrysize,
  *
  * If in "spill mode", then only find existing hashtable entries; don't create
  * new ones. If a tuple's group is not already present in the hash table for
- * the current grouping set, return NULL and the caller will spill it to disk.
+ * the current grouping set, assign *in_hash_table=false and the caller will
+ * spill it to disk.
  */
 static AggStatePerGroup
-lookup_hash_entry(AggState *aggstate, uint32 hash)
+lookup_hash_entry(AggState *aggstate, uint32 hash, bool *in_hash_table)
 {
     AggStatePerHash perhash = &aggstate->perhash[aggstate->current_set];
     TupleTableSlot *hashslot = perhash->hashslot;
@@ -1987,7 +1989,12 @@ lookup_hash_entry(AggState *aggstate, uint32 hash)
                                      hash);

     if (entry == NULL)
+    {
+        *in_hash_table = false;
         return NULL;
+    }
+    else
+        *in_hash_table = true;

     if (isnew)
     {
@@ -1997,9 +2004,14 @@ lookup_hash_entry(AggState *aggstate, uint32 hash)
         aggstate->hash_ngroups_current++;
         hash_agg_check_limits(aggstate);

+        /* no need to allocate or initialize per-group state */
+        if (aggstate->numtrans == 0)
+            return NULL;
+
         pergroup = (AggStatePerGroup)
             MemoryContextAlloc(perhash->hashtable->tablecxt,
                                sizeof(AggStatePerGroupData) * aggstate->numtrans);
+
         entry->additional = pergroup;

         /*
@@ -2046,14 +2058,15 @@ lookup_hash_entries(AggState *aggstate)
     {
         AggStatePerHash perhash = &aggstate->perhash[setno];
         uint32      hash;
+        bool        in_hash_table;

         select_current_set(aggstate, setno, true);
         prepare_hash_slot(aggstate);
         hash = TupleHashTableHash(perhash->hashtable, perhash->hashslot);
-        pergroup[setno] = lookup_hash_entry(aggstate, hash);
+        pergroup[setno] = lookup_hash_entry(aggstate, hash, &in_hash_table);

         /* check to see if we need to spill the tuple for this grouping set */
-        if (pergroup[setno] == NULL)
+        if (!in_hash_table)
         {
             HashAggSpill *spill = &aggstate->hash_spills[setno];
             TupleTableSlot *slot = aggstate->tmpcontext->ecxt_outertuple;
@@ -2587,6 +2600,7 @@ agg_refill_hash_table(AggState *aggstate)
         TupleTableSlot *slot = aggstate->hash_spill_slot;
         MinimalTuple tuple;
         uint32      hash;
+        bool        in_hash_table;

         CHECK_FOR_INTERRUPTS();

@@ -2598,9 +2612,10 @@ agg_refill_hash_table(AggState *aggstate)
         aggstate->tmpcontext->ecxt_outertuple = slot;

         prepare_hash_slot(aggstate);
-        aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(aggstate, hash);
+        aggstate->hash_pergroup[batch->setno] = lookup_hash_entry(
+            aggstate, hash, &in_hash_table);

-        if (aggstate->hash_pergroup[batch->setno] != NULL)
+        if (in_hash_table)
         {
             /* Advance the aggregates (or combine functions) */
             advance_aggregates(aggstate);
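
The contract change worth noting in this patch is that a NULL return from lookup_hash_entry() no longer means "group not in the hash table": when aggstate->numtrans == 0 the entry exists but no per-group transition state is allocated, so callers must consult *in_hash_table rather than the return value to decide whether to spill the tuple. The standalone C sketch below illustrates that calling convention with simplified, hypothetical types (DemoTable, demo_lookup); it is not PostgreSQL code, just a minimal model of the out-parameter pattern the diff introduces.

/*
 * Standalone sketch: a lookup that can return NULL for two distinct reasons,
 * disambiguated by a boolean out-parameter (mirroring lookup_hash_entry()).
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NKEYS 4

typedef struct DemoTable
{
    const char *keys[NKEYS];        /* existing group keys; NULL = empty slot */
    int         numtrans;           /* transition states per group (may be 0) */
    int         pergroup[NKEYS];    /* stand-in for per-group state */
} DemoTable;

/*
 * Return per-group state for 'key', or NULL.  *in_hash_table reports whether
 * the group is present, because NULL is also returned for groups that exist
 * but need no per-group state (numtrans == 0).  Missing groups are not
 * created here, as in the executor's "spill mode".
 */
static int *
demo_lookup(DemoTable *t, const char *key, bool *in_hash_table)
{
    for (int i = 0; i < NKEYS; i++)
    {
        if (t->keys[i] && strcmp(t->keys[i], key) == 0)
        {
            *in_hash_table = true;
            return t->numtrans == 0 ? NULL : &t->pergroup[i];
        }
    }

    /* not found; the caller would spill the tuple to disk */
    *in_hash_table = false;
    return NULL;
}

int
main(void)
{
    DemoTable   t = {{"a", "b"}, .numtrans = 0};
    bool        in_hash_table;
    int        *pergroup = demo_lookup(&t, "c", &in_hash_table);

    if (!in_hash_table)
        printf("group not in table: spill tuple to disk\n");
    else if (pergroup == NULL)
        printf("group present, but no transition state to advance\n");

    return 0;
}

Under this model, testing only the return value (as the old callers did with pergroup[setno] == NULL) would wrongly spill tuples for groups that are present but carry no transition state, which is why the patch switches both call sites to check in_hash_table.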