Use MinimalTuple for tuple queues.
This representation saves 8 bytes per tuple compared to HeapTuple, and
avoids the need to allocate, copy and free on the receiving side.

Gather can emit the returned MinimalTuple directly, but GatherMerge now
needs to make an explicit copy because it buffers multiple tuples at a
time.  That should be no worse than before.

Reviewed-by: Soumyadeep Chakraborty <soumyadeep2007@gmail.com>
Discussion: https://postgr.es/m/CA%2BhUKG%2B8T_ggoUTAE-U%3DA%2BOcPc4%3DB0nPPHcSfffuQhvXXjML6w%40mail.gmail.com
macdice committed Jul 17, 2020
1 parent d2bddc2 commit cdc7169
Showing 5 changed files with 51 additions and 47 deletions.
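
The gist of the change, before the per-file diffs: the sender flattens each slot into a MinimalTuple and writes it straight into the shared queue, and TupleQueueReaderNext() now hands back a pointer into that queue instead of a freshly palloc'd HeapTuple. Below is a minimal sketch of the two receiver patterns the diffs implement, assuming the post-commit API; consume_immediately and buffer_for_later are illustrative names, not functions in the tree.

#include "postgres.h"

#include "access/htup_details.h"	/* MinimalTuple, heap_copy_minimal_tuple() */
#include "executor/tqueue.h"		/* TupleQueueReaderNext() */
#include "executor/tuptable.h"		/* ExecStoreMinimalTuple() */

/*
 * Gather's pattern: the tuple is consumed before the next read, so it can be
 * stored in the slot without copying; shouldFree is false because the memory
 * belongs to the queue.
 */
static void
consume_immediately(TupleQueueReader *reader, TupleTableSlot *slot)
{
	bool		done;
	MinimalTuple tup = TupleQueueReaderNext(reader, true, &done);

	if (tup != NULL)
		ExecStoreMinimalTuple(tup, slot, false);
}

/*
 * GatherMerge's pattern: the tuple must outlive later reads (it sits in a
 * merge buffer), so take a private copy, to be pfree'd when the buffer is
 * drained.
 */
static MinimalTuple
buffer_for_later(TupleQueueReader *reader)
{
	bool		done;
	MinimalTuple tup = TupleQueueReaderNext(reader, true, &done);

	return tup ? heap_copy_minimal_tuple(tup) : NULL;
}
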
16 changes: 8 additions & 8 deletions src/backend/executor/nodeGather.c
@@ -46,7 +46,7 @@
 
 static TupleTableSlot *ExecGather(PlanState *pstate);
 static TupleTableSlot *gather_getnext(GatherState *gatherstate);
-static HeapTuple gather_readnext(GatherState *gatherstate);
+static MinimalTuple gather_readnext(GatherState *gatherstate);
 static void ExecShutdownGatherWorkers(GatherState *node);
 
 
@@ -120,7 +120,7 @@ ExecInitGather(Gather *node, EState *estate, int eflags)
 	 * Initialize funnel slot to same tuple descriptor as outer plan.
 	 */
 	gatherstate->funnel_slot = ExecInitExtraTupleSlot(estate, tupDesc,
-													  &TTSOpsHeapTuple);
+													  &TTSOpsMinimalTuple);
 
 	/*
 	 * Gather doesn't support checking a qual (it's always more efficient to
@@ -266,7 +266,7 @@ gather_getnext(GatherState *gatherstate)
 	PlanState  *outerPlan = outerPlanState(gatherstate);
 	TupleTableSlot *outerTupleSlot;
 	TupleTableSlot *fslot = gatherstate->funnel_slot;
-	HeapTuple	tup;
+	MinimalTuple tup;
 
 	while (gatherstate->nreaders > 0 || gatherstate->need_to_scan_locally)
 	{
@@ -278,9 +278,9 @@ gather_getnext(GatherState *gatherstate)
 
 		if (HeapTupleIsValid(tup))
 		{
-			ExecStoreHeapTuple(tup,	/* tuple to store */
-							   fslot,	/* slot to store the tuple */
-							   true);	/* pfree tuple when done with it */
+			ExecStoreMinimalTuple(tup,	/* tuple to store */
+								  fslot,	/* slot to store the tuple */
+								  false);	/* don't pfree tuple */
 			return fslot;
 		}
 	}
@@ -308,15 +308,15 @@
 /*
  * Attempt to read a tuple from one of our parallel workers.
  */
-static HeapTuple
+static MinimalTuple
 gather_readnext(GatherState *gatherstate)
 {
 	int			nvisited = 0;
 
 	for (;;)
 	{
 		TupleQueueReader *reader;
-		HeapTuple	tup;
+		MinimalTuple tup;
 		bool		readerdone;
 
 		/* Check for async events, particularly messages from workers. */
40 changes: 22 additions & 18 deletions src/backend/executor/nodeGatherMerge.c
@@ -45,7 +45,7 @@
  */
 typedef struct GMReaderTupleBuffer
 {
-	HeapTuple  *tuple;			/* array of length MAX_TUPLE_STORE */
+	MinimalTuple *tuple;		/* array of length MAX_TUPLE_STORE */
 	int			nTuples;		/* number of tuples currently stored */
 	int			readCounter;	/* index of next tuple to extract */
 	bool		done;			/* true if reader is known exhausted */
@@ -54,8 +54,8 @@ typedef struct GMReaderTupleBuffer
 static TupleTableSlot *ExecGatherMerge(PlanState *pstate);
 static int32 heap_compare_slots(Datum a, Datum b, void *arg);
 static TupleTableSlot *gather_merge_getnext(GatherMergeState *gm_state);
-static HeapTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader,
-								   bool nowait, bool *done);
+static MinimalTuple gm_readnext_tuple(GatherMergeState *gm_state, int nreader,
+									  bool nowait, bool *done);
 static void ExecShutdownGatherMergeWorkers(GatherMergeState *node);
 static void gather_merge_setup(GatherMergeState *gm_state);
 static void gather_merge_init(GatherMergeState *gm_state);
@@ -419,12 +419,12 @@ gather_merge_setup(GatherMergeState *gm_state)
 	{
 		/* Allocate the tuple array with length MAX_TUPLE_STORE */
 		gm_state->gm_tuple_buffers[i].tuple =
-			(HeapTuple *) palloc0(sizeof(HeapTuple) * MAX_TUPLE_STORE);
+			(MinimalTuple *) palloc0(sizeof(MinimalTuple) * MAX_TUPLE_STORE);
 
 		/* Initialize tuple slot for worker */
 		gm_state->gm_slots[i + 1] =
 			ExecInitExtraTupleSlot(gm_state->ps.state, gm_state->tupDesc,
-								   &TTSOpsHeapTuple);
+								   &TTSOpsMinimalTuple);
 	}
 
 	/* Allocate the resources for the merge */
@@ -533,7 +533,7 @@ gather_merge_clear_tuples(GatherMergeState *gm_state)
 		GMReaderTupleBuffer *tuple_buffer = &gm_state->gm_tuple_buffers[i];
 
 		while (tuple_buffer->readCounter < tuple_buffer->nTuples)
-			heap_freetuple(tuple_buffer->tuple[tuple_buffer->readCounter++]);
+			pfree(tuple_buffer->tuple[tuple_buffer->readCounter++]);
 
 		ExecClearTuple(gm_state->gm_slots[i + 1]);
 	}
@@ -613,13 +613,13 @@ load_tuple_array(GatherMergeState *gm_state, int reader)
 	/* Try to fill additional slots in the array. */
 	for (i = tuple_buffer->nTuples; i < MAX_TUPLE_STORE; i++)
 	{
-		HeapTuple	tuple;
+		MinimalTuple tuple;
 
 		tuple = gm_readnext_tuple(gm_state,
 								  reader,
 								  true,
 								  &tuple_buffer->done);
-		if (!HeapTupleIsValid(tuple))
+		if (!tuple)
 			break;
 		tuple_buffer->tuple[i] = tuple;
 		tuple_buffer->nTuples++;
@@ -637,7 +637,7 @@ static bool
 gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 {
 	GMReaderTupleBuffer *tuple_buffer;
-	HeapTuple	tup;
+	MinimalTuple tup;
 
 	/*
	 * If we're being asked to generate a tuple from the leader, then we just
@@ -687,7 +687,7 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 								  reader,
 								  nowait,
 								  &tuple_buffer->done);
-		if (!HeapTupleIsValid(tup))
+		if (!tup)
 			return false;
 
 		/*
@@ -697,26 +697,26 @@ gather_merge_readnext(GatherMergeState *gm_state, int reader, bool nowait)
 		load_tuple_array(gm_state, reader);
 	}
 
-	Assert(HeapTupleIsValid(tup));
+	Assert(tup);
 
 	/* Build the TupleTableSlot for the given tuple */
-	ExecStoreHeapTuple(tup,		/* tuple to store */
-					   gm_state->gm_slots[reader],	/* slot in which to store
-													 * the tuple */
-					   true);	/* pfree tuple when done with it */
+	ExecStoreMinimalTuple(tup,	/* tuple to store */
+						  gm_state->gm_slots[reader],	/* slot in which to store
														 * the tuple */
+						  true);	/* pfree tuple when done with it */
 
 	return true;
 }
 
 /*
  * Attempt to read a tuple from given worker.
  */
-static HeapTuple
+static MinimalTuple
 gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
 				  bool *done)
 {
 	TupleQueueReader *reader;
-	HeapTuple	tup;
+	MinimalTuple tup;
 
 	/* Check for async events, particularly messages from workers. */
 	CHECK_FOR_INTERRUPTS();
@@ -732,7 +732,11 @@ gm_readnext_tuple(GatherMergeState *gm_state, int nreader, bool nowait,
 	reader = gm_state->reader[nreader - 1];
 	tup = TupleQueueReaderNext(reader, nowait, done);
 
-	return tup;
+	/*
+	 * Since we'll be buffering these across multiple calls, we need to make a
+	 * copy.
+	 */
+	return tup ? heap_copy_minimal_tuple(tup) : NULL;
 }
 
 /*
30 changes: 14 additions & 16 deletions src/backend/executor/tqueue.c
@@ -54,16 +54,16 @@ static bool
 tqueueReceiveSlot(TupleTableSlot *slot, DestReceiver *self)
 {
 	TQueueDestReceiver *tqueue = (TQueueDestReceiver *) self;
-	HeapTuple	tuple;
+	MinimalTuple tuple;
 	shm_mq_result result;
 	bool		should_free;
 
 	/* Send the tuple itself. */
-	tuple = ExecFetchSlotHeapTuple(slot, true, &should_free);
-	result = shm_mq_send(tqueue->queue, tuple->t_len, tuple->t_data, false);
+	tuple = ExecFetchSlotMinimalTuple(slot, &should_free);
+	result = shm_mq_send(tqueue->queue, tuple->t_len, tuple, false);
 
 	if (should_free)
-		heap_freetuple(tuple);
+		pfree(tuple);
 
 	/* Check for failure. */
 	if (result == SHM_MQ_DETACHED)
@@ -164,18 +164,18 @@ DestroyTupleQueueReader(TupleQueueReader *reader)
  * nowait = true and no tuple is ready to return.  *done, if not NULL,
  * is set to true when there are no remaining tuples and otherwise to false.
  *
- * The returned tuple, if any, is allocated in CurrentMemoryContext.
- * Note that this routine must not leak memory!  (We used to allow that,
- * but not any more.)
+ * The returned tuple, if any, is either in shared memory or a private buffer
+ * and should not be freed.  The pointer is invalid after the next call to
+ * TupleQueueReaderNext().
  *
 * Even when shm_mq_receive() returns SHM_MQ_WOULD_BLOCK, this can still
 * accumulate bytes from a partially-read message, so it's useful to call
 * this with nowait = true even if nothing is returned.
 */
-HeapTuple
+MinimalTuple
 TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done)
 {
-	HeapTupleData htup;
+	MinimalTuple tuple;
 	shm_mq_result result;
 	Size		nbytes;
 	void	   *data;
@@ -200,13 +200,11 @@ TupleQueueReaderNext(TupleQueueReader *reader, bool nowait, bool *done)
 	Assert(result == SHM_MQ_SUCCESS);
 
 	/*
-	 * Set up a dummy HeapTupleData pointing to the data from the shm_mq
-	 * (which had better be sufficiently aligned).
+	 * Return a pointer to the queue memory directly (which had better be
+	 * sufficiently aligned).
 	 */
-	ItemPointerSetInvalid(&htup.t_self);
-	htup.t_tableOid = InvalidOid;
-	htup.t_len = nbytes;
-	htup.t_data = data;
+	tuple = (MinimalTuple) data;
+	Assert(tuple->t_len == nbytes);
 
-	return heap_copytuple(&htup);
+	return tuple;
 }
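
The ownership rule in the revised header comment above is what forces GatherMerge's copy: the returned pointer may reference the shm_mq buffer itself and is invalidated by the next read. A hedged sketch of a compliant consumer loop follows, assuming the post-commit API; drain_queue and process_slot are hypothetical names, not code from the tree.

#include "postgres.h"

#include "access/htup_details.h"
#include "executor/tqueue.h"
#include "executor/tuptable.h"

extern void process_slot(TupleTableSlot *slot);	/* hypothetical consumer */

/*
 * Drain a queue under the post-commit contract: each returned tuple must be
 * fully consumed (or copied) before the next TupleQueueReaderNext() call.
 */
static void
drain_queue(TupleQueueReader *reader, TupleTableSlot *slot)
{
	bool		done = false;

	for (;;)
	{
		MinimalTuple tup = TupleQueueReaderNext(reader, false, &done);

		if (tup == NULL)
			break;				/* with nowait = false, NULL means exhausted */

		/*
		 * tup points into queue memory: valid only until the next call, and
		 * never pfree'd here (note shouldFree = false).
		 */
		ExecStoreMinimalTuple(tup, slot, false);
		process_slot(slot);
	}
}
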
8 changes: 5 additions & 3 deletions src/backend/optimizer/plan/createplan.c
@@ -1730,8 +1730,10 @@ create_gather_plan(PlannerInfo *root, GatherPath *best_path)
 	List	   *tlist;
 
 	/*
-	 * Although the Gather node can project, we prefer to push down such work
-	 * to its child node, so demand an exact tlist from the child.
+	 * Push projection down to the child node.  That way, the projection work
+	 * is parallelized, and there can be no system columns in the result (they
+	 * can't travel through a tuple queue because it uses MinimalTuple
+	 * representation).
 	 */
 	subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
 
@@ -1766,7 +1768,7 @@ create_gather_merge_plan(PlannerInfo *root, GatherMergePath *best_path)
 	List	   *pathkeys = best_path->path.pathkeys;
 	List	   *tlist = build_path_tlist(root, &best_path->path);
 
-	/* As with Gather, it's best to project away columns in the workers. */
+	/* As with Gather, project away columns in the workers. */
 	subplan = create_plan_recurse(root, best_path->subpath, CP_EXACT_TLIST);
 
 	/* Create a shell for a GatherMerge plan. */
4 changes: 2 additions & 2 deletions src/include/executor/tqueue.h
@@ -26,7 +26,7 @@ extern DestReceiver *CreateTupleQueueDestReceiver(shm_mq_handle *handle);
 /* Use these to receive tuples from a shm_mq. */
 extern TupleQueueReader *CreateTupleQueueReader(shm_mq_handle *handle);
 extern void DestroyTupleQueueReader(TupleQueueReader *reader);
-extern HeapTuple TupleQueueReaderNext(TupleQueueReader *reader,
-									  bool nowait, bool *done);
+extern MinimalTuple TupleQueueReaderNext(TupleQueueReader *reader,
+										 bool nowait, bool *done);
 
 #endif							/* TQUEUE_H */
