head	1.11;
access;
symbols
	REL9_0_0:1.11
	REL9_1_ALPHA1:1.11
	REL9_0_RC1:1.11
	REL9_0_BETA4:1.11
	REL9_0_STABLE:1.11.0.8
	REL9_0_BETA3:1.11
	REL9_0_BETA2:1.11
	REL7_4_29:1.3
	REL8_0_25:1.4
	REL8_1_21:1.5
	REL8_2_17:1.5
	REL8_3_11:1.5
	REL8_4_4:1.8
	REL9_0_BETA1:1.11
	REL9_0_ALPHA5_BRANCH:1.11.0.6
	REL9_0_ALPHA5:1.11
	REL7_4_28:1.3
	REL8_0_24:1.4
	REL8_1_20:1.5
	REL8_2_16:1.5
	REL8_3_10:1.5
	REL8_4_3:1.8
	REL9_0_ALPHA4:1.11
	REL9_0_ALPHA4_BRANCH:1.11.0.4
	REL8_5_ALPHA3:1.11
	REL8_5_ALPHA3_BRANCH:1.11.0.2
	REL7_4_27:1.3
	REL8_0_23:1.4
	REL8_1_19:1.5
	REL8_2_15:1.5
	REL8_3_9:1.5
	REL8_4_2:1.8
	REL8_5_ALPHA2:1.10
	REL8_5_ALPHA2_BRANCH:1.10.0.2
	REL7_4_26:1.3
	REL8_0_22:1.4
	REL8_1_18:1.5
	REL8_2_14:1.5
	REL8_3_8:1.5
	REL8_4_1:1.8
	REL8_5_ALPHA1:1.8
	REL8_5_ALPHA1_BRANCH:1.8.0.4
	REL8_4_STABLE:1.8.0.2
	REL8_4_0:1.8
	REL8_4_RC2:1.8
	REL8_4_RC1:1.8
	REL8_4_BETA2:1.8
	REL8_4_BETA1:1.8
	REL7_4_25:1.3
	REL8_0_21:1.4
	REL8_1_17:1.5
	REL8_2_13:1.5
	REL8_3_7:1.5
	REL7_4_24:1.3
	REL8_0_20:1.4
	REL8_1_16:1.5
	REL8_2_12:1.5
	REL8_3_6:1.5
	REL7_4_23:1.3
	REL8_0_19:1.4
	REL8_1_15:1.5
	REL8_2_11:1.5
	REL8_3_5:1.5
	REL7_4_22:1.3
	REL8_0_18:1.4
	REL8_1_14:1.5
	REL8_2_10:1.5
	REL8_3_4:1.5
	REL7_4_21:1.3
	REL8_0_17:1.4
	REL8_1_13:1.5
	REL8_2_9:1.5
	REL8_3_3:1.5
	REL7_4_20:1.3
	REL8_0_16:1.4
	REL8_1_12:1.5
	REL8_2_8:1.5
	REL8_3_2:1.5
	REL8_2_7:1.5
	REL8_3_1:1.5
	REL8_3_STABLE:1.5.0.6
	REL8_3_0:1.5
	REL8_3_RC2:1.5
	REL7_3_21:1.1
	REL7_4_19:1.3
	REL8_0_15:1.4
	REL8_1_11:1.5
	REL8_2_6:1.5
	REL8_3_RC1:1.5
	REL8_3_BETA4:1.5
	REL8_3_BETA3:1.5
	REL8_3_BETA2:1.5
	REL8_3_BETA1:1.5
	REL7_3_20:1.1
	REL7_4_18:1.3
	REL8_0_14:1.4
	REL8_1_10:1.5
	REL8_2_5:1.5
	REL7_3_19:1.1
	REL7_4_17:1.3
	REL8_0_13:1.4
	REL8_1_9:1.5
	REL8_2_4:1.5
	REL8_0_12:1.4
	REL8_1_8:1.5
	REL8_2_3:1.5
	REL7_3_18:1.1
	REL7_4_16:1.3
	REL8_0_11:1.4
	REL8_1_7:1.5
	REL8_2_2:1.5
	REL8_0_10:1.4
	REL8_1_6:1.5
	REL8_2_1:1.5
	REL7_4_15:1.3
	REL7_3_17:1.1
	REL8_2_STABLE:1.5.0.4
	REL8_2_0:1.5
	REL8_2_RC1:1.5
	REL8_2_BETA3:1.5
	REL8_2_BETA2:1.5
	REL8_1_5:1.5
	REL8_0_9:1.4
	REL7_4_14:1.3
	REL7_3_16:1.1
	REL8_2_BETA1:1.5
	REL7_3_15:1.1
	REL7_4_13:1.3
	REL8_0_8:1.4
	REL8_1_4:1.5
	REL7_3_14:1.1
	REL7_4_12:1.3
	REL8_0_7:1.4
	REL8_1_3:1.5
	REL7_3_13:1.1
	REL7_4_11:1.3
	REL8_0_6:1.4
	REL8_1_2:1.5
	REL7_3_12:1.1
	REL7_4_10:1.3
	REL8_0_5:1.4
	REL8_1_1:1.5
	REL8_1_STABLE:1.5.0.2
	REL8_1_0:1.5
	REL8_1_0RC1:1.5
	REL8_1_0BETA4:1.5
	REL8_1_0BETA3:1.5
	REL7_3_11:1.1
	REL7_4_9:1.3
	REL8_0_4:1.4
	REL8_1_0BETA2:1.5
	REL8_1_0BETA1:1.5
	REL7_2_8:1.1
	REL7_3_10:1.1
	REL7_4_8:1.3
	REL8_0_3:1.4
	REL8_0_2:1.4
	REL7_2_7:1.1
	REL7_3_9:1.1
	REL7_4_7:1.3
	REL8_0_1:1.4
	REL8_0_STABLE:1.4.0.4
	REL8_0_0:1.4.0.2
	REL8_0_0RC5:1.4
	REL8_0_0RC4:1.4
	REL8_0_0RC3:1.4
	REL8_0_0RC2:1.4
	REL8_0_0RC1:1.4
	REL8_0_0BETA5:1.4
	REL8_0_0BETA4:1.4
	REL7_4_6:1.3
	REL7_3_8:1.1
	REL7_2_6:1.1
	REL8_0_0BETA3:1.4
	REL8_0_0BETA2:1.4
	REL7_2_5:1.1
	REL7_4_5:1.3
	REL7_3_7:1.1
	REL7_4_4:1.3
	REL8_0_0BETA1:1.4
	REL7_4_3:1.3
	REL7_4_2:1.3
	REL7_3_6:1.1
	REL7_4_1:1.3
	REL7_3_5:1.1
	REL7_4:1.3
	REL7_4_RC2:1.3
	REL7_4_STABLE:1.3.0.4
	REL7_4_RC1:1.3
	REL7_4_BETA5:1.3
	REL7_4_BETA4:1.3
	REL7_4_BETA3:1.3
	REL7_4_BETA2:1.3
	WIN32_DEV:1.3.0.2
	REL7_4_BETA1:1.3
	REL7_3_4:1.1
	REL7_3_2:1.1
	REL7_2_4:1.1
	REL7_3_STABLE:1.1.0.4
	REL7_2_3:1.1
	REL7_2_STABLE:1.1.0.2
	REL7_2:1.1
	REL7_2_RC2:1.1
	REL7_2_RC1:1.1
	REL7_2_BETA5:1.1
	REL7_2_BETA4:1.1
	REL7_2_BETA3:1.1
	REL7_2_BETA2:1.1
	REL7_2_BETA1:1.1;
locks; strict;
comment	@# @;


1.11
date	2009.10.26.02.26.29;	author tgl;	state Exp;
branches;
next	1.10;

1.10
date	2009.10.12.18.10.41;	author tgl;	state Exp;
branches;
next	1.9;

1.9
date	2009.10.10.01.43.45;	author tgl;	state Exp;
branches;
next	1.8;

1.8
date	2009.01.09.15.46.10;	author tgl;	state Exp;
branches;
next	1.7;

1.7
date	2008.03.21.13.23.28;	author momjian;	state Exp;
branches;
next	1.6;

1.6
date	2008.03.20.17.55.14;	author momjian;	state Exp;
branches;
next	1.5;

1.5
date	2005.04.28.21.47.12;	author tgl;	state Exp;
branches;
next	1.4;

1.4
date	2003.11.29.19.51.48;	author pgsql;	state Exp;
branches;
next	1.3;

1.3
date	2002.12.15.16.17.45;	author tgl;	state Exp;
branches;
next	1.2;

1.2
date	2002.12.05.15.50.30;	author tgl;	state Exp;
branches;
next	1.1;

1.1
date	2001.05.15.00.35.50;	author tgl;	state Exp;
branches;
next	;


desc
@@


1.11
log
@Re-implement EvalPlanQual processing to improve its performance and eliminate
a lot of strange behaviors that occurred in join cases.  We now identify the
"current" row for every joined relation in UPDATE, DELETE, and SELECT FOR
UPDATE/SHARE queries.  If an EvalPlanQual recheck is necessary, we jam the
appropriate row into each scan node in the rechecking plan, forcing it to emit
only that one row.  The former behavior could rescan the whole of each joined
relation for each recheck, which was terrible for performance, and what's much
worse could result in duplicated output tuples.

Also, the original implementation of EvalPlanQual could not re-use the recheck
execution tree --- it had to go through a full executor init and shutdown for
every row to be tested.  To avoid this overhead, I've associated a special
runtime Param with each LockRows or ModifyTable plan node, and arranged to
make every scan node below such a node depend on that Param.  Thus, by
signaling a change in that Param, the EPQ machinery can just rescan the
already-built test plan.

This patch also adds a prohibition on set-returning functions in the
targetlist of SELECT FOR UPDATE/SHARE.  This is needed to avoid the
duplicate-output-tuple problem.  It seems fairly reasonable since the
other restrictions on SELECT FOR UPDATE are meant to ensure that there
is a unique correspondence between source tuples and result tuples,
which an output SRF destroys as much as anything else does.
@
text
@$PostgreSQL: pgsql/src/backend/executor/README,v 1.10 2009/10/12 18:10:41 tgl Exp $

The Postgres Executor
=====================

The executor processes a tree of "plan nodes".  The plan tree is essentially
a demand-pull pipeline of tuple processing operations.  Each node, when
called, will produce the next tuple in its output sequence, or NULL if no
more tuples are available.  If the node is not a primitive relation-scanning
node, it will have child node(s) that it calls in turn to obtain input
tuples.
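
As a concrete illustration, here is a schematic consumer-side loop for
this pull model (not actual backend code --- setup of "planstate" and
all resource management are omitted; ExecProcNode and TupIsNull are the
real entry points):

	TupleTableSlot *slot;

	for (;;)
	{
		slot = ExecProcNode(planstate);	/* pull next output tuple */
		if (TupIsNull(slot))
			break;		/* output sequence is exhausted */
		/* ... process the tuple stored in "slot" ... */
	}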

Refinements on this basic model include:

* Choice of scan direction (forwards or backwards).  Caution: this is not
currently well-supported.  It works for primitive scan nodes, but not very
well for joins, aggregates, etc.

* Rescan command to reset a node and make it generate its output sequence
over again.

* Parameters that can alter a node's results.  After adjusting a parameter,
the rescan command must be applied to that node and all nodes above it.
There is a moderately intelligent scheme to avoid rescanning nodes
unnecessarily (for example, Sort does not rescan its input if no parameters
of the input have changed, since it can just reread its stored sorted data).

For a SELECT, it is only necessary to deliver the top-level result tuples
to the client.  For INSERT/UPDATE/DELETE, the actual table modification
operations happen in a top-level ModifyTable plan node.  If the query
includes a RETURNING clause, the ModifyTable node delivers the computed
RETURNING rows as output; otherwise it returns nothing.  Handling INSERT
is pretty straightforward: the tuples returned from the plan tree below
ModifyTable are inserted into the correct result relation.  For UPDATE,
the plan tree returns the computed tuples to be updated, plus a "junk"
(hidden) CTID column identifying which table row is to be replaced by each
one.  For DELETE, the plan tree need only deliver a CTID column, and the
ModifyTable node visits each of those rows and marks the row deleted.
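
For instance, the ModifyTable node digs the target row's TID out of the
junk column in roughly this way (a schematic sketch along the lines of
nodeModifyTable.c; error handling abridged):

	Datum		datum;
	bool		isNull;
	ItemPointer	tupleid;

	/* the junk filter remembered which output column is the "ctid" */
	datum = ExecGetJunkAttribute(slot, junkfilter->jf_junkAttNo,
								 &isNull);
	if (isNull)
		elog(ERROR, "ctid is NULL");
	tupleid = (ItemPointer) DatumGetPointer(datum);
	/* ... then update or delete the heap tuple at *tupleid ... */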

XXX a great deal more documentation needs to be written here...


Plan Trees and State Trees
--------------------------

The plan tree delivered by the planner contains a tree of Plan nodes (struct
types derived from struct Plan).  Each Plan node may have expression trees
associated with it, to represent its target list, qualification conditions,
etc.  During executor startup we build a parallel tree of identical structure
containing executor state nodes --- every plan and expression node type has
a corresponding executor state node type.  Each node in the state tree has a
pointer to its corresponding node in the plan tree, plus executor state data
as needed to implement that node type.  This arrangement allows the plan
tree to be completely read-only as far as the executor is concerned: all data
that is modified during execution is in the state tree.  Read-only plan trees
make life much simpler for plan caching and reuse.

Altogether there are four classes of nodes used in these trees: Plan nodes,
their corresponding PlanState nodes, Expr nodes, and their corresponding
ExprState nodes.  (Actually, there are also List nodes, which are used as
"glue" in all four kinds of tree.)


Memory Management
-----------------

A "per query" memory context is created during CreateExecutorState();
all storage allocated during an executor invocation is allocated in that
context or a child context.  This allows easy reclamation of storage
during executor shutdown --- rather than messing with retail pfree's and
probable storage leaks, we just destroy the memory context.

In particular, the plan state trees and expression state trees described
in the previous section are allocated in the per-query memory context.

To avoid intra-query memory leaks, most processing while a query runs
is done in "per tuple" memory contexts, which are so-called because they
are typically reset to empty once per tuple.  Per-tuple contexts are usually
associated with ExprContexts, and commonly each PlanState node has its own
ExprContext to evaluate its qual and targetlist expressions in.
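
Schematically, a node's per-tuple work looks like this (details vary
across node types; ExecQual's third argument says how a NULL qual
result should be treated):

	ResetExprContext(econtext);	/* discard previous tuple's memory */
	econtext->ecxt_scantuple = slot;	/* install tuple to test */
	if (ExecQual(node->ps.qual, econtext, false))
	{
		/* tuple passed the quals; project the targetlist, etc */
	}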


Query Processing Control Flow
-----------------------------

This is a sketch of control flow for full query processing:

	CreateQueryDesc

	ExecutorStart
		CreateExecutorState
			creates per-query context
		switch to per-query context to run ExecInitNode
		ExecInitNode --- recursively scans plan tree
			CreateExprContext
				creates per-tuple context
			ExecInitExpr

	ExecutorRun
		ExecProcNode --- recursively called in per-query context
			ExecEvalExpr --- called in per-tuple context
			ResetExprContext --- to free memory

	ExecutorEnd
		ExecEndNode --- recursively releases resources
		FreeExecutorState
			frees per-query context and child contexts

	FreeQueryDesc

Per above comments, it's not really critical for ExecEndNode to free any
memory; it'll all go away in FreeExecutorState anyway.  However, we do need to
be careful to close relations, drop buffer pins, etc; hence we still have
to scan the plan state tree to find those sorts of resources.
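
The same top-level sequence, rendered as a minimal C caller with the
argument lists heavily abridged (see tcop/pquery.c for the real usage;
the "..." stands for arguments not shown here):

	QueryDesc  *queryDesc = CreateQueryDesc(plannedstmt, ...);

	ExecutorStart(queryDesc, 0);
	ExecutorRun(queryDesc, ForwardScanDirection, 0L);
	ExecutorEnd(queryDesc);
	FreeQueryDesc(queryDesc);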


The executor can also be used to evaluate simple expressions without any Plan
tree ("simple" meaning "no aggregates and no sub-selects", though such might
be hidden inside function calls).  This case has a flow of control like

	CreateExecutorState
		creates per-query context

	CreateExprContext	-- or use GetPerTupleExprContext(estate)
		creates per-tuple context

	ExecPrepareExpr
		temporarily switch to per-query context
		run the expression through expression_planner
		ExecInitExpr

	Repeatedly do:
		ExecEvalExprSwitchContext
			ExecEvalExpr --- called in per-tuple context
		ResetExprContext --- to free memory

	FreeExecutorState
		frees per-query context, as well as ExprContext
		(a separate FreeExprContext call is not necessary)
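
A minimal sketch of such a caller (assuming "expr" is an Expr tree that
has already been built; error cleanup is omitted, and note that the
ResetExprContext frees a pass-by-reference result, so use or copy the
result first):

	EState	   *estate = CreateExecutorState();
	ExprContext *econtext = GetPerTupleExprContext(estate);
	ExprState  *exprstate;
	Datum		result;
	bool		isnull;

	exprstate = ExecPrepareExpr(expr, estate);

	result = ExecEvalExprSwitchContext(exprstate, econtext,
									   &isnull, NULL);
	/* ... use "result" here ... */
	ResetExprContext(econtext);	/* now free per-tuple memory */

	FreeExecutorState(estate);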


EvalPlanQual (READ COMMITTED Update Checking)
---------------------------------------------

For simple SELECTs, the executor need only pay attention to tuples that are
valid according to the snapshot seen by the current transaction (ie, they
were inserted by a previously committed transaction, and not deleted by any
previously committed transaction).  However, for UPDATE and DELETE it is not
cool to modify or delete a tuple that's been modified by an open or
concurrently-committed transaction.  If we are running in SERIALIZABLE
isolation level then we just raise an error when this condition is seen to
occur.  In READ COMMITTED isolation level, we must work a lot harder.

The basic idea in READ COMMITTED mode is to take the modified tuple
committed by the concurrent transaction (after waiting for it to commit,
if need be) and re-evaluate the query qualifications to see if it would
still meet the quals.  If so, we regenerate the updated tuple (if we are
doing an UPDATE) from the modified tuple, and finally update/delete the
modified tuple.  SELECT FOR UPDATE/SHARE behaves similarly, except that its
action is just to lock the modified tuple and return results based on that
version of the tuple.

To implement this checking, we actually re-run the query from scratch for
each modified tuple (or set of tuples, for SELECT FOR UPDATE), with the
relation scan nodes tweaked to return only the current tuples --- either
the original ones, or the updated (and now locked) versions of the modified
tuple(s).  If this query returns a tuple, then the modified tuple(s) pass
the quals (and the query output is the suitably modified update tuple, if
we're doing UPDATE).  If no tuple is returned, then the modified tuple(s)
fail the quals, so we ignore the current result tuple and continue the
original query.

In UPDATE/DELETE, only the target relation needs to be handled this way.
In SELECT FOR UPDATE, there may be multiple relations flagged FOR UPDATE,
so we obtain a lock on the current tuple version in each such relation before
executing the recheck.

It is also possible that there are relations in the query that are not
to be locked (they are neither the UPDATE/DELETE target nor specified to
be locked in SELECT FOR UPDATE/SHARE).  When re-running the test query
we want to use the same rows from these relations that were joined to
the locked rows.  For ordinary relations this can be implemented relatively
cheaply by including the row TID in the join outputs and re-fetching that
TID.  (The re-fetch is expensive, but we're trying to optimize the normal
case where no re-test is needed.)  We also have to consider non-table
relations, such as a ValuesScan or FunctionScan.  For these, since there
is no equivalent of TID, the only practical solution seems to be to include
the entire row value in the join output row.

We disallow set-returning functions in the targetlist of SELECT FOR UPDATE,
so as to ensure that at most one tuple can be returned for any particular
set of scan tuples.  Otherwise we'd get duplicates due to the original
query returning the same set of scan tuples multiple times.  (Note: there
is no explicit prohibition on SRFs in UPDATE, but the net effect will be
that only the first result row of an SRF counts, because all subsequent
rows will result in attempts to re-update an already updated target row.
This is historical behavior and seems not worth changing.)
@


1.10
log
@Move the handling of SELECT FOR UPDATE locking and rechecking out of
execMain.c and into a new plan node type LockRows.  Like the recent change
to put table updating into a ModifyTable plan node, this increases planning
flexibility by allowing the operations to occur below the top level of the
plan tree.  It's necessary in any case to restore the previous behavior of
having FOR UPDATE locking occur before ModifyTable does.

This partially refactors EvalPlanQual to allow multiple rows-under-test
to be inserted into the EPQ machinery before starting an EPQ test query.
That isn't sufficient to fix EPQ's general bogosity in the face of plans
that return multiple rows per test row, though.  Since this patch is
mostly about getting some plan node infrastructure in place and not about
fixing ten-year-old bugs, I will leave EPQ improvements for another day.

Another behavioral change that we could now think about is doing FOR UPDATE
before LIMIT, but that too seems like it should be treated as a followon
patch.
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.9 2009/10/10 01:43:45 tgl Exp $
d163 35
a197 38
To implement this checking, we actually re-run the entire query from scratch
for each modified tuple, but with the scan node that sourced the original
tuple set to return only the modified tuple, not the original tuple or any
of the rest of the relation.  If this query returns a tuple, then the
modified tuple passes the quals (and the query output is the suitably
modified update tuple, if we're doing UPDATE).  If no tuple is returned,
then the modified tuple fails the quals, so we ignore it and continue the
original query.  (This is reasonably efficient for simple queries, but may
be horribly slow for joins.  A better design would be nice; one thought for
future investigation is to treat the tuple substitution like a parameter,
so that we can avoid rescanning unrelated nodes.)

Note a fundamental bogosity of this approach: if the relation containing
the original tuple is being used in a self-join, the other instance(s) of
the relation will be treated as still containing the original tuple, whereas
logical consistency would demand that the modified tuple appear in them too.
But we'd have to actually substitute the modified tuple for the original,
while still returning all the rest of the relation, to ensure consistent
answers.  Implementing this correctly is a task for future work.

In UPDATE/DELETE, only the target relation needs to be handled this way,
so only one special recheck query needs to execute at a time.  In SELECT FOR
UPDATE, there may be multiple relations flagged FOR UPDATE, so it's possible
that while we are executing a recheck query for one modified tuple, we will
hit another modified tuple in another relation.  In this case we "stack up"
recheck queries: a sub-recheck query is spawned in which both the first and
second modified tuples will be returned as the only components of their
relations.  (In event of success, all these modified tuples will be locked.)
Again, this isn't necessarily quite the right thing ... but in simple cases
it works.  Potentially, recheck queries could get nested to the depth of the
number of FOR UPDATE/SHARE relations in the query.

It should be noted also that UPDATE/DELETE expect at most one tuple to
result from the modified query, whereas in the FOR UPDATE case it's possible
for multiple tuples to result (since we could be dealing with a join in
which multiple tuples join to the modified tuple).  We want FOR UPDATE to
lock all relevant tuples, so we process all tuples output by all the stacked
recheck queries.
@


1.9
log
@Split the processing of INSERT/UPDATE/DELETE operations out of execMain.c.
They are now handled by a new plan node type called ModifyTable, which is
placed at the top of the plan tree.  In itself this change doesn't do much,
except perhaps make the handling of RETURNING lists and inherited UPDATEs a
tad less klugy.  But it is necessary preparation for the intended extension of
allowing RETURNING queries inside WITH.

Marko Tiikkaja
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.8 2009/01/09 15:46:10 tgl Exp $
d160 2
a161 1
action is just to lock the modified tuple.
d199 2
a200 2
lock all relevant tuples, so we pass all tuples output by all the stacked
recheck queries back to the executor toplevel for locking.
@


1.8
log
@Arrange for function default arguments to be processed properly in expressions
that are set up for execution with ExecPrepareExpr rather than going through
the full planner process.  By introducing an explicit notion of "expression
planning", this patch also lays a bit of groundwork for maybe someday
allowing sub-selects in standalone expressions.
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.7 2008/03/21 13:23:28 momjian Exp $
d28 11
a38 10
The plan tree concept implements SELECT directly: it is only necessary to
deliver the top-level result tuples to the client, or insert them into
another table in the case of INSERT ... SELECT.  (INSERT ... VALUES is
handled similarly, but the plan tree is just a Result node with no source
tables.)  For UPDATE, the plan tree selects the tuples that need to be
updated (WHERE condition) and delivers a new calculated tuple value for each
such tuple, plus a "junk" (hidden) tuple CTID identifying the target tuple.
The executor's top level then uses this information to update the correct
tuple.  DELETE is similar to UPDATE except that only a CTID need be
delivered by the plan tree.
@


1.7
log
@More README src cleanups.
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.6 2008/03/20 17:55:14 momjian Exp $
d127 2
a128 1
		switch to per-query context to run ExecInitExpr
@


1.6
log
@Make source code READMEs more consistent.  Add CVS tags to all README files.
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.5 2005/04/28 21:47:12 tgl Exp $
d4 1
a4 1
---------------------
@


1.5
log
@Implement sharable row-level locks, and use them for foreign key references
to eliminate unnecessary deadlocks.  This commit adds SELECT ... FOR SHARE
paralleling SELECT ... FOR UPDATE.  The implementation uses a new SLRU
data structure (managed much like pg_subtrans) to represent multiple-
transaction-ID sets.  When more than one transaction is holding a shared
lock on a particular row, we create a MultiXactId representing that set
of transactions and store its ID in the row's XMAX.  This scheme allows
an effectively unlimited number of row locks, just as we did before,
while not costing any extra overhead except when a shared lock actually
has to be shared.   Still TODO: use the regular lock manager to control
the grant order when multiple backends are waiting for a row lock.

Alvaro Herrera and Tom Lane.
@
text
@d1 1
a1 1
$PostgreSQL: pgsql/src/backend/executor/README,v 1.4 2003/11/29 19:51:48 pgsql Exp $
d140 1
a140 1
EvalPlanQual (READ COMMITTED update checking)
@


1.4
log
@
$Header: -> $PostgreSQL Changes ...
@
text
@d1 1
a1 1
$PostgreSQL: /cvsroot/pgsql-server/src/backend/executor/README,v 1.3 2002/12/15 16:17:45 tgl Exp $
d157 2
a158 2
modified tuple.  SELECT FOR UPDATE behaves similarly, except that its action
is just to mark the modified tuple for update by the current transaction.
d187 4
a190 4
relations.  (In event of success, all these modified tuples will be marked
for update.)  Again, this isn't necessarily quite the right thing ... but in
simple cases it works.  Potentially, recheck queries could get nested to the
depth of the number of FOR UPDATE relations in the query.
d196 2
a197 2
mark all relevant tuples, so we pass all tuples output by all the stacked
recheck queries back to the executor toplevel for marking.
@


1.3
log
@Revise executor APIs so that all per-query state structure is built in
a per-query memory context created by CreateExecutorState --- and destroyed
by FreeExecutorState.  This provides a final solution to the longstanding
problem of memory leaked by various ExecEndNode calls.
@
text
@d1 1
a1 1
$Header: /cvsroot/pgsql-server/src/backend/executor/README,v 1.2 2002/12/05 15:50:30 tgl Exp $
@


1.2
log
@Phase 1 of read-only-plans project: cause executor state nodes to point
to plan nodes, not vice-versa.  All executor state nodes now inherit from
struct PlanState.  Copying of plan trees has been simplified by not
storing a list of SubPlans in Plan nodes (eliminating duplicate links).
The executor still needs such a list, but it can build it during
ExecutorStart since it has to scan the plan tree anyway.
No initdb forced since no stored-on-disk structures changed, but you
will need a full recompile because of node-numbering changes.
@
text
@d1 1
a1 1
$Header: /cvsroot/pgsql-server/src/backend/executor/README,v 1.1 2001/05/15 00:35:50 tgl Exp $
d61 77
@


1.1
log
@Some badly needed documentation about EvalPlanQual.
@
text
@d1 1
a1 1
$Header$
d40 21
@
