14 | 14 | * |
15 | 15 | * |
16 | 16 | * IDENTIFICATION |
17 | | - * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.29 2004/07/17 03:28:47 tgl Exp $ |
| 17 | + * $PostgreSQL: pgsql/src/backend/commands/portalcmds.c,v 1.30 2004/07/31 00:45:31 tgl Exp $ |
18 | 18 | * |
19 | 19 | *------------------------------------------------------------------------- |
20 | 20 | */ |
@@ -259,8 +259,18 @@ PortalCleanup(Portal portal) |
259 | 259 |
260 | 260 | /* We must make the portal's resource owner current */ |
261 | 261 | saveResourceOwner = CurrentResourceOwner; |
262 | | - CurrentResourceOwner = portal->resowner; |
263 | | - ExecutorEnd(queryDesc); |
| 262 | + PG_TRY(); |
| 263 | + { |
| 264 | + CurrentResourceOwner = portal->resowner; |
| 265 | + ExecutorEnd(queryDesc); |
| 266 | + } |
| 267 | + PG_CATCH(); |
| 268 | + { |
| 269 | + /* Ensure CurrentResourceOwner is restored on error */ |
| 270 | + CurrentResourceOwner = saveResourceOwner; |
| 271 | + PG_RE_THROW(); |
| 272 | + } |
| 273 | + PG_END_TRY(); |
264 | 274 | CurrentResourceOwner = saveResourceOwner; |
265 | 275 | } |
266 | 276 | } |
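Both hunks follow PostgreSQL's standard PG_TRY/PG_CATCH idiom: save a piece of global state, run code that can elog(ERROR), and restore the state on both the normal path and the error path before re-throwing. A minimal sketch of the idiom used in this first hunk, assuming a hypothetical run_with_owner() helper (the real patch inlines this directly around ExecutorEnd):

    #include "postgres.h"
    #include "utils/resowner.h"

    static void
    run_with_owner(ResourceOwner owner, void (*body) (void))
    {
        ResourceOwner saveOwner = CurrentResourceOwner;

        PG_TRY();
        {
            CurrentResourceOwner = owner;   /* switch to the target owner */
            (*body) ();                     /* may longjmp out via elog(ERROR) */
        }
        PG_CATCH();
        {
            /* error path: put the caller's owner back, then propagate */
            CurrentResourceOwner = saveOwner;
            PG_RE_THROW();
        }
        PG_END_TRY();

        /* normal path: put the caller's owner back */
        CurrentResourceOwner = saveOwner;
    }

The restore appears twice because PG_RE_THROW() longjmps to the next enclosing handler, so the statement after PG_END_TRY() is never reached on the error path.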
@@ -317,91 +327,108 @@ PersistHoldablePortal(Portal portal) |
317 | 327 | portal->status = PORTAL_ACTIVE; |
318 | 328 |
319 | 329 | /* |
320 | | - * Set global portal context pointers. |
| 330 | + * Set up global portal context pointers. |
321 | 331 | */ |
322 | 332 | saveActivePortal = ActivePortal; |
323 | | - ActivePortal = portal; |
324 | 333 | saveResourceOwner = CurrentResourceOwner; |
325 | | - CurrentResourceOwner = portal->resowner; |
326 | 334 | savePortalContext = PortalContext; |
327 | | - PortalContext = PortalGetHeapMemory(portal); |
328 | 335 | saveQueryContext = QueryContext; |
329 | | - QueryContext = portal->queryContext; |
| 336 | + PG_TRY(); |
| 337 | + { |
| 338 | + ActivePortal = portal; |
| 339 | + CurrentResourceOwner = portal->resowner; |
| 340 | + PortalContext = PortalGetHeapMemory(portal); |
| 341 | + QueryContext = portal->queryContext; |
| 342 | + |
| 343 | + MemoryContextSwitchTo(PortalContext); |
| 344 | + |
| 345 | + /* |
| 346 | + * Rewind the executor: we need to store the entire result set in the |
| 347 | + * tuplestore, so that subsequent backward FETCHs can be processed. |
| 348 | + */ |
| 349 | + ExecutorRewind(queryDesc); |
| 350 | + |
| 351 | + /* Change the destination to output to the tuplestore */ |
| 352 | + queryDesc->dest = CreateDestReceiver(Tuplestore, portal); |
| 353 | + |
| 354 | + /* Fetch the result set into the tuplestore */ |
| 355 | + ExecutorRun(queryDesc, ForwardScanDirection, 0L); |
| 356 | + |
| 357 | + (*queryDesc->dest->rDestroy) (queryDesc->dest); |
| 358 | + queryDesc->dest = NULL; |
| 359 | + |
| 360 | + /* |
| 361 | + * Now shut down the inner executor. |
| 362 | + */ |
| 363 | + portal->queryDesc = NULL; /* prevent double shutdown */ |
| 364 | + ExecutorEnd(queryDesc); |
| 365 | + |
| 366 | + /* |
| 367 | + * Reset the position in the result set: ideally, this could be |
| 368 | + * implemented by just skipping straight to the tuple # that we need |
| 369 | + * to be at, but the tuplestore API doesn't support that. So we start |
| 370 | + * at the beginning of the tuplestore and iterate through it until we |
| 371 | + * reach where we need to be. FIXME someday? |
| 372 | + */ |
| 373 | + MemoryContextSwitchTo(portal->holdContext); |
| 374 | + |
| 375 | + if (!portal->atEnd) |
| 376 | + { |
| 377 | + long store_pos; |
330 | 378 |
331 | | - MemoryContextSwitchTo(PortalContext); |
| 379 | + if (portal->posOverflow) /* oops, cannot trust portalPos */ |
| 380 | + ereport(ERROR, |
| 381 | + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), |
| 382 | + errmsg("could not reposition held cursor"))); |
332 | 383 |
333 | | - /* |
334 | | - * Rewind the executor: we need to store the entire result set in the |
335 | | - * tuplestore, so that subsequent backward FETCHs can be processed. |
336 | | - */ |
337 | | - ExecutorRewind(queryDesc); |
| 384 | + tuplestore_rescan(portal->holdStore); |
338 | 385 |
339 | | - /* Change the destination to output to the tuplestore */ |
340 | | - queryDesc->dest = CreateDestReceiver(Tuplestore, portal); |
| 386 | + for (store_pos = 0; store_pos < portal->portalPos; store_pos++) |
| 387 | + { |
| 388 | + HeapTuple tup; |
| 389 | + bool should_free; |
341 | 390 |
342 | | - /* Fetch the result set into the tuplestore */ |
343 | | - ExecutorRun(queryDesc, ForwardScanDirection, 0L); |
| 391 | + tup = tuplestore_gettuple(portal->holdStore, true, |
| 392 | + &should_free); |
344 | 393 |
345 | | - (*queryDesc->dest->rDestroy) (queryDesc->dest); |
346 | | - queryDesc->dest = NULL; |
| 394 | + if (tup == NULL) |
| 395 | + elog(ERROR, "unexpected end of tuple stream"); |
347 | 396 |
348 | | - /* |
349 | | - * Now shut down the inner executor. |
350 | | - */ |
351 | | - portal->queryDesc = NULL; /* prevent double shutdown */ |
352 | | - ExecutorEnd(queryDesc); |
353 | | - |
354 | | - /* |
355 | | - * Reset the position in the result set: ideally, this could be |
356 | | - * implemented by just skipping straight to the tuple # that we need |
357 | | - * to be at, but the tuplestore API doesn't support that. So we start |
358 | | - * at the beginning of the tuplestore and iterate through it until we |
359 | | - * reach where we need to be. FIXME someday? |
360 | | - */ |
361 | | - MemoryContextSwitchTo(portal->holdContext); |
362 | | - |
363 | | - if (!portal->atEnd) |
| 397 | + if (should_free) |
| 398 | + pfree(tup); |
| 399 | + } |
| 400 | + } |
| 401 | + } |
| 402 | + PG_CATCH(); |
364 | 403 | { |
365 | | - long store_pos; |
366 | | - |
367 | | - if (portal->posOverflow) /* oops, cannot trust portalPos */ |
368 | | - ereport(ERROR, |
369 | | - (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), |
370 | | - errmsg("could not reposition held cursor"))); |
371 | | - |
372 | | - tuplestore_rescan(portal->holdStore); |
373 | | - |
374 | | - for (store_pos = 0; store_pos < portal->portalPos; store_pos++) |
375 | | - { |
376 | | - HeapTuple tup; |
377 | | - bool should_free; |
378 | | - |
379 | | - tup = tuplestore_gettuple(portal->holdStore, true, |
380 | | - &should_free); |
| 404 | + /* Uncaught error while executing portal: mark it dead */ |
| 405 | + portal->status = PORTAL_FAILED; |
381 | 406 |
382 | | - if (tup == NULL) |
383 | | - elog(ERROR, "unexpected end of tuple stream"); |
| 407 | + /* Restore global vars and propagate error */ |
| 408 | + ActivePortal = saveActivePortal; |
| 409 | + CurrentResourceOwner = saveResourceOwner; |
| 410 | + PortalContext = savePortalContext; |
| 411 | + QueryContext = saveQueryContext; |
384 | 412 |
385 | | - if (should_free) |
386 | | - pfree(tup); |
387 | | - } |
| 413 | + PG_RE_THROW(); |
388 | 414 | } |
| 415 | + PG_END_TRY(); |
389 | 416 |
390 | 417 | MemoryContextSwitchTo(oldcxt); |
391 | 418 |
392 | | - /* |
393 | | - * We can now release any subsidiary memory of the portal's heap |
394 | | - * context; we'll never use it again. The executor already dropped |
395 | | - * its context, but this will clean up anything that glommed onto the |
396 | | - * portal's heap via PortalContext. |
397 | | - */ |
398 | | - MemoryContextDeleteChildren(PortalGetHeapMemory(portal)); |
399 | | - |
400 | 419 | /* Mark portal not active */ |
401 | 420 | portal->status = PORTAL_READY; |
402 | 421 |
403 | 422 | ActivePortal = saveActivePortal; |
404 | 423 | CurrentResourceOwner = saveResourceOwner; |
405 | 424 | PortalContext = savePortalContext; |
406 | 425 | QueryContext = saveQueryContext; |
| 426 | + |
| 427 | + /* |
| 428 | + * We can now release any subsidiary memory of the portal's heap |
| 429 | + * context; we'll never use it again. The executor already dropped |
| 430 | + * its context, but this will clean up anything that glommed onto the |
| 431 | + * portal's heap via PortalContext. |
| 432 | + */ |
| 433 | + MemoryContextDeleteChildren(PortalGetHeapMemory(portal)); |
407 | 434 | } |
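Two details of the second hunk deserve a note. First, the PG_CATCH() branch does one thing beyond restoring the four saved globals: it marks the portal PORTAL_FAILED so later commands cannot touch a half-persisted cursor. Second, the repositioning loop now inside PG_TRY() exists because the tuplestore API of this era has no random-access seek; the code rewinds the store and reads forward one tuple at a time until the saved cursor position is reached. A sketch of just that loop, assuming a hypothetical skip_to_position() wrapper around the same tuplestore calls the patch uses:

    #include "postgres.h"
    #include "utils/tuplestore.h"

    static void
    skip_to_position(Tuplestorestate *store, long target_pos)
    {
        long        pos;

        tuplestore_rescan(store);       /* back to the first stored tuple */

        for (pos = 0; pos < target_pos; pos++)
        {
            HeapTuple   tup;
            bool        should_free;

            /* read forward; NULL means the store ran out early */
            tup = tuplestore_gettuple(store, true, &should_free);
            if (tup == NULL)
                elog(ERROR, "unexpected end of tuple stream");
            if (should_free)
                pfree(tup);             /* skipped tuples are not needed */
        }
    }

The patch also moves MemoryContextDeleteChildren(PortalGetHeapMemory(portal)) to the very end, after the saved global pointers are restored, so that cleanup runs outside the section that manipulates PortalContext.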