I have the following script:
#!/bin/bash -e
set -e
DATA_DIR=/home/admin/backup_avl_historico/data
DB_HOST=myHost
DB_USER=myUser
#extract table list
logger 'Extracting Table List'
psql -h $DB_HOST -U $DB_USER -c "select table_name from information_schema.tables where table_name like 'avl_historico_%';" -t -o $DATA_DIR/tables.list
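# wc -l prints "<count> <filename>"; the first array element is the table count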
array=($(wc -l $DATA_DIR/tables.list))
logger ''$array
total_tables=${array[0]}
logger 'Total tables: '$total_tables
#Get max date
max_date=$(psql -h $DB_HOST -U $DB_USER -t -c "select now() - interval '12 months'")
logger 'Max date: '$max_date
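# split the timestamp on whitespace and keep only the date part for the file names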
array=($max_date)
date=${array[0]}
logger 'Only date: '$date
#Dump each table
while read table_name
do
    logger 'looping...'
    if [ ! -z "$table_name" ]; then
        logger 'Processing table '$table_name
        output=${table_name}_pre_${date}.csv
        psql -h $DB_HOST -U $DB_USER -t -F , -c "COPY (select * from reports.$table_name where fecha < '$max_date') TO STDOUT WITH CSV" -o ${DATA_DIR}/$output
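        # Only delete, compress and upload when the dump file exists and
        # actually contains rows; an empty file means nothing matched the cutoff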
        if [ -f ${DATA_DIR}/$output ]; then
            if test -s ${DATA_DIR}/$output
            then
                logger 'Deleting records'
                psql -h $DB_HOST -U $DB_USER -c "delete from reports.$table_name where fecha < '$max_date'"
                logger 'Gzipping '$output
                pigz ${DATA_DIR}/$output
                logger 'Moving to S3'
                aws s3 mv ${DATA_DIR}/$output.gz s3://my-bucket/avl_historico/
                logger 'Vacuuming table'
                psql -h $DB_HOST -U $DB_USER -c "vacuum full analyze reports.$table_name"
            else
                rm ${DATA_DIR}/$output
            fi
        fi
    fi
done < $DATA_DIR/tables.list
The problem I'm having is that when PostgreSQL aborts a statement with the following error:
ERROR: canceling statement due to lock timeout
the entire script aborts and doesn't continue with the next iteration of the loop.
Any ideas on how to avoid that exit condition would be appreciated, so that the script can just skip the failed iteration and continue with the rest.
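For illustration, this is roughly the behaviour I'd like each psql call inside the loop to have (just a sketch of the intent, assuming psql exits with a non-zero status when the statement is cancelled, which is what seems to make set -e kill the script; I haven't verified this is the right approach):

# hypothetical guard around the DELETE: if it fails (e.g. lock timeout),
# log it and move on to the next table instead of aborting the whole script
if ! psql -h $DB_HOST -U $DB_USER -c "delete from reports.$table_name where fecha < '$max_date'"
then
    logger "Skipping $table_name: delete failed"
    continue
fi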