Shell script recipes

A. Backup database (MySQL, MariaDB) via CRON
1) Create a bash script somewhere under the project directory, e.g. /_myproject/_scripts/db-backup.sh, with the following content:
#!/bin/bash
# mysql/mariadb database backup - CRON executed script
TABLES="" # list of backuped tables or leave empty for all tables
DUMPFILE_STRUCT=myproject-structure.sql
DUMPFILE_DATA=myproject-data-$(date +%Y%m%d-%H%M%S).sql.gz
echo " "
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Starting DB backup .."
echo "Exporting structure into $DUMPFILE_STRUCT"
mysqldump --defaults-extra-file=/root/myproject/db-credentials.cnf --no-data myproject > /root/myproject/db_backup/$DUMPFILE_STRUCT
echo "Exporting data into $DUMPFILE_DATA for tables .. $TABLES"
mysqldump --defaults-extra-file=/root/myproject/db-credentials.cnf --lock-all-tables=1 myproject $TABLES | gzip > /root/myproject/db_backup/$DUMPFILE_DATA
# on Saturdays optimize tables (locks the whole DB)
if [[ $(date +%u) = 6 ]] ; then
TABLES="select concat(TABLE_SCHEMA,'.', TABLE_NAME) from information_schema.tables where data_free>0;"
for tbl in $(mysql --defaults-extra-file=/root/myproject/db-credentials.cnf -N <<< $TABLES)
do
echo "[$(date +%Y-%m-%d-%H-%M-%S)] OPTIMIZING TABLE: $tbl"
mysql --defaults-extra-file=/root/myproject/db-credentials.cnf -N <<< "optimize table $tbl"
done
fi
echo "Database backup completed at [$(date +%Y-%m-%d\ %H-%M-%S)]"
# delete DB backups older than 7 days
find /root/myproject/db_backup/ -type f -mtime +7 -name '*.sql.gz' -execdir rm -- '{}' +
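Note: --lock-all-tables blocks writes for the whole duration of the dump. If all dumped tables use the InnoDB engine, a non-blocking alternative is to dump within a single transaction (a sketch only - verify against your schema):
mysqldump --defaults-extra-file=/root/myproject/db-credentials.cnf --single-transaction myproject $TABLES | gzip > /root/myproject/db_backup/$DUMPFILE_DATA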
2) Create a file with credentials in /root/myproject/db-credentials.cnf. Make sure that dbadmin has privileges to access the dumped tables. You may want to create a new user with extended (or full) privileges, or use the root user.
; mysql/mariadb CRON-backup credentials
[client]
user=dbadmin
password=My.Password@123
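Since the credentials file contains a plain-text password, it is advisable to make it readable by its owner only, e.g.:
chmod 600 /root/myproject/db-credentials.cnf
chown root:root /root/myproject/db-credentials.cnf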
3) Set up a CRON job in e.g. /var/spool/cron/root:
# daily database backups at 3:05, kept for the last 7 days
5 3 * * * /_myproject/_scripts/db-backup.sh >> /_myproject/writable/logs/cron-root.log
Database backup files will be stored in /root/myproject/db_backup/.
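Tip: the redirection above captures only standard output; to also log errors from the script, append 2>&1, e.g.:
5 3 * * * /_myproject/_scripts/db-backup.sh >> /_myproject/writable/logs/cron-root.log 2>&1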
B. Restore database from backed-up files (MySQL, MariaDB)
This is done manually and destroys the existing database. Therefore, first define explicitly which backed-up file (from the previous recipe A) will be imported, then execute the script via the command line:
#!/bin/bash
# edit here - set filename that should be used for restoring database
RESTORE_FILE=myproject-data-20221231-235959.sql.gz
echo "Started restoring database at [$(date +%Y-%m-%d\ %H-%M-%S)] .."
mysql --defaults-extra-file=/root/myproject/db-credentials.cnf myproject < /root/myproject/db_backup/myproject-structure.sql
gunzip -c /root/myproject/db_backup/$RESTORE_FILE | mysql --defaults-extra-file=/root/myproject/db-credentials.cnf myproject
echo "Restoring database completed at [$(date +%Y-%m-%d\ %H-%M-%S)]!"
C. Periodically check whether a service is running via CRON
1) Create a bash script somewhere under the project directory, e.g. /_myproject/_scripts/service-running.sh, with the following content:
#!/bin/bash
# --- start nginx if not running ---
/usr/bin/pgrep nginx > /dev/null
if [ $? -ne 0 ]; then
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] NGINX NOT RUNNING - started!"
sudo systemctl start nginx
fi
# --- start php-fpm if not running ---
/usr/bin/pgrep php-fpm > /dev/null
if [ $? -ne 0 ]; then
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] PHP-FPM NOT RUNNING - started!"
sudo systemctl start php-fpm
fi
# --- start mariadb/mysql if not running ---
# /usr/bin/pgrep mysql > /dev/null
/usr/bin/pgrep mariadb > /dev/null
if [ $? -ne 0 ]; then
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] MARIADB NOT RUNNING - started!"
# sudo systemctl start mysql
sudo systemctl start mariadb
fi
# --- start elasticsearch if not running ---
ps -ef | grep elasticsearch | grep -v grep > /dev/null
if [ $? -ne 0 ]; then
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] ELASTICSEARCH NOT RUNNING - started!"
sudo systemctl start elasticsearch
fi
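Note: pgrep matches against process names, so an unrelated process with a similar name could mask a stopped service. If all services are managed by systemd, a more robust variant could query the unit state instead - a minimal sketch for nginx:
# alternative: ask systemd directly for the unit state
if ! systemctl is-active --quiet nginx; then
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] NGINX NOT RUNNING - started!"
sudo systemctl start nginx
fi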
2) Set up a CRON job in e.g. /var/spool/cron/root that will check the services every 10 minutes:
# check services every 10 minutes
*/10 * * * * /_myproject/_scripts/service-running.sh >> /_myproject/writable/logs/cron-root.log
D. Periodically restart services via CRON
1) Create a bash script somewhere under the project directory, e.g. /_myproject/_scripts/service-restart.sh, with the following content:
#!/bin/bash
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Restarting sevices ..."
sudo systemctl restart php-fpm
sudo systemctl restart nginx
# sudo systemctl restart mysql
sudo systemctl restart mariadb
sudo systemctl restart elasticsearch
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] -- All services restarted --"
2) Set up a CRON job in e.g. /var/spool/cron/root:
# restart services every day at 3:30
30 3 * * * /_myproject/_scripts/service-restart.sh >> /_myproject/writable/logs/cron-root.log
E. Renew Let's Encrypt certificate if due via CRON
1) Install certbot (nginx or apache, CentOS 8):
sudo dnf -y install epel-release certbot python3-certbot-nginx
sudo certbot --nginx
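Optionally verify that a renewal would succeed, without actually issuing a certificate:
sudo certbot renew --dry-run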
2) Set up a CRON job in e.g. /var/spool/cron/root:
# Check SSL expiration each Saturday at 4:15 and renew if it expires in less than 30 days
15 4 * * 6 PATH=$PATH:/usr/sbin /usr/bin/certbot renew --quiet >> /_myproject/writable/logs/cron-root.log
Check the logs in /var/log/letsencrypt/*.log, where you should find a message such as "No renewal failures", or the issue description if something went wrong.
F. Set permissions for subdirectories uniformly
The following script will set the same permissions for all subdirectories under /_data, requesting confirmation for each subdirectory:
#!/bin/bash
# set the top directory:
ROOT="/_data"
setPermissions() {
PATH="$1" # take first argument
if [ -d "$PATH" ]; then
# ask admin to confirm whether permissions will be set for each subdirectory
read -p "Set permissions for files and directories in $PATH (y/N)?" CONT
if [ "$CONT" = "y" ]; then
echo "[$(date +%Y-%m-%d\ %H:%M:%S)] Setting [nginx:nginx] for [$PATH]"
chown -R nginx:nginx $PATH
find $PATH -type d -print0 | xargs -0 chmod 755
find $PATH -type f -print0 | xargs -0 chmod 644
echo "[$(date +%Y-%m-%d\ %H:%M:%S)] Finished setting permissions in [$PATH]"
echo " "
fi
fi
}
# note: remove the "-prune" argument if you want to traverse unlimited subdirectory depth
# with "-prune" only the first child level is traversed
for f in $(find "$ROOT"/* -type d -prune); do setPermissions "$f"; done
echo "DONE!"
G. Set permissions for subdirectories individually
This is a typical script that needs to be executed after each website update. It ensures proper permissions for updated files and new directories, removes outdated cache files, creates missing directories, etc. Adjust it to your project specifics.
- Create a script under the project directory, e.g. /_myproject/_scripts/permissions.sh, with the content below.
- Run the script manually via the SSH command line as the root user.
#!/bin/bash
# --- set system-wide permissions (session, wsdlcache, opcache) ---
TMP="/var/lib/php/session"
echo "Setting [nginx:nginx] for [$TMP] and with writing permissions"
chown -R nginx:nginx $TMP
chmod -R uog+w $TMP
TMP="/var/lib/php/wsdlcache"
echo "Setting [nginx:nginx] for [$TMP] and with writing permissions"
chown -R nginx:nginx $TMP
chmod -R uog+w $TMP
TMP="/var/lib/php/opcache"
echo "Setting [nginx:nginx] for [$TMP] and with writing permissions"
chown -R nginx:nginx $TMP
chmod -R uog+w $TMP
# --- set website permissions ---
ROOT="/myproject"
# top directory owner nginx, set default directory / file permissions
TMP="$ROOT"
echo "Setting [nginx:nginx] for [$TMP] as READ-only"
chown -R nginx:nginx $TMP
find $TMP -type d -print0 | xargs -0 chmod 755
find $TMP -type f -print0 | xargs -0 chmod 644
# set permissions for directories NOT available to webserver - git repo, SSH scripts ..
TMP="$ROOT/.git"
echo "Setting [root:root] for [$TMP] with writing permissions"
chown -R root:root $TMP
chmod -R u+w $TMP
TMP="$ROOT/_scripts"
echo "Setting [root:root] for [$TMP] and NO writing permissions"
chown -R root:root $TMP
chmod -R uog-w-x $TMP
chmod u+x "$TMP/permissions.sh"
chmod u+x "$TMP/db-backup.sh"
chmod u+x "$TMP/service-restart.sh"
chmod u+x "$TMP/service-running.sh"
# make the CLI entry script for CRON executable; here we use the yii script of the Yii PHP framework
chmod u+x "$ROOT/app/yii"
# writable dirs
TMP="$ROOT/writable"
echo "Setting [nginx:nginx] for [$TMP] with writing permissions"
chmod -R u+w $TMP
# remove, create and set permission for assets directory
TMP="$ROOT/web/assets"
if [ ! -d "$TMP" ]; then
mkdir $TMP
echo "Created ASSETS directory with writing permissions [$TMP]"
fi
echo "Deleting ASSETS subdirectories [$TMP]"
rm -Rf $TMP/*
chown -R nginx:nginx $TMP
chmod -R u+w $TMP
# remove all cache files
TMP="$ROOT/writable/cache/*"
echo "Deleted files under cache directory [$TMP]"
rm -f $TMP
# make rss.xml and sitemap.xml writable to refresh these special files any time
TMP="$ROOT/web/rss.xml"
if [ -f "$TMP" ]; then
echo "Setting writing permissions for [$TMP]"
chmod u+w $TMP
fi
TMP="$ROOT/web/sitemap.xml"
if [ -f "$TMP" ]; then
echo "Setting writing permissions for [$TMP]"
chmod u+w $TMP
fi
# restart services if needed
read -p "Restart PHP-FPM (y/N)?" CONT
if [ "$CONT" = "y" ]; then
sudo systemctl restart php-fpm
echo "PHP-FPM restarted !"
fi
read -p "Restart NGINX (y/N)?" CONT
if [ "$CONT" = "y" ]; then
sudo systemctl restart nginx
echo "NGINX restarted !"
fi
read -p "Restart MariaDB/mySQL (y/N)?" CONT
if [ "$CONT" = "y" ]; then
# sudo systemctl restart mysql
sudo systemctl restart mariadb
echo "MariaDB/mySQL restarted !"
fi
read -p "Restart elasticsearch (y/N)?" CONT
if [ "$CONT" = "y" ]; then
sudo systemctl restart elasticsearch
echo "Elasticsearch restarted !"
fi
echo "DONE!"
H. Backup elasticsearch and SQLite
If running elasticsearch as a single node, it can be backed up by compressing the whole data directory specified in the configuration file, e.g. /etc/elasticsearch/elasticsearch.yml. However, this will not work in a clustered environment, since nodes must be properly configured and synced against the master node. Backing up SQLite is similar to backing up MySQL or MariaDB - here the database file is simply compressed. Both backups are handled by the script below:
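Note: compressing the SQLite file is only safe while nothing is writing to it. If the database may be in use, a consistent copy can be taken first with the sqlite3 CLI's .backup command and that copy compressed instead (a sketch, assuming the sqlite3 binary is installed):
sqlite3 /_data_sqlite/my.sqlite ".backup '/_data_sqlite/my.sqlite.bak'"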
#!/bin/bash
# source elasticsearch directory with data - must match ES config (!)
ES_DATA="/_data_es"
# target directory with backed-up files
ES_BAK="/_es_data_backup"
# elasticsearch compressed filename
ES_FNAME="es-myproject-backup-$(date +%Y%m%d-%H%M%S).zip"
# SQLite source database file
SQLITE_DATA="/_data_sqlite/my.sqlite"
# SQLite target compressed backup file
SQLITE_FNAME="my.sqlite-$(date +%Y%m%d-%H%M%S).zip"
echo " "
echo "--- Starting backup on $(date +%Y-%m-%d\ %H-%M-%S) ---"
echo "FOUND SETTINGS:"
echo "- Source ES dir: $ES_DATA"
echo "- Target ES backup file: $ES_BAK/$ES_FNAME"
echo "- Source my.sqlite file: $SQLITE_DATA"
echo "- Target my.sqlite backup file: $ES_BAK/$SQLITE_FNAME"
echo "--------------------------------"
###################################
# check dirs
###################################
# check ES valid data directories
if [ ! -d $ES_DATA ];
then
echo "Data directory not found in [$ES_DATA] - exiting!"
exit 1
fi
if [ ! -d $ES_DATA/nodes ];
then
echo "Data directory [nodes] not found in [$ES_DATA/nodes] - exiting!"
exit 1
fi
# create backup directory if missing
if [ ! -d $ES_BAK ];
then
mkdir $ES_BAK
sudo chown root:root $ES_BAK
fi
###################################
# backup DB SQLite
###################################
read -p "Backup SQLite file [$SQLITE_DATA] ? (y/N)" CONT
if [ "$CONT" = "y" ]; then
if [ ! -f $SQLITE_DATA ];
then
echo "Skipping - data file not found in [$SQLITE_DATA]!"
else
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Started ZIPPing my.sqlite .."
TMP_PARENT="$(dirname $SQLITE_DATA)"
TMP_FILE="$(basename $SQLITE_DATA)"
cd $TMP_PARENT
# zip by relative path in the current directory, without absolute parent paths - more suitable for restoring into any directory
zip -q $ES_BAK/$SQLITE_FNAME ./$TMP_FILE
# (zipping with absolute paths would embed the parent directories instead)
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] OK - zipped file to $ES_BAK/$SQLITE_FNAME"
fi
fi
###################################
# backup DB elasticsearch (ES)
###################################
read -p "Backup Elastic data directory [$ES_DATA] ? (y/N)" CONT
if [ "$CONT" = "y" ]; then
# stop ES service - takes approx. 5 secs
ps -ef | grep elasticsearch | grep -v grep > /dev/null
if [ $? -ne 0 ]
then
echo "ELASTICSEARCH NOT RUNNING - skipped stopping the service"
else
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Stopping Elastic .."
sudo systemctl stop elasticsearch
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Elastic stopped .."
fi
# zip recursively [-r] and quietly [-q] the ES data directory
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Started ZIPPing $ES_DATA .."
# zip all [*] files and directories in ES data with relative paths - no parent directories, easier manipulation when restoring
cd $ES_DATA
zip -r -q $ES_BAK/$ES_FNAME *
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] zipped file to $ES_BAK/$ES_FNAME"
# start ES - takes approx. 30-90 secs
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Starting Elastic .."
sudo systemctl start elasticsearch
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Elastic started .."
# show service status - dead|active|..
TMP=$(sudo systemctl show -p SubState elasticsearch | cut -d'=' -f2)
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Current elastic status: $TMP"
# delete backups older than 30 days
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Deleting backups older than 30 days .."
find $ES_BAK -type f -mtime +30 -name '*.zip' -execdir rm -- '{}' +
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] --- OK .. ES data backup complete ---"
fi
echo "--------------------------------"
exit 0
Set up a CRON job in e.g. /var/spool/cron/root and pipe in the "yes" user input:
# backup Elasticsearch and SQLite on Sundays at 03:15
# note: the yes command pipes a stream of "y" answers into the executed shell script, just like user input
15 3 * * SUN yes | /_myproject/_scripts/elastic-backup.sh >> /_myproject/writable/logs/cron-root.log
I. Restore elasticsearch and SQLite
This is the complementary script to the previous recipe H. Set the exact names of the restored file(s) in the variables RESTORE_FILE_ES and/or RESTORE_FILE_SQLITE, then execute the script:
#!/bin/bash
# set the name of restored files
RESTORE_FILE_ES="/_es_data_backup/es-bizdata-backup-20251231-235959.zip"
RESTORE_FILE_SQLITE="/_es_data_backup/my.sqlite-20251231-235959.zip"
# target dirs - elasticsearch data dir, must match ES config (!)
ES_DATA="/_data_es"
SQLITE_DATA="/_data_sqlite/my.sqlite"
# ask to confirm restoring SQLite
read -p "Restore SQLITE file [$RESTORE_FILE_SQLITE] ? (y/N)" CONT
if [ "$CONT" = "y" ]; then
if [ ! -f $RESTORE_FILE_SQLITE ];
then
echo "Source SQLITE file not found in [$RESTORE_FILE_SQLITE] - exiting!"
exit 1
fi
if [ -f $SQLITE_DATA ];
then
# rename the existing sqlite file rather than overwrite or delete it
TMP_FILE="$SQLITE_DATA-$(date +%Y%m%d-%H%M%S)"
mv $SQLITE_DATA $TMP_FILE
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Renamed existing $SQLITE_DATA to $TMP_FILE .."
fi
TMP_PARENT="$(dirname $SQLITE_DATA)"
cd $TMP_PARENT
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Started unzipping $RESTORE_FILE_SQLITE to $TMP_PARENT.."
unzip $RESTORE_FILE_SQLITE \*
sudo chown nginx:nginx $SQLITE_DATA
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Completed unzipping sqlite .."
fi
# ask to confirm restoring elasticsearch
read -p "Restore ES file [$RESTORE_FILE_ES] ? (y/N)" CONT
if [ "$CONT" = "y" ]; then
if [ ! -f $RESTORE_FILE_ES ];
then
echo "Source ES file not found in [$RESTORE_FILE_ES] - exiting!"
exit 1
fi
# stop ES service - takes approx. 3-10 secs
ps -ef | grep elasticsearch | grep -v grep > /dev/null
if [ $? -ne 0 ]
then
echo "ELASTICSEARCH NOT RUNNING - skipped stopping the service"
else
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Stopping Elastic .."
sudo systemctl stop elasticsearch
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Elastic stopped .."
fi
# rename data dir
if [ -d $ES_DATA ];
then
RENAMED="$ES_DATA-$(date +%Y%m%d-%H%M%S)"
mv $ES_DATA $RENAMED
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Renamed $ES_DATA to $RENAMED .."
fi
# create ES dir
if [ ! -d $ES_DATA ];
then
# create new empty directory
mkdir $ES_DATA
sudo chown elasticsearch:elasticsearch $ES_DATA
else
# error - renaming failed, the old data dir is still in place
echo "Elastic data directory $ES_DATA still exists - renaming failed, exiting!"
exit 1
fi
cd $ES_DATA
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Started unzipping ES .."
unzip $RESTORE_FILE_ES \*
sudo chown -R elasticsearch:elasticsearch $ES_DATA
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Completed unzipping ES .."
# start ES - takes approx. 30-90 secs
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Starting elastic .."
sudo systemctl start elasticsearch
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Elastic start attempt finished .."
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] --- OK .. ES data backup complete ---"
# show only service status - dead|active|..
TMP=$(sudo systemctl show -p SubState elasticsearch | cut -d'=' -f2)
echo "[$(date +%Y-%m-%d\ %H-%M-%S)] Current elastic status: $TMP"
fi
exit 0
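After the restore you may check that elasticsearch came up healthy, e.g. (assuming the default HTTP port 9200 on localhost):
curl -s http://localhost:9200/_cluster/health?pretty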