1
0
mirror of https://github.com/deajan/obackup.git synced 2025-05-10 20:23:41 +02:00

Compare commits

..

No commits in common. "master" and "v2.1beta3" have entirely different histories.

38 changed files with 3616 additions and 12527 deletions

View File

@ -1,13 +1,15 @@
# Necessary evil: if 'bash' is selected as language, travis will try to install ruby and fails language:
language: php bash
sudo: required
services: services:
- mysql mysql
os: os:
linux linux
osx
sudo:
required
before_script: before_script:
mysql -e 'CREATE DATABASE travistest;' mysql -e 'CREATE DATABASE travistest;'

View File

@ -9,25 +9,6 @@ KNOWN ISSUES
CHANGELOG CHANGELOG
--------- ---------
dd Mmm YYYY: obackup v2.1 RC2 released
--------------------------------------
- Added a default required config file revision
- ! Update script updated accordingly
- Updated ofunctions to use booleans instead of yes/no syntax which still works
- Fixed verbose rsync output not working
- Fixed a potential bash buffer overflow when very large file lists are logged
03 Jan 2019: obackup v2.1 RC1 released
--------------------------------------
- File backup rotation will always make copies instead of moving files, in order to let rsync do deltas even in local backup scheme
- Fixed non recursive backups being recursive (bug introduced in a8b6bcb)
- Fixed multiple trailing slashes when backing-up '/'
- Updated ofunctions
- Upgraded shunit2 test framework to v2.1.8pre (git commit 07bb329)
- Minor fixes
20 Jun 2017: obackup v2.1 beta3 released 20 Jun 2017: obackup v2.1 beta3 released
---------------------------------------- ----------------------------------------

View File

@ -1,6 +1,6 @@
#!/usr/bin/env bash #!/usr/bin/env bash
## dev pre-processor bootstrap rev 2019052001 ## dev pre-processor bootstrap rev 2017062001
## Yeah !!! A really tech sounding name... In fact it's just include emulation in bash ## Yeah !!! A really tech sounding name... In fact it's just include emulation in bash
function Usage { function Usage {
@ -8,8 +8,7 @@ function Usage {
echo "Creates and executes $0.tmp.sh" echo "Creates and executes $0.tmp.sh"
echo "Usage:" echo "Usage:"
echo "" echo ""
echo "$0 --program=osync|obackup|pmocr [options to pass to program]" echo "$0 --program=osync|osync_target_helper|obackup|pmocr [options to pass to program]"
echo "Can also be run with BASHVERBOSE=yes environment variable in order to prefix program with bash -x"
} }
@ -19,16 +18,16 @@ if [ ! -f "./merge.sh" ]; then
fi fi
bootstrapProgram="" bootstrapProgram=""
opts=() opts=""
outputFileName="$0" outputFileName="$0"
for i in "${@}"; do for i in "$@"; do
case "$i" in case $i in
--program=*) --program=*)
bootstrapProgram="${i##*=}" bootstrapProgram="${i##*=}"
;; ;;
*) *)
opts+=("$i") opts=$opts" $i"
;; ;;
esac esac
done done
@ -44,7 +43,7 @@ else
__PREPROCESSOR_Constants __PREPROCESSOR_Constants
if [ ! -f "$__PREPROCESSOR_PROGRAM_EXEC" ]; then if [ ! -f "$__PREPROCESSOR_PROGRAM_EXEC" ]; then
echo "Cannot find file $__PREPROCESSOR_PROGRAM executable [n_$bootstrapProgram.sh]." echo "Cannot find file [n_$bootstrapProgram.sh]."
exit 1 exit 1
fi fi
fi fi
@ -68,8 +67,4 @@ if type termux-fix-shebang > /dev/null 2>&1; then
termux-fix-shebang "$outputFileName.tmp.sh" termux-fix-shebang "$outputFileName.tmp.sh"
fi fi
if [ "$BASHVERBOSE" == "yes" ]; then "$outputFileName.tmp.sh" $opts
bash -x "$outputFileName.tmp.sh" "${opts[@]}"
else
"$outputFileName.tmp.sh" "${opts[@]}"
fi

View File

@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
SUBPROGRAM=[prgname] SUBPROGRAM=[prgname]
PROGRAM="$SUBPROGRAM-batch" # Batch program to run osync / obackup instances sequentially and rerun failed ones PROGRAM="$SUBPROGRAM-batch" # Batch program to run osync / obackup instances sequentially and rerun failed ones
AUTHOR="(L) 2013-2020 by Orsiris de Jong" AUTHOR="(L) 2013-2017 by Orsiris de Jong"
CONTACT="http://www.netpower.fr - ozy@netpower.fr" CONTACT="http://www.netpower.fr - ozy@netpower.fr"
PROGRAM_BUILD=2020031502 PROGRAM_BUILD=2016120401
## Runs an osync /obackup instance for every conf file found ## Runs an osync /obackup instance for every conf file found
## If an instance fails, run it again if time permits ## If an instance fails, run it again if time permits
@ -26,19 +26,36 @@ else
LOG_FILE=./$SUBPROGRAM-batch.log LOG_FILE=./$SUBPROGRAM-batch.log
fi fi
## Default directory where to store temporary run files
if [ -w /tmp ]; then
RUN_DIR=/tmp
elif [ -w /var/tmp ]; then
RUN_DIR=/var/tmp
else
RUN_DIR=.
fi
# No need to edit under this line ############################################################## # No need to edit under this line ##############################################################
include #### Logger SUBSET #### function _logger {
include #### CleanUp SUBSET #### local value="${1}" # What to log
include #### GenericTrapQuit SUBSET #### echo -e "$value" >> "$LOG_FILE"
}
function Logger {
local value="${1}" # What to log
local level="${2}" # Log level: DEBUG, NOTICE, WARN, ERROR, CRITIAL
prefix="$(date) - "
if [ "$level" == "CRITICAL" ]; then
_logger "$prefix\e[41m$value\e[0m"
elif [ "$level" == "ERROR" ]; then
_logger "$prefix\e[91m$value\e[0m"
elif [ "$level" == "WARN" ]; then
_logger "$prefix\e[93m$value\e[0m"
elif [ "$level" == "NOTICE" ]; then
_logger "$prefix$value"
elif [ "$level" == "DEBUG" ]; then
if [ "$DEBUG" == "yes" ]; then
_logger "$prefix$value"
fi
else
_logger "\e[41mLogger function called without proper loglevel.\e[0m"
_logger "$prefix$value"
fi
}
function CheckEnvironment { function CheckEnvironment {
## osync / obackup executable full path can be set here if it cannot be found on the system ## osync / obackup executable full path can be set here if it cannot be found on the system
@ -128,8 +145,6 @@ function Usage {
exit 128 exit 128
} }
trap GenericTrapQuit TERM EXIT HUP QUIT
opts="" opts=""
for i in "$@" for i in "$@"
do do

View File

@ -2,6 +2,8 @@
## Installer script suitable for osync / obackup / pmocr ## Installer script suitable for osync / obackup / pmocr
include #### _OFUNCTIONS_BOOTSTRAP SUBSET ####
PROGRAM=[prgname] PROGRAM=[prgname]
PROGRAM_VERSION=$(grep "PROGRAM_VERSION=" $PROGRAM.sh) PROGRAM_VERSION=$(grep "PROGRAM_VERSION=" $PROGRAM.sh)
@ -10,22 +12,33 @@ PROGRAM_BINARY=$PROGRAM".sh"
PROGRAM_BATCH=$PROGRAM"-batch.sh" PROGRAM_BATCH=$PROGRAM"-batch.sh"
SSH_FILTER="ssh_filter.sh" SSH_FILTER="ssh_filter.sh"
SCRIPT_BUILD=2020042901 SCRIPT_BUILD=2017041701
INSTANCE_ID="installer-$SCRIPT_BUILD"
## osync / obackup / pmocr / zsnap install script ## osync / obackup / pmocr / zsnap install script
## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8, 10 and 11 ## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8, 10 and 11
## Please adapt this to fit your distro needs ## Please adapt this to fit your distro needs
include #### OFUNCTIONS MICRO SUBSET ####
# Get current install.sh path from http://stackoverflow.com/a/246128/2635443 # Get current install.sh path from http://stackoverflow.com/a/246128/2635443
SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
_LOGGER_SILENT=false CONF_DIR=$FAKEROOT/etc/$PROGRAM
_STATS=1 BIN_DIR="$FAKEROOT/usr/local/bin"
ACTION="install" SERVICE_DIR_INIT=$FAKEROOT/etc/init.d
FAKEROOT="" # Should be /usr/lib/systemd/system, but /lib/systemd/system exists on debian & rhel / fedora
SERVICE_DIR_SYSTEMD_SYSTEM=$FAKEROOT/lib/systemd/system
SERVICE_DIR_SYSTEMD_USER=$FAKEROOT/etc/systemd/user
if [ "$PROGRAM" == "osync" ]; then
SERVICE_NAME="osync-srv"
elif [ "$PROGRAM" == "pmocr" ]; then
SERVICE_NAME="pmocr-srv"
fi
SERVICE_FILE_INIT="$SERVICE_NAME"
SERVICE_FILE_SYSTEMD_SYSTEM="$SERVICE_NAME@.service"
SERVICE_FILE_SYSTEMD_USER="$SERVICE_NAME@.service.user"
## Generic code
## Default log file ## Default log file
if [ -w "$FAKEROOT/var/log" ]; then if [ -w "$FAKEROOT/var/log" ]; then
@ -36,15 +49,13 @@ else
LOG_FILE="./$PROGRAM-install.log" LOG_FILE="./$PROGRAM-install.log"
fi fi
include #### QuickLogger SUBSET ####
include #### UrlEncode SUBSET #### include #### UrlEncode SUBSET ####
include #### GetLocalOS SUBSET #### include #### GetLocalOS SUBSET ####
include #### GetConfFileValue SUBSET #### include #### GetConfFileValue SUBSET ####
include #### CleanUp SUBSET ####
include #### GenericTrapQuit SUBSET ####
function SetLocalOSSettings { function SetLocalOSSettings {
USER=root USER=root
DO_INIT=true
# LOCAL_OS and LOCAL_OS_FULL are global variables set at GetLocalOS # LOCAL_OS and LOCAL_OS_FULL are global variables set at GetLocalOS
@ -54,12 +65,10 @@ function SetLocalOSSettings {
;; ;;
*"MacOSX"*) *"MacOSX"*)
GROUP=admin GROUP=admin
DO_INIT=false
;; ;;
*"Cygwin"*|*"Android"*|*"msys"*|*"BusyBox"*) *"msys"*|*"Cygwin"*)
USER="" USER=""
GROUP="" GROUP=""
DO_INIT=false
;; ;;
*) *)
GROUP=root GROUP=root
@ -67,12 +76,12 @@ function SetLocalOSSettings {
esac esac
if [ "$LOCAL_OS" == "Android" ] || [ "$LOCAL_OS" == "BusyBox" ]; then if [ "$LOCAL_OS" == "Android" ] || [ "$LOCAL_OS" == "BusyBox" ]; then
Logger "Cannot be installed on [$LOCAL_OS]. Please use $PROGRAM.sh directly." "CRITICAL" QuickLogger "Cannot be installed on [$LOCAL_OS]. Please use $PROGRAM.sh directly."
exit 1 exit 1
fi fi
if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ] && [ "$FAKEROOT" == "" ]); then if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ] && [ "$FAKEROOT" == "" ]); then
Logger "Must be run as $USER." "CRITICAL" QuickLogger "Must be run as $USER."
exit 1 exit 1
fi fi
@ -80,97 +89,66 @@ function SetLocalOSSettings {
} }
function GetInit { function GetInit {
if [ -f /sbin/openrc-run ]; then if [ -f /sbin/init ]; then
init="openrc"
Logger "Detected openrc." "NOTICE"
elif [ -f /sbin/init ]; then
if file /sbin/init | grep systemd > /dev/null; then if file /sbin/init | grep systemd > /dev/null; then
init="systemd" init="systemd"
Logger "Detected systemd." "NOTICE"
else else
init="initV" init="initV"
Logger "Detected initV." "NOTICE"
fi fi
else else
Logger "Can't detect initV, systemd or openRC. Service files won't be installed. You can still run $PROGRAM manually or via cron." "WARN" QuickLogger "Can't detect initV or systemd. Service files won't be installed. You can still run $PROGRAM manually or via cron."
init="none" init="none"
fi fi
} }
function CreateDir { function CreateDir {
local dir="${1}" local dir="${1}"
local dirMask="${2}"
local dirUser="${3}"
local dirGroup="${4}"
if [ ! -d "$dir" ]; then if [ ! -d "$dir" ]; then
( mkdir "$dir"
if [ $(IsInteger $dirMask) -eq 1 ]; then
umask $dirMask
fi
mkdir -p "$dir"
)
if [ $? == 0 ]; then if [ $? == 0 ]; then
Logger "Created directory [$dir]." "NOTICE" QuickLogger "Created directory [$dir]."
else else
Logger "Cannot create directory [$dir]." "CRITICAL" QuickLogger "Cannot create directory [$dir]."
exit 1 exit 1
fi fi
fi fi
if [ "$dirUser" != "" ]; then
userGroup="$dirUser"
if [ "$dirGroup" != "" ]; then
userGroup="$userGroup"":$dirGroup"
fi
chown "$userGroup" "$dir"
if [ $? != 0 ]; then
Logger "Could not set directory ownership on [$dir] to [$userGroup]." "CRITICAL"
exit 1
else
Logger "Set file ownership on [$dir] to [$userGroup]." "NOTICE"
fi
fi
} }
function CopyFile { function CopyFile {
local sourcePath="${1}" local sourcePath="${1}"
local destPath="${2}" local destPath="${2}"
local sourceFileName="${3}" local fileName="${3}"
local destFileName="${4}" local fileMod="${4}"
local fileMod="${5}" local fileUser="${5}"
local fileUser="${6}" local fileGroup="${6}"
local fileGroup="${7}" local overwrite="${7:-false}"
local overwrite="${8:-false}"
local userGroup="" local userGroup=""
local oldFileName
if [ "$destFileName" == "" ]; then if [ -f "$destPath/$fileName" ] && [ $overwrite == false ]; then
destFileName="$sourceFileName" oldFileName="$fileName"
fileName="$oldFileName.new"
cp "$sourcePath/$oldFileName" "$destPath/$fileName"
else
cp "$sourcePath/$fileName" "$destPath"
fi fi
if [ -f "$destPath/$destFileName" ] && [ $overwrite == false ]; then
destFileName="$sourceFileName.new"
Logger "Copying [$sourceFileName] to [$destPath/$destFileName]." "NOTICE"
fi
cp "$sourcePath/$sourceFileName" "$destPath/$destFileName"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot copy [$sourcePath/$sourceFileName] to [$destPath/$destFileName]. Make sure to run install script in the directory containing all other files." "CRITICAL" QuickLogger "Cannot copy [$fileName] to [$destPath]. Make sure to run install script in the directory containing all other files."
Logger "Also make sure you have permissions to write to [$BIN_DIR]." "ERROR" QuickLogger "Also make sure you have permissions to write to [$BIN_DIR]."
exit 1 exit 1
else else
Logger "Copied [$sourcePath/$sourceFileName] to [$destPath/$destFileName]." "NOTICE" QuickLogger "Copied [$fileName] to [$destPath]."
if [ "$(IsInteger $fileMod)" -eq 1 ]; then if [ "$fileMod" != "" ]; then
chmod "$fileMod" "$destPath/$destFileName" chmod "$fileMod" "$destPath/$fileName"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot set file permissions of [$destPath/$destFileName] to [$fileMod]." "CRITICAL" QuickLogger "Cannot set file permissions of [$destPath/$fileName] to [$fileMod]."
exit 1 exit 1
else else
Logger "Set file permissions to [$fileMod] on [$destPath/$destFileName]." "NOTICE" QuickLogger "Set file permissions to [$fileMod] on [$destPath/$fileName]."
fi fi
elif [ "$fileMod" != "" ]; then
Logger "Bogus filemod [$fileMod] for [$destPath] given." "WARN"
fi fi
if [ "$fileUser" != "" ]; then if [ "$fileUser" != "" ]; then
@ -180,12 +158,12 @@ function CopyFile {
userGroup="$userGroup"":$fileGroup" userGroup="$userGroup"":$fileGroup"
fi fi
chown "$userGroup" "$destPath/$destFileName" chown "$userGroup" "$destPath/$fileName"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Could not set file ownership on [$destPath/$destFileName] to [$userGroup]." "CRITICAL" QuickLogger "Could not set file ownership on [$destPath/$fileName] to [$userGroup]."
exit 1 exit 1
else else
Logger "Set file ownership on [$destPath/$destFileName] to [$userGroup]." "NOTICE" QuickLogger "Set file ownership on [$destPath/$fileName] to [$userGroup]."
fi fi
fi fi
fi fi
@ -201,7 +179,7 @@ function CopyExampleFiles {
for file in "${exampleFiles[@]}"; do for file in "${exampleFiles[@]}"; do
if [ -f "$SCRIPT_PATH/$file" ]; then if [ -f "$SCRIPT_PATH/$file" ]; then
CopyFile "$SCRIPT_PATH" "$CONF_DIR" "$file" "$file" "" "" "" false CopyFile "$SCRIPT_PATH" "$CONF_DIR" "$file" "" "" "" false
fi fi
done done
} }
@ -225,53 +203,29 @@ function CopyProgram {
fi fi
for file in "${binFiles[@]}"; do for file in "${binFiles[@]}"; do
CopyFile "$SCRIPT_PATH" "$BIN_DIR" "$file" "$file" 755 "$user" "$group" true CopyFile "$SCRIPT_PATH" "$BIN_DIR" "$file" 755 "$user" "$group" true
done done
} }
function CopyServiceFiles { function CopyServiceFiles {
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_SYSTEM" ]); then if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
CreateDir "$SERVICE_DIR_SYSTEMD_SYSTEM" CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_FILE_SYSTEMD_SYSTEM" "" "" "" true
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_FILE_SYSTEMD_SYSTEM" "" "" "" true if [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_SYSTEM_USER" ]; then
if [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_USER" ]; then CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_USER" "$SERVICE_FILE_SYSTEMD_USER" "" "" "" true
CreateDir "$SERVICE_DIR_SYSTEMD_USER"
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_USER" "$SERVICE_FILE_SYSTEMD_USER" "$SERVICE_FILE_SYSTEMD_USER" "" "" "" true
fi fi
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" ]; then QuickLogger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]."
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_SYSTEM" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" "" "" "" true QuickLogger "Can be activated with [systemctl start SERVICE_NAME@instance.conf] where instance.conf is the name of the config file in $CONF_DIR."
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE" QuickLogger "Can be enabled on boot with [systemctl enable $SERVICE_NAME@instance.conf]."
fi QuickLogger "In userland, active with [systemctl --user start $SERVICE_NAME@instance.conf]."
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" ]; then
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_USER" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" "" "" "" true
fi
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]." "NOTICE"
Logger "Can be activated with [systemctl start SERVICE_NAME@instance.conf] where instance.conf is the name of the config file in $CONF_DIR." "NOTICE"
Logger "Can be enabled on boot with [systemctl enable $SERVICE_NAME@instance.conf]." "NOTICE"
Logger "In userland, active with [systemctl --user start $SERVICE_NAME@instance.conf]." "NOTICE"
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_INIT" ] && [ -d "$SERVICE_DIR_INIT" ]); then elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_INIT" ] && [ -d "$SERVICE_DIR_INIT" ]); then
#CreateDir "$SERVICE_DIR_INIT" CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_INIT" "$SERVICE_FILE_INIT" "755" "" "" true
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_INIT" "$SERVICE_FILE_INIT" "$SERVICE_FILE_INIT" "755" "" "" true
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_INIT" ]; then QuickLogger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_INIT]."
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_INIT" "$TARGET_HELPER_SERVICE_FILE_INIT" "$TARGET_HELPER_SERVICE_FILE_INIT" "755" "" "" true QuickLogger "Can be activated with [service $SERVICE_FILE_INIT start]."
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE" QuickLogger "Can be enabled on boot with [chkconfig $SERVICE_FILE_INIT on]."
fi
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_INIT]." "NOTICE"
Logger "Can be activated with [service $SERVICE_FILE_INIT start]." "NOTICE"
Logger "Can be enabled on boot with [chkconfig $SERVICE_FILE_INIT on]." "NOTICE"
elif ([ "$init" == "openrc" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_OPENRC" ] && [ -d "$SERVICE_DIR_OPENRC" ]); then
# Rename service to usual service file
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_OPENRC" "$SERVICE_FILE_OPENRC" "$SERVICE_FILE_INIT" "755" "" "" true
if [ -f "$SCRPT_PATH/$TARGET_HELPER_SERVICE_FILE_OPENRC" ]; then
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_OPENRC" "$TARGET_HELPER_SERVICE_FILE_OPENRC" "$TARGET_HELPER_SERVICE_FILE_OPENRC" "755" "" "" true
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE"
fi
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_OPENRC]." "NOTICE"
Logger "Can be activated with [rc-update add $SERVICE_NAME.instance] where instance is a configuration file found in /etc/osync." "NOTICE"
else else
Logger "Cannot properly find how to deal with init on this system. Skipping service file installation." "NOTICE" QuickLogger "Cannot define what init style is in use on this system. Skipping service file installation."
fi fi
} }
@ -290,7 +244,7 @@ function Statistics {
fi fi
fi fi
Logger "Neiter wget nor curl could be used for. Cannot run statistics. Use the provided link please." "WARN" QuickLogger "Neiter wget nor curl could be used for. Cannot run statistics. Use the provided link please."
return 1 return 1
} }
@ -300,12 +254,12 @@ function RemoveFile {
if [ -f "$file" ]; then if [ -f "$file" ]; then
rm -f "$file" rm -f "$file"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Could not remove file [$file]." "ERROR" QuickLogger "Could not remove file [$file]."
else else
Logger "Removed file [$file]." "NOTICE" QuickLogger "Removed file [$file]."
fi fi
else else
Logger "File [$file] not found. Skipping." "NOTICE" QuickLogger "File [$file] not found. Skipping."
fi fi
} }
@ -319,17 +273,13 @@ function RemoveAll {
if [ ! -f "$BIN_DIR/osync.sh" ] && [ ! -f "$BIN_DIR/obackup.sh" ]; then # Check if any other program requiring ssh filter is present before removal if [ ! -f "$BIN_DIR/osync.sh" ] && [ ! -f "$BIN_DIR/obackup.sh" ]; then # Check if any other program requiring ssh filter is present before removal
RemoveFile "$BIN_DIR/$SSH_FILTER" RemoveFile "$BIN_DIR/$SSH_FILTER"
else else
Logger "Skipping removal of [$BIN_DIR/$SSH_FILTER] because other programs present that need it." "NOTICE" QuickLogger "Skipping removal of [$BIN_DIR/$SSH_FILTER] because other programs present that need it."
fi fi
RemoveFile "$SERVICE_DIR_SYSTEMD_SYSTEM/$SERVICE_FILE_SYSTEMD_SYSTEM" RemoveFile "$SERVICE_DIR_SYSTEMD_SYSTEM/$SERVICE_FILE_SYSTEMD_SYSTEM"
RemoveFile "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_USER" RemoveFile "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_SYSTEM"
RemoveFile "$SERVICE_DIR_INIT/$SERVICE_FILE_INIT" RemoveFile "$SERVICE_DIR_INIT/$SERVICE_FILE_INIT"
RemoveFile "$TARGET_HELPER_SERVICE_DIR_SYSTEMD_SYSTEM/$SERVICE_FILE_SYSTEMD_SYSTEM" QuickLogger "Skipping configuration files in [$CONF_DIR]. You may remove this directory manually."
RemoveFile "$TARGET_HELPER_SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_USER"
RemoveFile "$TARGET_HELPER_SERVICE_DIR_INIT/$SERVICE_FILE_INIT"
Logger "Skipping configuration files in [$CONF_DIR]. You may remove this directory manually." "NOTICE"
} }
function Usage { function Usage {
@ -338,92 +288,43 @@ function Usage {
echo "--silent Will log and bypass user interaction." echo "--silent Will log and bypass user interaction."
echo "--no-stats Used with --silent in order to refuse sending anonymous install stats." echo "--no-stats Used with --silent in order to refuse sending anonymous install stats."
echo "--remove Remove the program." echo "--remove Remove the program."
echo "--prefix=/path Use prefix to install path."
exit 127 exit 127
} }
############################## Script entry point _LOGGER_SILENT=false
_STATS=1
ACTION="install"
function GetCommandlineArguments { for i in "$@"
for i in "$@"; do do
case $i in case $i in
--prefix=*) --silent)
FAKEROOT="${i##*=}" _LOGGER_SILENT=true
;; ;;
--silent) --no-stats)
_LOGGER_SILENT=true _STATS=0
;; ;;
--no-stats) --remove)
_STATS=0 ACTION="uninstall"
;; ;;
--remove) --help|-h|-?)
ACTION="uninstall" Usage
;; esac
--help|-h|-?) done
Usage
;;
*)
Logger "Unknown option '$i'" "ERROR"
Usage
exit
;;
esac
done
}
GetCommandlineArguments "$@" if [ "$FAKEROOT" != "" ]; then
mkdir -p "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_USER" "$BIN_DIR"
CONF_DIR=$FAKEROOT/etc/$PROGRAM
BIN_DIR="$FAKEROOT/usr/local/bin"
SERVICE_DIR_INIT=$FAKEROOT/etc/init.d
# Should be /usr/lib/systemd/system, but /lib/systemd/system exists on debian & rhel / fedora
SERVICE_DIR_SYSTEMD_SYSTEM=$FAKEROOT/lib/systemd/system
SERVICE_DIR_SYSTEMD_USER=$FAKEROOT/etc/systemd/user
SERVICE_DIR_OPENRC=$FAKEROOT/etc/init.d
if [ "$PROGRAM" == "osync" ]; then
SERVICE_NAME="osync-srv"
TARGET_HELPER_SERVICE_NAME="osync-target-helper-srv"
TARGET_HELPER_SERVICE_FILE_INIT="$TARGET_HELPER_SERVICE_NAME"
TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM="$TARGET_HELPER_SERVICE_NAME@.service"
TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER="$TARGET_HELPER_SERVICE_NAME@.service.user"
TARGET_HELPER_SERVICE_FILE_OPENRC="$TARGET_HELPER_SERVICE_NAME-openrc"
elif [ "$PROGRAM" == "pmocr" ]; then
SERVICE_NAME="pmocr-srv"
fi fi
SERVICE_FILE_INIT="$SERVICE_NAME"
SERVICE_FILE_SYSTEMD_SYSTEM="$SERVICE_NAME@.service"
SERVICE_FILE_SYSTEMD_USER="$SERVICE_NAME@.service.user"
SERVICE_FILE_OPENRC="$SERVICE_NAME-openrc"
## Generic code
trap GenericTrapQuit TERM EXIT HUP QUIT
if [ ! -w "$(dirname $LOG_FILE)" ]; then
echo "Cannot write to log [$(dirname $LOG_FILE)]."
else
Logger "Script begin, logging to [$LOG_FILE]." "DEBUG"
fi
# Set default umask
umask 0022
GetLocalOS GetLocalOS
SetLocalOSSettings SetLocalOSSettings
# On Mac OS this always produces a warning which causes the installer to fail with exit code 2 GetInit
# Since we know it won't work anyway, and that's fine, just skip this step
if $DO_INIT; then
GetInit
fi
STATS_LINK="http://instcount.netpower.fr?program=$PROGRAM&version=$PROGRAM_VERSION&os=$OS&action=$ACTION" STATS_LINK="http://instcount.netpower.fr?program=$PROGRAM&version=$PROGRAM_VERSION&os=$OS&action=$ACTION"
if [ "$ACTION" == "uninstall" ]; then if [ "$ACTION" == "uninstall" ]; then
RemoveAll RemoveAll
Logger "$PROGRAM uninstalled." "NOTICE" QuickLogger "$PROGRAM uninstalled."
else else
CreateDir "$CONF_DIR" CreateDir "$CONF_DIR"
CreateDir "$BIN_DIR" CreateDir "$BIN_DIR"
@ -432,11 +333,11 @@ else
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "pmocr" ]; then if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "pmocr" ]; then
CopyServiceFiles CopyServiceFiles
fi fi
Logger "$PROGRAM installed. Use with $BIN_DIR/$PROGRAM_BINARY" "NOTICE" QuickLogger "$PROGRAM installed. Use with $BIN_DIR/$PROGRAM"
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then
echo "" QuickLogger ""
Logger "If connecting remotely, consider setup ssh filter to enhance security." "NOTICE" QuickLogger "If connecting remotely, consider setup ssh filter to enhance security."
echo "" QuickLogger ""
fi fi
fi fi
@ -444,7 +345,7 @@ if [ $_STATS -eq 1 ]; then
if [ $_LOGGER_SILENT == true ]; then if [ $_LOGGER_SILENT == true ]; then
Statistics Statistics
else else
Logger "In order to make usage statistics, the script would like to connect to $STATS_LINK" "NOTICE" QuickLogger "In order to make usage statistics, the script would like to connect to $STATS_LINK"
read -r -p "No data except those in the url will be send. Allow [Y/n] " response read -r -p "No data except those in the url will be send. Allow [Y/n] " response
case $response in case $response in
[nN]) [nN])

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +1,10 @@
#!/usr/bin/env bash #!/usr/bin/env bash
## MERGE 2020031501 ## MERGE 2017061901
## Merges ofunctions.sh and n_program.sh into program.sh ## Merges ofunctions.sh and n_program.sh into program.sh
## Adds installer ## Adds installer
PROGRAM=merge
INSTANCE_ID=dev
function Usage { function Usage {
echo "Merges ofunctions.sh and n_program.sh into debug_program.sh and ../program.sh" echo "Merges ofunctions.sh and n_program.sh into debug_program.sh and ../program.sh"
echo "Usage" echo "Usage"
@ -15,24 +12,30 @@ function Usage {
} }
function __PREPROCESSOR_Merge { function __PREPROCESSOR_Merge {
local nPROGRAM="$1" local PROGRAM="$1"
if [ -f "$nPROGRAM" ]; then VERSION=$(grep "PROGRAM_VERSION=" n_$PROGRAM.sh)
Logger "$nPROGRAM is not found in local path." "CRITICAL"
exit 1
fi
VERSION=$(grep "PROGRAM_VERSION=" n_$nPROGRAM.sh)
VERSION=${VERSION#*=} VERSION=${VERSION#*=}
__PREPROCESSOR_Constants __PREPROCESSOR_Constants
__PREPROCESSOR_Unexpand "n_$nPROGRAM.sh" "debug_$nPROGRAM.sh" source "ofunctions.sh"
if [ $? != 0 ]; then
echo "Please run $0 in dev directory with ofunctions.sh"
exit 1
fi
__PREPROCESSOR_Unexpand "n_$PROGRAM.sh" "debug_$PROGRAM.sh"
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "debug_$nPROGRAM.sh" __PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "debug_$PROGRAM.sh"
done done
__PREPROCESSOR_CleanDebug "debug_$nPROGRAM.sh" "../$nPROGRAM.sh" __PREPROCESSOR_CleanDebug "$PROGRAM"
rm -f tmp_$PROGRAM.sh
if [ $? != 0 ]; then
QuickLogger "Cannot remove tmp_$PROGRAM.sh"
exit 1
fi
} }
function __PREPROCESSOR_Constants { function __PREPROCESSOR_Constants {
@ -43,14 +46,11 @@ function __PREPROCESSOR_Constants {
__PREPROCESSOR_SUBSETS=( __PREPROCESSOR_SUBSETS=(
'#### OFUNCTIONS FULL SUBSET ####' '#### OFUNCTIONS FULL SUBSET ####'
'#### OFUNCTIONS MINI SUBSET ####' '#### OFUNCTIONS MINI SUBSET ####'
'#### OFUNCTIONS MICRO SUBSET ####'
'#### PoorMansRandomGenerator SUBSET ####'
'#### _OFUNCTIONS_BOOTSTRAP SUBSET ####' '#### _OFUNCTIONS_BOOTSTRAP SUBSET ####'
'#### RUN_DIR SUBSET ####'
'#### DEBUG SUBSET ####' '#### DEBUG SUBSET ####'
'#### TrapError SUBSET ####' '#### TrapError SUBSET ####'
'#### RemoteLogger SUBSET ####' '#### RemoteLogger SUBSET ####'
'#### Logger SUBSET ####' '#### QuickLogger SUBSET ####'
'#### GetLocalOS SUBSET ####' '#### GetLocalOS SUBSET ####'
'#### IsInteger SUBSET ####' '#### IsInteger SUBSET ####'
'#### UrlEncode SUBSET ####' '#### UrlEncode SUBSET ####'
@ -59,10 +59,6 @@ function __PREPROCESSOR_Constants {
'#### VerComp SUBSET ####' '#### VerComp SUBSET ####'
'#### GetConfFileValue SUBSET ####' '#### GetConfFileValue SUBSET ####'
'#### SetConfFileValue SUBSET ####' '#### SetConfFileValue SUBSET ####'
'#### CheckRFC822 SUBSET ####'
'#### CleanUp SUBSET ####'
'#### GenericTrapQuit SUBSET ####'
'#### FileMove SUBSET ####'
) )
} }
@ -72,7 +68,7 @@ function __PREPROCESSOR_Unexpand {
unexpand "$source" > "$destination" unexpand "$source" > "$destination"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot unexpand [$source] to [$destination]." "CRITICAL" QuickLogger "Cannot unexpand [$source] to [$destination]."
exit 1 exit 1
fi fi
} }
@ -85,75 +81,64 @@ function __PREPROCESSOR_MergeSubset {
sed -n "/$subsetBegin/,/$subsetEnd/p" "$subsetFile" > "$subsetFile.$subsetBegin" sed -n "/$subsetBegin/,/$subsetEnd/p" "$subsetFile" > "$subsetFile.$subsetBegin"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot sed subset [$subsetBegin -- $subsetEnd] in [$subsetFile]." "CRTICIAL" QuickLogger "Cannot sed subset [$subsetBegin -- $subsetEnd] in [$subsetFile]."
exit 1 exit 1
fi fi
sed "/include $subsetBegin/r $subsetFile.$subsetBegin" "$mergedFile" | grep -v -E "$subsetBegin\$|$subsetEnd\$" > "$mergedFile.tmp" sed "/include $subsetBegin/r $subsetFile.$subsetBegin" "$mergedFile" | grep -v -E "$subsetBegin\$|$subsetEnd\$" > "$mergedFile.tmp"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot add subset [$subsetBegin] to [$mergedFile]." "CRITICAL" QuickLogger "Cannot add subset [$subsetBegin] to [$mergedFile]."
exit 1 exit 1
fi fi
rm -f "$subsetFile.$subsetBegin" rm -f "$subsetFile.$subsetBegin"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot remove temporary subset [$subsetFile.$subsetBegin]." "CRITICAL" QuickLogger "Cannot remove temporary subset [$subsetFile.$subsetBegin]."
exit 1 exit 1
fi fi
rm -f "$mergedFile" rm -f "$mergedFile"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot remove merged original file [$mergedFile]." "CRITICAL" QuickLogger "Cannot remove merged original file [$mergedFile]."
exit 1 exit 1
fi fi
mv "$mergedFile.tmp" "$mergedFile" mv "$mergedFile.tmp" "$mergedFile"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot move merged tmp file to original [$mergedFile]." "CRITICAL" QuickLogger "Cannot move merged tmp file to original [$mergedFile]."
exit 1 exit 1
fi fi
} }
function __PREPROCESSOR_CleanDebug { function __PREPROCESSOR_CleanDebug {
local source="${1}" local PROGRAM="$1"
local destination="${2:-$source}"
sed '/'$PARANOIA_DEBUG_BEGIN'/,/'$PARANOIA_DEBUG_END'/d' "$source" | grep -v "$PARANOIA_DEBUG_LINE" > "$destination.tmp" sed '/'$PARANOIA_DEBUG_BEGIN'/,/'$PARANOIA_DEBUG_END'/d' debug_$PROGRAM.sh | grep -v "$PARANOIA_DEBUG_LINE" > ../$PROGRAM.sh
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot remove PARANOIA_DEBUG code from standard build." "CRITICAL" QuickLogger "Cannot remove PARANOIA_DEBUG code from standard build."
exit 1 exit 1
else
mv -f "$destination.tmp" "$destination"
if [ $? -ne 0 ]; then
Logger "Cannot move [$destination.tmp] to [$destination]." "CRITICAL"
exit 1
fi
fi fi
chmod +x "$source" chmod +x "debug_$PROGRAM.sh"
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot chmod [$source]." "CRITICAL" QuickLogger "Cannot chmod debug_$PROGRAM.sh"
exit 1 exit 1
else else
Logger "Prepared [$source]." "NOTICE" QuickLogger "Prepared ./debug_$PROGRAM.sh"
fi fi
chmod +x "../$PROGRAM.sh"
if [ "$source" != "$destination" ]; then if [ $? != 0 ]; then
QuickLogger "Cannot chmod $PROGRAM.sh"
chmod +x "$destination" exit 1
if [ $? != 0 ]; then else
Logger "Cannot chmod [$destination]." "CRITICAL" QuickLogger "Prepared ../$PROGRAM.sh"
exit 1
else
Logger "Prepared [$destination]." "NOTICE"
fi
fi fi
} }
function __PREPROCESSOR_CopyCommons { function __PREPROCESSOR_CopyCommons {
local nPROGRAM="$1" local PROGRAM="$1"
sed "s/\[prgname\]/$nPROGRAM/g" common_install.sh > ../install.sh sed "s/\[prgname\]/$PROGRAM/g" common_install.sh > ../install.sh
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot assemble install." "CRITICAL" QuickLogger "Cannot assemble install."
exit 1 exit 1
fi fi
@ -161,34 +146,45 @@ function __PREPROCESSOR_CopyCommons {
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../install.sh" __PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../install.sh"
done done
__PREPROCESSOR_CleanDebug "../install.sh" #sed "s/\[version\]/$VERSION/g" ../tmp_install.sh > ../install.sh
#if [ $? != 0 ]; then
# QuickLogger "Cannot change install version."
# exit 1
#fi
if [ -f "common_batch.sh" ]; then if [ -f "common_batch.sh" ]; then
sed "s/\[prgname\]/$nPROGRAM/g" common_batch.sh > ../$nPROGRAM-batch.sh sed "s/\[prgname\]/$PROGRAM/g" common_batch.sh > ../$PROGRAM-batch.sh
if [ $? != 0 ]; then if [ $? != 0 ]; then
Logger "Cannot assemble batch runner." "CRITICAL" QuickLogger "Cannot assemble batch runner."
exit 1 exit 1
fi fi
chmod +x ../$PROGRAM-batch.sh
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do if [ $? != 0 ]; then
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../$nPROGRAM-batch.sh" QuickLogger "Cannot chmod $PROGRAM-batch.sh"
done exit 1
else
__PREPROCESSOR_CleanDebug "../$nPROGRAM-batch.sh" QuickLogger "Prepared ../$PROGRAM-batch.sh"
fi
fi
chmod +x ../install.sh
if [ $? != 0 ]; then
QuickLogger "Cannot chmod install.sh"
exit 1
else
QuickLogger "Prepared ../install.sh"
fi
rm -f ../tmp_install.sh
if [ $? != 0 ]; then
QuickLogger "Cannot chmod $PROGRAM.sh"
exit 1
fi fi
} }
# If sourced don't do anything # If sourced don't do anything
if [ "$(basename $0)" == "merge.sh" ]; then if [ "$(basename $0)" == "merge.sh" ]; then
source "./ofunctions.sh"
if [ $? != 0 ]; then
echo "Please run $0 in dev directory with ofunctions.sh"
exit 1
fi
trap GenericTrapQuit TERM EXIT HUP QUIT
if [ "$1" == "osync" ]; then if [ "$1" == "osync" ]; then
__PREPROCESSOR_Merge osync __PREPROCESSOR_Merge osync
__PREPROCESSOR_Merge osync_target_helper
__PREPROCESSOR_CopyCommons osync __PREPROCESSOR_CopyCommons osync
elif [ "$1" == "obackup" ]; then elif [ "$1" == "obackup" ]; then
__PREPROCESSOR_Merge obackup __PREPROCESSOR_Merge obackup
@ -198,7 +194,6 @@ if [ "$(basename $0)" == "merge.sh" ]; then
__PREPROCESSOR_CopyCommons pmocr __PREPROCESSOR_CopyCommons pmocr
else else
echo "No valid program given." echo "No valid program given."
Usage
exit 1 exit 1
fi fi
fi fi

515
dev/n_obackup.sh Normal file → Executable file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -4,7 +4,5 @@
#SC1091 = not following source #SC1091 = not following source
#SC2086 = quoting errors (shellcheck is way too picky about quoting) #SC2086 = quoting errors (shellcheck is way too picky about quoting)
#SC2120 = only for debug version #SC2120 = only for debug version
#SC2034 = unused variabled (can be ignored in ofunctions.sh)
#SC2068 = bad array usage (can be ignored in ofunctions.sh)
shellcheck -e SC1090,SC1091,SC2086,SC2119,SC2120 $@ shellcheck -e SC1090,SC1091,SC2086,SC2119,SC2120 $1

View File

@ -2,9 +2,9 @@
###### obackup - Local or Remote, push or pull backup script for files & mysql databases ###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr) ###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
[GENERAL] ###### GENERAL BACKUP OPTIONS
CONFIG_FILE_REVISION=2.1
## Backup identification string. ## Backup identification string.
INSTANCE_ID="local-test" INSTANCE_ID="local-test"
@ -13,29 +13,27 @@ INSTANCE_ID="local-test"
LOGFILE="" LOGFILE=""
## Elements to backup ## Elements to backup
SQL_BACKUP=no SQL_BACKUP=yes
FILE_BACKUP=yes FILE_BACKUP=yes
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push]. ## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server. ## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=local BACKUP_TYPE=local
[BACKUP STORAGE] ###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system) ## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-local" SQL_STORAGE="${HOME}/obackup-storage/sql-local"
FILE_STORAGE="${HOME}/obackup-storage/files-local" FILE_STORAGE="${HOME}/obackup-storage/files-local"
## Encryption ## Encryption
ENCRYPTION=no ENCRYPTION=yes
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system) ## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-local" CRYPT_STORAGE="${HOME}/obackup-storage/crypt-local"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys ## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys
GPG_RECIPIENT="John Doe" GPG_RECIPIENT="John Doe"
PARALLEL_ENCRYPTION_PROCESSES=""
## Create backup directories if they do not exist ## Create backup directories if they do not exist
CREATE_DIRS=yes CREATE_DIRS=yes
@ -55,17 +53,13 @@ GET_BACKUP_SIZE=yes
SQL_WARN_MIN_SPACE=1048576 SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576 FILE_WARN_MIN_SPACE=1048576
[REMOTE_OPTIONS] ###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied. ## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/" REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information. ## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa" SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
SSH_PASSWORD_FILE=""
_REMOTE_TOKEN="SomeAlphaNumericToken9"
## ssh compression should be used unless your remote connection is good enough (LAN) ## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes SSH_COMPRESSION=yes
@ -85,7 +79,7 @@ REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled. ## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no SUDO_EXEC=no
[DATABASE BACKUP SETTINGS] ###### DATABASE SPECIFIC OPTIONS
## Database backup user ## Database backup user
SQL_USER=root SQL_USER=root
@ -93,9 +87,9 @@ SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list. ## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task. ## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod" DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces. ## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST=mysql DATABASES_LIST="mysql"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task. ## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds. ## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
@ -110,7 +104,7 @@ MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable. ## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3 COMPRESSION_LEVEL=3
[FILE BACKUP SETTINGS] ###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task. ## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task. ## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
@ -118,8 +112,8 @@ COMPRESSION_LEVEL=3
## You can exclude directories from the avove backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST. ## You can exclude directories from the avove backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Directories backup list. List of semicolon separated directories that will be backed up. ## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST=/root/obackup-testdata/nonPresentData DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
RECURSIVE_DIRECTORY_LIST=/root/obackup-testdata/nonPresentDataRecursive RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded" RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns) ## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
@ -140,8 +134,6 @@ RSYNC_EXCLUDE_FROM=""
## List separator char. You may set an alternative separator char for your directories lists above. ## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";" PATH_SEPARATOR_CHAR=";"
RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions ## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes PRESERVE_PERMISSIONS=yes
@ -185,12 +177,10 @@ BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation. ## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync RSYNC_EXECUTABLE=rsync
[ALERT_OPTIONS] ###### ALERT OPTIONS
## Alert email addresses separated by a space character ## Alert email addresses separated by a space character
DESTINATION_MAILS="" DESTINATION_MAILS=""
MAIL_BODY_CHARSET=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/ ## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/
SENDER_MAIL="alert@your.system.tld" SENDER_MAIL="alert@your.system.tld"
@ -201,7 +191,7 @@ SMTP_ENCRYPTION=none
SMTP_USER= SMTP_USER=
SMTP_PASSWORD= SMTP_PASSWORD=
[BACKUP SETTINGS] ###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only. ## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution. ## Hard max exec time generates a warning and stops the whole backup execution.
@ -217,7 +207,7 @@ ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7 ROTATE_FILE_COPIES=7
[EXECUTION_HOOKS] ###### EXECUTION HOOKS
## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set). ## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data. ## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.

View File

@ -2,9 +2,9 @@
###### obackup - Local or Remote, push or pull backup script for files & mysql databases ###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr) ###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
[GENERAL] ###### GENERAL BACKUP OPTIONS
CONFIG_FILE_REVISION=2.1
## Backup identification string. ## Backup identification string.
INSTANCE_ID="local-test" INSTANCE_ID="local-test"
@ -20,7 +20,7 @@ FILE_BACKUP=yes
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server. ## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=local BACKUP_TYPE=local
[BACKUP STORAGE] ###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system) ## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql" SQL_STORAGE="${HOME}/obackup-storage/sql"
@ -28,12 +28,6 @@ FILE_STORAGE="${HOME}/obackup-storage/files"
## Backup encryption using GPG and duplicity. Feature not ready yet. ## Backup encryption using GPG and duplicity. Feature not ready yet.
ENCRYPTION=no ENCRYPTION=no
CRYPT_STORAGE="/home/storage/crypt"
GPG_RECIPIENT="Your Name used with GPG signature"
PARALLEL_ENCRYPTION_PROCESSES=""
## Create backup directories if they do not exist ## Create backup directories if they do not exist
CREATE_DIRS=yes CREATE_DIRS=yes
@ -53,17 +47,13 @@ GET_BACKUP_SIZE=yes
SQL_WARN_MIN_SPACE=1048576 SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576 FILE_WARN_MIN_SPACE=1048576
[REMOTE_OPTIONS] ###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied. ## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/" REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information. ## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa" SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
SSH_PASSWORD_FILE=""
_REMOTE_TOKEN="SomeAlphaNumericToken9"
## ssh compression should be used unless your remote connection is good enough (LAN) ## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes SSH_COMPRESSION=yes
@ -83,7 +73,7 @@ REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled. ## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no SUDO_EXEC=no
[DATABASE BACKUP SETTINGS] ###### DATABASE SPECIFIC OPTIONS
## Database backup user ## Database backup user
SQL_USER=root SQL_USER=root
@ -108,7 +98,7 @@ MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable. ## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3 COMPRESSION_LEVEL=3
[FILE BACKUP SETTINGS] ###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task. ## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task. ## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
@ -138,8 +128,6 @@ RSYNC_EXCLUDE_FROM=""
## List separator char. You may set an alternative separator char for your directories lists above. ## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";" PATH_SEPARATOR_CHAR=";"
RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions ## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes PRESERVE_PERMISSIONS=yes
@ -183,12 +171,10 @@ BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation. ## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync RSYNC_EXECUTABLE=rsync
[ALERT_OPTIONS] ###### ALERT OPTIONS
## Alert email addresses separated by a space character ## Alert email addresses separated by a space character
DESTINATION_MAILS="" DESTINATION_MAILS=""
MAIL_BODY_CHARSET=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/ ## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/
SENDER_MAIL="alert@your.system.tld" SENDER_MAIL="alert@your.system.tld"
@ -199,7 +185,7 @@ SMTP_ENCRYPTION=none
SMTP_USER= SMTP_USER=
SMTP_PASSWORD= SMTP_PASSWORD=
[BACKUP SETTINGS] ###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only. ## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution. ## Hard max exec time generates a warning and stops the whole backup execution.
@ -215,7 +201,7 @@ ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=no ROTATE_FILE_BACKUPS=no
ROTATE_FILE_COPIES=7 ROTATE_FILE_COPIES=7
[EXECUTION_HOOKS] ###### EXECUTION HOOKS
## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set). ## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data. ## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.

View File

@ -29,7 +29,7 @@ RSYNC_EXECUTABLE=rsync
## Remote options (will make backups of remote system through ssh tunnel, public RSA key need to be put into /home/.ssh/authorized_keys in remote users home directory) ## Remote options (will make backups of remote system through ssh tunnel, public RSA key need to be put into /home/.ssh/authorized_keys in remote users home directory)
REMOTE_BACKUP=yes REMOTE_BACKUP=yes
SSH_RSA_PRIVATE_KEY=${HOME}/.ssh/id_rsa_local_obackup_tests SSH_RSA_PRIVATE_KEY=${HOME}/.ssh/id_rsa_local
REMOTE_USER=root REMOTE_USER=root
REMOTE_HOST=localhost REMOTE_HOST=localhost
REMOTE_PORT=22 REMOTE_PORT=22
@ -44,7 +44,7 @@ REMOTE_3RD_PARTY_HOST="www.kernel.org www.google.com"
SQL_USER=root SQL_USER=root
## Save all databases except the ones specified in the exlude list. Every found database will be backed up as separate task (see documentation for explanation about tasks) ## Save all databases except the ones specified in the exlude list. Every found database will be backed up as separate task (see documentation for explanation about tasks)
DATABASES_ALL=yes DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod" DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
# Alternatively, you can specifiy a manual list of databases to backup separated by spaces # Alternatively, you can specifiy a manual list of databases to backup separated by spaces
DATABASES_LIST="" DATABASES_LIST=""
## Max backup execution time per DB task. Soft is warning only. Hard is warning, stopping backup task and processing next one. Time is specified in seconds ## Max backup execution time per DB task. Soft is warning only. Hard is warning, stopping backup task and processing next one. Time is specified in seconds

View File

@ -2,9 +2,9 @@
###### obackup - Local or Remote, push or pull backup script for files & mysql databases ###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr) ###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
[GENERAL] ###### GENERAL BACKUP OPTIONS
CONFIG_FILE_REVISION=2.1
## Backup identification string. ## Backup identification string.
INSTANCE_ID="pull-test" INSTANCE_ID="pull-test"
@ -20,7 +20,7 @@ FILE_BACKUP=yes
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server. ## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=pull BACKUP_TYPE=pull
[BACKUP STORAGE] ###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system) ## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-pull" SQL_STORAGE="${HOME}/obackup-storage/sql-pull"
@ -34,8 +34,6 @@ CRYPT_STORAGE="${HOME}/obackup-storage/crypt-pull"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys ## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys
GPG_RECIPIENT="John Doe" GPG_RECIPIENT="John Doe"
PARALLEL_ENCRYPTION_PROCESSES=""
## Create backup directories if they do not exist ## Create backup directories if they do not exist
CREATE_DIRS=yes CREATE_DIRS=yes
@ -55,13 +53,13 @@ GET_BACKUP_SIZE=yes
SQL_WARN_MIN_SPACE=1048576 SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576 FILE_WARN_MIN_SPACE=1048576
[REMOTE_OPTIONS] ###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied. ## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://root@localhost:22/" REMOTE_SYSTEM_URI="ssh://root@localhost:49999/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information. ## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local_obackup_tests" SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local"
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed. ## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
SSH_PASSWORD_FILE="" SSH_PASSWORD_FILE=""
@ -86,7 +84,7 @@ REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled. ## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no SUDO_EXEC=no
[DATABASE BACKUP SETTINGS] ###### DATABASE SPECIFIC OPTIONS
## Database backup user ## Database backup user
SQL_USER=root SQL_USER=root
@ -94,7 +92,7 @@ SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list. ## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task. ## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod" DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces. ## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST=mysql DATABASES_LIST=mysql
@ -111,7 +109,7 @@ MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable. ## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3 COMPRESSION_LEVEL=3
[FILE BACKUP SETTINGS] ###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task. ## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task. ## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
@ -141,8 +139,6 @@ RSYNC_EXCLUDE_FROM=""
## List separator char. You may set an alternative separator char for your directories lists above. ## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";" PATH_SEPARATOR_CHAR=";"
RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions ## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes PRESERVE_PERMISSIONS=yes
@ -186,12 +182,10 @@ BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation. ## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync RSYNC_EXECUTABLE=rsync
[ALERT_OPTIONS] ###### ALERT OPTIONS
## Alert email addresses separated by a space character ## Alert email addresses separated by a space character
DESTINATION_MAILS="" DESTINATION_MAILS=""
MAIL_BODY_CHARSET=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/ ## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/
SENDER_MAIL="alert@your.system.tld" SENDER_MAIL="alert@your.system.tld"
@ -202,7 +196,7 @@ SMTP_ENCRYPTION=none
SMTP_USER= SMTP_USER=
SMTP_PASSWORD= SMTP_PASSWORD=
[BACKUP SETTINGS] ###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only. ## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution. ## Hard max exec time generates a warning and stops the whole backup execution.
@ -218,7 +212,7 @@ ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7 ROTATE_FILE_COPIES=7
[EXECUTION_HOOKS] ###### EXECUTION HOOKS
## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set). ## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data. ## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.

View File

@ -2,9 +2,9 @@
###### obackup - Local or Remote, push or pull backup script for files & mysql databases ###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr) ###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
[GENERAL] ###### GENERAL BACKUP OPTIONS
CONFIG_FILE_REVISION=2.1
## Backup identification string. ## Backup identification string.
INSTANCE_ID="push-test" INSTANCE_ID="push-test"
@ -20,7 +20,7 @@ FILE_BACKUP=yes
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server. ## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=push BACKUP_TYPE=push
[BACKUP STORAGE] ###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system) ## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-push" SQL_STORAGE="${HOME}/obackup-storage/sql-push"
@ -34,8 +34,6 @@ CRYPT_STORAGE="${HOME}/obackup-storage/crypt-push"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys ## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys
GPG_RECIPIENT="John Doe" GPG_RECIPIENT="John Doe"
PARALLEL_ENCRYPTION_PROCESSES=""
## Create backup directories if they do not exist ## Create backup directories if they do not exist
CREATE_DIRS=yes CREATE_DIRS=yes
@ -55,13 +53,13 @@ GET_BACKUP_SIZE=yes
SQL_WARN_MIN_SPACE=1048576 SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576 FILE_WARN_MIN_SPACE=1048576
[REMOTE_OPTIONS] ###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied. ## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://root@localhost:22/" REMOTE_SYSTEM_URI="ssh://root@localhost:49999/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information. ## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local_obackup_tests" SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local"
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed. ## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
SSH_PASSWORD_FILE="" SSH_PASSWORD_FILE=""
@ -86,7 +84,7 @@ REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled. ## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no SUDO_EXEC=no
[DATABASE BACKUP SETTINGS] ###### DATABASE SPECIFIC OPTIONS
## Database backup user ## Database backup user
SQL_USER=root SQL_USER=root
@ -94,7 +92,7 @@ SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list. ## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task. ## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod" DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces. ## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST=mysql DATABASES_LIST=mysql
@ -111,7 +109,7 @@ MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable. ## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3 COMPRESSION_LEVEL=3
[FILE BACKUP SETTINGS] ###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task. ## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task. ## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
@ -141,8 +139,6 @@ RSYNC_EXCLUDE_FROM=""
## List separator char. You may set an alternative separator char for your directories lists above. ## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";" PATH_SEPARATOR_CHAR=";"
RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions ## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes PRESERVE_PERMISSIONS=yes
@ -186,12 +182,10 @@ BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation. ## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync RSYNC_EXECUTABLE=rsync
[ALERT_OPTIONS] ###### ALERT OPTIONS
## Alert email addresses separated by a space character ## Alert email addresses separated by a space character
DESTINATION_MAILS="" DESTINATION_MAILS=""
MAIL_BODY_CHARSET=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/ ## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/
SENDER_MAIL="alert@your.system.tld" SENDER_MAIL="alert@your.system.tld"
@ -202,7 +196,7 @@ SMTP_ENCRYPTION=none
SMTP_USER= SMTP_USER=
SMTP_PASSWORD= SMTP_PASSWORD=
[BACKUP SETTINGS] ###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only. ## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution. ## Hard max exec time generates a warning and stops the whole backup execution.
@ -218,7 +212,7 @@ ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7 ROTATE_FILE_COPIES=7
[EXECUTION_HOOKS] ###### EXECUTION HOOKS
## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set). ## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data. ## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.

View File

@ -1,22 +1,8 @@
#!/usr/bin/env bash #!/usr/bin/env bash
## obackup basic tests suite 2020050401
# Supported environment variables
# TRAVIS_RUN=[true|false]
# SSH_PORT=22
# SKIP_REMOTE=[true|false]
#TODO Encrypted Pull runs on F25 fail for decryption #TODO Encrypted Pull runs on F25 fail for decryption
# If gpg key generation hangs, please install and configure rngd service
#eg yum install rng-tools ; systemctl start rngd
# or yum install epel-release && yum innstall haveged && systemctl start haveged
if [ "$SKIP_REMOTE" == "" ]; then ## obackup basic tests suite 2017040801
SKIP_REMOTE=false
fi
OBACKUP_DIR="$(pwd)" OBACKUP_DIR="$(pwd)"
OBACKUP_DIR=${OBACKUP_DIR%%/dev*} OBACKUP_DIR=${OBACKUP_DIR%%/dev*}
@ -76,7 +62,6 @@ DATABASE_EXCLUDED="information_schema.sql.xz"
CRYPT_EXTENSION=".obackup.gpg" CRYPT_EXTENSION=".obackup.gpg"
ROTATE_1_EXTENSION=".obackup.1" ROTATE_1_EXTENSION=".obackup.1"
ROTATE_2_EXTENSION=".obackup.2"
PASSFILE="passfile" PASSFILE="passfile"
CRYPT_TESTFILE="testfile" CRYPT_TESTFILE="testfile"
@ -87,9 +72,9 @@ OBACKUP_MIN_VERSION=x
OBACKUP_IS_STABLE=maybe OBACKUP_IS_STABLE=maybe
function SetupSSH { function SetupSSH {
echo -e 'y\n'| ssh-keygen -t rsa -b 2048 -N "" -f "${HOME}/.ssh/id_rsa_local_obackup_tests" echo -e 'y\n'| ssh-keygen -t rsa -b 2048 -N "" -f "${HOME}/.ssh/id_rsa_local"
if ! grep "$(cat ${HOME}/.ssh/id_rsa_local_obackup_tests.pub)" "${HOME}/.ssh/authorized_keys"; then if ! grep "$(cat ${HOME}/.ssh/id_rsa_local.pub)" "${HOME}/.ssh/authorized_keys"; then
echo "from=\"*\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,command=\"/usr/local/bin/ssh_filter.sh SomeAlphaNumericToken9\" $(cat ${HOME}/.ssh/id_rsa_local_obackup_tests.pub)" >> "${HOME}/.ssh/authorized_keys" echo "from=\"*\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty,command=\"/usr/local/bin/ssh_filter.sh SomeAlphaNumericToken9\" $(cat ${HOME}/.ssh/id_rsa_local.pub)" >> "${HOME}/.ssh/authorized_keys"
fi fi
chmod 600 "${HOME}/.ssh/authorized_keys" chmod 600 "${HOME}/.ssh/authorized_keys"
@ -104,10 +89,13 @@ function SetupSSH {
} }
function RemoveSSH { function RemoveSSH {
if [ -f "${HOME}/.ssh/id_rsa_local_obackup_tests" ]; then local pubkey
echo "Restoring SSH authorized_keys file"
sed -i.bak "s|.*$(cat "${HOME}/.ssh/id_rsa_local_obackup_tests.pub")||g" "${HOME}/.ssh/authorized_keys" if [ -f "${HOME}/.ssh/id_rsa_local" ]; then
rm -f "${HOME}/.ssh/{id_rsa_local_obackup_tests.pub,id_rsa_local_obackup_tests}"
pubkey=$(cat "${HOME}/.ssh/id_rsa_local.pub")
sed -i.bak "s|.*$pubkey.*||g" "${HOME}/.ssh/authorized_keys"
rm -f "${HOME}/.ssh/{id_rsa_local.pub,id_rsa_local}"
fi fi
} }
@ -139,17 +127,16 @@ Passphrase: PassPhrase123
%commit %commit
%echo done %echo done
EOF EOF
if [ "$TRAVIS_RUN" == true ]; then
if type apt-get > /dev/null 2>&1; then
sudo apt-get install rng-tools
fi
# Setup fast entropy if type apt-get > /dev/null 2>&1; then
if type rngd > /dev/null 2>&1; then sudo apt-get install rng-tools
$SUDO_CMD rngd -r /dev/urandom fi
else
echo "No rngd support" # Setup fast entropy
fi if type rngd > /dev/null 2>&1; then
$SUDO_CMD rngd -r /dev/urandom
else
echo "No rngd support"
fi fi
$CRYPT_TOOL --batch --gen-key gpgcommand $CRYPT_TOOL --batch --gen-key gpgcommand
@ -178,27 +165,27 @@ function oneTimeSetUp () {
if [ "$TRAVIS_RUN" == true ]; then if [ "$TRAVIS_RUN" == true ]; then
echo "Running with travis settings" echo "Running with travis settings"
REMOTE_USER="travis" REMOTE_USER="travis"
RHOST_PING=false RHOST_PING="no"
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" "" SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" ""
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" "" SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" ""
# Config value didn't have S at the end in old files # Config value didn't have S at the end in old files
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" "" SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" ""
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" false SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" "no"
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" false SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" "no"
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" false SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" "no"
else else
echo "Running with local settings" echo "Running with local settings"
REMOTE_USER="root" REMOTE_USER="root"
RHOST_PING=true RHOST_PING="yes"
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\"" SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\""
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\"" SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\""
# Config value didn't have S at the end in old files # Config value didn't have S at the end in old files
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" "\"www.kernel.org www.google.com\"" SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" "\"www.kernel.org www.google.com\""
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" true SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" "yes"
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" true SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" "yes"
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" true SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" "yes"
fi fi
# Get default ssh port from env # Get default ssh port from env
@ -218,7 +205,7 @@ function oneTimeSetUp () {
SetupGPG SetupGPG
if [ "$SKIP_REMOTE" != true ]; then if [ "$SKIP_REMOTE" != "yes" ]; then
SetupSSH SetupSSH
fi fi
@ -233,22 +220,20 @@ function oneTimeSetUp () {
# Set basic values that could get changed later # Set basic values that could get changed later
for i in "$LOCAL_CONF" "$PULL_CONF" "$PUSH_CONF"; do for i in "$LOCAL_CONF" "$PULL_CONF" "$PUSH_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$i" "ENCRYPTION" "no"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" true SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "yes"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql" SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData" SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive" SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
done done
} }
function oneTimeTearDown () { function oneTimeTearDown () {
SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" "$OBACKUP_IS_STABLE" SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" "$OBACKUP_IS_STABLE"
if [ "$SKIP_REMOTE" != true ]; then RemoveSSH
RemoveSSH
fi
#TODO: uncomment this when dev is done #TODO: uncomment this when dev is done
#rm -rf "$SOURCE_DIR" #rm -rf "$SOURCE_DIR"
@ -320,7 +305,7 @@ function test_Merge () {
assertEquals "Install failed" "0" $? assertEquals "Install failed" "0" $?
# Set obackup version to stable while testing to avoid warning message # Set obackup version to stable while testing to avoid warning message
SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" true SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" "yes"
} }
# Keep this function to check GPG behavior depending on OS. (GPG 2.1 / GPG 2.0x / GPG 1.4 don't behave the same way) # Keep this function to check GPG behavior depending on OS. (GPG 2.1 / GPG 2.0x / GPG 1.4 don't behave the same way)
@ -331,33 +316,13 @@ function test_GPG () {
# Detect if GnuPG >= 2.1 that does not allow automatic pin entry anymore # Detect if GnuPG >= 2.1 that does not allow automatic pin entry anymore
# GnuPG 2.1.11 has a bug that does not allow usage of pinentry mode 'loopback'
# GnuPC 2.1.12 has that bug resolved
cryptToolVersion=$($CRYPT_TOOL --version | head -1 | awk '{print $3}') cryptToolVersion=$($CRYPT_TOOL --version | head -1 | awk '{print $3}')
cryptToolMajorVersion=${cryptToolVersion%%.*} cryptToolMajorVersion=${cryptToolVersion%%.*}
cryptToolSubVersion=${cryptToolVersion#*.} cryptToolSubVersion=${cryptToolVersion#*.}
cryptToolSubVersion=${cryptToolSubVersion%.*} cryptToolSubVersion=${cryptToolSubVersion%.*}
cryptToolMinorVersion=${cryptToolVersion##*.}
echo "$CRYPT_TOOL is $cryptToolVersion"
if [ $cryptToolMajorVersion -eq 2 ] && [ $cryptToolSubVersion -ge 1 ]; then if [ $cryptToolMajorVersion -eq 2 ] && [ $cryptToolSubVersion -ge 1 ]; then
if [ $cryptToolMinorVersion -gt 11 ]; then additionalParameters="--pinentry-mode loopback"
echo "Using --pinentry-mode loopback [$cryptToolMajorVersion.$cryptToolSubVersion.$cryptToolMinorVersion]"
additionalParameters="--pinentry-mode loopback"
elif [ $cryptToolMinorVersion -eq 11 ]; then
echo "Using fix to allow --pinentry-mode loopback"
[ -f "${HOME}/.gnupg/gpg-agent.conf" ] || touch "${HOME}/.gnupg/gpg-agent.conf"
echo "allow-loopback-pinentry" >> "${HOME}/.gnupg/gpg-agent.conf"
gpgconf --reload gpg-agent
cat "${HOME}/.gnupg/gpg-agent.conf"
additionalParameters="--pinentry-mode loopback"
else
echo "Not using --pinentry-mode loopback [$cryptToolMajorVersion.$cryptToolSubVersion.$cryptToolMinorVersion]"
fi
fi fi
if [ "$CRYPT_TOOL" == "gpg2" ]; then if [ "$CRYPT_TOOL" == "gpg2" ]; then
@ -368,7 +333,6 @@ function test_GPG () {
echo "Decrypt using passphrase file" echo "Decrypt using passphrase file"
echo $CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase-file="$TESTS_DIR/$PASSFILE" --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION"
$CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase-file="$TESTS_DIR/$PASSFILE" --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION" $CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase-file="$TESTS_DIR/$PASSFILE" --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION"
assertEquals "Decrypt file using passfile" "0" $? assertEquals "Decrypt file using passfile" "0" $?
@ -382,7 +346,7 @@ function test_GPG () {
} }
function test_LocalRun () { function test_LocalRun () {
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing # Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
@ -415,7 +379,7 @@ function test_LocalRun () {
done done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 3 ] [ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $? assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files # Tests presence of rotated files
@ -425,37 +389,20 @@ function test_LocalRun () {
for file in "${DatabasePresence[@]}"; do for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION" ] [ -f "$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION]" "0" $? assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file]" "0" $?
done done
[ -d "$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ] [ -d "$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $? assertEquals "File rotated Presence [$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
# Second test of rotated files
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION]" "0" $?
[ -f "$TARGET_DIR_SQL_LOCAL/$file$ROTATE_2_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file$ROTATE_2_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
[ -d "$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION]" "0" $?
} }
function test_PullRun () { function test_PullRun () {
if [ "$SKIP_REMOTE" == true ]; then if [ "$SKIP_REMOTE" == "yes" ]; then
return 0 return 0
fi fi
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing # Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
@ -488,11 +435,12 @@ function test_PullRun () {
done done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 3 ] [ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $? assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files # Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $? assertEquals "Return code" "0" $?
@ -504,31 +452,14 @@ function test_PullRun () {
[ -d "$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ] [ -d "$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $? assertEquals "File rotated Presence [$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
# Second test of presence of rotated files
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PULL/$file$ROTATE_1_EXTENSION]" "0" $?
[ -f "$TARGET_DIR_SQL_PULL/$file$ROTATE_2_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PULL/$file$ROTATE_2_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
[ -d "$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION]" "0" $?
} }
function test_PushRun () { function test_PushRun () {
if [ "$SKIP_REMOTE" == true ]; then if [ "$SKIP_REMOTE" == "yes" ]; then
return 0 return 0
fi fi
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing # Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
@ -561,11 +492,11 @@ function test_PushRun () {
done done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 3 ] [ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $? assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files # Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $? assertEquals "Return code" "0" $?
@ -577,26 +508,10 @@ function test_PushRun () {
[ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ] [ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $? assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
# Second test of presence of rotated files
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PUSH/$file$ROTATE_1_EXTENSION]" "0" $?
[ -f "$TARGET_DIR_SQL_PUSH/$file$ROTATE_2_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PUSH/$file$ROTATE_2_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
[ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_2_EXTENSION]" "0" $?
} }
function test_EncryptLocalRun () { function test_EncryptLocalRun () {
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" true SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
@ -651,16 +566,16 @@ function test_EncryptLocalRun () {
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_LOCAL" --passphrase-file="$TESTS_DIR/$PASSFILE" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_LOCAL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_LOCAL]" "0" $? assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_LOCAL]" "0" $?
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "no"
} }
function test_EncryptPullRun () { function test_EncryptPullRun () {
if [ "$SKIP_REMOTE" == true ]; then if [ "$SKIP_REMOTE" == "yes" ]; then
return 0 return 0
fi fi
# Basic return code tests. Need to go deep into file presence testing # Basic return code tests. Need to go deep into file presence testing
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" true SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
@ -694,7 +609,7 @@ function test_EncryptPullRun () {
# Only excluded files should be listed here # Only excluded files should be listed here
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 3 ] [ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $? assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files # Tests presence of rotated files
@ -717,16 +632,16 @@ function test_EncryptPullRun () {
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_PULL" --passphrase-file="$TESTS_DIR/$PASSFILE" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_PULL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_PULL]" "0" $? assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_PULL]" "0" $?
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "no"
} }
function test_EncryptPushRun () { function test_EncryptPushRun () {
if [ "$SKIP_REMOTE" == true ]; then if [ "$SKIP_REMOTE" == "yes" ]; then
return 0 return 0
fi fi
# Basic return code tests. Need to go deep into file presence testing # Basic return code tests. Need to go deep into file presence testing
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" true SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR" cd "$OBACKUP_DIR"
@ -782,7 +697,7 @@ function test_EncryptPushRun () {
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_FILE_PUSH" --passphrase-file="$TESTS_DIR/$PASSFILE" REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_FILE_PUSH" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_FILE_PUSH]" "0" $? assertEquals "Decrypt file storage in [$TARGET_DIR_FILE_PUSH]" "0" $?
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" false SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "no"
} }
function test_missing_databases () { function test_missing_databases () {
@ -790,25 +705,25 @@ function test_missing_databases () {
# Prepare files for missing databases # Prepare files for missing databases
for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" false SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "no"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "\"zorglub;mysql\"" SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "\"zorglub;mysql\""
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" false SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "no"
REMOTE_HOST=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i" REMOTE_HOST=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i"
assertEquals "Missing databases should trigger error with [$i]" "1" $? assertEquals "Missing databases should trigger error with [$i]" "1" $?
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" true SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "yes"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql" SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
done done
for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentData" SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentDataRecursive" SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" false SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "no"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
REMOTE_HOST=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i" REMOTE_HOST=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i"
assertEquals "Missing files should trigger error with [$i]" "1" $? assertEquals "Missing files should trigger error with [$i]" "1" $?
@ -817,7 +732,7 @@ function test_missing_databases () {
echo "nope" echo "nope"
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData" SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive" SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" true SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
done done
} }

View File

@ -1,46 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at kate.ward@forestent.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,434 +0,0 @@
# shUnit2
shUnit2 is a [xUnit](http://en.wikipedia.org/wiki/XUnit) unit test framework for Bourne based shell scripts, and it is designed to work in a similar manner to [JUnit](http://www.junit.org), [PyUnit](http://pyunit.sourceforge.net), etc.. If you have ever had the desire to write a unit test for a shell script, shUnit2 can do the job.
[![Travis CI](https://img.shields.io/travis/kward/shunit2.svg)](https://travis-ci.org/kward/shunit2)
## Table of Contents
* [Introduction](#introduction)
* [Credits / Contributors](#credits-contributors)
* [Feedback](#feedback)
* [Quickstart](#quickstart)
* [Function Reference](#function-reference)
* [General Info](#general-info)
* [Asserts](#asserts)
* [Failures](#failures)
* [Setup/Teardown](#setup-teardown)
* [Skipping](#skipping)
* [Suites](#suites)
* [Advanced Usage](#advanced-usage)
* [Some constants you can use](#some-constants-you-can-use)
* [Error Handling](#error-handling)
* [Including Line Numbers in Asserts (Macros)](#including-line-numbers-in-asserts-macros)
* [Test Skipping](#test-skipping)
* [Appendix](#appendix)
* [Getting help](#getting-help)
* [Zsh](#zsh)
---
## <a name="introduction"></a> Introduction
shUnit2 was originally developed to provide a consistent testing solution for [log4sh][log4sh], a shell based logging framework similar to [log4j](http://logging.apache.org). During the development of that product, a repeated problem of having things work just fine under one shell (`/bin/bash` on Linux to be specific), and then not working under another shell (`/bin/sh` on Solaris) kept coming up. Although several simple tests were run, they were not adequate and did not catch some corner cases. The decision was finally made to write a proper unit test framework after multiple brown-bag releases were made. _Research was done to look for an existing product that met the testing requirements, but no adequate product was found._
Tested Operating Systems (varies over time)
* Cygwin
* FreeBSD (user supported)
* Linux (Gentoo, Ubuntu)
* Mac OS X
* Solaris 8, 9, 10 (inc. OpenSolaris)
Tested Shells
* Bourne Shell (__sh__)
* BASH - GNU Bourne Again SHell (__bash__)
* DASH (__dash__)
* Korn Shell (__ksh__)
* pdksh - Public Domain Korn Shell (__pdksh__)
* zsh - Zsh (__zsh__) (since 2.1.2) _please see the Zsh shell errata for more information_
See the appropriate Release Notes for this release (`doc/RELEASE_NOTES-X.X.X.txt`) for the list of actual versions tested.
### <a name="credits-contributors"></a> Credits / Contributors
A list of contributors to shUnit2 can be found in `doc/contributors.md`. Many thanks go out to all those who have contributed to make this a better tool.
shUnit2 is the original product of many hours of work by Kate Ward, the primary author of the code. For related software, check out https://github.com/kward.
### <a name="feedback"></a> Feedback
Feedback is most certainly welcome for this document. Send your additions, comments and criticisms to the shunit2-users@google.com mailing list.
---
## <a name="quickstart"></a> Quickstart
This section will give a very quick start to running unit tests with shUnit2. More information is located in later sections.
Here is a quick sample script to show how easy it is to write a unit test in shell. _Note: the script as it stands expects that you are running it from the "examples" directory._
```sh
#! /bin/sh
# file: examples/equality_test.sh
testEquality() {
assertEquals 1 1
}
# Load shUnit2.
. ./shunit2
```
Running the unit test should give results similar to the following.
```console
$ cd examples
$ ./equality_test.sh
testEquality
Ran 1 test.
OK
```
W00t! You've just run your first successful unit test. So, what just happened? Quite a bit really, and it all happened simply by sourcing the `shunit2` library. The basic functionality for the script above goes like this:
* When shUnit2 is sourced, it will walk through any functions defined whose name starts with the string `test`, and add those to an internal list of tests to execute. Once a list of test functions to be run has been determined, shunit2 will go to work.
* Before any tests are executed, shUnit2 again looks for a function, this time one named `oneTimeSetUp()`. If it exists, it will be run. This function is normally used to setup the environment for all tests to be run. Things like creating directories for output or setting environment variables are good to place here. Just so you know, you can also declare a corresponding function named `oneTimeTearDown()` function that does the same thing, but once all the tests have been completed. It is good for removing temporary directories, etc.
* shUnit2 is now ready to run tests. Before doing so though, it again looks for another function that might be declared, one named `setUp()`. If the function exists, it will be run before each test. It is good for resetting the environment so that each test starts with a clean slate. **At this stage, the first test is finally run.** The success of the test is recorded for a report that will be generated later. After the test is run, shUnit2 looks for a final function that might be declared, one named `tearDown()`. If it exists, it will be run after each test. It is a good place for cleaning up after each test, maybe doing things like removing files that were created, or removing directories. This set of steps, `setUp() > test() > tearDown()`, is repeated for all of the available tests.
* Once all the work is done, shUnit2 will generate the nice report you saw above. A summary of all the successes and failures will be given so that you know how well your code is doing.
We should now try adding a test that fails. Change your unit test to look like this.
```sh
#! /bin/sh
# file: examples/party_test.sh
testEquality() {
assertEquals 1 1
}
testPartyLikeItIs1999() {
year=`date '+%Y'`
assertEquals "It's not 1999 :-(" '1999' "${year}"
}
# Load shUnit2.
. ./shunit2
```
So, what did you get? I guess it told you that this isn't 1999. Bummer, eh? Hopefully, you noticed a couple of things that were different about the second test. First, we added an optional message that the user will see if the assert fails. Second, we did comparisons of strings instead of integers as in the first test. It doesn't matter whether you are testing for equality of strings or integers. Both work equally well with shUnit2.
Hopefully, this is enough to get you started with unit testing. If you want a ton more examples, take a look at the tests provided with [log4sh][log4sh] or [shFlags][shflags]. Both provide excellent examples of more advanced usage. shUnit2 was after all written to meet the unit testing need that [log4sh][log4sh] had.
---
## <a name="function-reference"></a> Function Reference
### <a name="general-info"></a> General Info
Any string values passed must be properly quoted -- they must be surrounded by single-quote (`'`) or double-quote (`"`) characters -- so that the shell will properly parse them.
### <a name="asserts"></a> Asserts
`assertEquals [message] expected actual`
Asserts that _expected_ and _actual_ are equal to one another. The _expected_ and _actual_ values can be either strings or integer values as both will be treated as strings. The _message_ is optional, and must be quoted.
`assertNotEquals [message] unexpected actual`
Asserts that _unexpected_ and _actual_ are not equal to one another. The _unexpected_ and _actual_ values can be either strings or integer values as both will be treated as strings. The _message_ is optional, and must be quoted.
`assertSame [message] expected actual`
This function is functionally equivalent to `assertEquals`.
`assertNotSame [message] unexpected actual`
This function is functionally equivalent to `assertNotEquals`.
`assertNull [message] value`
Asserts that _value_ is _null_, or in shell terms, a zero-length string. The _value_ must be a string as an integer value does not translate into a zero-length string. The _message_ is optional, and must be quoted.
`assertNotNull [message] value`
Asserts that _value_ is _not null_, or in shell terms, a non-empty string. The _value_ may be a string or an integer as the later will be parsed as a non-empty string value. The _message_ is optional, and must be quoted.
`assertTrue [message] condition`
Asserts that a given shell test _condition_ is _true_. The condition can be as simple as a shell _true_ value (the value `0` -- equivalent to `${SHUNIT_TRUE}`), or a more sophisticated shell conditional expression. The _message_ is optional, and must be quoted.
A sophisticated shell conditional expression is equivalent to what the __if__ or __while__ shell built-ins would use (more specifically, what the __test__ command would use). Testing for example whether some value is greater than another value can be done this way.
`assertTrue "[ 34 -gt 23 ]"`
Testing for the ability to read a file can also be done. This particular test will fail.
`assertTrue 'test failed' "[ -r /some/non-existent/file ]"`
As the expressions are standard shell __test__ expressions, it is possible to string multiple expressions together with `-a` and `-o` in the standard fashion. This test will succeed as the entire expression evaluates to _true_.
`assertTrue 'test failed' '[ 1 -eq 1 -a 2 -eq 2 ]'`
_One word of warning: be very careful with your quoting as shell is not the most forgiving of bad quoting, and things will fail in strange ways._
`assertFalse [message] condition`
Asserts that a given shell test _condition_ is _false_. The condition can be as simple as a shell _false_ value (the value `1` -- equivalent to `${SHUNIT_FALSE}`), or a more sophisticated shell conditional expression. The _message_ is optional, and must be quoted.
_For examples of more sophisticated expressions, see `assertTrue`._
### <a name="failures"></a> Failures
Just to clarify, failures __do not__ test the various arguments against one another. Failures simply fail, optionally with a message, and that is all they do. If you need to test arguments against one another, use asserts.
If all failures do is fail, why might one use them? There are times when you may have some very complicated logic that you need to test, and the simple asserts provided are simply not adequate. You can do your own validation of the code, use an `assertTrue ${SHUNIT_TRUE}` if your own tests succeeded, and use a failure to record a failure.
`fail [message]`
Fails the test immediately. The _message_ is optional, and must be quoted.
`failNotEquals [message] unexpected actual`
Fails the test immediately, reporting that the _unexpected_ and _actual_ values are not equal to one another. The _message_ is optional, and must be quoted.
_Note: no actual comparison of unexpected and actual is done._
`failSame [message] expected actual`
Fails the test immediately, reporting that the _expected_ and _actual_ values are the same. The _message_ is optional, and must be quoted.
_Note: no actual comparison of expected and actual is done._
`failNotSame [message] expected actual`
Fails the test immediately, reporting that the _expected_ and _actual_ values are not the same. The _message_ is optional, and must be quoted.
_Note: no actual comparison of expected and actual is done._
### <a name="setup-teardown"></a> Setup/Teardown
`oneTimeSetUp`
This function can be optionally overridden by the user in their test suite.
If this function exists, it will be called once before any tests are run. It is useful to prepare a common environment for all tests.
`oneTimeTearDown`
This function can be optionally overridden by the user in their test suite.
If this function exists, it will be called once after all tests are completed. It is useful to clean up the environment after all tests.
`setUp`
This function can be optionally overridden by the user in their test suite.
If this function exists, it will be called before each test is run. It is useful to reset the environment before each test.
`tearDown`
This function can be optionally overridden by the user in their test suite.
If this function exists, it will be called after each test completes. It is useful to clean up the environment after each test.
### <a name="skipping"></a> Skipping
`startSkipping`
This function forces the remaining _assert_ and _fail_ functions to be "skipped", i.e. they will have no effect. Each function skipped will be recorded so that the total of asserts and fails will not be altered.
`endSkipping`
This function returns calls to the _assert_ and _fail_ functions to their default behavior, i.e. they will be called.
`isSkipping`
This function returns the current state of skipping. It can be compared against `${SHUNIT_TRUE}` or `${SHUNIT_FALSE}` if desired.
### <a name="suites"></a> Suites
The default behavior of shUnit2 is that all tests will be found dynamically. If you have a specific set of tests you want to run, or you don't want to use the standard naming scheme of prefixing your tests with `test`, these functions are for you. Most users will never use them though.
`suite`
This function can be optionally overridden by the user in their test suite.
If this function exists, it will be called when `shunit2` is sourced. If it does not exist, shUnit2 will search the parent script for all functions beginning with the word `test`, and they will be added dynamically to the test suite.
`suite_addTest name`
This function adds a function named _name_ to the list of tests scheduled for execution as part of this test suite. This function should only be called from within the `suite()` function.
---
## <a name="advanced-usage"></a> Advanced Usage
### <a name="some-constants-you-can-use"></a> Some constants you can use
There are several constants provided by shUnit2 as variables that might be of use to you.
*Predefined*
| Constant | Value |
| --------------- | ----- |
| SHUNIT\_TRUE | Standard shell `true` value (the integer value 0). |
| SHUNIT\_FALSE | Standard shell `false` value (the integer value 1). |
| SHUNIT\_ERROR | The integer value 2. |
| SHUNIT\_TMPDIR | Path to temporary directory that will be automatically cleaned up upon exit of shUnit2. |
| SHUNIT\_VERSION | The version of shUnit2 you are running. |
*User defined*
| Constant | Value |
| ----------------- | ----- |
| SHUNIT\_CMD\_EXPR | Override which `expr` command is used. By default `expr` is used, except on BSD systems where `gexpr` is used. |
| SHUNIT\_COLOR | Enable colorized output. Options are 'auto', 'always', or 'never', with 'auto' being the default. |
| SHUNIT\_PARENT | The filename of the shell script containing the tests. This is needed specifically for Zsh support. |
| SHUNIT\_TEST\_PREFIX | Define this variable to add a prefix in front of each test name that is output in the test report. |
### <a name="error-handling"></a> Error handling
The constants values `SHUNIT_TRUE`, `SHUNIT_FALSE`, and `SHUNIT_ERROR` are returned from nearly every function to indicate the success or failure of the function. Additionally the variable `flags_error` is filled with a detailed error message if any function returns with a `SHUNIT_ERROR` value.
### <a name="including-line-numbers-in-asserts-macros"></a> Including Line Numbers in Asserts (Macros)
If you include lots of assert statements in an individual test function, it can become difficult to determine exactly which assert was thrown unless your messages are unique. To help somewhat, line numbers can be included in the assert messages. To enable this, a special shell "macro" must be used rather than the standard assert calls. _Shell doesn't actually have macros; the name is used here as the operation is similar to a standard macro._
For example, to include line numbers for a `assertEquals()` function call, replace the `assertEquals()` with `${_ASSERT_EQUALS_}`.
_**Example** -- Asserts with and without line numbers_
```sh
#! /bin/sh
# file: examples/lineno_test.sh
testLineNo() {
# This assert will have line numbers included (e.g. "ASSERT:[123] ...").
echo "ae: ${_ASSERT_EQUALS_}"
${_ASSERT_EQUALS_} 'not equal' 1 2
# This assert will not have line numbers included (e.g. "ASSERT: ...").
assertEquals 'not equal' 1 2
}
# Load shUnit2.
. ./shunit2
```
Notes:
1. Due to how shell parses command-line arguments, all strings used with macros should be quoted twice. Namely, single-quotes must be converted to single-double-quotes, and vice-versa. If the string being passed is absolutely for sure not empty, the extra quoting is not necessary.<br/><br/>Normal `assertEquals` call.<br/>`assertEquals 'some message' 'x' ''`<br/><br/>Macro `_ASSERT_EQUALS_` call. Note the extra quoting around the _message_ and the _null_ value.<br/>`_ASSERT_EQUALS_ '"some message"' 'x' '""'`
1. Line numbers are not supported in all shells. If a shell does not support them, no errors will be thrown. Supported shells include: __bash__ (>=3.0), __ksh__, __pdksh__, and __zsh__.
### <a name="test-skipping"></a> Test Skipping
There are times where the test code you have written is just not applicable to the system you are running on. This section describes how to skip these tests but maintain the total test count.
Probably the easiest example would be shell code that is meant to run under the __bash__ shell, but the unit test is running under the Bourne shell. There are things that just won't work. The following test code demonstrates two sample functions, one that will be run under any shell, and another that will run only under the __bash__ shell.
_**Example** -- math include_
```sh
# file: examples/math.inc.
add_generic() {
num_a=$1
num_b=$2
expr $1 + $2
}
add_bash() {
num_a=$1
num_b=$2
echo $(($1 + $2))
}
```
And here is a corresponding unit test that correctly skips the `add_bash()` function when the unit test is not running under the __bash__ shell.
_**Example** -- math unit test_
```sh
#! /bin/sh
# file: examples/math_test.sh
testAdding() {
result=`add_generic 1 2`
assertEquals \
"the result of '${result}' was wrong" \
3 "${result}"
# Disable non-generic tests.
[ -z "${BASH_VERSION:-}" ] && startSkipping
result=`add_bash 1 2`
assertEquals \
"the result of '${result}' was wrong" \
3 "${result}"
}
oneTimeSetUp() {
# Load include to test.
. ./math.inc
}
# Load and run shUnit2.
. ./shunit2
```
Running the above test under the __bash__ shell will result in the following output.
```console
$ /bin/bash math_test.sh
testAdding
Ran 1 test.
OK
```
But, running the test under any other Unix shell will result in the following output.
```console
$ /bin/ksh math_test.sh
testAdding
Ran 1 test.
OK (skipped=1)
```
As you can see, the total number of tests has not changed, but the report indicates that some tests were skipped.
Skipping can be controlled with the following functions: `startSkipping()`, `endSkipping()`, and `isSkipping()`. Once skipping is enabled, it will remain enabled until the end of the current test function call, after which skipping is disabled.
---
## <a name="appendix"></a> Appendix
### <a name="getting-help"></a> Getting Help
For help, please send requests to either the shunit2-users@googlegroups.com mailing list (archives available on the web at http://groups.google.com/group/shunit2-users) or directly to Kate Ward <kate dot ward at forestent dot com>.
### <a name="zsh"></a> Zsh
For compatibility with Zsh, there is one requirement that must be met -- the `shwordsplit` option must be set. There are three ways to accomplish this.
1. In the unit-test script, add the following shell code snippet before sourcing the `shunit2` library.
```sh
setopt shwordsplit
```
1. When invoking __zsh__ from either the command-line or as a script with `#!`, add the `-y` parameter.
```sh
#! /bin/zsh -y
```
1. When invoking __zsh__ from the command-line, add `-o shwordsplit --` as parameters before the script name.
```console
$ zsh -o shwordsplit -- some_script
```
[log4sh]: https://github.com/kward/log4sh
[shflags]: https://github.com/kward/shflags

View File

@ -1,88 +0,0 @@
#! /bin/sh
# vim:et:ft=sh:sts=2:sw=2
#
# This script runs the provided unit tests and sends the output to the
# appropriate file.
#
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
#
# Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
#
# Source following.
# shellcheck disable=SC1090,SC1091
# FLAGS variables are dynamically created.
# shellcheck disable=SC2154
# Disagree with [ p ] && [ q ] vs [ p -a -q ] recommendation.
# shellcheck disable=SC2166
# Treat unset variables as an error.
set -u
# Print an optional error message to stderr, then abort the script with
# exit status 1. All arguments (if any) are joined into the message.
die() {
  if [ $# -gt 0 ]; then
    echo "error: $*" >&2
  fi
  exit 1
}
BASE_DIR=$(dirname "$0")
LIB_DIR="${BASE_DIR}/lib"

### Load libraries.
. "${LIB_DIR}/shflags" || die 'unable to load shflags library'
. "${LIB_DIR}/shlib" || die 'unable to load shlib library'
. "${LIB_DIR}/versions" || die 'unable to load versions library'

# Redefining BASE_DIR now that we have the shlib functions. We need BASE_DIR so
# that we can properly load things, even in the event that this script is called
# from a different directory.
BASE_DIR=$(shlib_relToAbsPath "${BASE_DIR}")

# Define flags.
os_name=$(versions_osName |sed 's/ /_/g')
os_version=$(versions_osVersion)

DEFINE_boolean force false 'force overwrite' f
# Fall back to /tmp when TMPDIR is unset: this script runs under 'set -u',
# so a bare "${TMPDIR}" would abort on systems that do not export it.
DEFINE_string output_dir "${TMPDIR:-/tmp}" 'output dir' d
DEFINE_string output_file "${os_name}-${os_version}.txt" 'output file' o
DEFINE_string runner 'test_runner' 'unit test runner' r
DEFINE_boolean dry_run false "suppress logging to a file" n
# Run the configured unit-test runner, teeing its output to a file unless
# --dry_run was given.
#
# Globals (populated by shflags and the sourced libraries):
#   FLAGS_output_dir, FLAGS_output_file, FLAGS_runner, FLAGS_force,
#   FLAGS_dry_run, FLAGS_TRUE, FLAGS_FALSE, FLAGS_ERROR
# Outputs:
#   runner output on stdout (and into the output file); status text on stderr.
main() {
  # Determine output filename.
  # shellcheck disable=SC2154
  output="${FLAGS_output_dir:+${FLAGS_output_dir}/}${FLAGS_output_file}"
  output=$(shlib_relToAbsPath "${output}")

  # Refuse to clobber an existing output file unless --force was given.
  # Uses '[ p ] && [ q ]' rather than the obsolescent '[ p -a q ]' form.
  if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ] && [ -f "${output}" ]; then
    if [ "${FLAGS_force}" -eq "${FLAGS_TRUE}" ]; then
      rm -f "${output}"
    else
      echo "not overwriting '${output}'" >&2
      exit "${FLAGS_ERROR}"
    fi
  fi
  if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
    # Fail early if the output location is not writable.
    touch "${output}" 2>/dev/null || die "unable to write to '${output}'"
  fi

  # Run tests in a subshell (presumably to keep any environment changes made
  # by the runner from leaking into this script -- TODO confirm).
  (
    if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
      "./${FLAGS_runner}" |tee "${output}"
    else
      "./${FLAGS_runner}"
    fi
  )

  if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
    echo >&2
    echo "Output written to '${output}'." >&2
  fi
}
# Parse command-line flags; propagate any parse failure as this script's exit.
FLAGS "$@" || exit $?
# Stop here when --help was requested (FLAGS_help is set by the FLAGS call).
[ "${FLAGS_help}" -eq "${FLAGS_FALSE}" ] || exit
# Reset the positional parameters to the remaining non-flag arguments.
eval set -- "${FLAGS_ARGV}"
main "${@:-}"

File diff suppressed because it is too large Load Diff

View File

@ -1,39 +0,0 @@
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License).
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# Library of shell functions.
# Convert a relative path into its absolute equivalent.
#
# The current working directory is prepended when the path is not already
# absolute, after which 'dir/../' and '/./' components are collapsed until a
# clean absolute path remains.
#
# Args:
#   shlib_path_: string: relative path
# Outputs:
#   string: absolute path
shlib_relToAbsPath()
{
  shlib_path_=$1

  # Anchor relative paths at the current working directory.
  case "${shlib_path_}" in
    /*) ;;
    *) shlib_path_="${PWD}/${shlib_path_}" ;;
  esac

  # Collapse parent ('..') and self ('.') references. A single sed pass is
  # not enough because each substitution can expose a new match, so iterate
  # until the path stops changing.
  shlib_old_=${shlib_path_}
  while :; do
    shlib_new_=$(echo "${shlib_old_}" |sed 's/[^/]*\/\.\.\/*//;s/\/\.\//\//')
    [ "${shlib_old_}" = "${shlib_new_}" ] && break
    shlib_old_=${shlib_new_}
  done

  echo "${shlib_new_}"
  unset shlib_path_ shlib_old_ shlib_new_
}

View File

@ -1,272 +0,0 @@
#! /bin/sh
# vim:et:ft=sh:sts=2:sw=2
#
# Versions determines the versions of all installed shells.
#
# Copyright 2008-2018 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 License.
#
# Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shlib
#
# This library provides reusable functions that determine actual names and
# versions of installed shells and the OS. The library can also be run as a
# script if set executable.
#
# Disable checks that aren't fully portable (POSIX != portable).
# shellcheck disable=SC2006
ARGV0=`basename "$0"`  # Invocation name; used to detect direct execution.
LSB_RELEASE='/etc/lsb-release'  # Ubuntu release metadata file.
VERSIONS_SHELLS='ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh /bin/sh /usr/xpg4/bin/sh /sbin/sh'

# Shell-convention booleans: TRUE=0, FALSE=1 (exit-status style).
true; TRUE=$?
false; FALSE=$?
ERROR=2

UNAME_R=`uname -r`
UNAME_S=`uname -s`

# Cached result of the strings(1) availability probe; ERROR = not probed yet.
__versions_haveStrings=${ERROR}
versions_osName() {
  # Determine the human-readable name of the host OS from uname data
  # (and sw_vers / /etc/release where available).
  # Outputs: string: OS name ('unrecognized' when unknown).
  os_name_='unrecognized'
  os_system_=${UNAME_S}
  os_release_=${UNAME_R}

  case ${os_system_} in
    CYGWIN_NT-*) os_name_='Cygwin' ;;
    Darwin)
      # Map the product version onto Apple's marketing name.
      os_name_=`/usr/bin/sw_vers -productName`
      os_version_=`versions_osVersion`
      case ${os_version_} in
        10.4|10.4.[0-9]*) os_name_='Mac OS X Tiger' ;;
        10.5|10.5.[0-9]*) os_name_='Mac OS X Leopard' ;;
        10.6|10.6.[0-9]*) os_name_='Mac OS X Snow Leopard' ;;
        10.7|10.7.[0-9]*) os_name_='Mac OS X Lion' ;;
        10.8|10.8.[0-9]*) os_name_='Mac OS X Mountain Lion' ;;
        10.9|10.9.[0-9]*) os_name_='Mac OS X Mavericks' ;;
        10.10|10.10.[0-9]*) os_name_='Mac OS X Yosemite' ;;
        10.11|10.11.[0-9]*) os_name_='Mac OS X El Capitan' ;;
        10.12|10.12.[0-9]*) os_name_='macOS Sierra' ;;
        10.13|10.13.[0-9]*) os_name_='macOS High Sierra' ;;
        *) os_name_='macOS' ;;
      esac
      ;;
    FreeBSD) os_name_='FreeBSD' ;;
    Linux) os_name_='Linux' ;;
    SunOS)
      os_name_='SunOS'
      # /etc/release distinguishes OpenSolaris from classic Solaris.
      if [ -r '/etc/release' ]; then
        if grep 'OpenSolaris' /etc/release >/dev/null; then
          os_name_='OpenSolaris'
        else
          os_name_='Solaris'
        fi
      fi
      ;;
  esac

  echo ${os_name_}
  unset os_name_ os_system_ os_release_ os_version_
}
versions_osVersion() {
  # Determine the version (or distribution description) of the host OS.
  # Outputs: string: OS version ('unrecognized' when unknown).
  os_version_='unrecognized'
  os_system_=${UNAME_S}
  os_release_=${UNAME_R}

  case ${os_system_} in
    CYGWIN_NT-*)
      # Extract the leading x.y.z portion of the Cygwin release string.
      os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]\.[0-9]*\).*'`
      ;;
    Darwin)
      os_version_=`/usr/bin/sw_vers -productVersion`
      ;;
    FreeBSD)
      os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]*\)-.*'`
      ;;
    Linux)
      # Prefer the modern os-release file, then distro-specific files,
      # and finally the (Ubuntu) lsb-release file.
      if [ -r '/etc/os-release' ]; then
        os_version_=`awk -F= '$1~/PRETTY_NAME/{print $2}' /etc/os-release \
            |sed 's/"//g'`
      elif [ -r '/etc/redhat-release' ]; then
        os_version_=`cat /etc/redhat-release`
      elif [ -r '/etc/SuSE-release' ]; then
        os_version_=`head -n 1 /etc/SuSE-release`
      elif [ -r "${LSB_RELEASE}" ]; then
        if grep -q 'DISTRIB_ID=Ubuntu' "${LSB_RELEASE}"; then
          # shellcheck disable=SC2002
          os_version_=`cat "${LSB_RELEASE}" \
              |awk -F= '$1~/DISTRIB_DESCRIPTION/{print $2}' \
              |sed 's/"//g;s/ /-/g'`
        fi
      fi
      ;;
    SunOS)
      if [ -r '/etc/release' ]; then
        if grep 'OpenSolaris' /etc/release >/dev/null; then  # OpenSolaris
          os_version_=`grep 'OpenSolaris' /etc/release |awk '{print $2"("$3")"}'`
        else  # Solaris: combine the kernel minor with the update number.
          major_=`echo "${os_release_}" |sed 's/[0-9]*\.\([0-9]*\)/\1/'`
          minor_=`grep Solaris /etc/release |sed 's/[^u]*\(u[0-9]*\).*/\1/'`
          os_version_="${major_}${minor_}"
        fi
      fi
      ;;
  esac

  echo "${os_version_}"
  unset os_release_ os_system_ os_version_ major_ minor_
}
versions_shellVersion() {
  # Determine the version of an installed shell.
  # Args:
  #   $1: string: path (or 'ash') of the shell to probe
  # Outputs: string: version, 'not installed', 'unknown', or 'invalid'.
  # Returns: FALSE when the shell is not installed.
  shell_=$1

  # Presence check; ash ships inside BusyBox rather than as its own binary.
  shell_present_=${FALSE}
  case "${shell_}" in
    ash) [ -x '/bin/busybox' ] && shell_present_=${TRUE} ;;
    *) [ -x "${shell_}" ] && shell_present_=${TRUE} ;;
  esac
  if [ ${shell_present_} -eq ${FALSE} ]; then
    echo 'not installed'
    return ${FALSE}
  fi

  version_=''
  case ${shell_} in
    /sbin/sh) ;;  # SunOS
    /usr/xpg4/bin/sh)
      version_=`versions_shell_xpg4 "${shell_}"`
      ;;  # SunOS
    */sh)
      # This could be one of any number of shells. Try until one fits.
      version_=''
      [ -z "${version_}" ] && version_=`versions_shell_bash "${shell_}"`
      # dash cannot be self determined yet
      [ -z "${version_}" ] && version_=`versions_shell_ksh "${shell_}"`
      # pdksh is covered in versions_shell_ksh()
      [ -z "${version_}" ] && version_=`versions_shell_xpg4 "${shell_}"`
      [ -z "${version_}" ] && version_=`versions_shell_zsh "${shell_}"`
      ;;
    ash) version_=`versions_shell_ash "${shell_}"` ;;
    */bash) version_=`versions_shell_bash "${shell_}"` ;;
    */dash)
      # Assuming Ubuntu Linux until somebody comes up with a better test. The
      # following test will return an empty string if dash is not installed.
      version_=`versions_shell_dash`
      ;;
    */ksh) version_=`versions_shell_ksh "${shell_}"` ;;
    */pdksh) version_=`versions_shell_pdksh "${shell_}"` ;;
    */zsh) version_=`versions_shell_zsh "${shell_}"` ;;
    *) version_='invalid'
  esac

  echo "${version_:-unknown}"
  unset shell_ version_
}
# The ash shell is included in BusyBox; extract the version from the
# BusyBox banner line.
versions_shell_ash() {
  busybox --help |head -1 |sed 's/BusyBox v\([0-9.]*\) .*/\1/'
}
# Extract the bash version number from '$1 --version' output.
# The extra ':' argument keeps the shell from entering interactive mode.
versions_shell_bash() {
  $1 --version : 2>&1 |grep 'GNU bash' |sed 's/.*version \([^ ]*\).*/\1/'
}
# Determine the installed dash version by querying the dpkg database
# (Debian/Ubuntu only). Outputs nothing when dpkg or dash is absent.
versions_shell_dash() {
  # Use the POSIX 'command -v' probe instead of executing dpkg and checking
  # for the shell's 127 "command not found" status; this avoids needlessly
  # running dpkg just to detect its absence.
  command -v dpkg >/dev/null 2>&1 || return  # Return if dpkg not found.

  dpkg -l |grep ' dash ' |awk '{print $3}'
}
# Determine the version of a ksh binary, trying several strategies in order.
# Args:
#   $1: string: path to the ksh binary
# Outputs: string: version (empty when undeterminable).
versions_shell_ksh() {
  versions_shell_=$1
  versions_version_=''

  # Try a few different ways to figure out the version.
  # Strategy 1: '--version' output contains a YYYY-MM-DD release date.
  versions_version_=`${versions_shell_} --version : 2>&1`
  # shellcheck disable=SC2181
  if [ $? -eq 0 ]; then
    versions_version_=`echo "${versions_version_}" \
        |sed 's/.*\([0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\).*/\1/'`
  else
    versions_version_=''
  fi
  # Strategy 2: scan the binary with strings(1) for an embedded Version tag.
  if [ -z "${versions_version_}" ]; then
    _versions_have_strings
    versions_version_=`strings "${versions_shell_}" 2>&1 \
        |grep Version \
        |sed 's/^.*Version \(.*\)$/\1/;s/ s+ \$$//;s/ /-/g'`
  fi
  # Strategy 3: maybe it is really pdksh.
  if [ -z "${versions_version_}" ]; then
    versions_version_=`versions_shell_pdksh "${versions_shell_}"`
  fi

  echo "${versions_version_}"
  unset versions_shell_ versions_version_
}
# Extract the pdksh version by scanning the binary with strings(1) for the
# embedded 'PD KSH' tag.
versions_shell_pdksh() {
  _versions_have_strings
  strings "$1" 2>&1 \
    |grep 'PD KSH' \
    |sed -e 's/.*PD KSH \(.*\)/\1/;s/ /-/g'
}
# Extract the version of an XPG4 sh (SunOS) by scanning the binary with
# strings(1) for its '@(#)Version' SCCS tag.
versions_shell_xpg4() {
  _versions_have_strings
  strings "$1" 2>&1 \
    |grep 'Version' \
    |sed -e 's/^@(#)Version //'
}
# Determine the version of a zsh binary.
# Args:
#   $1: string: path to the zsh binary
# Outputs: string: version (empty when undeterminable).
versions_shell_zsh() {
  versions_shell_=$1

  # Try a few different ways to figure out the version.
  # Strategy 1: ask the shell itself for ${ZSH_VERSION}.
  # shellcheck disable=SC2016
  versions_version_=`echo 'echo ${ZSH_VERSION}' |${versions_shell_}`
  # Strategy 2: parse '--version' output (second field).
  if [ -z "${versions_version_}" ]; then
    versions_version_=`${versions_shell_} --version : 2>&1`
    # shellcheck disable=SC2181
    if [ $? -eq 0 ]; then
      versions_version_=`echo "${versions_version_}" |awk '{print $2}'`
    else
      versions_version_=''
    fi
  fi

  echo "${versions_version_}"
  unset versions_shell_ versions_version_
}
# Determine whether the 'strings' binary is installed, caching the result in
# the __versions_haveStrings global (TRUE/FALSE; ERROR means not yet probed).
_versions_have_strings() {
  # Already probed? Keep the cached answer.
  [ ${__versions_haveStrings} -ne ${ERROR} ] && return

  if eval strings /dev/null >/dev/null 2>&1; then
    __versions_haveStrings=${TRUE}
    return
  fi

  echo 'WARN: strings not installed. try installing binutils?' >&2
  __versions_haveStrings=${FALSE}
}
versions_main() {
  # Report the OS name/version and the version of every known shell.
  # Treat unset variables as an error.
  set -u

  os_name=`versions_osName`
  os_version=`versions_osVersion`
  echo "os: ${os_name} version: ${os_version}"

  for shell in ${VERSIONS_SHELLS}; do
    shell_version=`versions_shellVersion "${shell}"`
    echo "shell: ${shell} version: ${shell_version}"
  done
}
# Run the report only when this file is executed directly (as 'versions'),
# not when it is sourced as a library.
if [ "${ARGV0}" = 'versions' ]; then
  versions_main "$@"
fi

File diff suppressed because it is too large Load Diff

View File

@ -1,262 +0,0 @@
#! /bin/sh
# vim:et:ft=sh:sts=2:sw=2
#
# shUnit2 unit tests of miscellaneous things
#
# Copyright 2008-2018 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
#
# Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
#
### ShellCheck http://www.shellcheck.net/
# $() are not fully portable (POSIX != portable).
# shellcheck disable=SC2006
# Disable source following.
# shellcheck disable=SC1090,SC1091
# Not wanting to escape single quotes.
# shellcheck disable=SC1003
# These variables will be overridden by the test helpers.
stdoutF="${TMPDIR:-/tmp}/STDOUT"
stderrF="${TMPDIR:-/tmp}/STDERR"
# Load test helpers.
. ./shunit2_test_helpers
# Note: the test script is prefixed with '#' chars so that shUnit2 does not
# incorrectly interpret the embedded functions as real functions.
testUnboundVariable() {
  # An unbound variable inside a test must fail the run, not kill shUnit2.
  unittestF="${SHUNIT_TMPDIR}/unittest"
  # Write an embedded test script; sed strips the '#' protection prefix.
  sed 's/^#//' >"${unittestF}" <<EOF
## Treat unset variables as an error when performing parameter expansion.
#set -u
#
#boom() { x=\$1; } # This function goes boom if no parameters are passed!
#test_boom() {
#   assertEquals 1 1
#   boom # No parameter given
#   assertEquals 0 \$?
#}
#SHUNIT_COLOR='none'
#. ${TH_SHUNIT}
EOF
  ( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
  assertFalse 'expected a non-zero exit value' $?
  grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
  assertTrue 'assert message was not generated' $?
  grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
  assertTrue 'test count message was not generated' $?
  grep '^FAILED' "${stdoutF}" >/dev/null
  assertTrue 'failure message was not generated' $?
}
# assertEquals repeats message argument.
# https://github.com/kward/shunit2/issues/7
testIssue7() {
  # The assert message must appear exactly once in the output.
  # Disable coloring so 'ASSERT:' lines can be matched correctly.
  _shunit_configureColor 'none'

  ( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
  # Compare captured stdout against the single expected ASSERT line.
  diff "${stdoutF}" - >/dev/null <<EOF
ASSERT:Some message. expected:<1> but was:<2>
EOF
  rtrn=$?
  assertEquals "${SHUNIT_TRUE}" "${rtrn}"
  [ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stderrF}" >&2
}
# Support prefixes on test output.
# https://github.com/kward/shunit2/issues/29
testIssue29() {
  # A configured SHUNIT_TEST_PREFIX must appear before each test name.
  unittestF="${SHUNIT_TMPDIR}/unittest"
  # Embedded test script; sed strips the '#' protection prefix.
  sed 's/^#//' >"${unittestF}" <<EOF
## Support test prefixes.
#test_assert() { assertTrue ${SHUNIT_TRUE}; }
#SHUNIT_COLOR='none'
#SHUNIT_TEST_PREFIX='--- '
#. ${TH_SHUNIT}
EOF
  ( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
  grep '^--- test_assert' "${stdoutF}" >/dev/null
  rtrn=$?
  assertEquals "${SHUNIT_TRUE}" "${rtrn}"
  [ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stdoutF}" >&2
}
# shUnit2 should not exit with 0 when it has syntax errors.
# https://github.com/kward/shunit2/issues/69
testIssue69() {
  # Each assert called with an invalid argument count must be recorded as
  # a failure (and shUnit2 must not exit 0 on such errors).
  unittestF="${SHUNIT_TMPDIR}/unittest"

  for t in Equals NotEquals Null NotNull Same NotSame True False; do
    assert="assert${t}"
    # Embedded test script; sed strips the '#' protection prefix.
    sed 's/^#//' >"${unittestF}" <<EOF
## Asserts with invalid argument counts should be counted as failures.
#test_assert() { ${assert}; }
#SHUNIT_COLOR='none'
#. ${TH_SHUNIT}
EOF
    ( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
    grep '^FAILED' "${stdoutF}" >/dev/null
    assertTrue "failure message for ${assert} was not generated" $?
  done
}
# Ensure that test fails if setup/teardown functions fail.
testIssue77() {
  # A failing setup/teardown hook must cause the whole run to end FAILED.
  unittestF="${SHUNIT_TMPDIR}/unittest"
  for func in oneTimeSetUp setUp tearDown oneTimeTearDown; do
    # Embedded test script; sed strips the '#' protection prefix.
    sed 's/^#//' >"${unittestF}" <<EOF
## Environment failure should end test.
#${func}() { return ${SHUNIT_FALSE}; }
#test_true() { assertTrue ${SHUNIT_TRUE}; }
#SHUNIT_COLOR='none'
#. ${TH_SHUNIT}
EOF
    ( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
    grep '^FAILED' "${stdoutF}" >/dev/null
    assertTrue "failure of ${func}() did not end test" $?
  done
}
# Ensure a test failure is recorded for code containing syntax errors.
# https://github.com/kward/shunit2/issues/84
testIssue84() {
  # A test whose code contains a shell syntax error must still be recorded
  # as a failure rather than silently passing.
  unittestF="${SHUNIT_TMPDIR}/unittest"
  # Embedded test script; sed strips the '#' protection prefix. The quoted
  # delimiter (<<\EOF) keeps the deliberately broken code unexpanded here.
  sed 's/^#//' >"${unittestF}" <<\EOF
## Function with syntax error.
#syntax_error() { ${!#3442} -334 a$@2[1]; }
#test_syntax_error() {
#  syntax_error
#  assertTrue ${SHUNIT_TRUE}
#}
#SHUNIT_COLOR='none'
#SHUNIT_TEST_PREFIX='--- '
#. ${TH_SHUNIT}
EOF
  ( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
  grep '^FAILED' "${stdoutF}" >/dev/null
  # Fix: the message previously interpolated ${assert}, a variable never set
  # in this function (copy/paste from testIssue69); under 'set -u' that is an
  # unbound-variable error and the message was misleading anyway.
  assertTrue 'failure message for syntax error was not generated' $?
}
testPrepForSourcing() {
  # Paths must come back in a form safe to '.' (source): absolute and
  # explicitly relative paths are untouched, bare names gain './'.
  assertEquals '/abc' "`_shunit_prepForSourcing '/abc'`"
  assertEquals './abc' "`_shunit_prepForSourcing './abc'`"
  assertEquals './abc' "`_shunit_prepForSourcing 'abc'`"
}
testEscapeCharInStr() {
  # Table-driven check: each row is 'desc char str want'. The quoted 'EOF'
  # delimiter keeps the shell from expanding anything inside the table.
  while read -r desc char str want; do
    got=`_shunit_escapeCharInStr "${char}" "${str}"`
    assertEquals "${desc}" "${want}" "${got}"
  done <<'EOF'
backslash \ '' ''
backslash_pre \ \def \\def
backslash_mid \ abc\def abc\\def
backslash_post \ abc\ abc\\
quote " '' ''
quote_pre " "def \"def
quote_mid " abc"def abc\"def
quote_post " abc" abc\"
string $ '' ''
string_pre $ $def \$def
string_mid $ abc$def abc\$def
string_post $ abc$ abc\$
EOF

  # TODO(20170924:kward) fix or remove.
#  actual=`_shunit_escapeCharInStr "'" ''`
#  assertEquals '' "${actual}"
#  assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
#  assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
#  assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`

#  # Must put the backtick in a variable so the shell doesn't misinterpret it
#  # while inside a backticked sequence (e.g. `echo '`'` would fail).
#  backtick='`'
#  actual=`_shunit_escapeCharInStr ${backtick} ''`
#  assertEquals '' "${actual}"
#  assertEquals '\`abc' \
#      `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
#  assertEquals 'abc\`' \
#      `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
#  assertEquals 'abc\`def' \
#      `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
}
testEscapeCharInStr_specialChars() {
  # Make sure our forward slash doesn't upset sed.
  assertEquals '/' "`_shunit_escapeCharInStr '\' '/'`"

  # Some shells escape these differently.
  # TODO(20170924:kward) fix or remove.
  #assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
  #assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
}
# Test the various ways of declaring functions.
#
# Prefixing (then stripping) with comment symbol so these functions aren't
# treated as real functions by shUnit2.
testExtractTestFunctions() {
  f="${SHUNIT_TMPDIR}/extract_test_functions"
  # Build a sample file exercising the various function-declaration styles;
  # the '#' protection (stripped by sed) keeps shUnit2 from running them here.
  sed 's/^#//' <<EOF >"${f}"
## Function on a single line.
#testABC() { echo 'ABC'; }
## Multi-line function with '{' on next line.
#test_def()
# {
#  echo 'def'
#}
## Multi-line function with '{' on first line.
#testG3 () {
#  echo 'G3'
#}
## Function with numerical values in name.
#function test4() { echo '4'; }
## Leading space in front of function.
# test5() { echo '5'; }
## Function with '_' chars in name.
#some_test_function() { echo 'some func'; }
## Function that sets variables.
#func_with_test_vars() {
#  testVariable=1234
#}
EOF

  # Only functions whose names begin with 'test' should be extracted.
  actual=`_shunit_extractTestFunctions "${f}"`
  assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
}
# Test that certain external commands sometimes "stubbed" by users
# are escaped. See Issue #54.
testProtectedCommands() {
  # Commands users commonly stub (mkdir, rm, cat, chmod) must be invoked via
  # 'command <name>' inside shUnit2 so user stubs cannot break the framework.
  for c in mkdir rm cat chmod; do
    grep "^[^#]*${c} " "${TH_SHUNIT}" | grep -qv "command ${c}"
    assertFalse "external call to ${c} not protected somewhere" $?
  done
  # The '[' test builtin and the '.' source builtin get the same treatment.
  grep '^[^#]*[^ ] *\[' "${TH_SHUNIT}" | grep -qv 'command \['
  assertFalse "call to [ ... ] not protected somewhere" $?
  grep '^[^#]* *\.' "${TH_SHUNIT}" | grep -qv 'command \.'
  assertFalse "call to . not protected somewhere" $?
}
setUp() {
  # Truncate the capture files before every test.
  for f in "${stdoutF}" "${stderrF}"; do
    cp /dev/null "${f}"
  done

  # Reconfigure coloring as some tests override default behavior.
  _shunit_configureColor "${SHUNIT_COLOR_DEFAULT}"
}
oneTimeSetUp() {
  # Remember the default color mode so setUp() can restore it between tests.
  SHUNIT_COLOR_DEFAULT="${SHUNIT_COLOR}"
  th_oneTimeSetUp
}
# Load and run shUnit2.
# shellcheck disable=SC2034
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. "${TH_SHUNIT}"

124
dev/tests/shunit2/shunit2_test.sh Executable file
View File

@ -0,0 +1,124 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test suite runner.
#
# This script runs all the unit tests that can be found, and generates a nice
# report of the tests.
# NOTE(review): this legacy runner leaves several expansions unquoted
# (${shell}, ${suite}, `basename $0`); fine for the controlled file names it
# handles, but not safe for paths containing spaces — confirm before reuse.
MY_NAME=`basename $0`
MY_PATH=`dirname $0`

PREFIX='shunit2_test_'
SHELLS='/bin/sh /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
# Collect every test suite matching the prefix in the current directory.
TESTS=''
for test in ${PREFIX}[a-z]*.sh; do
  TESTS="${TESTS} ${test}"
done

# load common unit test functions
. ../lib/versions
. ./shunit2_test_helpers

# Print usage information to STDOUT.
usage()
{
  echo "usage: ${MY_NAME} [-e key=val ...] [-s shell(s)] [-t test(s)]"
}

env=''

# process command line flags
while getopts 'e:hs:t:' opt; do
  case ${opt} in
    e)  # set an environment variable
      key=`expr "${OPTARG}" : '\([^=]*\)='`
      val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
      # Both a key and a value are required.
      if [ -z "${key}" -o -z "${val}" ]; then
        usage
        exit 1
      fi
      eval "${key}='${val}'"
      export ${key}
      env="${env:+${env} }${key}"
      ;;
    h) usage; exit 0 ;;  # output help
    s) shells=${OPTARG} ;;  # list of shells to run
    t) tests=${OPTARG} ;;  # list of tests to run
    *) usage; exit 1 ;;
  esac
done
shift `expr ${OPTIND} - 1`

# fill shells and/or tests with defaults when not given on the command line
shells=${shells:-${SHELLS}}
tests=${tests:-${TESTS}}

# error checking
if [ -z "${tests}" ]; then
  th_error 'no tests found to run; exiting'
  exit 1
fi

cat <<EOF
#------------------------------------------------------------------------------
# System data
#
# test run info
shells: ${shells}
tests: ${tests}
EOF
# Echo any -e key=value environment overrides that were applied.
for key in ${env}; do
  eval "echo \"${key}=\$${key}\""
done
echo

# output system data
echo "# system info"
echo "$ date"
date
echo
echo "$ uname -mprsv"
uname -mprsv

#
# run tests: every suite under every requested shell
#
for shell in ${shells}; do
  echo

  # check for existence of shell
  if [ ! -x ${shell} ]; then
    th_warn "unable to run tests with the ${shell} shell"
    continue
  fi

  cat <<EOF
#------------------------------------------------------------------------------
# Running the test suite with ${shell}
#
EOF

  SHUNIT_SHELL=${shell}  # pass shell onto tests
  shell_name=`basename ${shell}`
  shell_version=`versions_shellVersion "${shell}"`

  echo "shell name: ${shell_name}"
  echo "shell version: ${shell_version}"

  # execute the tests
  for suite in ${tests}; do
    suiteName=`expr "${suite}" : "${PREFIX}\(.*\).sh"`
    echo
    echo "--- Executing the '${suiteName}' test suite ---"
    ( exec ${shell} ./${suite} 2>&1; )
  done
done

View File

@ -1,25 +1,23 @@
#! /bin/sh #! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2 # vim:et:ft=sh:sts=2:sw=2
# #
# shunit2 unit test for assert functions. # Copyright 2008 Kate Ward. All Rights Reserved.
# # Released under the LGPL (GNU Lesser General Public License)
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
# #
# Author: kate.ward@forestent.com (Kate Ward) # Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
# #
# Disable source following. # shUnit2 unit test for assert functions
# shellcheck disable=SC1090,SC1091
# These variables will be overridden by the test helpers. # load test helpers
stdoutF="${TMPDIR:-/tmp}/STDOUT"
stderrF="${TMPDIR:-/tmp}/STDERR"
# Load test helpers.
. ./shunit2_test_helpers . ./shunit2_test_helpers
commonEqualsSame() { #------------------------------------------------------------------------------
# suite tests
#
commonEqualsSame()
{
fn=$1 fn=$1
( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
@ -44,7 +42,8 @@ commonEqualsSame() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
commonNotEqualsSame() { commonNotEqualsSame()
{
fn=$1 fn=$1
( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
@ -66,23 +65,28 @@ commonNotEqualsSame() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
testAssertEquals() { testAssertEquals()
{
commonEqualsSame 'assertEquals' commonEqualsSame 'assertEquals'
} }
testAssertNotEquals() { testAssertNotEquals()
{
commonNotEqualsSame 'assertNotEquals' commonNotEqualsSame 'assertNotEquals'
} }
testAssertSame() { testAssertSame()
{
commonEqualsSame 'assertSame' commonEqualsSame 'assertSame'
} }
testAssertNotSame() { testAssertNotSame()
{
commonNotEqualsSame 'assertNotSame' commonNotEqualsSame 'assertNotSame'
} }
testAssertNull() { testAssertNull()
{
( assertNull '' >"${stdoutF}" 2>"${stderrF}" ) ( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}" th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
@ -115,7 +119,6 @@ testAssertNotNull()
th_assertTrueWithNoOutput 'not null, with single-quote' $? \ th_assertTrueWithNoOutput 'not null, with single-quote' $? \
"${stdoutF}" "${stderrF}" "${stdoutF}" "${stderrF}"
# shellcheck disable=SC2016
( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" ) ( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with dollar' $? \ th_assertTrueWithNoOutput 'not null, with dollar' $? \
"${stdoutF}" "${stderrF}" "${stdoutF}" "${stderrF}"
@ -127,13 +130,14 @@ testAssertNotNull()
( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" ) ( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}" th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
# There is no test for too few arguments as $1 might actually be null. # there is no test for too few arguments as $1 might actually be null
( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" ) ( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
testAssertTrue() { testAssertTrue()
{
( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" ) ( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}" th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
@ -159,7 +163,8 @@ testAssertTrue() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
testAssertFalse() { testAssertFalse()
{
( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" ) ( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}" th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
@ -185,13 +190,17 @@ testAssertFalse() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
oneTimeSetUp() { #------------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp th_oneTimeSetUp
MSG='This is a test message' MSG='This is a test message'
} }
# Load and run shunit2. # load and run shUnit2
# shellcheck disable=SC2034
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0 [ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. "${TH_SHUNIT}" . ${TH_SHUNIT}

View File

@ -1,25 +1,23 @@
#! /bin/sh #! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2 # vim:et:ft=sh:sts=2:sw=2
# #
# shUnit2 unit test for failure functions # Copyright 2008 Kate Ward. All Rights Reserved.
#
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License) # Released under the LGPL (GNU Lesser General Public License)
# #
# Author: kate.ward@forestent.com (Kate Ward) # Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
# #
# Disable source following. # shUnit2 unit test for failure functions
# shellcheck disable=SC1090,SC1091
# These variables will be overridden by the test helpers. # load common unit-test functions
stdoutF="${TMPDIR:-/tmp}/STDOUT"
stderrF="${TMPDIR:-/tmp}/STDERR"
# Load test helpers.
. ./shunit2_test_helpers . ./shunit2_test_helpers
testFail() { #-----------------------------------------------------------------------------
# suite tests
#
testFail()
{
( fail >"${stdoutF}" 2>"${stderrF}" ) ( fail >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}" th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
@ -30,7 +28,8 @@ testFail() {
th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
testFailNotEquals() { testFailNotEquals()
{
( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}" th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
@ -50,7 +49,8 @@ testFailNotEquals() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
testFailSame() { testFailSame()
{
( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}" th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
@ -70,13 +70,17 @@ testFailSame() {
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}" th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
} }
oneTimeSetUp() { #-----------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp th_oneTimeSetUp
MSG='This is a test message' MSG='This is a test message'
} }
# Load and run shUnit2. # load and run shUnit2
# shellcheck disable=SC2034
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0 [ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. "${TH_SHUNIT}" . ${TH_SHUNIT}

View File

@ -1,112 +1,104 @@
# $Id$
# vim:et:ft=sh:sts=2:sw=2 # vim:et:ft=sh:sts=2:sw=2
# #
# shUnit2 unit test common functions
#
# Copyright 2008 Kate Ward. All Rights Reserved. # Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license. # Released under the LGPL (GNU Lesser General Public License)
# #
# Author: kate.ward@forestent.com (Kate Ward) # Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
# #
### ShellCheck (http://www.shellcheck.net/) # shUnit2 unit test common functions
# Commands are purposely escaped so they can be mocked outside shUnit2.
# shellcheck disable=SC1001,SC1012
# expr may be antiquated, but it is the only solution in some cases.
# shellcheck disable=SC2003
# $() are not fully portable (POSIX != portable).
# shellcheck disable=SC2006
# Treat unset variables as an error when performing parameter expansion. # treat unset variables as an error when performing parameter expansion
set -u set -u
# Set shwordsplit for zsh. # set shwordsplit for zsh
\[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit [ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
# #
# Constants. # constants
# #
# Path to shUnit2 library. Can be overridden by setting SHUNIT_INC. # path to shUnit2 library. can be overridden by setting SHUNIT_INC
TH_SHUNIT=${SHUNIT_INC:-./shunit2}; export TH_SHUNIT TH_SHUNIT=${SHUNIT_INC:-./shunit2}
# Configure debugging. Set the DEBUG environment variable to any # configure debugging. set the DEBUG environment variable to any
# non-empty value to enable debug output, or TRACE to enable trace # non-empty value to enable debug output, or TRACE to enable trace
# output. # output.
TRACE=${TRACE:+'th_trace '} TRACE=${TRACE:+'th_trace '}
\[ -n "${TRACE}" ] && DEBUG=1 [ -n "${TRACE}" ] && DEBUG=1
\[ -z "${TRACE}" ] && TRACE=':' [ -z "${TRACE}" ] && TRACE=':'
DEBUG=${DEBUG:+'th_debug '} DEBUG=${DEBUG:+'th_debug '}
\[ -z "${DEBUG}" ] && DEBUG=':' [ -z "${DEBUG}" ] && DEBUG=':'
# #
# Variables. # variables
# #
th_RANDOM=0 th_RANDOM=0
# #
# Functions. # functions
# #
# Logging functions. # message functions
th_trace() { echo "${MY_NAME}:TRACE $*" >&2; } th_trace() { echo "${MY_NAME}:TRACE $@" >&2; }
th_debug() { echo "${MY_NAME}:DEBUG $*" >&2; } th_debug() { echo "${MY_NAME}:DEBUG $@" >&2; }
th_info() { echo "${MY_NAME}:INFO $*" >&2; } th_info() { echo "${MY_NAME}:INFO $@" >&2; }
th_warn() { echo "${MY_NAME}:WARN $*" >&2; } th_warn() { echo "${MY_NAME}:WARN $@" >&2; }
th_error() { echo "${MY_NAME}:ERROR $*" >&2; } th_error() { echo "${MY_NAME}:ERROR $@" >&2; }
th_fatal() { echo "${MY_NAME}:FATAL $*" >&2; } th_fatal() { echo "${MY_NAME}:FATAL $@" >&2; }
# Output subtest name. # output subtest name
th_subtest() { echo " $*" >&2; } th_subtest() { echo " $@" >&2; }
th_oneTimeSetUp() { th_oneTimeSetUp()
# These files will be cleaned up automatically by shUnit2. {
# these files will be cleaned up automatically by shUnit2
stdoutF="${SHUNIT_TMPDIR}/stdout" stdoutF="${SHUNIT_TMPDIR}/stdout"
stderrF="${SHUNIT_TMPDIR}/stderr" stderrF="${SHUNIT_TMPDIR}/stderr"
returnF="${SHUNIT_TMPDIR}/return" returnF="${SHUNIT_TMPDIR}/return"
expectedF="${SHUNIT_TMPDIR}/expected" expectedF="${SHUNIT_TMPDIR}/expected"
export stdoutF stderrF returnF expectedF
} }
# Generate a random number. # generate a random number
th_generateRandom() { th_generateRandom()
{
tfgr_random=${th_RANDOM} tfgr_random=${th_RANDOM}
while \[ "${tfgr_random}" = "${th_RANDOM}" ]; do while [ "${tfgr_random}" = "${th_RANDOM}" ]; do
# shellcheck disable=SC2039 if [ -n "${RANDOM:-}" ]; then
if \[ -n "${RANDOM:-}" ]; then
# $RANDOM works # $RANDOM works
# shellcheck disable=SC2039
tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$ tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
elif \[ -r '/dev/urandom' ]; then elif [ -r '/dev/urandom' ]; then
tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'` tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
else else
tfgr_date=`date '+%H%M%S'` tfgr_date=`date '+%H%M%S'`
tfgr_random=`expr "${tfgr_date}" \* $$` tfgr_random=`expr ${tfgr_date} \* $$`
unset tfgr_date unset tfgr_date
fi fi
\[ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1 [ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
done done
th_RANDOM=${tfgr_random} th_RANDOM=${tfgr_random}
unset tfgr_random unset tfgr_random
} }
# This section returns the data section from the specified section of a file. A # this section returns the data section from the specified section of a file. a
# data section is defined by a [header], one or more lines of data, and then a # datasection is defined by a [header], one or more lines of data, and then a
# blank line. # blank line.
th_getDataSect() { th_getDataSect()
{
th_sgrep "\\[$1\\]" "$2" |sed '1d' th_sgrep "\\[$1\\]" "$2" |sed '1d'
} }
# This function greps a section from a file. a section is defined as a group of # this function greps a section from a file. a section is defined as a group of
# lines preceded and followed by blank lines.. # lines preceeded and followed by blank lines.
th_sgrep() { th_sgrep()
{
th_pattern_=$1 th_pattern_=$1
shift shift
# shellcheck disable=SC2068
sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d' sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
unset th_pattern_ unset th_pattern_
@ -121,14 +113,15 @@ th_sgrep() {
# th_rtrn_: integer: the return value of the subtest performed # th_rtrn_: integer: the return value of the subtest performed
# th_stdout_: string: filename where stdout was redirected to # th_stdout_: string: filename where stdout was redirected to
# th_stderr_: string: filename where stderr was redirected to # th_stderr_: string: filename where stderr was redirected to
th_assertTrueWithNoOutput() { th_assertTrueWithNoOutput()
{
th_test_=$1 th_test_=$1
th_rtrn_=$2 th_rtrn_=$2
th_stdout_=$3 th_stdout_=$3
th_stderr_=$4 th_stderr_=$4
assertTrue "${th_test_}; expected return value of zero" "${th_rtrn_}" assertTrue "${th_test_}; expected return value of zero" ${th_rtrn_}
\[ "${th_rtrn_}" -ne "${SHUNIT_TRUE}" ] && \cat "${th_stderr_}" [ ${th_rtrn_} -ne ${SHUNIT_TRUE} ] && cat "${th_stderr_}"
assertFalse "${th_test_}; expected no output to STDOUT" \ assertFalse "${th_test_}; expected no output to STDOUT" \
"[ -s '${th_stdout_}' ]" "[ -s '${th_stdout_}' ]"
assertFalse "${th_test_}; expected no output to STDERR" \ assertFalse "${th_test_}; expected no output to STDERR" \
@ -152,13 +145,13 @@ th_assertFalseWithOutput()
th_stdout_=$3 th_stdout_=$3
th_stderr_=$4 th_stderr_=$4
assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}" assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
assertTrue "${th_test_}; expected output to STDOUT" \ assertTrue "${th_test_}; expected output to STDOUT" \
"[ -s '${th_stdout_}' ]" "[ -s '${th_stdout_}' ]"
assertFalse "${th_test_}; expected no output to STDERR" \ assertFalse "${th_test_}; expected no output to STDERR" \
"[ -s '${th_stderr_}' ]" "[ -s '${th_stderr_}' ]"
\[ -s "${th_stdout_}" -a ! -s "${th_stderr_}" ] || \ [ -s "${th_stdout_}" -a ! -s "${th_stderr_}" ] || \
_th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}" _th_showOutput ${SHUNIT_FALSE} "${th_stdout_}" "${th_stderr_}"
unset th_test_ th_rtrn_ th_stdout_ th_stderr_ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
} }
@ -171,19 +164,20 @@ th_assertFalseWithOutput()
# th_rtrn_: integer: the return value of the subtest performed # th_rtrn_: integer: the return value of the subtest performed
# th_stdout_: string: filename where stdout was redirected to # th_stdout_: string: filename where stdout was redirected to
# th_stderr_: string: filename where stderr was redirected to # th_stderr_: string: filename where stderr was redirected to
th_assertFalseWithError() { th_assertFalseWithError()
{
th_test_=$1 th_test_=$1
th_rtrn_=$2 th_rtrn_=$2
th_stdout_=$3 th_stdout_=$3
th_stderr_=$4 th_stderr_=$4
assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}" assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
assertFalse "${th_test_}; expected no output to STDOUT" \ assertFalse "${th_test_}; expected no output to STDOUT" \
"[ -s '${th_stdout_}' ]" "[ -s '${th_stdout_}' ]"
assertTrue "${th_test_}; expected output to STDERR" \ assertTrue "${th_test_}; expected output to STDERR" \
"[ -s '${th_stderr_}' ]" "[ -s '${th_stderr_}' ]"
\[ ! -s "${th_stdout_}" -a -s "${th_stderr_}" ] || \ [ ! -s "${th_stdout_}" -a -s "${th_stderr_}" ] || \
_th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}" _th_showOutput ${SHUNIT_FALSE} "${th_stdout_}" "${th_stderr_}"
unset th_test_ th_rtrn_ th_stdout_ th_stderr_ unset th_test_ th_rtrn_ th_stdout_ th_stderr_
} }
@ -192,33 +186,34 @@ th_assertFalseWithError() {
# when a non-zero return value is encountered. To properly catch these values, # when a non-zero return value is encountered. To properly catch these values,
# they are either written to disk, or recognized as an error the file is empty. # they are either written to disk, or recognized as an error the file is empty.
th_clearReturn() { cp /dev/null "${returnF}"; } th_clearReturn() { cp /dev/null "${returnF}"; }
th_queryReturn() { th_queryReturn()
if \[ -s "${returnF}" ]; then {
th_return=`\cat "${returnF}"` if [ -s "${returnF}" ]; then
th_return=`cat "${returnF}"`
else else
th_return=${SHUNIT_ERROR} th_return=${SHUNIT_ERROR}
fi fi
export th_return
} }
# Providing external and internal calls to the showOutput helper function. # Providing external and internal calls to the showOutput helper function.
th_showOutput() { _th_showOutput "$@"; } th_showOutput() { _th_showOutput $@; }
_th_showOutput() { _th_showOutput()
{
_th_return_=$1 _th_return_=$1
_th_stdout_=$2 _th_stdout_=$2
_th_stderr_=$3 _th_stderr_=$3
isSkipping isSkipping
if \[ $? -eq "${SHUNIT_FALSE}" -a "${_th_return_}" != "${SHUNIT_TRUE}" ]; then if [ $? -eq ${SHUNIT_FALSE} -a ${_th_return_} != ${SHUNIT_TRUE} ]; then
if \[ -n "${_th_stdout_}" -a -s "${_th_stdout_}" ]; then if [ -n "${_th_stdout_}" -a -s "${_th_stdout_}" ]; then
echo '>>> STDOUT' >&2 echo '>>> STDOUT' >&2
\cat "${_th_stdout_}" >&2 cat "${_th_stdout_}" >&2
fi fi
if \[ -n "${_th_stderr_}" -a -s "${_th_stderr_}" ]; then if [ -n "${_th_stderr_}" -a -s "${_th_stderr_}" ]; then
echo '>>> STDERR' >&2 echo '>>> STDERR' >&2
\cat "${_th_stderr_}" >&2 cat "${_th_stderr_}" >&2
fi fi
if \[ -n "${_th_stdout_}" -o -n "${_th_stderr_}" ]; then if [ -n "${_th_stdout_}" -o -n "${_th_stderr_}" ]; then
echo '<<< end output' >&2 echo '<<< end output' >&2
fi fi
fi fi
@ -227,7 +222,7 @@ _th_showOutput() {
} }
# #
# Main. # main
# #
${TRACE} 'trace output enabled' ${TRACE} 'trace output enabled'

View File

@ -1,110 +1,108 @@
#! /bin/sh #! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2 # vim:et:ft=sh:sts=2:sw=2
# #
# shunit2 unit test for macros. # Copyright 2008 Kate Ward. All Rights Reserved.
# # Released under the LGPL (GNU Lesser General Public License)
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
#
# Author: kate.ward@forestent.com (Kate Ward) # Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2
# #
### ShellCheck http://www.shellcheck.net/ # shUnit2 unit test for macros.
# Disable source following.
# shellcheck disable=SC1090,SC1091
# Presence of LINENO variable is checked.
# shellcheck disable=SC2039
# These variables will be overridden by the test helpers. # load test helpers
stdoutF="${TMPDIR:-/tmp}/STDOUT"
stderrF="${TMPDIR:-/tmp}/STDERR"
# Load test helpers.
. ./shunit2_test_helpers . ./shunit2_test_helpers
testAssertEquals() { #------------------------------------------------------------------------------
# Start skipping if LINENO not available. # suite tests
#
testAssertEquals()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_EQUALS_ failure' ${rtrn} assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testAssertNotEquals() { testAssertNotEquals()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn} assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testSame() { testSame()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_SAME_ failure' ${rtrn} assertTrue '_ASSERT_SAME_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testNotSame() { testNotSame()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn} assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testNull() { testNull()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NULL_ failure' ${rtrn} assertTrue '_ASSERT_NULL_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testNotNull() testNotNull()
@ -116,64 +114,68 @@ testNotNull()
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn} assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stdoutF}" "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stdoutF}" "${stderrF}" >&2
} }
testAssertTrue() { testAssertTrue()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_TRUE_} "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_TRUE_} ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_TRUE_ failure' ${rtrn} assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_TRUE_} '"some msg"' "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" )
( ${_ASSERT_TRUE_} '"some msg"' ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testAssertFalse() { testAssertFalse()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_FALSE_} "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_FALSE_} ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_FALSE_ failure' ${rtrn} assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_FALSE_} '"some msg"' "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" ) ( ${_ASSERT_FALSE_} '"some msg"' ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn} assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testFail() { testFail()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_ failure' ${rtrn} assertTrue '_FAIL_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_ w/ msg failure' ${rtrn} assertTrue '_FAIL_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testFailNotEquals() testFailNotEquals()
@ -185,57 +187,60 @@ testFailNotEquals()
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn} assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn} assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testFailSame() { testFailSame()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_SAME_ failure' ${rtrn} assertTrue '_FAIL_SAME_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn} assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
testFailNotSame() { testFailNotSame()
# Start skipping if LINENO not available. {
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping [ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn} assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" ) ( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$? rtrn=$?
assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn} assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2 [ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
} }
oneTimeSetUp() { #------------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp th_oneTimeSetUp
} }
# Disable output coloring as it breaks the tests. # load and run shUnit2
SHUNIT_COLOR='none'; export SHUNIT_COLOR [ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}
# Load and run shUnit2.
# shellcheck disable=SC2034
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT="$0"
. "${TH_SHUNIT}"

View File

@ -0,0 +1,160 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit tests of miscellaneous things
# load test helpers
. ./shunit2_test_helpers
#------------------------------------------------------------------------------
# suite tests
#
# Note: the test script is prefixed with '#' chars so that shUnit2 does not
# incorrectly interpret the embedded functions as real functions.
testUnboundVariable()
{
unittestF="${SHUNIT_TMPDIR}/unittest"
sed 's/^#//' >"${unittestF}" <<EOF
## treat unset variables as an error when performing parameter expansion
#set -u
#
#boom() { x=\$1; } # this function goes boom if no parameters are passed!
#test_boom()
#{
# assertEquals 1 1
# boom # No parameter given
# assertEquals 0 \$?
#}
#. ${TH_SHUNIT}
EOF
( exec ${SHUNIT_SHELL:-sh} "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
assertFalse 'expected a non-zero exit value' $?
grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
assertTrue 'assert message was not generated' $?
grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
assertTrue 'test count message was not generated' $?
grep '^FAILED' "${stdoutF}" >/dev/null
assertTrue 'failure message was not generated' $?
}
testIssue7()
{
( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
diff "${stdoutF}" - >/dev/null <<EOF
ASSERT:Some message. expected:<1> but was:<2>
EOF
rtrn=$?
assertEquals ${SHUNIT_TRUE} ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testPrepForSourcing()
{
assertEquals '/abc' `_shunit_prepForSourcing '/abc'`
assertEquals './abc' `_shunit_prepForSourcing './abc'`
assertEquals './abc' `_shunit_prepForSourcing 'abc'`
}
testEscapeCharInStr()
{
actual=`_shunit_escapeCharInStr '\' ''`
assertEquals '' "${actual}"
assertEquals 'abc\\' `_shunit_escapeCharInStr '\' 'abc\'`
assertEquals 'abc\\def' `_shunit_escapeCharInStr '\' 'abc\def'`
assertEquals '\\def' `_shunit_escapeCharInStr '\' '\def'`
actual=`_shunit_escapeCharInStr '"' ''`
assertEquals '' "${actual}"
assertEquals 'abc\"' `_shunit_escapeCharInStr '"' 'abc"'`
assertEquals 'abc\"def' `_shunit_escapeCharInStr '"' 'abc"def'`
assertEquals '\"def' `_shunit_escapeCharInStr '"' '"def'`
actual=`_shunit_escapeCharInStr '$' ''`
assertEquals '' "${actual}"
assertEquals 'abc\$' `_shunit_escapeCharInStr '$' 'abc$'`
assertEquals 'abc\$def' `_shunit_escapeCharInStr '$' 'abc$def'`
assertEquals '\$def' `_shunit_escapeCharInStr '$' '$def'`
# actual=`_shunit_escapeCharInStr "'" ''`
# assertEquals '' "${actual}"
# assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
# assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
# assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
# # must put the backtick in a variable so the shell doesn't misinterpret it
# # while inside a backticked sequence (e.g. `echo '`'` would fail).
# backtick='`'
# actual=`_shunit_escapeCharInStr ${backtick} ''`
# assertEquals '' "${actual}"
# assertEquals '\`abc' \
# `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
# assertEquals 'abc\`' \
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
# assertEquals 'abc\`def' \
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
}
testEscapeCharInStr_specialChars()
{
# make sure our forward slash doesn't upset sed
assertEquals '/' `_shunit_escapeCharInStr '\' '/'`
# some shells escape these differently
#assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
#assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
}
# Test the various ways of declaring functions.
#
# Prefixing (then stripping) with comment symbol so these functions aren't
# treated as real functions by shUnit2.
testExtractTestFunctions()
{
f="${SHUNIT_TMPDIR}/extract_test_functions"
sed 's/^#//' <<EOF >"${f}"
#testABC() { echo 'ABC'; }
#test_def() {
# echo 'def'
#}
#testG3 ()
#{
# echo 'G3'
#}
#function test4() { echo '4'; }
# test5() { echo '5'; }
#some_test_function() { echo 'some func'; }
#func_with_test_vars() {
# testVariable=1234
#}
EOF
actual=`_shunit_extractTestFunctions "${f}"`
assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
}
#------------------------------------------------------------------------------
# suite functions
#
setUp()
{
for f in ${expectedF} ${stdoutF} ${stderrF}; do
cp /dev/null ${f}
done
}
oneTimeSetUp()
{
th_oneTimeSetUp
}
# load and run shUnit2
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}

View File

@ -1,38 +1,41 @@
#! /bin/sh #! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2 # vim:et:ft=sh:sts=2:sw=2
# #
# shUnit2 unit test for standalone operation. # Copyright 2010 Kate Ward. All Rights Reserved.
# # Released under the LGPL (GNU Lesser General Public License)
# Copyright 2010-2017 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
#
# Author: kate.ward@forestent.com (Kate Ward) # Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shunit2 #
# shUnit2 unit test for standalone operation.
# #
# This unit test is purely to test that calling shunit2 directly, while passing # This unit test is purely to test that calling shunit2 directly, while passing
# the name of a unit test script, works. When run, this script determines if it # the name of a unit test script, works. When run, this script determines if it
# is running as a standalone program, and calls main() if it is. # is running as a standalone program, and calls main() if it is.
#
### ShellCheck http://www.shellcheck.net/
# $() are not fully portable (POSIX != portable).
# shellcheck disable=SC2006
# Disable source following.
# shellcheck disable=SC1090,SC1091
ARGV0="`basename "$0"`" ARGV0=`basename "$0"`
# Load test helpers. # load test helpers
. ./shunit2_test_helpers . ./shunit2_test_helpers
testStandalone() { #------------------------------------------------------------------------------
assertTrue "${SHUNIT_TRUE}" # suite tests
#
testStandalone()
{
assertTrue ${SHUNIT_TRUE}
} }
main() { #------------------------------------------------------------------------------
# main
#
main()
{
${TH_SHUNIT} "${ARGV0}" ${TH_SHUNIT} "${ARGV0}"
} }
# Are we running as a standalone? # are we running as a standalone?
if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
if [ $# -gt 0 ]; then main "$@"; else main; fi if [ $# -gt 0 ]; then main "$@"; else main; fi
fi fi

View File

@ -1,165 +0,0 @@
#! /bin/sh
# vim:et:ft=sh:sts=2:sw=2
#
# Unit test suite runner.
#
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
# Released under the Apache 2.0 license.
#
# Author: kate.ward@forestent.com (Kate Ward)
# https://github.com/kward/shlib
#
# This script runs all the unit tests that can be found, and generates a nice
# report of the tests.
#
### ShellCheck (http://www.shellcheck.net/)
# Disable source following.
# shellcheck disable=SC1090,SC1091
# expr may be antiquated, but it is the only solution in some cases.
# shellcheck disable=SC2003
# $() are not fully portable (POSIX != portable).
# shellcheck disable=SC2006
# Return if test_runner already loaded.
[ -z "${RUNNER_LOADED:-}" ] || return 0
RUNNER_LOADED=0
RUNNER_ARGV0=`basename "$0"`
RUNNER_SHELLS='/bin/sh ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
RUNNER_TEST_SUFFIX='_test.sh'
runner_warn() { echo "runner:WARN $*" >&2; }
runner_error() { echo "runner:ERROR $*" >&2; }
runner_fatal() { echo "runner:FATAL $*" >&2; exit 1; }
runner_usage() {
echo "usage: ${RUNNER_ARGV0} [-e key=val ...] [-s shell(s)] [-t test(s)]"
}
_runner_tests() { echo ./*${RUNNER_TEST_SUFFIX} |sed 's#./##g'; }
_runner_testName() {
# shellcheck disable=SC1117
_runner_testName_=`expr "${1:-}" : "\(.*\)${RUNNER_TEST_SUFFIX}"`
if [ -n "${_runner_testName_}" ]; then
echo "${_runner_testName_}"
else
echo 'unknown'
fi
unset _runner_testName_
}
main() {
# Find and load versions library.
for _runner_dir_ in . ${LIB_DIR:-lib}; do
if [ -r "${_runner_dir_}/versions" ]; then
_runner_lib_dir_="${_runner_dir_}"
break
fi
done
[ -n "${_runner_lib_dir_}" ] || runner_fatal 'Unable to find versions library.'
. "${_runner_lib_dir_}/versions" || runner_fatal 'Unable to load versions library.'
unset _runner_dir_ _runner_lib_dir_
# Process command line flags.
env=''
while getopts 'e:hs:t:' opt; do
case ${opt} in
e) # set an environment variable
key=`expr "${OPTARG}" : '\([^=]*\)='`
val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
# shellcheck disable=SC2166
if [ -z "${key}" -o -z "${val}" ]; then
runner_usage
exit 1
fi
eval "${key}='${val}'"
eval "export ${key}"
env="${env:+${env} }${key}"
;;
h) runner_usage; exit 0 ;; # help output
s) shells=${OPTARG} ;; # list of shells to run
t) tests=${OPTARG} ;; # list of tests to run
*) runner_usage; exit 1 ;;
esac
done
shift "`expr ${OPTIND} - 1`"
# Fill shells and/or tests.
shells=${shells:-${RUNNER_SHELLS}}
[ -z "${tests}" ] && tests=`_runner_tests`
# Error checking.
if [ -z "${tests}" ]; then
runner_error 'no tests found to run; exiting'
exit 1
fi
cat <<EOF
#------------------------------------------------------------------------------
# System data.
#
$ uname -mprsv
`uname -mprsv`
OS Name: `versions_osName`
OS Version: `versions_osVersion`
### Test run info.
shells: ${shells}
tests: ${tests}
EOF
for key in ${env}; do
eval "echo \"${key}=\$${key}\""
done
# Run tests.
for shell in ${shells}; do
echo
cat <<EOF
#------------------------------------------------------------------------------
# Running the test suite with ${shell}.
#
EOF
# Check for existence of shell.
shell_bin=${shell}
shell_name=''
shell_present=${FALSE}
case ${shell} in
ash)
shell_bin=`which busybox |grep -v '^no busybox'`
[ $? -eq "${TRUE}" -a -n "${shell_bin}" ] && shell_present="${TRUE}"
shell_bin="${shell_bin} ash"
shell_name=${shell}
;;
*)
[ -x "${shell_bin}" ] && shell_present="${TRUE}"
shell_name=`basename "${shell}"`
;;
esac
if [ "${shell_present}" -eq "${FALSE}" ]; then
runner_warn "unable to run tests with the ${shell_name} shell"
continue
fi
shell_version=`versions_shellVersion "${shell}"`
echo "shell name: ${shell_name}"
echo "shell version: ${shell_version}"
# Execute the tests.
for t in ${tests}; do
echo
echo "--- Executing the '`_runner_testName "${t}"`' test suite. ---"
# ${shell_bin} needs word splitting.
# shellcheck disable=SC2086
( exec ${shell_bin} "./${t}" 2>&1; )
done
done
}
# Execute main() if this is run in standalone mode (i.e. not from a unit test).
[ -z "${SHUNIT_VERSION}" ] && main "$@"

View File

@ -1,10 +1,10 @@
#!/usr/bin/env bash #!/usr/bin/env bash
###### obackup - Local or Remote, push or pull backup script for files & mysql databases ###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2019 by Orsiris de Jong (www.netpower.fr) ###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2017020901
[GENERAL] ###### GENERAL BACKUP OPTIONS
CONFIG_FILE_REVISION=2.1
## Backup identification string. ## Backup identification string.
INSTANCE_ID="test-backup" INSTANCE_ID="test-backup"
@ -13,14 +13,14 @@ INSTANCE_ID="test-backup"
LOGFILE="" LOGFILE=""
## Elements to backup ## Elements to backup
SQL_BACKUP=true SQL_BACKUP=yes
FILE_BACKUP=true FILE_BACKUP=yes
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push]. ## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server. ## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=local BACKUP_TYPE=local
[BACKUP STORAGE] ###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system). Please use ${HOME} instead of ~ if needed. ## Storage paths of the backups (absolute paths of the local or remote system). Please use ${HOME} instead of ~ if needed.
SQL_STORAGE="/home/storage/backup/sql" SQL_STORAGE="/home/storage/backup/sql"
@ -29,7 +29,7 @@ FILE_STORAGE="/home/storage/backup/files"
## Backup encryption using GPG and rsync. ## Backup encryption using GPG and rsync.
## Push backups get encrypted locally in CRYPT_STORAGE before they are sent to the remote system ## Push backups get encrypted locally in CRYPT_STORAGE before they are sent to the remote system
## Local and pull backups get encrypted after backup, in CRYPT_STORAGE ## Local and pull backups get encrypted after backup, in CRYPT_STORAGE
ENCRYPTION=false ENCRYPTION=no
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system) ## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
## In case of a pull backup, an encrypted copy of FILE_BACKUP goes here ## In case of a pull backup, an encrypted copy of FILE_BACKUP goes here
@ -42,24 +42,24 @@ GPG_RECIPIENT="John Doe"
PARALLEL_ENCRYPTION_PROCESSES= PARALLEL_ENCRYPTION_PROCESSES=
## Create backup directories if they do not exist ## Create backup directories if they do not exist
CREATE_DIRS=true CREATE_DIRS=yes
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files ## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory. ## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
KEEP_ABSOLUTE_PATHS=true KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs). ## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
BACKUP_SIZE_MINIMUM=1024 BACKUP_SIZE_MINIMUM=1024
## Check backup size before proceeding ## Check backup size before proceeding
GET_BACKUP_SIZE=true GET_BACKUP_SIZE=yes
## Generate an alert if storage free space is lower than given value in Kb. ## Generate an alert if storage free space is lower than given value in Kb.
## Keep in mind that disabling backup file size test will only test min space against SQL backup size. ## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
SQL_WARN_MIN_SPACE=1048576 SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576 FILE_WARN_MIN_SPACE=1048576
[REMOTE_OPTIONS] ###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied. ## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/" REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
@ -74,36 +74,34 @@ SSH_PASSWORD_FILE=""
_REMOTE_TOKEN=SomeAlphaNumericToken9 _REMOTE_TOKEN=SomeAlphaNumericToken9
## ssh compression should be used unless your remote connection is good enough (LAN) ## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=true SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing. ## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
## Works on Redhat / CentOS, doesn't work on Debian / Ubunutu ## Works on Redhat / CentOS, doesn't work on Debian / Ubunutu
SSH_IGNORE_KNOWN_HOSTS=false SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases ## Remote rsync executable path. Leave this empty in most cases
RSYNC_REMOTE_PATH="" RSYNC_REMOTE_PATH=""
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task. ## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=true REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be be performed. Failing to ping will skip current task. ## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com" REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled. ## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=false SUDO_EXEC=no
[DATABASE BACKUP SETTINGS] ###### DATABASE SPECIFIC OPTIONS
## Database backup user (should be the same you are running obackup with) ## Database backup user (should be the same you are running obackup with)
SQL_USER=root SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list. ## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task. ## Every found database will be backed up as separate backup task.
DATABASES_ALL=true DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;mysql" DATABASES_ALL_EXCLUDE_LIST="test;mysql"
DATABASES_LIST="" ## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by semi-colons.
## Alternatively, if DATABASES_ALL=false, you can specify a list of databases to backup separated by semi-colons.
#DATABASES_LIST="somedatabase" #DATABASES_LIST="somedatabase"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task. ## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
@ -120,13 +118,12 @@ MYSQLDUMP_OPTIONS="--opt --single-transaction"
## If you use encryption, compression will only bring small benefits as GPG already has pretty good compression included ## If you use encryption, compression will only bring small benefits as GPG already has pretty good compression included
COMPRESSION_LEVEL=3 COMPRESSION_LEVEL=3
[FILE BACKUP SETTINGS] ###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task. ## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task. ## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks tasks "/home/dir1, "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something". ## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks tasks "/home/dir1, "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the avove backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST. ## You can exclude directories from the avove backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Note that since we recurse only by one level, excluding /home/dir2/somedir won't have any effect.
## Please use ${HOME} instead of ~ if needed. ## Please use ${HOME} instead of ~ if needed.
## Directories backup list. List of semicolon separated directories that will be backed up. ## Directories backup list. List of semicolon separated directories that will be backed up.
@ -138,10 +135,10 @@ RECURSIVE_EXCLUDE_LIST="/home/backupuser;/home/lost+found"
RSYNC_PATTERN_FIRST=include RSYNC_PATTERN_FIRST=include
## List of files / directories to incldue / exclude from sync on both sides (see rsync patterns, wildcards work). ## List of files / directories to incldue / exclude from sync on both sides (see rsync patterns, wildcards work).
## Paths are relative to sync dirs. List elements are separated by a semicolon. Specifying "cache" will remove every found cache subdirectory. ## Paths are relative to sync dirs. List elements are separated by a semicolon.
RSYNC_INCLUDE_PATTERN="" RSYNC_INCLUDE_PATTERN=""
RSYNC_EXCLUDE_PATTERN="" RSYNC_EXCLUDE_PATTERN=""
#RSYNC_EXCLUDE_PATTERN="tmp;archives;cache" #RSYNC_EXCLUDE_PATTERN="tmp;archives"
## Files that contains lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file. ## Files that contains lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
## This file has to be in the same directory as the config file ## This file has to be in the same directory as the config file
@ -155,44 +152,43 @@ PATH_SEPARATOR_CHAR=";"
## Optional arguments passed to rsync executable. The following are already managed by the program and shoul never be passed here ## Optional arguments passed to rsync executable. The following are already managed by the program and shoul never be passed here
## -rltD -n -P -o -g --executability -A -X -zz -L -K -H -8 -u -i --stats --checksum --bwlimit --partial --partial-dir --exclude --exclude-from --include--from --no-whole-file --whole-file --list-only ## -rltD -n -P -o -g --executability -A -X -zz -L -K -H -8 -u -i --stats --checksum --bwlimit --partial --partial-dir --exclude --exclude-from --include--from --no-whole-file --whole-file --list-only
## When dealing with different filesystems for sync, or using SMB mountpoints, try adding --modify-window=2 --omit-dir-times as optional arguments
RSYNC_OPTIONAL_ARGS="" RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions ## Preserve basic linux permissions
PRESERVE_PERMISSIONS=true PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=true PRESERVE_OWNER=yes
PRESERVE_GROUP=true PRESERVE_GROUP=yes
## On MACOS X, does not work and will be ignored ## On MACOS X, does not work and will be ignored
PRESERVE_EXECUTABILITY=true PRESERVE_EXECUTABILITY=yes
## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors. ## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors.
PRESERVE_ACL=false PRESERVE_ACL=no
## Preserve Xattr. MAke sure source and target FS can hold same Xattr or you'll get loads of errors. ## Preserve Xattr. MAke sure source and target FS can hold same Xattr or you'll get loads of errors.
PRESERVE_XATTR=false PRESERVE_XATTR=no
## Transforms symlinks into referent files/dirs ## Transforms symlinks into referent files/dirs
COPY_SYMLINKS=true COPY_SYMLINKS=yes
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root. ## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
KEEP_DIRLINKS=true KEEP_DIRLINKS=yes
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them. ## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
PRESERVE_HARDLINKS=false PRESERVE_HARDLINKS=no
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled. ## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
RSYNC_COMPRESS=false RSYNC_COMPRESS=no
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one one file list. Tilme is specified in seconds ## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one one file list. Tilme is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=3600 SOFT_MAX_EXEC_TIME_FILE_TASK=3600
HARD_MAX_EXEC_TIME_FILE_TASK=7200 HARD_MAX_EXEC_TIME_FILE_TASK=7200
## Keep partial uploads that can be resumed on next run, experimental feature ## Keep partial uploads that can be resumed on next run, experimental feature
PARTIAL=false PARTIAL=no
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination. ## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
DELETE_VANISHED_FILES=false DELETE_VANISHED_FILES=no
## Use delta copy algortithm (usefull when local paths are network drives), defaults to true ## Use delta copy algortithm (usefull when local paths are network drives), defaults to yes
DELTA_COPIES=true DELTA_COPIES=yes
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation. ## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
BANDWIDTH=0 BANDWIDTH=0
@ -200,7 +196,7 @@ BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation. ## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync RSYNC_EXECUTABLE=rsync
[ALERT_OPTIONS] ###### ALERT OPTIONS
## Alert email addresses separated by a space character ## Alert email addresses separated by a space character
DESTINATION_MAILS="your@mail.address" DESTINATION_MAILS="your@mail.address"
@ -219,7 +215,7 @@ SMTP_ENCRYPTION=none
SMTP_USER= SMTP_USER=
SMTP_PASSWORD= SMTP_PASSWORD=
[BACKUP SETTINGS] ###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only. ## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution. ## Hard max exec time generates a warning and stops the whole backup execution.
@ -230,12 +226,12 @@ HARD_MAX_EXEC_TIME_TOTAL=36000
KEEP_LOGGING=1801 KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server. ## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=false ROTATE_SQL_BACKUPS=no
ROTATE_SQL_COPIES=7 ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=false ROTATE_FILE_BACKUPS=no
ROTATE_FILE_COPIES=7 ROTATE_FILE_COPIES=7
[EXECUTION_HOOKS] ###### EXECUTION HOOKS
## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set). ## Commands can will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data. ## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
@ -250,7 +246,7 @@ MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0 MAX_EXEC_TIME_PER_CMD_AFTER=0
## Stops whole backup execution if one of the above commands fail ## Stops whole backup execution if one of the above commands fail
STOP_ON_CMD_ERROR=false STOP_ON_CMD_ERROR=no
## Run local and remote after backup cmd's even on failure ## Run local and remote after backup cmd's even on failure
RUN_AFTER_CMD_ON_ERROR=false RUN_AFTER_CMD_ON_ERROR=no

2738
install.sh

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

1994
obackup.sh

File diff suppressed because it is too large Load Diff

View File

@ -6,8 +6,8 @@ AUTHOR="(C) 2016 by Orsiris de Jong"
CONTACT="http://www.netpower.fr/obacup - ozy@netpower.fr" CONTACT="http://www.netpower.fr/obacup - ozy@netpower.fr"
OLD_PROGRAM_VERSION="v1.x" OLD_PROGRAM_VERSION="v1.x"
NEW_PROGRAM_VERSION="v2.1x" NEW_PROGRAM_VERSION="v2.1x"
CONFIG_FILE_REVISION=2.1 CONFIG_FILE_VERSION=2017020901
PROGRAM_BUILD=2019102101 PROGRAM_BUILD=2016113001
if ! type "$BASH" > /dev/null; then if ! type "$BASH" > /dev/null; then
echo "Please run this script only with bash shell. Tested on bash >= 3.2" echo "Please run this script only with bash shell. Tested on bash >= 3.2"
@ -109,42 +109,42 @@ RUN_AFTER_CMD_ON_ERROR
VALUES=( VALUES=(
test-backup test-backup
'' ''
true yes
true yes
local local
/home/storage/sql /home/storage/sql
/home/storage/files /home/storage/files
false no
/home/storage/crypt /home/storage/crypt
'Your Name used with GPG signature' 'Your Name used with GPG signature'
'' ''
true yes
true yes
1024 1024
true yes
1048576 1048576
1048576 1048576
ssh://backupuser@remote.system.tld:22/ ssh://backupuser@remote.system.tld:22/
${HOME}/.ssh/id_rsa ${HOME}/.ssh/id_rsa
'' ''
SomeAlphaNumericToken9 SomeAlphaNumericToken9
true yes
false no
'' ''
true yes
'www.kernel.org www.google.com' 'www.kernel.org www.google.com'
false no
root root
true yes
test test
'' ''
3600 3600
7200 7200
'--opt --single-transaction' '--opt --single-transaction'
3 3
'' /some/path
'' /home
'/lost+found;/tmp' /home/backupuser\;/host/lost+found
include include
'' ''
'' ''
@ -152,21 +152,21 @@ include
'' ''
\; \;
'' ''
true yes
true yes
true yes
true yes
false no
false no
true yes
true yes
false no
false no
3600 3600
7200 7200
false no
false no
true yes
0 0
rsync rsync
infrastructure@example.com infrastructure@example.com
@ -180,9 +180,9 @@ none
30000 30000
36000 36000
1801 1801
false no
7 7
false no
7 7
'' ''
'' ''
@ -190,8 +190,8 @@ false
'' ''
0 0
0 0
false no
false no
) )
function Usage { function Usage {
@ -222,7 +222,7 @@ function LoadConfigFile {
fi fi
} }
function CheckAndBackup { function RewriteOldConfigFiles {
local config_file="${1}" local config_file="${1}"
if ! grep "BACKUP_ID=" $config_file > /dev/null && ! grep "INSTANCE_ID=" $config_file > /dev/null; then if ! grep "BACKUP_ID=" $config_file > /dev/null && ! grep "INSTANCE_ID=" $config_file > /dev/null; then
@ -236,10 +236,6 @@ function CheckAndBackup {
echo "Cannot backup config file." echo "Cannot backup config file."
exit 1 exit 1
fi fi
}
function RewriteOldConfigFiles {
local config_file="${1}"
echo "Rewriting config file $config_file" echo "Rewriting config file $config_file"
@ -249,13 +245,13 @@ function RewriteOldConfigFiles {
sed -i'.tmp' 's/^LOCAL_SQL_STORAGE=/SQL_STORAGE=/g' "$config_file" sed -i'.tmp' 's/^LOCAL_SQL_STORAGE=/SQL_STORAGE=/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_FILE_STORAGE=/FILE_STORAGE=/g' "$config_file" sed -i'.tmp' 's/^LOCAL_FILE_STORAGE=/FILE_STORAGE=/g' "$config_file"
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=no/GET_BACKUP_SIZE=true/g' "$config_file" sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=no/GET_BACKUP_SIZE=yes/g' "$config_file"
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=yes/GET_BACKUP_SIZE=false/g' "$config_file" sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=yes/GET_BACKUP_SIZE=no/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=/KEEP_ABSOLUTE_PATHS=/g' "$config_file" sed -i'.tmp' 's/^LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=/KEEP_ABSOLUTE_PATHS=/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_STORAGE_WARN_MIN_SPACE=/SQL_WARN_MIN_SPACE=/g' "$config_file" sed -i'.tmp' 's/^LOCAL_STORAGE_WARN_MIN_SPACE=/SQL_WARN_MIN_SPACE=/g' "$config_file"
if ! grep "^FILE_WARN_MIN_SPACE=" "$config_file" > /dev/null; then if ! grep "^FILE_WARN_MIN_SPACE=" "$config_file" > /dev/null; then
VALUE=$(grep "SQL_WARN_MIN_SPACE=" "$config_file") VALUE=$(cat $config_file | grep "SQL_WARN_MIN_SPACE=")
VALUE="${VALUE#*=}" VALUE=${VALUE#*=}
sed -i'.tmp' '/^SQL_WARN_MIN_SPACE=*/a\'$'\n''FILE_WARN_MIN_SPACE='$VALUE'\'$'\n''' "$config_file" sed -i'.tmp' '/^SQL_WARN_MIN_SPACE=*/a\'$'\n''FILE_WARN_MIN_SPACE='$VALUE'\'$'\n''' "$config_file"
fi fi
sed -i'.tmp' 's/^DIRECTORIES_SIMPLE_LIST=/DIRECTORY_LIST=/g' "$config_file" sed -i'.tmp' 's/^DIRECTORIES_SIMPLE_LIST=/DIRECTORY_LIST=/g' "$config_file"
@ -263,29 +259,28 @@ function RewriteOldConfigFiles {
sed -i'.tmp' 's/^DIRECTORIES_RECURSE_EXCLUDE_LIST=/RECURSIVE_EXCLUDE_LIST=/g' "$config_file" sed -i'.tmp' 's/^DIRECTORIES_RECURSE_EXCLUDE_LIST=/RECURSIVE_EXCLUDE_LIST=/g' "$config_file"
sed -i'.tmp' 's/^ROTATE_BACKUPS=/ROTATE_SQL_BACKUPS=/g' "$config_file" sed -i'.tmp' 's/^ROTATE_BACKUPS=/ROTATE_SQL_BACKUPS=/g' "$config_file"
if ! grep "^ROTATE_FILE_BACKUPS=" "$config_file" > /dev/null; then if ! grep "^ROTATE_FILE_BACKUPS=" "$config_file" > /dev/null; then
VALUE=$(grep "ROTATE_SQL_BACKUPS=" "$config_file") VALUE=$(cat $config_file | grep "ROTATE_SQL_BACKUPS=")
VALUE="${VALUE#*=}" VALUE=${VALUE#*=}
sed -i'.tmp' '/^ROTATE_SQL_BACKUPS=*/a\'$'\n''ROTATE_FILE_BACKUPS='$VALUE'\'$'\n''' "$config_file" sed -i'.tmp' '/^ROTATE_SQL_BACKUPS=*/a\'$'\n''ROTATE_FILE_BACKUPS='$VALUE'\'$'\n''' "$config_file"
fi fi
sed -i'.tmp' 's/^ROTATE_COPIES=/ROTATE_SQL_COPIES=/g' "$config_file" sed -i'.tmp' 's/^ROTATE_COPIES=/ROTATE_SQL_COPIES=/g' "$config_file"
if ! grep "^ROTATE_FILE_COPIES=" "$config_file" > /dev/null; then if ! grep "^ROTATE_FILE_COPIES=" "$config_file" > /dev/null; then
VALUE=$(grep "ROTATE_SQL_COPIES=" "$config_file") VALUE=$(cat $config_file | grep "ROTATE_SQL_COPIES=")
VALUE="${VALUE#*=}" VALUE=${VALUE#*=}
sed -i'.tmp' '/^ROTATE_SQL_COPIES=*/a\'$'\n''ROTATE_FILE_COPIES='$VALUE'\'$'\n''' "$config_file" sed -i'.tmp' '/^ROTATE_SQL_COPIES=*/a\'$'\n''ROTATE_FILE_COPIES='$VALUE'\'$'\n''' "$config_file"
fi fi
REMOTE_BACKUP=$(grep "REMOTE_BACKUP=" "$config_file") REMOTE_BACKUP=$(cat $config_file | grep "REMOTE_BACKUP=")
REMOTE_BACKUP="${REMOTE_BACKUP#*=}" REMOTE_BACKUP=${REMOTE_BACKUP#*=}
if [ "$REMOTE_BACKUP" == "yes" ]; then if [ "$REMOTE_BACKUP" == "yes" ]; then
REMOTE_USER=$(grep "REMOTE_USER=" "$config_file") REMOTE_USER=$(cat $config_file | grep "REMOTE_USER=")
REMOTE_USER="${REMOTE_USER#*=}" REMOTE_USER=${REMOTE_USER#*=}
REMOTE_HOST=$(grep "REMOTE_HOST=" "$config_file") REMOTE_HOST=$(cat $config_file | grep "REMOTE_HOST=")
REMOTE_HOST="${REMOTE_HOST#*=}" REMOTE_HOST=${REMOTE_HOST#*=}
REMOTE_PORT=$(grep "REMOTE_PORT=" "$config_file") REMOTE_PORT=$(cat $config_file | grep "REMOTE_PORT=")
REMOTE_PORT="${REMOTE_PORT#*=}" REMOTE_PORT=${REMOTE_PORT#*=}
REMOTE_SYSTEM_URI="ssh://$REMOTE_USER@$REMOTE_HOST:$REMOTE_PORT/" REMOTE_SYSTEM_URI="ssh://$REMOTE_USER@$REMOTE_HOST:$REMOTE_PORT/"
sed -i'.tmp' 's#^REMOTE_BACKUP=true#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file"
sed -i'.tmp' 's#^REMOTE_BACKUP=yes#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file" sed -i'.tmp' 's#^REMOTE_BACKUP=yes#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file"
sed -i'.tmp' '/^REMOTE_USER==*/d' "$config_file" sed -i'.tmp' '/^REMOTE_USER==*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_HOST==*/d' "$config_file" sed -i'.tmp' '/^REMOTE_HOST==*/d' "$config_file"
@ -308,76 +303,27 @@ function AddMissingConfigOptions {
if ! grep "^${KEYWORDS[$counter]}=" > /dev/null "$config_file"; then if ! grep "^${KEYWORDS[$counter]}=" > /dev/null "$config_file"; then
echo "${KEYWORDS[$counter]} not found" echo "${KEYWORDS[$counter]} not found"
if [ $counter -gt 0 ]; then if [ $counter -gt 0 ]; then
if [ "${VALUES[$counter]}" == true ] || [ "${VALUES[$counter]}" == false ]; then sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'='"${VALUES[$counter]}"'\'$'\n''' "$config_file"
else
sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
fi
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "Cannot add missing ${[KEYWORDS[$counter]}." echo "Cannot add missing ${[KEYWORDS[$counter]}."
exit 1 exit 1
fi fi
else else
if [ "${VALUES[$counter]}" == true ] || [ "${VALUES[$counter]}" == false ]; then sed -i'.tmp' '/onfig file rev*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
sed -i'.tmp' '/[GENERAL\]$//a\'$'\n'${KEYWORDS[$counter]}'='"${VALUES[$counter]}"'\'$'\n''' "$config_file"
else
sed -i'.tmp' '/[GENERAL\]$//a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
fi
if [ $? -ne 0 ]; then
echo "Cannot add missing ${[KEYWORDS[$counter]}."
exit 1
fi
fi fi
echo "Added missing ${KEYWORDS[$counter]} config option with default option [${VALUES[$counter]}]" echo "Added missing ${KEYWORDS[$counter]} config option with default option [${VALUES[$counter]}]"
else
# Not the most elegant but the quickest way :)
if grep "^${KEYWORDS[$counter]}=yes$" > /dev/null "$config_file"; then
sed -i'.tmp' 's/^'${KEYWORDS[$counter]}'=.*/'${KEYWORDS[$counter]}'=true/g' "$config_file"
if [ $? -ne 0 ]; then
echo "Cannot rewrite ${[KEYWORDS[$counter]} boolean to true."
exit 1
fi
elif grep "^${KEYWORDS[$counter]}=no$" > /dev/null "$config_file"; then
sed -i'.tmp' 's/^'${KEYWORDS[$counter]}'=.*/'${KEYWORDS[$counter]}'=false/g' "$config_file"
if [ $? -ne 0 ]; then
echo "Cannot rewrite ${[KEYWORDS[$counter]} boolean to false."
exit 1
fi
fi
fi fi
counter=$((counter+1)) counter=$((counter+1))
done done
} }
function RewriteSections {
local config_file="${1}"
# Earlier config files has GENERAL BACKUP OPTIONS set twice. Let's replace the first one only. sed does a horrible job doing this, at least if portability is required
awk '/###### GENERAL BACKUP OPTIONS/ && !done { gsub(/###### GENERAL BACKUP OPTIONS/, "[GENERAL]"); done=1}; 1' "$config_file" >> "$config_file.tmp"
#sed -i'.tmp' 's/###### GENERAL BACKUP OPTIONS/[GENERAL]/' "$config_file"
# Fix using earlier tmp file from awk
sed 's/###### BACKUP STORAGE/[BACKUP STORAGE]/g' "$config_file.tmp" > "$config_file"
sed -i'.tmp' 's/###### REMOTE ONLY OPTIONS/[REMOTE_OPTIONS]/g' "$config_file"
sed -i'.tmp' 's/###### DATABASE SPECIFIC OPTIONS/[DATABASE BACKUP SETTINGS]/g' "$config_file"
sed -i'.tmp' 's/###### FILES SPECIFIC OPTIONS/[FILE BACKUP SETTINGS]/g' "$config_file"
sed -i'.tmp' 's/###### ALERT OPTIONS/[ALERT_OPTIONS]/g' "$config_file"
sed -i'.tmp' 's/###### GENERAL BACKUP OPTIONS/[BACKUP SETTINGS]/g' "$config_file"
sed -i'.tmp' 's/###### EXECUTION HOOKS/[EXECUTION_HOOKS]/g' "$config_file"
}
function UpdateConfigHeader { function UpdateConfigHeader {
local config_file="${1}" local config_file="${1}"
if ! grep "^CONFIG_FILE_REVISION=" > /dev/null "$config_file"; then # "onfig file rev" to deal with earlier variants of the file
if grep "\[GENERAL\]" > /dev/null "$config_file"; then sed -i'.tmp' 's/.*onfig file rev.*/##### '$SUBPROGRAM' config file rev '$CONFIG_FILE_VERSION' '$NEW_PROGRAM_VERSION'/' "$config_file"
sed -i'.tmp' '/^\[GENERAL\]$/a\'$'\n'CONFIG_FILE_REVISION=$CONFIG_FILE_REVISION$'\n''' "$config_file"
else
sed -i'.tmp' '/.*onfig file rev.*/a\'$'\n'CONFIG_FILE_REVISION=$CONFIG_FILE_REVISION$'\n''' "$config_file"
fi
# "onfig file rev" to deal with earlier variants of the file
#sed -i'.tmp' 's/.*onfig file rev.*//' "$config_file"
fi rm -f "$config_file.tmp"
} }
if [ "$1" != "" ] && [ -f "$1" ] && [ -w "$1" ]; then if [ "$1" != "" ] && [ -f "$1" ] && [ -w "$1" ]; then
@ -385,13 +331,9 @@ if [ "$1" != "" ] && [ -f "$1" ] && [ -w "$1" ]; then
# Make sure there is no ending slash # Make sure there is no ending slash
CONF_FILE="${CONF_FILE%/}" CONF_FILE="${CONF_FILE%/}"
LoadConfigFile "$CONF_FILE" LoadConfigFile "$CONF_FILE"
CheckAndBackup "$CONF_FILE"
RewriteSections "$CONF_FILE"
RewriteOldConfigFiles "$CONF_FILE" RewriteOldConfigFiles "$CONF_FILE"
AddMissingConfigOptions "$CONF_FILE" AddMissingConfigOptions "$CONF_FILE"
UpdateConfigHeader "$CONF_FILE" UpdateConfigHeader "$CONF_FILE"
rm -f "$CONF_FILE.tmp"
echo "Configuration file upgrade finished"
else else
Usage Usage
fi fi