mirror of https://github.com/deajan/obackup.git synced 2026-02-12 01:50:55 +01:00

333 Commits

Author SHA1 Message Date
deajan
552f6e2b80 Update changelog for v2.1 beta1 release 2017-01-04 09:14:21 +01:00
deajan
19365df1ba Added pgrep preflight check 2017-01-04 09:08:23 +01:00
deajan
2d2530c55d Rebuilt targets for v2.1beta1 2017-01-04 09:00:47 +01:00
deajan
4e0bbbcd88 Update version number 2017-01-04 08:59:22 +01:00
deajan
f96d8ba5e0 Update version number 2017-01-04 08:59:13 +01:00
deajan
cdf62e3be8 Minor fixes 2017-01-04 08:55:04 +01:00
deajan
2d1cd20f06 Fixed wrong variable used 2017-01-03 20:07:40 +01:00
deajan
da9de0acdb Fixed another missing eval statement 2017-01-03 14:30:52 +01:00
deajan
b35d883403 More fixes 2017-01-03 14:24:47 +01:00
deajan
5dba4dc808 Multiple typo fixes and forgotten cmd exec 2017-01-03 14:13:53 +01:00
deajan
6335e889ea Fixed typo (again) 2017-01-03 13:17:43 +01:00
deajan
71dd3e939d More easy debugging 2017-01-03 13:01:18 +01:00
deajan
13ad1b9372 Fixed remaining typos 2017-01-03 12:35:42 +01:00
deajan
321aba05ff Minor fixes 2017-01-03 12:32:24 +01:00
deajan
4e75fdc445 Added bootstrap warning message 2017-01-03 11:25:24 +01:00
deajan
38b1a0ee0b Simplified FILE_SIZE_LIST since local and remote code is now same 2017-01-03 11:15:03 +01:00
deajan
fe8a5c51fa TrapError should output messages to stderr 2017-01-02 23:03:07 +01:00
deajan
643ede6524 Fixed wrong file size fetched remotely 2017-01-02 22:55:43 +01:00
deajan
6a51acbb49 Fixed local RemoteLogger usage 2017-01-02 22:48:59 +01:00
deajan
c448ff3937 Added missing remotelogger in getDirectoriesSizeRemote 2017-01-02 22:42:59 +01:00
deajan
89c74723bd Added debug log 2017-01-02 22:41:05 +01:00
deajan
bc3e3e9e66 Some work comments 2017-01-02 22:35:30 +01:00
deajan
bc396270df Fixed bogus backup when no recursive directories set 2017-01-02 22:26:00 +01:00
deajan
eead765d69 Reverted exit code handling 2017-01-02 22:23:44 +01:00
deajan
2db6a9ca29 Retval and comparaison code compliance 2017-01-02 17:08:39 +01:00
deajan
6c00014520 Fixing bogus remote logging 2017-01-02 16:58:21 +01:00
deajan
e968a67c04 Fixing typo 2017-01-02 16:47:07 +01:00
deajan
0c2b52e0bf Added parallel decryption support 2017-01-02 14:08:21 +01:00
deajan
c1aee74a3e Added parallel crypt support 2017-01-02 13:15:33 +01:00
deajan
a3e156edf9 Added PARALLEL_ENCRYPTION_PROCESSES parameter 2017-01-02 13:12:56 +01:00
deajan
83aed0fe20 Removed debug env 2017-01-02 12:46:47 +01:00
deajan
121d78d8aa Improved testsuite 2017-01-02 12:37:07 +01:00
deajan
290821e949 Missing manual database list detection fix 2017-01-02 12:01:58 +01:00
deajan
085a167442 Update help comment 2017-01-02 12:00:24 +01:00
deajan
afce24c7ec Minor decrypt tweaks 2016-12-31 12:20:50 +01:00
deajan
4524df8a76 Removed double tests 2016-12-31 00:01:31 +01:00
deajan
d16e1e13c3 Improved GPG tests 2016-12-31 00:00:32 +01:00
deajan
2b69b4b3c1 Fix typos 2016-12-29 00:47:48 +01:00
deajan
259b05ff33 Improved GPG v >= 2.1 batch support 2016-12-29 00:44:33 +01:00
deajan
53eaeb4dd7 Fixed batch encrypt support 2016-12-29 00:20:01 +01:00
deajan
dfee375b8d Fixed typos 2016-12-28 23:28:47 +01:00
deajan
b4255318e6 Simplified CreateDirectoryRemote 2016-12-28 23:25:11 +01:00
deajan
a81f741204 Reenabled all tests 2016-12-28 22:20:59 +01:00
deajan
e6b29356c7 Updated comments 2016-12-28 22:20:30 +01:00
deajan
805239e669 Update file backup paths when encryption is used 2016-12-28 22:18:59 +01:00
deajan
a8b6bcbf13 Updated functional tests 2016-12-28 22:15:44 +01:00
deajan
b6ad787e23 Removing unnecessary files 2016-12-28 22:14:45 +01:00
deajan
de44f55cb8 Updated some test paths 2016-12-28 22:13:43 +01:00
deajan
1f8fef8c52 Set of minor fixes and WIPs 2016-12-27 12:53:35 +01:00
deajan
e3c6ae3a40 Added global local os variables 2016-12-27 12:52:59 +01:00
deajan
549bb803dd Fixed installer stats don't include os 2016-12-27 12:52:37 +01:00
deajan
7eb1acd079 Fixed alert message for bogus conf items with wrong value 2016-12-24 15:45:39 +01:00
deajan
10e6ede85c Multiple minor fixes 2016-12-24 15:41:28 +01:00
deajan
582f79d38f Re-enabled all tests 2016-12-24 13:01:31 +01:00
deajan
1a23c48d3c Fixed hard max exec time can be lower that soft max exec time 2016-12-24 12:55:00 +01:00
deajan
749e4ffdbd Increased wait time for timed tests 2016-12-24 12:43:37 +01:00
deajan
92ad2ac81d soft max exec time should trigger warning 2016-12-23 22:41:31 +01:00
deajan
db5593ddb9 Updated conf filename 2016-12-23 22:36:46 +01:00
deajan
0eac2b5520 Removed unused mail 2016-12-23 22:34:59 +01:00
deajan
190c6dd3f7 Time test tweaks 2016-12-23 22:29:02 +01:00
deajan
3a4aab72b1 Mail fixes for tests 2016-12-23 22:25:23 +01:00
deajan
d934add237 Small minor fixes 2016-12-23 22:12:07 +01:00
deajan
9d6b95e7a6 Added --no-prefix option 2016-12-23 21:51:34 +01:00
deajan
33bb142467 Added timed execution tests 2016-12-23 21:46:43 +01:00
deajan
a4ad02e7f7 More old upgrade remote fixes 2016-12-23 21:35:02 +01:00
deajan
f3a7faf0dd Fixed bogus ssh port on travis 2016-12-23 21:29:02 +01:00
deajan
0d969d992b Disable REMOTE_HOST_PING on travis (again) 2016-12-23 21:23:49 +01:00
deajan
3c72bffdbf Disabled remote ping on travis 2016-12-23 20:43:58 +01:00
deajan
94b4f58740 Moved compression settings to separate function run twice after of detection 2016-12-23 20:31:47 +01:00
deajan
be559a124c Updated test email address 2016-12-23 20:20:22 +01:00
deajan
546df675e7 Fixed warnings and errors don't trigger actions on exit 2016-12-23 20:15:38 +01:00
deajan
cd1c597e68 Added encrypt / decrypt batch info output 2016-12-23 20:12:05 +01:00
deajan
c144278c2a Updated test framework 2016-12-23 19:25:26 +01:00
deajan
6cc894b69d Fixed missing REMOTE_FIND_CMD command 2016-12-23 18:23:13 +01:00
deajan
630b7d9eff Fixed Logger function when called without proper loglevel 2016-12-23 18:15:42 +01:00
deajan
40102defd6 Moved JoinString as it's needed for Logger 2016-12-23 18:13:48 +01:00
deajan
4ee20b130e Corrected wrong number of arguments checked for BackupDatabase sub functions 2016-12-23 18:02:03 +01:00
deajan
d48fe86d75 Another dev comment 2016-12-23 17:59:58 +01:00
deajan
6864cedb14 Updated shellcheck from osync 2016-12-23 17:52:24 +01:00
deajan
85e680cc75 New __CheckArguments call syntax 2016-12-23 17:51:57 +01:00
deajan
7e63f1dab2 WIP for refactoring ListRemoteDirectories 2016-12-23 17:51:15 +01:00
deajan
39cfa7e89e WIP comment 2016-12-20 22:59:09 +01:00
deajan
47b6d21480 WIP for bootstrap 2016-12-20 22:53:22 +01:00
deajan
1dbd3e9a72 Added common files from osync 2016-12-20 21:19:58 +01:00
deajan
7967251206 Added merge and bootstrap from osync 2016-12-20 21:19:31 +01:00
deajan
25fb362762 More work comments 2016-12-19 23:19:45 +01:00
deajan
a207fc2008 Added RSYNC_OPTIONAL_ARGS upgrade value 2016-12-19 23:17:25 +01:00
deajan
d4a072846c Added RSYNC_OPTIONAL_ARGS 2016-12-19 23:15:42 +01:00
deajan
6d6ea301aa Added TSTAMP suffix to tmp exec files 2016-12-19 23:02:19 +01:00
deajan
d02d26fee1 Updated ofunctions from osync 2016-12-19 22:57:36 +01:00
deajan
31eaee7993 Updated multiple comments and remote sudo execs 2016-12-19 22:57:21 +01:00
deajan
da29cf978f Updated ofunctions from obackup 2016-12-04 11:18:11 +01:00
deajan
039e057915 Some minor improvements ported from osync 2016-12-04 11:17:51 +01:00
deajan
625a65bee0 Moved trap functions later in init phase 2016-11-30 14:21:53 +01:00
deajan
ff791f2ccf Rebuilt targets 2016-11-30 14:04:41 +01:00
deajan
161599617d Updated ofunctions from osync 2016-11-30 13:59:27 +01:00
deajan
a527350afa Ported some fixes from osync 2016-11-30 13:59:15 +01:00
deajan
d7cba2acc2 Updated WaitFor & ParallelExec tests 2016-11-30 13:59:01 +01:00
deajan
2e043210b2 Upgrade script now handles headers on BSD / Mac 2016-11-30 13:30:14 +01:00
deajan
b8216f8d6a Improved os detection 2016-11-30 13:28:56 +01:00
deajan
8f0efbcb46 Simplified batch tool 2016-11-30 13:28:37 +01:00
deajan
92852357e3 Updated merge for BSD / Mac compat 2016-11-30 13:28:26 +01:00
deajan
b156013598 Some Mac fixes from osync 2016-11-21 15:11:40 +01:00
deajan
e3a717c7fa Better android OS detection 2016-11-19 12:59:56 +01:00
deajan
b21fafd523 Fixed Android / CentOS 5 compat 2016-11-19 12:59:33 +01:00
deajan
b1ea1dfa2c Removed debugging exit 2016-11-17 21:19:12 +01:00
deajan
6c9e4082be Simplified cleanup functions 2016-11-17 21:05:07 +01:00
deajan
16fc043322 Rebuilt targets 2016-11-17 21:02:15 +01:00
deajan
3fe2f1eb88 Updated obackup according to new ofunctions 2016-11-17 21:01:09 +01:00
deajan
169f7b157c Updated ofunctions.sh from osync 2016-11-17 21:00:56 +01:00
deajan
2167bc8e09 More fixes from osync 2016-11-15 13:13:28 +01:00
deajan
6f697cc63f Cosmetic change 2016-11-15 13:13:05 +01:00
deajan
03f93d2576 Added --errors-only switch 2016-11-14 17:17:29 +01:00
deajan
709af82ff4 Updated ofunctions from osync 2016-11-14 17:17:17 +01:00
deajan
88a720be21 Rebuilt targets 2016-11-12 00:39:19 +01:00
deajan
321a20558a fi-Fixed 2016-11-12 00:39:06 +01:00
deajan
711465f701 Rebuilt targets 2016-11-10 16:25:37 +01:00
deajan
3975763ed2 Fixed typo 2016-11-10 16:25:07 +01:00
deajan
177321a9de Fixes from osync 2016-11-10 16:25:00 +01:00
deajan
6075b92c91 Some logging fixes 2016-11-10 16:18:29 +01:00
deajan
a8892f88d0 Implement TLS and SSL support for busybox sendmail 2016-10-23 20:28:20 +02:00
deajan
f2e0f63a33 Safer upgrade test 2016-10-23 19:01:14 +02:00
deajan
3ca39c7a0a Fixed trying other mail systems in busybox 2016-10-23 18:13:06 +02:00
deajan
e4e1273e5a Implemented SSH_PASSWORD_FILE option 2016-10-23 18:04:39 +02:00
deajan
545a7e05f5 Updated config file and upgrade script 2016-10-23 17:54:41 +02:00
deajan
9641b9d277 It's typo heavean 2016-10-23 17:50:10 +02:00
deajan
0a8b17814b Typo fixed and code compliance 2016-10-23 17:46:23 +02:00
deajan
caa462fee7 More busybox sendmail tests 2016-10-23 14:09:27 +02:00
deajan
7a17118305 Fixed typo 2016-10-23 14:04:23 +02:00
deajan
f2a62821d6 Basic sendemail busybox implementation 2016-10-23 14:02:52 +02:00
deajan
04524e6bec More portable quick sync check 2016-10-23 13:50:38 +02:00
deajan
b697c13c76 Another compression fix for busybox 2016-10-23 13:48:34 +02:00
deajan
4044ea6dbc Minor portability fix 2016-10-23 13:40:55 +02:00
deajan
b331ffb1f2 Further busybox compat. xz compression level not supported 2016-10-23 13:34:33 +02:00
deajan
ba8541d5f0 More portable file size functions 2016-10-23 13:27:02 +02:00
deajan
4f4fcf831a Fixed HumanToNumeric function 2016-10-23 13:26:41 +02:00
deajan
2118a27bcc Better preflight check 2016-10-19 12:27:37 +02:00
deajan
4982d326d3 Minor improvement 2016-10-17 17:42:13 +02:00
deajan
7f5126ad28 Added Remote_3rd_party_host value correction 2016-09-14 20:27:03 +02:00
deajan
4e5dfa31a9 Fixed upgrade script caveats 2016-09-14 20:19:14 +02:00
deajan
6cbb3fe5da Removed other connectivity checks 2016-09-09 12:55:45 +02:00
deajan
bcbbdc9cc3 Disabled connectivity checks 2016-09-09 12:47:37 +02:00
deajan
b838100556 Connectivity checks generate warnings instead of errors 2016-09-09 12:46:33 +02:00
deajan
281cb38ba5 Added upgrade script test 2016-09-09 12:36:30 +02:00
deajan
da5dc6f4bc Improved upgrade script 2016-09-09 12:31:54 +02:00
deajan
7e918cfbc2 Added pre-upgrade config file 2016-09-09 12:31:40 +02:00
deajan
1d5d1b4b17 Added encrpytion options to upgrade script 2016-09-09 09:01:25 +02:00
deajan
4e9eee7c48 Rebuilt targets 2016-09-09 08:31:29 +02:00
deajan
cfbec5120b Fixed typo 2016-09-09 08:31:16 +02:00
deajan
a35edebafc Check for gpg recipient if encryption is used 2016-09-09 08:14:29 +02:00
deajan
8573ecfcb5 Rebuilt targets 2016-09-08 22:51:24 +02:00
deajan
179434e9b0 Updated common scripts 2016-09-08 22:51:00 +02:00
deajan
25cb702d22 Better GPG compatibility 2016-09-04 22:48:09 +02:00
deajan
8924ae31ff Still trying to resolve the mystery of GPG in travis 2016-09-04 22:34:42 +02:00
deajan
c6f386bf01 Trying to resolve Travis GPG mystery 2016-09-04 22:25:24 +02:00
deajan
6a1b2b9d48 Added more gpg tests 2016-09-04 22:20:18 +02:00
deajan
5a8d1ab811 Another typo bites the dust 2016-09-04 22:05:01 +02:00
deajan
e59fcdbe15 Re enabled gpg fix 2016-09-04 21:51:26 +02:00
deajan
da5d98e922 PassPhrase test 2016-09-04 21:41:05 +02:00
deajan
20cf783bdb Added gpg unit test 2016-09-04 21:37:24 +02:00
deajan
93dbcb4f74 Common travis run 2016-09-04 21:26:19 +02:00
deajan
5aae319a2e Still trying to debug travis 2016-09-04 21:16:48 +02:00
deajan
176564769c Mismatch in loglevels 2016-09-04 21:16:32 +02:00
deajan
f0170d949a Trying to debug travis run 2016-09-04 21:11:05 +02:00
deajan
e741aa6bc2 Fixed typo 2016-09-04 19:42:22 +02:00
deajan
e0a8c5567d Added SetEncryption before all functions 2016-09-04 09:38:11 +02:00
deajan
1187360b62 Run all tests 2016-09-04 09:33:29 +02:00
deajan
099575fb88 Work in progress on encrpytion 2016-09-04 09:30:12 +02:00
deajan
272adb24e7 Fixed typos 2016-09-04 08:37:21 +02:00
deajan
c84e4eae9e Updated tests 2016-09-04 08:07:17 +02:00
deajan
69b41e01dd Fixed double error count in WaitForTaskCompletion 2016-09-04 08:03:28 +02:00
deajan
af9924a648 Updated tests 2016-09-03 14:18:03 +02:00
deajan
2346594adc Kept IsNumeric expand for variable variables 2016-09-03 13:30:38 +02:00
deajan
cd3da2d589 Added Integer test function 2016-09-03 13:10:14 +02:00
deajan
65a3a0a19a Fixed unit tests 2016-09-02 23:22:29 +02:00
deajan
26664835c0 Updated unit test config files 2016-09-02 23:20:48 +02:00
deajan
cf8f3f20ef Minor fixes 2016-09-02 23:20:07 +02:00
deajan
fc8ef801a4 Fix EscapeSpaces on bash >= 4.3 2016-09-02 21:18:18 +02:00
deajan
3b090d8e43 Changed travis ssh config 2016-09-02 20:36:45 +02:00
deajan
3134131975 Made tee work with push command 2016-09-02 20:32:17 +02:00
deajan
bfbd57490b Added known_hosts workaround for ssh tests 2016-09-02 20:31:49 +02:00
deajan
064b3f875b Debugging travis 2016-09-02 16:33:49 +02:00
deajan
dd42cd08b0 Fixed problem with gpg 1 2016-09-02 16:33:23 +02:00
deajan
05590e1266 Debug Travis CI tests 2016-09-02 16:08:23 +02:00
deajan
2ce18fb721 Fix paths 2016-09-02 15:53:39 +02:00
deajan
caf8301679 Another typo fixing :' sic 2016-09-02 15:47:02 +02:00
deajan
f9fef31e73 OMG! bad coding fix 2016-09-02 15:36:18 +02:00
deajan
9fabedaeb4 Added return codes logs to connectivitychecks 2016-09-02 15:34:24 +02:00
deajan
fa038f9994 Fixed typo 2016-09-02 13:38:35 +02:00
deajan
c42b80ccb0 Trying to disable SSH known hosts for Travis tests 2016-09-02 13:37:01 +02:00
deajan
a14431c828 Fixed shunit2 runner 2016-09-02 13:27:39 +02:00
deajan
e5b0b89b70 Updated Travis command 2016-09-02 13:22:52 +02:00
deajan
326f8a8245 Added conf file selection 2016-09-02 13:22:24 +02:00
deajan
e77a20595b Changed conf files for travis 2016-09-02 13:18:27 +02:00
deajan
4c8ae70b73 Added dev comment 2016-09-02 13:14:41 +02:00
deajan
2f7cea1736 Bumped program build 2016-09-02 13:14:30 +02:00
deajan
428c6464e2 Updated config files 2016-09-02 13:14:04 +02:00
deajan
f28b884c01 Added encryption unit tests 2016-09-02 13:13:53 +02:00
deajan
694b73983a Fixed encryption paths 2016-09-02 12:16:25 +02:00
deajan
6647a7be59 WIP: making encryption coherent 2016-09-01 22:55:11 +02:00
deajan
35c7dc97c2 Multiple typo fixes 2016-09-01 22:15:16 +02:00
deajan
66babd22fa Fixed paths 2016-09-01 20:32:38 +02:00
deajan
758f074d65 Fixed typo 2016-09-01 17:27:53 +02:00
deajan
92258308fe Encryption implemented 2016-09-01 16:07:20 +02:00
deajan
672672af2b Code cleanup 2016-09-01 15:50:29 +02:00
deajan
67b42842db Added encryption support for SQL backups 2016-09-01 15:44:29 +02:00
deajan
5ca743f7da Added crypt disk space calculation 2016-09-01 15:11:56 +02:00
deajan
1c88c73b5c WIP: encrypt & decrypt functions working 2016-09-01 14:37:01 +02:00
deajan
084fd91005 Work in progress for encryption 2016-08-31 23:32:21 +02:00
deajan
2ac8b646a9 Rebuilt targets 2016-08-31 12:28:19 +02:00
deajan
f74aad596d Changed --database since it's deprecated 2016-08-31 12:27:53 +02:00
deajan
89975337c9 Updated test conf files 2016-08-31 11:28:12 +02:00
deajan
bb8cf4a27c Rebuilt targets 2016-08-31 11:26:21 +02:00
deajan
7ccc469dd6 Added new config values to update 2016-08-31 11:25:27 +02:00
deajan
c0e9c14fa2 Changed paths 2016-08-31 11:25:09 +02:00
deajan
0127da6ac3 Added more mysqldump flexibility 2016-08-31 11:15:12 +02:00
deajan
fbdd12dc72 Added ParallelExec function test 2016-08-30 21:50:04 +02:00
deajan
e6be72f5a8 Removed debug line 2016-08-30 19:17:14 +02:00
deajan
9da86de41e Fixed merge program variable 2016-08-30 18:56:18 +02:00
deajan
a6f52125b0 Updated ofunctions 2016-08-30 18:55:36 +02:00
deajan
8cef797979 Trying to diag Travis results 2016-08-30 18:50:19 +02:00
deajan
0cab87309f Improved merge.sh with exit codes. 2016-08-30 18:28:27 +02:00
deajan
5371f859e9 Test travis system 2016-08-30 18:28:08 +02:00
deajan
c8e73d152f Added /root / ~ warning 2016-08-30 18:24:02 +02:00
deajan
133e1ebe0e Improved travis CI tests 2016-08-30 18:12:59 +02:00
deajan
a9077af7bb Minor fixes 2016-08-30 18:11:54 +02:00
deajan
b1fafab1f7 Merge README.md with Travis info 2016-08-30 16:27:19 +02:00
deajan
2ee2624d9d Added os info for Travis CI 2016-08-30 16:27:02 +02:00
deajan
a583835cf5 Added merge test 2016-08-30 16:26:30 +02:00
deajan
35558cf48c Added merge test 2016-08-30 16:24:30 +02:00
Orsiris de Jong
b091262231 Added Travis CI info to README 2016-08-30 16:14:32 +02:00
deajan
9daa4c10bf Updated tests for Travis CI 2016-08-30 16:12:10 +02:00
deajan
8c17c2dfe2 Added ~ expansion 2016-08-30 15:36:08 +02:00
deajan
d415a727bf Added more explicit log message 2016-08-29 18:27:34 +02:00
deajan
cf908c4fab Replaced 0/1 with booleans 2016-08-29 18:27:12 +02:00
deajan
4a9ebd0abc Updated ofunctions 2016-08-29 18:26:47 +02:00
deajan
40eb0f760c Fixed result variable name 2016-08-26 15:18:07 +02:00
deajan
4755fbacb7 Rebuilt targets 2016-08-26 15:10:30 +02:00
deajan
9aaf4ff11d Updated changelog 2016-08-26 15:09:59 +02:00
deajan
9cfcf338e1 SendAlert now triggered as last action 2016-08-26 15:09:01 +02:00
deajan
bf8e45217c Logs sent per mail are now only limited to current run 2016-08-26 15:08:01 +02:00
deajan
8817c68f7c Better __CheckArguments function 2016-08-26 14:46:11 +02:00
deajan
134aaec0da SendAlert arg check is now ranged 2016-08-26 14:23:57 +02:00
deajan
5b9f6c5fa2 __CheckArguments now also takes ranges 2016-08-26 14:23:36 +02:00
deajan
809e6a1d4f Improved logging output 2016-08-26 12:28:53 +02:00
deajan
2c55108ba6 Fixed bogus double log alert sending 2016-08-26 12:09:22 +02:00
deajan
cfd76c0c4c Fixed typo 2016-08-26 12:06:49 +02:00
deajan
d1c845ba67 More clear subject line in alerts 2016-08-26 12:01:35 +02:00
deajan
e966682ecc Fixed double time check in database backup 2016-08-26 11:57:55 +02:00
deajan
fc84c633a8 Work in progress for encryption 2016-08-26 11:55:58 +02:00
deajan
a342b1ba1d Increased remote host soft max time 2016-08-23 23:40:20 +02:00
deajan
da0a45e3d3 Rebuilt targets 2016-08-22 09:11:32 +02:00
deajan
0d4664cae0 Fixed typo 2016-08-22 08:32:56 +02:00
deajan
53677b00d3 Updated serial 2016-08-22 08:30:19 +02:00
deajan
1e1adf3470 Fixed code removal 2016-08-22 08:29:58 +02:00
deajan
ae23418f7b Fixed signal posfix compliance 2016-08-22 08:25:47 +02:00
deajan
775866b62d Fixed RunAfterHook exec + signal posix compliance 2016-08-22 08:25:31 +02:00
deajan
add5c69929 Upgrade script updates header too 2016-08-19 10:20:25 +02:00
deajan
51b43487e2 More waitFor function tests + arg list 2016-08-18 22:38:28 +02:00
deajan
f550b03d4d Adapted arg list for WaitFor function 2016-08-18 22:38:06 +02:00
deajan
cb8bd6b326 Rebuilt targets 2016-08-18 22:16:15 +02:00
deajan
37b94c6712 Adapted parameters for WaitFor function 2016-08-18 22:15:08 +02:00
deajan
5c8eb7fdf9 Removed unused parameter in WaitFor function 2016-08-18 22:14:46 +02:00
deajan
db32308c40 Updated test conf files 2016-08-18 17:15:10 +02:00
Orsiris de Jong
7fb22d0732 Fixed INSTANCE_ID for push test 2016-08-18 17:11:43 +02:00
deajan
6d63a4b754 Better unit tests 2016-08-18 17:10:53 +02:00
deajan
d006328b61 Rebuilt targets 2016-08-18 14:23:57 +02:00
deajan
6cbb732d7c Added basic shunit2 tests 2016-08-18 14:19:52 +02:00
deajan
bda19b3822 Finally fixed all space related errors 2016-08-18 12:58:05 +02:00
deajan
073291934b Added another debugging function 2016-08-18 12:57:50 +02:00
deajan
b7c49ed6d9 Rebuilt targets 2016-08-18 11:53:18 +02:00
deajan
9fed22d4f9 Added some test files 2016-08-18 11:52:46 +02:00
deajan
f6c916f6ef Fixed infamous typo 2016-08-18 11:05:30 +02:00
deajan
9f6e676a9a Fixed GetDirectoriesSize...again 2016-08-18 10:58:35 +02:00
deajan
a6be969081 Fixed spaces in backup dirs 2016-08-18 10:18:56 +02:00
deajan
179f274147 Added more preserve configurable options 2016-08-17 17:21:32 +02:00
deajan
2d49c957de Fixed backup rotation bug introduced with code rework 2016-08-17 15:27:40 +02:00
deajan
a4e4c5b7be Fixed warning when testing process state 2016-08-17 15:26:54 +02:00
deajan
a4038750a7 Added bash test 2016-08-17 13:44:14 +02:00
deajan
2658730f70 Fixed typo 2016-08-17 10:44:28 +02:00
deajan
565d0f5524 Rebuilt target 2016-08-17 10:24:38 +02:00
deajan
56173e9d1c Improved batch runner 2016-08-17 10:24:27 +02:00
deajan
87dd096f61 Rebuilt targets 2016-08-17 10:01:55 +02:00
deajan
a84a8c95f9 Standarised exit codes 2016-08-17 10:01:27 +02:00
deajan
aa581c5ce1 Improved batch runner 2016-08-17 10:00:23 +02:00
deajan
a74e83185a Fixed double backup path in rotate functions 2016-08-17 09:20:52 +02:00
deajan
01ddddcb49 Added keep logging preflight check 2016-08-17 09:14:09 +02:00
deajan
7076a9dfaa Updated changelog 2016-08-16 23:19:49 +02:00
deajan
234961e982 Rebuilt targets 2016-08-16 23:19:18 +02:00
deajan
9fe0aa3546 Fixed double RunAfterHook launch 2016-08-16 23:19:01 +02:00
deajan
49748686b9 Rebuilt Targets 2016-08-16 22:55:16 +02:00
deajan
18f318d138 Last fix for killchilds 2016-08-16 22:54:36 +02:00
deajan
075c673717 Cannot use function before declaration 2016-08-16 22:53:22 +02:00
deajan
01725786f1 Rebuilt targets 2016-08-16 22:40:02 +02:00
deajan
0e085000d7 Fixed WaitForTaskCompletion killchilds call 2016-08-16 22:39:13 +02:00
deajan
cccb358373 Moved keep logging back to ofunctions 2016-08-16 22:37:36 +02:00
deajan
0df0f8c44c Rebuilt targets 2016-08-16 21:24:48 +02:00
deajan
cd35771399 Added keep logging argument 2016-08-16 21:23:57 +02:00
deajan
5e7206ce8b Added keep logging to upgrade script 2016-08-16 21:16:09 +02:00
deajan
d7040e8d0a Updated version 2016-08-16 21:14:57 +02:00
deajan
10708e9b45 Added keep logging conf value 2016-08-16 21:13:37 +02:00
deajan
9ca9ab6ed0 Removed keep logging 2016-08-16 21:13:21 +02:00
deajan
ce841a29ac Rebuilt targets 2016-08-16 20:39:49 +02:00
deajan
c822c6e3eb Updated waitfortaskcompletion kill functionality 2016-08-16 20:39:13 +02:00
deajan
4e95245374 Ignoring backup size should give size -1Ko 2016-08-15 11:52:55 +02:00
deajan
bc5404cbdd Merge branch 'master' of https://github.com/deajan/obackup 2016-08-15 10:29:54 +02:00
deajan
2d3ce5cd6f Rebuilt targets 2016-08-15 10:29:13 +02:00
deajan
357f4a1b71 Handle uninterruptible sleep state processes 2016-08-15 10:15:05 +02:00
deajan
f0f22e0afa Updated troubleshooting 2016-08-14 13:03:23 +02:00
Orsiris de Jong
ba80a28fee Update README.md 2016-08-09 15:42:06 +02:00
deajan
0d958ed923 Rebuilt targets 2016-08-08 19:00:52 +02:00
deajan
ec5720623d Added shellcheck wrapper for debug env 2016-08-08 18:55:22 +02:00
deajan
7b378dafb7 Added missing -r to read 2016-08-08 18:43:35 +02:00
deajan
40af118c1e Rebuilt targets 2016-08-08 16:35:11 +02:00
deajan
f7a3c4b3f0 Removed unused IFS statements 2016-08-08 16:32:06 +02:00
deajan
81c9ddf202 Rebuilt targets 2016-08-08 15:49:48 +02:00
deajan
93a5dbcf78 Various fixes 2016-08-08 15:47:47 +02:00
deajan
0b67c9790d Updated changelog 2016-08-08 00:29:27 +02:00
deajan
dbeb79980f Rebuilt targets 2016-08-08 00:23:48 +02:00
deajan
6fb84d1441 Fixed wrong variable names 2016-08-08 00:10:28 +02:00
deajan
dbe0020c85 Fixed wrong variable names 2016-08-08 00:03:19 +02:00
deajan
cc5a2a4fd1 Rebuilt targets 2016-08-07 23:47:21 +02:00
deajan
88a927c0b4 Refactor wait functions 2016-08-07 23:45:40 +02:00
deajan
1a98f1c855 Rebuilt targets 2016-08-06 16:16:19 +02:00
deajan
b66f25f436 Updated merge tool 2016-08-06 16:14:07 +02:00
deajan
4b888812ff Revert "Updated merge tool"
This reverts commit 0aee59e05e.
2016-08-06 16:13:11 +02:00
deajan
0aee59e05e Updated merge tool 2016-08-06 16:12:31 +02:00
deajan
7cdb0e6ac7 Added installer template 2016-08-06 15:15:46 +02:00
deajan
d860242781 Made merge.sh work with all programs 2016-08-06 15:15:11 +02:00
deajan
9ef1841543 Bumped version to dev 2016-08-06 14:04:35 +02:00
deajan
69d1729b2e Begin code rewrite 2016-08-06 14:02:52 +02:00
deajan
8828a21741 Added new logger prefix option 2016-08-06 13:53:12 +02:00
deajan
deb10dd3f0 Updated ofunctions 2016-08-06 13:52:17 +02:00
30 changed files with 13629 additions and 4051 deletions

15  .travis.yml (new file)

@@ -0,0 +1,15 @@
language: bash
services:
  - mysql
os:
  - linux
  - osx
before_script:
  - mysql -e 'CREATE DATABASE travistest;'
script:
  - TRAVIS_RUN=true dev/tests/run_tests.sh

CHANGELOG

@@ -2,16 +2,95 @@ KNOWN ISSUES
------------
- Backup size check does not honor rsync exclude patterns
- Encryption does not honor rsync exclude patterns
- Bandwidth parameter is ignored for SQL backups
- Missing symlink support when run from MSYS environment
- Mysqldump errors aren't taken into account
CHANGELOG
---------
README: FreeBSD execution needs mailer (not found), sudo missing, bash needed, sed missing (see if StripQuotes mandatory)
04 Jan 2017: obackup v2.1 beta1 released
----------------------------------------
- Fixed wrong file size fetched remotely since v2.1 rewrite
- Fixed missing databases in manual list fails to trigger an alert
- Improved support for GPG ver >= 2.1
- Added encryption / decryption parallel execution support
- Improved compatibility for RotateCopies
- Unit tests now run on CentOS 5,6
- Added optional rsync arguments configuration value
- Forced bash usage on remote connections in order to be FreeBSD 11 compatible
- Spinner is less prone to move logging on screen
- Fixed another random error involving warns and errors triggered by earlier runs with same PID flag files
- Added more preflight checks (pgrep presence)
- Added --no-prefix, --error-only and --summary switches
- Updated installer from osync
- Updated merge.sh script to handle includes
- Improved remote logging
- Simplified osync-batch runner (internally and for user)
- Better filename handling
- Easier to read log output
- Always passes --silent to obackup
- All options that do not belong to obackup-batch are automatically passed to obackup
- Improved installer OS detection
- Fixed upgrade script cannot update header on BSD / MacOS X
- Fixed SendEmail function on MacOS X
- Fixed MAX_SOFT_EXEC_TIME_PER_XX_TASK not enforced bug introduced with newer ofunctions from v2.1
- PRESERVE_ACL and PRESERVE_XATTR are ignored when local or remote OS is MacOS or msys or Cygwin
- Fixed PRESERVE_EXECUTABILITY was omitted voluntarily on MacOS X because of rsync syntax
- merge.sh is now BSD and Mac compatible
- Unit tests are now BSD and Mac compatible
- Local runs should not check for remote connectivity
- Fixed error alerts cannot be triggered from subprocesses
- Fixed error flags
- Faster remote OS detection
- Added busybox (and Android Termux) support
- More portable file size functions
- More portable compression program commands
- More paranoia checks
- Added busybox sendmail support
- Added tls and ssl support for sendmail
- Added ssh password file support
- Added unit tests
- Added basic unit tests for all three operation modes
- Added process management function tests
- Added file rotation tests
- Added upgrade script test
- Added encryption tests
- Added missing files / databases test
- Added timed execution tests
- Implemented backup encryption using GPG (see documentation for advantages and caveats; a config sketch follows this list)
- Backups are encrypted but still use the differential engine :)
- Database backup improvements
- Added mysqldump options to config file
- Improved unit tests
- Added more preflight checks
- Logs sent by mail are easier to read
- Better subject (currently running or finished run)
- Fixed bogus double log sent in alert mails
- Only current run log is now sent
- Alert sending is now triggered after last action
- Made unix signals posix compliant
- Improved upgrade script
- Upgrade script now updates header
- Can add any missing value now
- Added encryption support
- Fixed problem with spaces in directories to backup (again !)
- Added options to ignore permissions, ownership and groups
- Improved batch runner
- Batch runner works for directories and direct paths
- Fixed batch runner does not rerun obackup on warnings only
- Code compliance
- Clearer semantics
- Made keep logging value configurable and not mandatory
- Fixed handling of processes in uninterruptible sleep state
- Code cleanup
- Refactored waiting functions
- Fixed double RunAfterHook launch
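
For illustration, a minimal conf file excerpt enabling the options introduced above could look like the following sketch (variable names are the ones used in the shipped example conf; the exact meaning of the parallel process count is assumed):

ENCRYPTION=yes                                   # encrypt backups with GPG before transfer
CRYPT_STORAGE="${HOME}/obackup-storage/crypt"    # temporary space used while encrypting
GPG_RECIPIENT="John Doe"                         # a public key for this recipient must exist
PARALLEL_ENCRYPTION_PROCESSES=2                  # assumed meaning: number of parallel gpg processes
MYSQLDUMP_OPTIONS="--opt --single-transaction"   # extra mysqldump flags (default is --opt)
RSYNC_OPTIONAL_ARGS=""                           # optional extra rsync arguments
SSH_PASSWORD_FILE=""                             # optional ssh password file, alternative to key authentication (assumed)
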
06 Aug 2016: obackup v2.0 released
----------------------------------
- Made logging begin before remote checks for sanity purposes
- RunAfterCommands can get executed when trapquit
- Improved process killing and process time control
@@ -38,6 +117,8 @@ README: FreeBSD execution needs mailer (not found), sudo missing, bash needed, s
- A long list of minor improvements and bug fixes
v0-1.x - Jan 2013 - Oct 2015
----------------------------
- New function to kill child processes
- Fixed no_maxtime not honored
- Improved some logging, also added highlighting to stdout errors
@@ -96,7 +177,10 @@ v0-1.x - Jan 2013 - Oct 2015
- Improved OS detection and added preliminary MacOS X support
- Improved execution hook logs
- Improved RunLocalCommand execution hook
- 02 Nov. 2013: v1.84 RC3
02 Nov. 2013: obackup v1.84RC3 released
---------------------------------------
- Updated documentation
- Minor rewrites in recursive backup code
- Added base directory files backup for recursive directories backup
@@ -123,7 +207,10 @@ v0-1.x - Jan 2013 - Oct 2015
- Improved dryrun output
- Improved remote connectivity detection
- Fixed a typo in configuration file
- 18 Aug. 2013: Now v1.84 RC2
18 Aug. 2013: obackup v1.84RC2 released
---------------------------------------
- Added possibility to change default logfile
- Simplified dryrun (removed dryrun function and merged it with main function)
- Simplified Init function
@@ -135,14 +222,17 @@ v0-1.x - Jan 2013 - Oct 2015
- Added --verbose switch (will add databases list, rsync commands, and file backup list)
- Improved task execution checks and more code cleanup
- Fixed CleanUp function if DEBUG=yes, also function is now launched from TrapQuit
- 16 Jul. 2013: version tagged as v1.84 RC1
16 Jul. 2013: obackup v1.84RC1 released
---------------------------------------
- Code cleanup
- Uploaded first documentation
- Fixed an issue with RotateBackups
- Updated obackup to log failed ssh command results
- Updated ssh command filter to log failed commands
- Updated ssh command filter to accept personalized commands
- 23 Jun. 2013 v 1.84 RC1 approaching
- 23 Jun. 2013: v1.84 RC1 approaching
- Added ssh commands filter, updated documentation
- Rewrote local space check function
- Added ability to run another executable than rsync (see documentation on sudo execution)
@@ -152,5 +242,8 @@ v0-1.x - Jan 2013 - Oct 2015
- Updated command line argument --silent processing
- Added remote before and after command execution hook
- Added local before and after command execution hook
- 14 Jun 2013
14 Jun 2013
-----------
- Initial public release, fully functional

README.md

@@ -1,5 +1,5 @@
obackup
=======
# obackup [![Build Status](https://travis-ci.org/deajan/obackup.svg?branch=master)](https://travis-ci.org/deajan/obackup) [![GitHub Release](https://img.shields.io/github/release/deajan/obackup.svg?label=Latest)](https://github.com/deajan/obackup/releases/latest)
A robust file & database backup script that works for local and remote push or pull backups via ssh.
@@ -34,7 +34,7 @@ You may disable this behavior in the config file.
You can download the latest obackup script from the author's website.
You may also clone the following git repository, which may contain more recent builds.
$ git clone -b "v2.0" git://github.com/deajan/obackup.git
$ git clone -b "v2.0-maint" git://github.com/deajan/obackup.git
$ cd obackup
$ ./install.sh
@@ -69,6 +69,10 @@ All backup activity is logged to "/var/log/obackup_backupname.log" or current di
You may mix the "--silent" and "--verbose" parameters to write verbose output only to the log files.
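For instance (a sketch, assuming obackup was installed to /usr/local/bin and a config file exists at /etc/obackup/host_backup.conf):
$ obackup.sh /etc/obackup/host_backup.conf --silent --verbose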
## Troubleshooting
Whenever you encounter rsync zombie processes and/or processes stuck in uninterruptible sleep state, you should force unmount the network drives obackup is supposed to deal with.
## Final words
Backup tasks aren't always reliable: connectivity loss, insufficient disk space, hacked servers with tons of useless stuff to back up... Anything can happen.

36  dev/bootstrap.sh (new executable file)

@@ -0,0 +1,36 @@
#!/usr/bin/env bash
## dev pre-processor bootstrap rev 2016121302
## Yeah !!! A really tech sounding name... In fact it's just include emulation in bash
if [ ! -f "./merge.sh" ]; then
echo "Plrase run bootstrap.sh from osync/dev directory."
exit 1
fi
outputFileName="$0"
source "merge.sh"
__PREPROCESSOR_PROGRAM=obackup
__PREPROCESSOR_Constants
cp "n_$__PREPROCESSOR_PROGRAM.sh" "$outputFileName.tmp.sh"
if [ $? != 0 ]; then
echo "Cannot copy original file [n_$__PREPROCESSOR_PROGRAM.sh] to [$outputFileName.tmp.sh]."
exit 1
fi
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "$outputFileName.tmp.sh"
done
chmod +x "$0.tmp.sh"
if [ $? != 0 ]; then
echo "Cannot make [$outputFileName] executable.."
exit 1
fi
# Termux fix
if type termux-fix-shebang > /dev/null 2>&1; then
termux-fix-shebang "$outputFileName.tmp.sh"
fi
"$outputFileName.tmp.sh" "$@"

172  dev/common_batch.sh (new executable file)

@@ -0,0 +1,172 @@
#!/usr/bin/env bash
SUBPROGRAM=[prgname]
PROGRAM="$SUBPROGRAM-batch" # Batch program to run osync / obackup instances sequentially and rerun failed ones
AUTHOR="(L) 2013-2016 by Orsiris de Jong"
CONTACT="http://www.netpower.fr - ozy@netpower.fr"
PROGRAM_BUILD=2016120401
## Runs an osync /obackup instance for every conf file found
## If an instance fails, run it again if time permits
if ! type "$BASH" > /dev/null; then
echo "Please run this script only with bash shell. Tested on bash >= 3.2"
exit 127
fi
## If maximum execution time is not reached, failed instances will be rerun. Max exec time is in seconds. Example is set to 10 hours.
MAX_EXECUTION_TIME=36000
## Specifies the number of total runs an instance may get
MAX_RUNS=3
## Log file path
if [ -w /var/log ]; then
LOG_FILE=/var/log/$SUBPROGRAM-batch.log
else
LOG_FILE=./$SUBPROGRAM-batch.log
fi
# No need to edit under this line ##############################################################
function _logger {
local value="${1}" # What to log
echo -e "$value" >> "$LOG_FILE"
}
function Logger {
local value="${1}" # What to log
local level="${2}" # Log level: DEBUG, NOTICE, WARN, ERROR, CRITICAL
prefix="$(date) - "
if [ "$level" == "CRITICAL" ]; then
_logger "$prefix\e[41m$value\e[0m"
elif [ "$level" == "ERROR" ]; then
_logger "$prefix\e[91m$value\e[0m"
elif [ "$level" == "WARN" ]; then
_logger "$prefix\e[93m$value\e[0m"
elif [ "$level" == "NOTICE" ]; then
_logger "$prefix$value"
elif [ "$level" == "DEBUG" ]; then
if [ "$DEBUG" == "yes" ]; then
_logger "$prefix$value"
fi
else
_logger "\e[41mLogger function called without proper loglevel.\e[0m"
_logger "$prefix$value"
fi
}
function CheckEnvironment {
## osync / obackup executable full path can be set here if it cannot be found on the system
if ! type $SUBPROGRAM.sh > /dev/null 2>&1
then
if [ -f /usr/local/bin/$SUBPROGRAM.sh ]
then
SUBPROGRAM_EXECUTABLE=/usr/local/bin/$SUBPROGRAM.sh
else
Logger "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" "CRITICAL"
( >&2 echo "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" )
exit 1
fi
else
SUBPROGRAM_EXECUTABLE=$(type -p $SUBPROGRAM.sh)
fi
if [ "$CONF_FILE_PATH" == "" ]; then
Usage
fi
}
function Batch {
local runs=1 # Number of batch runs
local runList # Actual conf file list to run
local runAgainList # List of failed conf files to run again
local confFile
local result
local i
# Using -e because find will accept directories or files
if [ ! -e "$CONF_FILE_PATH" ]; then
Logger "Cannot find conf file path [$CONF_FILE_PATH]." "CRITICAL"
Usage
else
# Ugly hack to read files into an array while preserving special characters
runList=()
while IFS= read -d $'\0' -r file; do runList+=("$file"); done < <(find "$CONF_FILE_PATH" -maxdepth 1 -iname "*.conf" -print0)
while ([ $MAX_EXECUTION_TIME -gt $SECONDS ] || [ $MAX_EXECUTION_TIME -eq 0 ]) && [ "${#runList[@]}" -gt 0 ] && [ $runs -le $MAX_RUNS ]; do
runAgainList=()
Logger "Sequential run n°$runs of $SUBPROGRAM instances for:" "NOTICE"
for confFile in "${runList[@]}"; do
Logger "$(basename $confFile)" "NOTICE"
done
for confFile in "${runList[@]}"; do
$SUBPROGRAM_EXECUTABLE "$confFile" --silent $opts &
wait $!
result=$?
if [ $result != 0 ]; then
if [ $result == 1 ] || [ $result == 128 ]; then # Add to the rerun list on general failure (exit code 1) or exit code 128
Logger "Instance $(basename $confFile) failed with exit code [$result]." "ERROR"
runAgainList+=("$confFile")
elif [ $result == 2 ]; then
Logger "Instance $(basename $confFile) finished with warnings." "WARN"
fi
else
Logger "Instance $(basename $confFile) succeed." "NOTICE"
fi
done
runList=("${runAgainList[@]}")
runs=$(($runs + 1))
done
fi
}
function Usage {
echo "$PROGRAM $PROGRAM_BUILD"
echo $AUTHOR
echo $CONTACT
echo ""
echo "Batch script to sequentially run osync or obackup instances and rerun failed ones."
echo "Usage: $PROGRAM.sh [OPTIONS] [$SUBPROGRAM OPTIONS]"
echo ""
echo "[OPTIONS]"
echo "--path=/path/to/conf Path to osync / obackup conf files, defaults to /etc/osync or /etc/obackup"
echo "--max-runs=X Number of max runs per instance, (defaults to 3)"
echo "--max-exec-time=X Retry failed instances only if max execution time not reached (defaults to 36000 seconds). Set to 0 to bypass execution time check"
echo "[$SUBPROGRAM OPTIONS]"
echo "Specify whatever options $PROGRAM accepts. Example"
echo "$PROGRAM.sh --path=/etc/$SUBPROGRAM --no-maxtime"
echo ""
echo "No output will be written to stdout/stderr."
echo "Verify log file in [$LOG_FILE]."
exit 128
}
opts=""
for i in "$@"
do
case $i in
--path=*)
CONF_FILE_PATH=${i##*=}
;;
--max-runs=*)
MAX_RUNS=${i##*=}
;;
--max-exec-time=*)
MAX_EXECUTION_TIME=${i##*=}
;;
--help|-h|-?)
Usage
;;
*)
opts="$opts$i "
;;
esac
done
CheckEnvironment
Logger "$(date) $SUBPROGRAM batch run" "NOTICE"
Batch
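
A possible invocation, assuming the conf files live in /etc/obackup (options match the Usage text above; unrecognized options are forwarded to every obackup run):

# Run every *.conf file in /etc/obackup, rerun failed instances while time remains,
# and pass --no-maxtime through to each obackup instance
./obackup-batch.sh --path=/etc/obackup --max-runs=3 --max-exec-time=36000 --no-maxtime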

292  dev/common_install.sh (new executable file)

@@ -0,0 +1,292 @@
#!/usr/bin/env bash
include #### _OFUNCTIONS_BOOTSTRAP SUBSET ####
PROGRAM=[prgname]
PROGRAM_VERSION=[version]
PROGRAM_BINARY=$PROGRAM".sh"
PROGRAM_BATCH=$PROGRAM"-batch.sh"
SCRIPT_BUILD=2016122701
## osync / obackup / pmocr / zsnap install script
## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8, 10 and 11
## Please adapt this to fit your distro needs
# Get current install.sh path from http://stackoverflow.com/a/246128/2635443
SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONF_DIR=$FAKEROOT/etc/$PROGRAM
BIN_DIR="$FAKEROOT/usr/local/bin"
SERVICE_DIR_INIT=$FAKEROOT/etc/init.d
# Should be /usr/lib/systemd/system, but /lib/systemd/system exists on debian & rhel / fedora
SERVICE_DIR_SYSTEMD_SYSTEM=$FAKEROOT/lib/systemd/system
SERVICE_DIR_SYSTEMD_USER=$FAKEROOT/etc/systemd/user
## osync specific code
OSYNC_SERVICE_FILE_INIT="osync-srv"
OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM="osync-srv@.service"
OSYNC_SERVICE_FILE_SYSTEMD_USER="osync-srv@.service.user"
## pmocr specific code
PMOCR_SERVICE_FILE_INIT="pmocr-srv"
PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM="pmocr-srv@.service"
## Generic code
## Default log file
if [ -w $FAKEROOT/var/log ]; then
LOG_FILE="$FAKEROOT/var/log/$PROGRAM-install.log"
elif ([ "$HOME" != "" ] && [ -w "$HOME" ]); then
LOG_FILE="$HOME/$PROGRAM-install.log"
else
LOG_FILE="./$PROGRAM-install.log"
fi
include #### QuickLogger SUBSET ####
include #### UrlEncode SUBSET ####
include #### GetLocalOS SUBSET ####
function SetLocalOSSettings {
USER=root
# LOCAL_OS and LOCAL_OS_FULL are global variables set at GetLocalOS
case $LOCAL_OS in
*"BSD"*)
GROUP=wheel
;;
*"MacOSX"*)
GROUP=admin
;;
*"msys"*|*"Cygwin"*)
USER=""
GROUP=""
;;
*)
GROUP=root
;;
esac
if [ "$LOCAL_OS" == "Android" ] || [ "$LOCAL_OS" == "MacOSX" ] || [ "$LOCAL_OS" == "BusyBox" ]; then
QuickLogger "Cannot be installed on [$LOCAL_OS]. Please use $PROGRAM.sh directly."
exit 1
fi
if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ] && [ "$FAKEROOT" == "" ]); then
QuickLogger "Must be run as $USER."
exit 1
fi
OS=$(UrlEncode "$LOCAL_OS_FULL")
}
function GetInit {
if [ -f /sbin/init ]; then
if file /sbin/init | grep systemd > /dev/null; then
init="systemd"
else
init="initV"
fi
else
QuickLogger "Can't detect initV or systemd. Service files won't be installed. You can still run $PROGRAM manually or via cron."
init="none"
fi
}
function CreateConfDir {
if [ ! -d "$CONF_DIR" ]; then
mkdir "$CONF_DIR"
if [ $? == 0 ]; then
QuickLogger "Created directory [$CONF_DIR]."
else
QuickLogger "Cannot create directory [$CONF_DIR]."
exit 1
fi
else
QuickLogger "Config directory [$CONF_DIR] exists."
fi
}
function CopyExampleFiles {
if [ -f "$SCRIPT_PATH/sync.conf.example" ]; then
cp "$SCRIPT_PATH/sync.conf.example" "$CONF_DIR/sync.conf.example"
fi
if [ -f "$SCRIPT_PATH/host_backup.conf.example" ]; then
cp "$SCRIPT_PATH/host_backup.conf.example" "$CONF_DIR/host_backup.conf.example"
fi
if [ -f "$SCRIPT_PATH/exlude.list.example" ]; then
cp "$SCRIPT_PATH/exclude.list.example" "$CONF_DIR/exclude.list.example"
fi
if [ -f "$SCRIPT_PATH/snapshot.conf.example" ]; then
cp "$SCRIPT_PATH/snapshot.conf.example" "$CONF_DIR/snapshot.conf.example"
fi
if [ -f "$SCRIPT_PATH/default.conf" ]; then
if [ -f "$CONF_DIR/default.conf" ]; then
cp "$SCRIPT_PATH/default.conf" "$CONF_DIR/default.conf.new"
QuickLogger "Copied default.conf to [$CONF_DIR/default.conf.new]."
else
cp "$SCRIPT_PATH/default.conf" "$CONF_DIR/default.conf"
fi
fi
}
function CopyProgram {
cp "$SCRIPT_PATH/$PROGRAM_BINARY" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy $PROGRAM_BINARY to [$BIN_DIR]. Make sure to run install script in the directory containing all other files."
QuickLogger "Also make sure you have permissions to write to [$BIN_DIR]."
exit 1
else
chmod 755 "$BIN_DIR/$PROGRAM_BINARY"
QuickLogger "Copied $PROGRAM_BINARY to [$BIN_DIR]."
fi
if [ -f "$SCRIPT_PATH/$PROGRAM_BATCH" ]; then
cp "$SCRIPT_PATH/$PROGRAM_BATCH" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy $PROGRAM_BATCH to [$BIN_DIR]."
else
chmod 755 "$BIN_DIR/$PROGRAM_BATCH"
QuickLogger "Copied $PROGRAM_BATCH to [$BIN_DIR]."
fi
fi
if [ -f "$SCRIPT_PATH/ssh_filter.sh" ]; then
cp "$SCRIPT_PATH/ssh_filter.sh" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy ssh_filter.sh to [$BIN_DIR]."
else
chmod 755 "$BIN_DIR/ssh_filter.sh"
if ([ "$USER" != "" ] && [ "$GROUP" != "" ] && [ "$FAKEROOT" == "" ]); then
chown $USER:$GROUP "$BIN_DIR/ssh_filter.sh"
fi
QuickLogger "Copied ssh_filter.sh to [$BIN_DIR]."
fi
fi
}
function CopyServiceFiles {
# OSYNC SPECIFIC
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM" && cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_USER" "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_SYSTEM"
if [ $? != 0 ]; then
QuickLogger "Cannot copy the systemd file to [$SERVICE_DIR_SYSTEMD_SYSTEM] or [$SERVICE_DIR_SYSTEMD_USER]."
else
QuickLogger "Created osync-srv service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]."
QuickLogger "Can be activated with [systemctl start osync-srv@instance.conf] where instance.conf is the name of the config file in $CONF_DIR."
QuickLogger "Can be enabled on boot with [systemctl enable osync-srv@instance.conf]."
QuickLogger "In userland, active with [systemctl --user start osync-srv@instance.conf]."
fi
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_INIT" ]); then
cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
if [ $? != 0 ]; then
QuickLogger "Cannot copy osync-srv to [$SERVICE_DIR_INIT]."
else
chmod 755 "$SERVICE_DIR_INIT/$OSYNC_SERVICE_FILE_INIT"
QuickLogger "Created osync-srv service in [$SERVICE_DIR_INIT]."
QuickLogger "Can be activated with [service $OSYNC_SERVICE_FILE_INIT start]."
QuickLogger "Can be enabled on boot with [chkconfig $OSYNC_SERVICE_FILE_INIT on]."
fi
fi
# PMOCR SPECIFIC
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM"
if [ $? != 0 ]; then
QuickLogger "Cannot copy the systemd file to [$SERVICE_DIR_SYSTEMD_SYSTEM] or [$SERVICE_DIR_SYSTEMD_USER]."
else
QuickLogger "Created pmocr-srv service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]."
QuickLogger "Can be activated with [systemctl start pmocr-srv@default.conf] where default.conf is the name of the config file in $CONF_DIR."
QuickLogger "Can be enabled on boot with [systemctl enable pmocr-srv@default.conf]."
fi
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_INIT" ]); then
cp "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
if [ $? != 0 ]; then
QuickLogger "Cannot copy pmoct-srv to [$SERVICE_DIR_INIT]."
else
chmod 755 "$SERVICE_DIR_INIT/$PMOCR_SERVICE_FILE_INIT"
QuickLogger "Created osync-srv service in [$SERVICE_DIR_INIT]."
QuickLogger "Can be activated with [service $PMOCR_SERVICE_FILE_INIT start]."
QuickLogger "Can be enabled on boot with [chkconfig $PMOCR_SERVICE_FILE_INIT on]."
fi
fi
}
function Statistics {
if type wget > /dev/null; then
wget -qO- "$STATS_LINK" > /dev/null 2>&1
if [ $? == 0 ]; then
return 0
fi
fi
if type curl > /dev/null; then
curl "$STATS_LINK" -o /dev/null > /dev/null 2>&1
if [ $? == 0 ]; then
return 0
fi
fi
QuickLogger "Neiter wget nor curl could be used for. Cannot run statistics. Use the provided link please."
return 1
}
function Usage {
echo "Installs $PROGRAM into $BIN_DIR"
echo "options:"
echo "--silent Will log and bypass user interaction."
echo "--no-stats Used with --silent in order to refuse sending anonymous install stats."
exit 127
}
_LOGGER_SILENT=false
_STATS=1
for i in "$@"
do
case $i in
--silent)
_LOGGER_SILENT=true
;;
--no-stats)
_STATS=0
;;
--help|-h|-?)
Usage
esac
done
if [ "$FAKEROOT" != "" ]; then
mkdir -p "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_USER" "$BIN_DIR"
fi
GetLocalOS
SetLocalOSSettings
CreateConfDir
CopyExampleFiles
CopyProgram
GetInit
CopyServiceFiles
STATS_LINK="http://instcount.netpower.fr?program=$PROGRAM&version=$PROGRAM_VERSION&os=$OS"
QuickLogger "$PROGRAM installed. Use with $BIN_DIR/$PROGRAM"
if [ $_STATS -eq 1 ]; then
if [ $_LOGGER_SILENT == true ]; then
Statistics
else
QuickLogger "In order to make install statistics, the script would like to connect to $STATS_LINK"
read -r -p "No data except those in the url will be send. Allow [Y/n]" response
case $response in
[nN])
exit
;;
*)
Statistics
exit $?
;;
esac
fi
fi
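
For reference, two ways the assembled install.sh could be invoked (a sketch; FAKEROOT is read by the script as shown above):

# Standard interactive install to /usr/local/bin and /etc/obackup
./install.sh

# Unattended install staged into a packaging root, without anonymous statistics
FAKEROOT=/tmp/obackup-root ./install.sh --silent --no-stats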

File diff suppressed because it is too large.

dev/merge.sh

@@ -1,71 +1,173 @@
#!/usr/bin/env bash
## MERGE 2016080601
## MERGE 2016121901
## Merges ofunctions.sh and n_program.sh into program.sh
## Adds installer
PROGRAM=obackup
VERSION=$(grep "PROGRAM_VERSION=" n_$PROGRAM.sh)
VERSION=${VERSION#*=}
function __PREPROCESSOR_Merge {
PROGRAM=obackup
VERSION=$(grep "PROGRAM_VERSION=" n_$PROGRAM.sh)
VERSION=${VERSION#*=}
PARANOIA_DEBUG_LINE="__WITH_PARANOIA_DEBUG"
PARANOIA_DEBUG_BEGIN="#__BEGIN_WITH_PARANOIA_DEBUG"
PARANOIA_DEBUG_END="#__END_WITH_PARANOIA_DEBUG"
MINIMUM_FUNCTION_BEGIN="#### MINIMAL-FUNCTION-SET BEGIN ####"
MINIMUM_FUNCTION_END="#### MINIMAL-FUNCTION-SET END ####"
__PREPROCESSOR_Constants
function Unexpand {
unexpand n_$PROGRAM.sh > tmp_$PROGRAM.sh
source "ofunctions.sh"
if [ $? != 0 ]; then
echo "Please run $0 in dev directory with ofunctions.sh"
exit 1
fi
__PREPROCESSOR_Unexpand "n_$PROGRAM.sh" "debug_$PROGRAM.sh"
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "debug_$PROGRAM.sh"
done
__PREPROCESSOR_CleanDebug
__PREPROCESSOR_CopyCommons
rm -f tmp_$PROGRAM.sh
if [ $? != 0 ]; then
QuickLogger "Cannot remove tmp_$PROGRAM.sh"
exit 1
fi
}
function MergeAll {
function __PREPROCESSOR_Constants {
PARANOIA_DEBUG_LINE="#__WITH_PARANOIA_DEBUG"
PARANOIA_DEBUG_BEGIN="#__BEGIN_WITH_PARANOIA_DEBUG"
PARANOIA_DEBUG_END="#__END_WITH_PARANOIA_DEBUG"
sed "/source \"\.\/ofunctions.sh\"/r ofunctions.sh" tmp_$PROGRAM.sh | grep -v 'source "./ofunctions.sh"' > debug_$PROGRAM.sh
chmod +x debug_$PROGRAM.sh
__PREPROCESSOR_SUBSETS=(
'#### OFUNCTIONS FULL SUBSET ####'
'#### OFUNCTIONS MINI SUBSET ####'
'#### _OFUNCTIONS_BOOTSTRAP SUBSET ####'
'#### DEBUG SUBSET ####'
'#### TrapError SUBSET ####'
'#### RemoteLogger SUBSET ####'
'#### QuickLogger SUBSET ####'
'#### GetLocalOS SUBSET ####'
'#### IsInteger SUBSET ####'
'#### UrlEncode SUBSET ####'
'#### HumanToNumeric SUBSET ####'
'#### ArrayContains SUBSET ####'
)
}
function MergeMinimum {
sed -n "/$MINIMUM_FUNCTION_BEGIN/,/$MINIMUM_FUNCTION_END/p" ofunctions.sh > tmp_minimal.sh
sed "/source \"\.\/ofunctions.sh\"/r tmp_minimal.sh" tmp_$PROGRAM.sh | grep -v 'source "./ofunctions.sh"' | grep -v "$PARANOIA_DEBUG_LINE" > debug_$PROGRAM.sh
rm -f tmp_minimal.sh
chmod +x debug_$PROGRAM.sh
function __PREPROCESSOR_Unexpand {
local source="${1}"
local destination="${2}"
unexpand "$source" > "$destination"
if [ $? != 0 ]; then
QuickLogger "Cannot unexpand [$source] to [$destination]."
exit 1
fi
}
function __PREPROCESSOR_MergeSubset {
local subsetBegin="${1}"
local subsetEnd="${2}"
local subsetFile="${3}"
local mergedFile="${4}"
function CleanDebug {
sed -n "/$subsetBegin/,/$subsetEnd/p" "$subsetFile" > "$subsetFile.$subsetBegin"
if [ $? != 0 ]; then
QuickLogger "Cannot sed subset [$subsetBegin -- $subsetEnd] in [$subsetFile]."
exit 1
fi
sed "/include $subsetBegin/r $subsetFile.$subsetBegin" "$mergedFile" | grep -v -E "$subsetBegin\$|$subsetEnd\$" > "$mergedFile.tmp"
if [ $? != 0 ]; then
QuickLogger "Cannot add subset [$subsetBegin] to [$mergedFile]."
exit 1
fi
rm -f "$subsetFile.$subsetBegin"
if [ $? != 0 ]; then
QuickLogger "Cannot remove temporary subset [$subsetFile.$subsetBegin]."
exit 1
fi
# sed explanation
#/pattern1/{ # if pattern1 is found
# p # print it
# :a # loop
# N # and accumulate lines
# /pattern2/!ba # until pattern2 is found
# s/.*\n// # delete the part before pattern2
#}
#p
rm -f "$mergedFile"
if [ $? != 0 ]; then
QuickLogger "Cannot remove merged original file [$mergedFile]."
exit 1
fi
sed -n '/'$PARANOIA_DEBUG_BEGIN'/{p; :a; N; /'$PARANOIA_DEBUG_END'/!ba; s/.*\n//}; p' debug_$PROGRAM.sh | grep -v "$PARANOIA_DEBUG_LINE" > ../$PROGRAM.sh
chmod +x ../$PROGRAM.sh
mv "$mergedFile.tmp" "$mergedFile"
if [ $? != 0 ]; then
QuickLogger "Cannot move merged tmp file to original [$mergedFile]."
exit 1
fi
}
function CopyCommons {
function __PREPROCESSOR_CleanDebug {
sed '/'$PARANOIA_DEBUG_BEGIN'/,/'$PARANOIA_DEBUG_END'/d' debug_$PROGRAM.sh | grep -v "$PARANOIA_DEBUG_LINE" > ../$PROGRAM.sh
if [ $? != 0 ]; then
QuickLogger "Cannot remove PARANOIA_DEBUG code from standard build."
exit 1
fi
chmod +x "debug_$PROGRAM.sh"
if [ $? != 0 ]; then
QuickLogger "Cannot chmod debug_$PROGRAM.sh"
exit 1
else
QuickLogger "Prepared ./debug_$PROGRAM.sh"
fi
chmod +x "../$PROGRAM.sh"
if [ $? != 0 ]; then
QuickLogger "Cannot chmod $PROGRAM.sh"
exit 1
else
QuickLogger "Prepared ../$PROGRAM.sh"
fi
}
function __PREPROCESSOR_CopyCommons {
sed "s/\[prgname\]/$PROGRAM/g" common_install.sh > ../tmp_install.sh
if [ $? != 0 ]; then
QuickLogger "Cannot assemble install."
exit 1
fi
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../tmp_install.sh"
done
sed "s/\[version\]/$VERSION/g" ../tmp_install.sh > ../install.sh
if [ $? != 0 ]; then
QuickLogger "Cannot change install version."
exit 1
fi
if [ -f "common_batch.sh" ]; then
sed "s/\[prgname\]/$PROGRAM/g" common_batch.sh > ../$PROGRAM-batch.sh
if [ $? != 0 ]; then
QuickLogger "Cannot assemble batch runner."
exit 1
fi
chmod +x ../$PROGRAM-batch.sh
if [ $? != 0 ]; then
QuickLogger "Cannot chmod $PROGRAM-batch.sh"
exit 1
else
QuickLogger "Prepared ../$PROGRAM-batch.sh"
fi
fi
chmod +x ../install.sh
chmod +x ../$PROGRAM-batch.sh
if [ $? != 0 ]; then
QuickLogger "Cannot chmod install.sh"
exit 1
else
QuickLogger "Prepared ../install.sh"
fi
rm -f ../tmp_install.sh
if [ $? != 0 ]; then
QuickLogger "Cannot chmod $PROGRAM.sh"
exit 1
fi
}
Unexpand
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then
MergeAll
else
MergeMinimum
# If sourced don't do anything
if [ "$(basename $0)" == "merge.sh" ]; then
__PREPROCESSOR_Merge
fi
CleanDebug
CopyCommons
rm -f tmp_$PROGRAM.sh

File diff suppressed because it is too large.

File diff suppressed because it is too large.

8  dev/shellcheck.sh (new executable file)

@@ -0,0 +1,8 @@
#!/usr/bin/env bash
#SC1090 = not following non constants source
#SC1091 = not following source
#SC2086 = quoting errors (shellcheck is way too picky about quoting)
#SC2120 = only for debug version
shellcheck -e SC1090,SC1091,SC2086,SC2119,SC2120 $1
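
Typical use on the unmerged dev source, assuming shellcheck is installed:

# Lint the pre-merge obackup source with the noisy checks silenced
./shellcheck.sh n_obackup.sh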

228  dev/tests/conf/local.conf (new file)

@@ -0,0 +1,228 @@
#!/usr/bin/env bash
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
###### GENERAL BACKUP OPTIONS
## Backup identification string.
INSTANCE_ID="local-test"
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
LOGFILE=""
## Elements to backup
SQL_BACKUP=yes
FILE_BACKUP=yes
## Backups can be done locally, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=local
###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-local"
FILE_STORAGE="${HOME}/obackup-storage/files-local"
## Encryption
ENCRYPTION=yes
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-local"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
GPG_RECIPIENT="John Doe"
## Create backup directories if they do not exist
CREATE_DIRS=yes
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
BACKUP_SIZE_MINIMUM=1024
## Check backup size before proceeding
GET_BACKUP_SIZE=yes
## Generate an alert if storage free space is lower than given value in Kb.
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576
###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases
RSYNC_REMOTE_PATH=""
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no
###### DATABASE SPECIFIC OPTIONS
## Database backup user
SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST="mysql"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
SOFT_MAX_EXEC_TIME_DB_TASK=3600
HARD_MAX_EXEC_TIME_DB_TASK=7200
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
## default option: --opt
MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fallback from xz to gzip depending if available)
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3
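## Illustration (matches the dump filenames expected by run_tests.sh further below): with xz available, a dump of the "mysql" database is stored as mysql.sql.xz.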
###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
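## Illustration (based on the test data created by run_tests.sh further below): with the values above, one task is created for testData,
## plus one task per subdirectory of testDataRecursive ("a", "b", "c d"), while testDataRecursive/Excluded is skipped.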
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
RSYNC_PATTERN_FIRST=include
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
## Paths are relative to sync dirs. List elements are separated by a semicolon.
RSYNC_INCLUDE_PATTERN=""
RSYNC_EXCLUDE_PATTERN="*.ded"
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
## This file has to be in the same directory as the config file
## Paths are relative to sync dirs. One element per line.
RSYNC_INCLUDE_FROM=""
RSYNC_EXCLUDE_FROM=""
#RSYNC_EXCLUDE_FROM="exclude.list"
## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";"
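## Hypothetical example: if your paths contain semicolons, pick another separator, e.g.
#PATH_SEPARATOR_CHAR="|"
#DIRECTORY_LIST="/srv/www|/srv/data"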
## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=yes
PRESERVE_GROUP=yes
## On Mac OS X, this option does not work and will be ignored
PRESERVE_EXECUTABILITY=yes
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr. Make sure source and target FS can hold the same Xattr or you'll get loads of errors.
PRESERVE_XATTR=no
## Transforms symlinks into referent files/dirs
COPY_SYMLINKS=yes
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
KEEP_DIRLINKS=yes
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
PRESERVE_HARDLINKS=no
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
RSYNC_COMPRESS=no
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing the next one in the file list. Time is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
HARD_MAX_EXEC_TIME_FILE_TASK=7200
## Keep partial uploads that can be resumed on next run, experimental feature
PARTIAL=no
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
DELETE_VANISHED_FILES=no
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
DELTA_COPIES=yes
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync
###### ALERT OPTIONS
## Alert email addresses separated by a space character
DESTINATION_MAILS=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
SENDER_MAIL="alert@your.system.tld"
SMTP_SERVER=smtp.your.isp.tld
SMTP_PORT=25
# encryption can be tls, ssl or none
SMTP_ENCRYPTION=none
SMTP_USER=
SMTP_PASSWORD=
###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution.
SOFT_MAX_EXEC_TIME_TOTAL=30000
HARD_MAX_EXEC_TIME_TOTAL=36000
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=yes
ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7
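## Illustration (rotation naming as exercised by run_tests.sh further below): with rotation enabled, the previous copy of a dump like mysql.sql.xz
## is kept as mysql.sql.xz.obackup.1, the one before that as mysql.sql.xz.obackup.2, and so on up to the copy count set above.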
###### EXECUTION HOOKS
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
LOCAL_RUN_BEFORE_CMD=""
LOCAL_RUN_AFTER_CMD=""
REMOTE_RUN_BEFORE_CMD=""
REMOTE_RUN_AFTER_CMD=""
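## Hypothetical example (dataset names are placeholders): snapshot a zfs dataset before backing it up, then drop the snapshot afterwards
#LOCAL_RUN_BEFORE_CMD="zfs snapshot tank/data@obackup"
#LOCAL_RUN_AFTER_CMD="zfs destroy tank/data@obackup"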
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0
## Stops whole backup execution if one of the above commands fails
STOP_ON_CMD_ERROR=no
## Run local and remote after backup cmd's even on failure
RUN_AFTER_CMD_ON_ERROR=yes

222
dev/tests/conf/max-exec-time.conf Normal file
View File

@@ -0,0 +1,222 @@
#!/usr/bin/env bash
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
###### GENERAL BACKUP OPTIONS
## Backup identification string.
INSTANCE_ID="local-test"
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
LOGFILE=""
## Elements to backup
SQL_BACKUP=yes
FILE_BACKUP=yes
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=local
###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql"
FILE_STORAGE="${HOME}/obackup-storage/files"
## Backup encryption using GPG and duplicity. Feature not ready yet.
ENCRYPTION=no
## Create backup directories if they do not exist
CREATE_DIRS=yes
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
BACKUP_SIZE_MINIMUM=1024
## Check backup size before proceeding
GET_BACKUP_SIZE=yes
## Generate an alert if storage free space is lower than given value in Kb.
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576
###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases
RSYNC_REMOTE_PATH=""
## Check for connectivity to remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no
###### DATABASE SPECIFIC OPTIONS
## Database backup user
SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST="mysql"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
SOFT_MAX_EXEC_TIME_DB_TASK=1000
HARD_MAX_EXEC_TIME_DB_TASK=1000
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
## default option: --opt
MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fallback from xz to gzip depending if available)
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3
###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
RSYNC_PATTERN_FIRST=include
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
## Paths are relative to sync dirs. List elements are separated by a semicolon.
RSYNC_INCLUDE_PATTERN=""
RSYNC_EXCLUDE_PATTERN="*.ded"
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
## This file has to be in the same directory as the config file
## Paths are relative to sync dirs. One element per line.
RSYNC_INCLUDE_FROM=""
RSYNC_EXCLUDE_FROM=""
#RSYNC_EXCLUDE_FROM="exclude.list"
## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";"
## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=yes
PRESERVE_GROUP=yes
## On Mac OS X, this option does not work and will be ignored
PRESERVE_EXECUTABILITY=yes
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr. Make sure source and target FS can hold the same Xattr or you'll get loads of errors.
PRESERVE_XATTR=no
## Transforms symlinks into referent files/dirs
COPY_SYMLINKS=yes
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
KEEP_DIRLINKS=yes
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
PRESERVE_HARDLINKS=no
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
RSYNC_COMPRESS=no
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing the next one in the file list. Time is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=1000
HARD_MAX_EXEC_TIME_FILE_TASK=1000
## Keep partial uploads that can be resumed on next run, experimental feature
PARTIAL=no
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
DELETE_VANISHED_FILES=no
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
DELTA_COPIES=yes
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync
###### ALERT OPTIONS
## Alert email addresses separated by a space character
DESTINATION_MAILS=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
SENDER_MAIL="alert@your.system.tld"
SMTP_SERVER=smtp.your.isp.tld
SMTP_PORT=25
# encryption can be tls, ssl or none
SMTP_ENCRYPTION=none
SMTP_USER=
SMTP_PASSWORD=
###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution.
SOFT_MAX_EXEC_TIME_TOTAL=1000
HARD_MAX_EXEC_TIME_TOTAL=1
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=no
ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=no
ROTATE_FILE_COPIES=7
###### EXECUTION HOOKS
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
LOCAL_RUN_BEFORE_CMD=""
LOCAL_RUN_AFTER_CMD=""
REMOTE_RUN_BEFORE_CMD=""
REMOTE_RUN_AFTER_CMD=""
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0
## Stops whole backup execution if one of the above commands fails
STOP_ON_CMD_ERROR=no
## Run local and remote after backup cmd's even on failure
RUN_AFTER_CMD_ON_ERROR=yes

101
dev/tests/conf/old.conf Normal file
View File

@@ -0,0 +1,101 @@
#!/bin/bash
###### Remote (or local) backup script for files & databases
###### (C) 2013 by Ozy de Jong (www.badministrateur.com)
###### Config file rev 0408201301
## Backup identification, any string you want
BACKUP_ID="really-old-config-file"
## General backup options
BACKUP_SQL=yes
BACKUP_FILES=yes
## Local storage paths
LOCAL_SQL_STORAGE="${HOME}/obackup-storage/sql-old"
LOCAL_FILE_STORAGE="${HOME}/obackup-storage/files-old"
## Keep the absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you use recursive directories backup lists or they'll all end in the same path.
LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb, useful for local mounted backups.
BACKUP_SIZE_MINIMUM=1024
## Generate an alert if local storage free space is lower than given value in Kb.
LOCAL_STORAGE_WARN_MIN_SPACE=1048576
## If enabled, file backups will be processed with sudo command. See documentation for /etc/sudoers configuration ("find", "du" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no
## Paranoia option. Don't change this unless you read the documentation and still feel concerned about security issues.
RSYNC_EXECUTABLE=rsync
## Remote options (will make backups of remote system through ssh tunnel, public RSA key need to be put into /home/.ssh/authorized_keys in remote users home directory)
REMOTE_BACKUP=yes
SSH_RSA_PRIVATE_KEY=${HOME}/.ssh/id_rsa_local
REMOTE_USER=root
REMOTE_HOST=localhost
REMOTE_PORT=22
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Check for connectivity to remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOST="www.kernel.org www.google.com"
## Databases options
SQL_USER=root
## Save all databases except the ones specified in the exclude list. Every found database will be backed up as separate task (see documentation for explanation about tasks)
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
# Alternatively, you can specify a manual list of databases to backup separated by spaces
DATABASES_LIST=""
## Max backup execution time per DB task. Soft is warning only. Hard is warning, stopping backup task and processing next one. Time is specified in seconds
SOFT_MAX_EXEC_TIME_DB_TASK=3600
HARD_MAX_EXEC_TIME_DB_TASK=7200
## Preferred sql dump compression. Can be set to xz, lzma or gzip.
## Generally, xz level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_PROGRAM=xz
COMPRESSION_LEVEL=3
## Dump compression should be done on remote side but can also be done locally to lower remote system usage (will take more bandwidth, check for ssh compression)
COMPRESSION_REMOTE=yes
## Path separator. You can set whatever separator you want in your directories list below. You may change this in case you have some esoteric filenames (try to use unconventional separators like | ).
PATH_SEPARATOR_CHAR=";"
## File backup lists. Double quoted directory list separated by the $PATH_SEPARATOR_CHAR. Every directory will be processed as task (see documentation for explanation about tasks)
DIRECTORIES_SIMPLE_LIST="${HOME}/obackup-testdata/testData"
## Recurse directory list separated by the $PATH_SEPARATOR_CHAR. Will create a backup task per subdirectory (one level only), eg RECURSE_LIST="/home /var" will create tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/whatever"
DIRECTORIES_RECURSE_LIST="${HOME}/obackup-testdata/testDataRecursive"
## You can optionally exclude directories from RECURSE_LIST tasks, eg on the above example you could exclude /home/dir2 by adding it to RECURSE_EXCLUDE_LIST
DIRECTORIES_RECURSE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
## Be aware that every recurse list will have its own root (exclude pattern is relative from /home/web for /home/web/{recursedir})
RSYNC_EXCLUDE_PATTERN="*.ded"
## Preserve ACLS. Make sure target FS can hold ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr
PRESERVE_XATTR=no
## Let RSYNC compress file transfers. Do not use if you already enabled SSH compression.
RSYNC_COMPRESS=yes
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing the next one in the file list. Time is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
HARD_MAX_EXEC_TIME_FILE_TASK=7200
## Alert email addresses separated by a space character
DESTINATION_MAILS="your@mail.tld"
## Max execution time of whole backup process. Soft is warning only. Hard is warning and stopping whole backup process.
SOFT_MAX_EXEC_TIME_TOTAL=30000
HARD_MAX_EXEC_TIME_TOTAL=36000
## Backup Rotation in case you don't use a snapshot aware file system like zfs or btrfs to perform a snapshot before every backup
ROTATE_BACKUPS=yes
ROTATE_COPIES=7
## Commands that will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set to yes). Very useful to initiate snapshots.
## Set max execution time to 0 if you want these commands not to get stopped, else set a value in seconds after which execution will be stopped.
LOCAL_RUN_BEFORE_CMD=""
LOCAL_RUN_AFTER_CMD=""
REMOTE_RUN_BEFORE_CMD=""
REMOTE_RUN_AFTER_CMD=""
MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0

228
dev/tests/conf/pull.conf Normal file
View File

@@ -0,0 +1,228 @@
#!/usr/bin/env bash
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
###### GENERAL BACKUP OPTIONS
## Backup identification string.
INSTANCE_ID="pull-test"
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
LOGFILE=""
## Elements to backup
SQL_BACKUP=yes
FILE_BACKUP=yes
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=pull
###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-pull"
FILE_STORAGE="${HOME}/obackup-storage/files-pull"
## Encryption
ENCRYPTION=yes
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-pull"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
GPG_RECIPIENT="John Doe"
## Create backup directories if they do not exist
CREATE_DIRS=yes
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
BACKUP_SIZE_MINIMUM=1024
## Check backup size before proceeding
GET_BACKUP_SIZE=yes
## Generate an alert if storage free space is lower than given value in Kb.
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576
###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://root@localhost:49999/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local"
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases
RSYNC_REMOTE_PATH=""
## Check for connectivity to remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no
###### DATABASE SPECIFIC OPTIONS
## Database backup user
SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST="mysql"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
SOFT_MAX_EXEC_TIME_DB_TASK=3600
HARD_MAX_EXEC_TIME_DB_TASK=7200
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
## default option: --opt
MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fallback from xz to gzip depending if available)
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3
###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
RSYNC_PATTERN_FIRST=include
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
## Paths are relative to sync dirs. List elements are separated by a semicolon.
RSYNC_INCLUDE_PATTERN=""
RSYNC_EXCLUDE_PATTERN="*.ded"
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
## This file has to be in the same directory as the config file
## Paths are relative to sync dirs. One element per line.
RSYNC_INCLUDE_FROM=""
RSYNC_EXCLUDE_FROM=""
#RSYNC_EXCLUDE_FROM="exclude.list"
## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";"
## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=yes
PRESERVE_GROUP=yes
## On Mac OS X, this option does not work and will be ignored
PRESERVE_EXECUTABILITY=yes
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr. Make sure source and target FS can hold the same Xattr or you'll get loads of errors.
PRESERVE_XATTR=no
## Transforms symlinks into referent files/dirs
COPY_SYMLINKS=yes
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
KEEP_DIRLINKS=yes
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
PRESERVE_HARDLINKS=no
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
RSYNC_COMPRESS=no
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing the next one in the file list. Time is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
HARD_MAX_EXEC_TIME_FILE_TASK=7200
## Keep partial uploads that can be resumed on next run, experimental feature
PARTIAL=no
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
DELETE_VANISHED_FILES=no
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
DELTA_COPIES=yes
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync
###### ALERT OPTIONS
## Alert email addresses separated by a space character
DESTINATION_MAILS=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
SENDER_MAIL="alert@your.system.tld"
SMTP_SERVER=smtp.your.isp.tld
SMTP_PORT=25
# encryption can be tls, ssl or none
SMTP_ENCRYPTION=none
SMTP_USER=
SMTP_PASSWORD=
###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution.
SOFT_MAX_EXEC_TIME_TOTAL=30000
HARD_MAX_EXEC_TIME_TOTAL=36000
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=yes
ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7
###### EXECUTION HOOKS
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
LOCAL_RUN_BEFORE_CMD=""
LOCAL_RUN_AFTER_CMD=""
REMOTE_RUN_BEFORE_CMD=""
REMOTE_RUN_AFTER_CMD=""
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0
## Stops whole backup execution if one of the above commands fails
STOP_ON_CMD_ERROR=no
## Run local and remote after backup cmd's even on failure
RUN_AFTER_CMD_ON_ERROR=yes

228
dev/tests/conf/push.conf Normal file
View File

@@ -0,0 +1,228 @@
#!/usr/bin/env bash
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.1x config file rev 2016081701
###### GENERAL BACKUP OPTIONS
## Backup identification string.
INSTANCE_ID="push-test"
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
LOGFILE=""
## Elements to backup
SQL_BACKUP=yes
FILE_BACKUP=yes
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
BACKUP_TYPE=push
###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system)
SQL_STORAGE="${HOME}/obackup-storage/sql-push"
FILE_STORAGE="${HOME}/obackup-storage/files-push"
## Encryption
ENCRYPTION=yes
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-push"
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
GPG_RECIPIENT="John Doe"
## Create backup directories if they do not exist
CREATE_DIRS=yes
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
KEEP_ABSOLUTE_PATHS=yes
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
BACKUP_SIZE_MINIMUM=1024
## Check backup size before proceeding
GET_BACKUP_SIZE=yes
## Generate an alert if storage free space is lower than given value in Kb.
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
SQL_WARN_MIN_SPACE=1048576
FILE_WARN_MIN_SPACE=1048576
###### REMOTE ONLY OPTIONS
## In case of pulled or pushed backups, remote system URI needs to be supplied.
REMOTE_SYSTEM_URI="ssh://root@localhost:49999/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local"
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases
RSYNC_REMOTE_PATH=""
## Check for connectivity to remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip current task.
REMOTE_HOST_PING=yes
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
SUDO_EXEC=no
###### DATABASE SPECIFIC OPTIONS
## Database backup user
SQL_USER=root
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
## Every found database will be backed up as separate backup task.
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;zarafa_prod"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_LIST="mysql"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
SOFT_MAX_EXEC_TIME_DB_TASK=3600
HARD_MAX_EXEC_TIME_DB_TASK=7200
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
## default option: --opt
MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fallback from xz to gzip depending if available)
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
COMPRESSION_LEVEL=3
###### FILES SPECIFIC OPTIONS
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
RSYNC_PATTERN_FIRST=include
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
## Paths are relative to sync dirs. List elements are separated by a semicolon.
RSYNC_INCLUDE_PATTERN=""
RSYNC_EXCLUDE_PATTERN="*.ded"
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
## This file has to be in the same directory as the config file
## Paths are relative to sync dirs. One element per line.
RSYNC_INCLUDE_FROM=""
RSYNC_EXCLUDE_FROM=""
#RSYNC_EXCLUDE_FROM="exclude.list"
## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";"
## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=yes
PRESERVE_GROUP=yes
## On Mac OS X, this option does not work and will be ignored
PRESERVE_EXECUTABILITY=yes
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr. Make sure source and target FS can hold the same Xattr or you'll get loads of errors.
PRESERVE_XATTR=no
## Transforms symlinks into referent files/dirs
COPY_SYMLINKS=yes
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
KEEP_DIRLINKS=yes
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
PRESERVE_HARDLINKS=no
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
RSYNC_COMPRESS=no
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing the next one in the file list. Time is specified in seconds
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
HARD_MAX_EXEC_TIME_FILE_TASK=7200
## Keep partial uploads that can be resumed on next run, experimental feature
PARTIAL=no
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
DELETE_VANISHED_FILES=no
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
DELTA_COPIES=yes
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
BANDWIDTH=0
## Paranoia option. Don't change this unless you read the documentation.
RSYNC_EXECUTABLE=rsync
###### ALERT OPTIONS
## Alert email addresses separated by a space character
DESTINATION_MAILS=""
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
SENDER_MAIL="alert@your.system.tld"
SMTP_SERVER=smtp.your.isp.tld
SMTP_PORT=25
# encryption can be tls, ssl or none
SMTP_ENCRYPTION=none
SMTP_USER=
SMTP_PASSWORD=
###### GENERAL BACKUP OPTIONS
## Max execution time of whole backup process. Soft max exec time generates a warning only.
## Hard max exec time generates a warning and stops the whole backup execution.
SOFT_MAX_EXEC_TIME_TOTAL=30000
HARD_MAX_EXEC_TIME_TOTAL=36000
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=yes
ROTATE_SQL_COPIES=7
ROTATE_FILE_BACKUPS=yes
ROTATE_FILE_COPIES=7
###### EXECUTION HOOKS
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
LOCAL_RUN_BEFORE_CMD=""
LOCAL_RUN_AFTER_CMD=""
REMOTE_RUN_BEFORE_CMD=""
REMOTE_RUN_AFTER_CMD=""
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
MAX_EXEC_TIME_PER_CMD_BEFORE=0
MAX_EXEC_TIME_PER_CMD_AFTER=0
## Stops whole backup execution if one of the above commands fails
STOP_ON_CMD_ERROR=no
## Run local and remote after backup cmd's even on failure
RUN_AFTER_CMD_ON_ERROR=yes

936
dev/tests/run_tests.sh Executable file
View File

@@ -0,0 +1,936 @@
#!/usr/bin/env bash
#TODO Encrypted Pull runs on F25 fail for decryption
## obackup basic tests suite 2017010201
OBACKUP_DIR="$(pwd)"
OBACKUP_DIR=${OBACKUP_DIR%%/dev*}
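# Strip everything from "/dev" onwards, e.g. running from /path/to/obackup/dev/tests yields OBACKUP_DIR=/path/to/obackup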
DEV_DIR="$OBACKUP_DIR/dev"
TESTS_DIR="$DEV_DIR/tests"
CONF_DIR="$TESTS_DIR/conf"
LOCAL_CONF="local.conf"
PULL_CONF="pull.conf"
PUSH_CONF="push.conf"
OLD_CONF="old.conf"
TMP_OLD_CONF="tmp.old.conf"
MAX_EXEC_CONF="max-exec-time.conf"
OBACKUP_EXECUTABLE="obackup.sh"
OBACKUP_DEV_EXECUTABLE="dev/n_obackup.sh"
OBACKUP_UPGRADE="upgrade-v1.x-2.1x.sh"
TMP_FILE="$DEV_DIR/tmp"
SOURCE_DIR="${HOME}/obackup-testdata"
TARGET_DIR="${HOME}/obackup-storage"
TARGET_DIR_SQL_LOCAL="$TARGET_DIR/sql-local"
TARGET_DIR_FILE_LOCAL="$TARGET_DIR/files-local"
TARGET_DIR_CRYPT_LOCAL="$TARGET_DIR/crypt-local"
TARGET_DIR_SQL_PULL="$TARGET_DIR/sql-pull"
TARGET_DIR_FILE_PULL="$TARGET_DIR/files-pull"
TARGET_DIR_CRYPT_PULL="$TARGET_DIR/crypt-pull"
TARGET_DIR_SQL_PUSH="$TARGET_DIR/sql-push"
TARGET_DIR_FILE_PUSH="$TARGET_DIR/files-push"
TARGET_DIR_CRYPT_PUSH="$TARGET_DIR/crypt-push"
SIMPLE_DIR="testData"
RECURSIVE_DIR="testDataRecursive"
S_DIR_1="dir rect ory"
R_EXCLUDED_DIR="Excluded"
R_DIR_1="a"
R_DIR_2="b"
R_DIR_3="c d"
S_FILE_1="some file"
R_FILE_1="file_1"
R_FILE_2="file 2"
R_FILE_3="file 3"
N_FILE_1="non recurse file"
EXCLUDED_FILE="exclu.ded"
DATABASE_1="mysql.sql.xz"
DATABASE_2="performance_schema.sql.xz"
DATABASE_EXCLUDED="information_schema.sql.xz"
CRYPT_EXTENSION=".obackup.gpg"
ROTATE_1_EXTENSION=".obackup.1"
PASSFILE="passfile"
CRYPT_TESTFILE="testfile"
# Later populated variables
OBACKUP_VERSION=2.x
OBACKUP_MIN_VERSION=x
OBACKUP_IS_STABLE=maybe
# Setup an array with all function modes
#declare -Ag osyncParameters
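# Helper functions: GetConfFileValue returns whatever follows the last '=' of a NAME=VALUE line, SetConfFileValue rewrites that line in place.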
function GetConfFileValue () {
local file="${1}"
local name="${2}"
local value
value=$(grep "^$name=" "$file")
if [ $? == 0 ]; then
value="${value##*=}"
echo "$value"
else
assertEquals "$name does not exist in [$file]." "1" "0"
fi
}
function SetConfFileValue () {
local file="${1}"
local name="${2}"
local value="${3}"
if grep "^$name=" "$file" > /dev/null; then
# Using -i.tmp for BSD compat
sed -i.tmp "s#^$name=.*#$name=$value#" "$file"
rm -f "$file.tmp"
assertEquals "Set $name to [$value]." "0" $?
else
assertEquals "$name does not exist in [$file]." "1" "0"
fi
}
function SetupSSH {
echo -e 'y\n'| ssh-keygen -t rsa -b 2048 -N "" -f "${HOME}/.ssh/id_rsa_local"
if ! grep "$(cat ${HOME}/.ssh/id_rsa_local.pub)" "${HOME}/.ssh/authorized_keys"; then
cat "${HOME}/.ssh/id_rsa_local.pub" >> "${HOME}/.ssh/authorized_keys"
fi
chmod 600 "${HOME}/.ssh/authorized_keys"
# Add localhost to known hosts so self connect works
if [ -z "$(ssh-keygen -F localhost)" ]; then
ssh-keyscan -H localhost >> "${HOME}/.ssh/known_hosts"
fi
# Update remote conf files with SSH port
sed -i.tmp 's#ssh://.*@localhost:[0-9]*/#ssh://'$REMOTE_USER'@localhost:'$SSH_PORT'/#' "$CONF_DIR/$PULL_CONF"
sed -i.tmp 's#ssh://.*@localhost:[0-9]*/#ssh://'$REMOTE_USER'@localhost:'$SSH_PORT'/#' "$CONF_DIR/$PUSH_CONF"
}
function RemoveSSH {
local pubkey
if [ -f "${HOME}/.ssh/id_rsa_local" ]; then
pubkey=$(cat "${HOME}/.ssh/id_rsa_local.pub")
sed -i.bak "\#$pubkey#d" "${HOME}/.ssh/authorized_keys"
rm -f "${HOME}/.ssh/id_rsa_local.pub" "${HOME}/.ssh/id_rsa_local"
fi
}
function SetupGPG {
if type gpg2 > /dev/null; then
CRYPT_TOOL=gpg2
elif type gpg > /dev/null; then
CRYPT_TOOL=gpg
else
echo "No gpg support"
fi
echo "Crypt tool=$CRYPT_TOOL"
if ! $CRYPT_TOOL --list-keys | grep "John Doe" > /dev/null; then
cat >gpgcommand <<EOF
%echo Generating a GPG Key
Key-Type: RSA
Key-Length: 4096
Name-Real: John Doe
Name-Comment: obackup-test-key
Name-Email: john@example.com
Expire-Date: 0
Passphrase: PassPhrase123
# Do a commit here, so that we can later print "done" :-)
%commit
%echo done
EOF
if type apt-get > /dev/null 2>&1; then
sudo apt-get install rng-tools
fi
# Setup fast entropy
if type rngd > /dev/null 2>&1; then
rngd -r /dev/urandom
else
echo "No rngd support"
fi
$CRYPT_TOOL --batch --gen-key gpgcommand
echo "Currently owned $CRYPT_TOOL keys"
echo $($CRYPT_TOOL --list-keys)
rm -f gpgcommand
fi
echo "PassPhrase123" > "$TESTS_DIR/$PASSFILE"
}
function oneTimeSetUp () {
START_TIME=$SECONDS
source "$DEV_DIR/ofunctions.sh"
GetLocalOS
echo "Detected OS: $LOCAL_OS"
# Set some travis related changes
if [ "$TRAVIS_RUN" == true ]; then
echo "Running with travis settings"
REMOTE_USER="travis"
RHOST_PING="no"
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" ""
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" ""
# Config value didn't have S at the end in old files
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" ""
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" "no"
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" "no"
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" "no"
else
echo "Running with local settings"
REMOTE_USER="root"
RHOST_PING="yes"
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\""
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_3RD_PARTY_HOSTS" "\"www.kernel.org www.google.com\""
# Config value didn't have S at the end in old files
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_3RD_PARTY_HOST" "\"www.kernel.org www.google.com\""
SetConfFileValue "$CONF_DIR/$PULL_CONF" "REMOTE_HOST_PING" "yes"
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "REMOTE_HOST_PING" "yes"
SetConfFileValue "$CONF_DIR/$OLD_CONF" "REMOTE_HOST_PING" "yes"
fi
# Get default ssh port from env
if [ "$SSH_PORT" == "" ]; then
SSH_PORT=22
fi
SetupGPG
if [ "$SKIP_REMOTE" != "yes" ]; then
SetupSSH
fi
# Get OBACKUP version
OBACKUP_VERSION=$(GetConfFileValue "$OBACKUP_DIR/$OBACKUP_DEV_EXECUTABLE" "PROGRAM_VERSION")
OBACKUP_VERSION="${OBACKUP_VERSION##*=}"
OBACKUP_MIN_VERSION="${OBACKUP_VERSION:2:1}"
OBACKUP_IS_STABLE=$(GetConfFileValue "$OBACKUP_DIR/$OBACKUP_DEV_EXECUTABLE" "IS_STABLE")
echo "Running with $OBACKUP_VERSION ($OBACKUP_MIN_VERSION) STABLE=$OBACKUP_IS_STABLE"
# Set basic values that could get changed later
for i in "$LOCAL_CONF" "$PULL_CONF" "$PUSH_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "ENCRYPTION" "no"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "yes"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
done
}
function oneTimeTearDown () {
SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" "$OBACKUP_IS_STABLE"
RemoveSSH
#TODO: uncomment this when dev is done
#rm -rf "$SOURCE_DIR"
#rm -rf "$TARGET_DIR"
rm -f "$TMP_FILE"
ELAPSED_TIME=$(($SECONDS - $START_TIME))
echo "It took $ELAPSED_TIME seconds to run these tests."
}
function setUp () {
rm -rf "$SOURCE_DIR"
rm -rf "$TARGET_DIR"
mkdir -p "$SOURCE_DIR/$SIMPLE_DIR/$S_DIR_1"
mkdir -p "$SOURCE_DIR/$RECURSIVE_DIR/$R_EXCLUDED_DIR"
mkdir -p "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_1"
mkdir -p "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_2"
mkdir -p "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_3"
touch "$SOURCE_DIR/$SIMPLE_DIR/$S_DIR_1/$S_FILE_1"
touch "$SOURCE_DIR/$SIMPLE_DIR/$EXCLUDED_FILE"
touch "$SOURCE_DIR/$RECURSIVE_DIR/$N_FILE_1"
touch "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_1/$R_FILE_1"
touch "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_2/$R_FILE_2"
dd if=/dev/urandom of="$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_3/$R_FILE_3" bs=1M count=2
touch "$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_3/$EXCLUDED_FILE"
FilePresence=(
"$SOURCE_DIR/$SIMPLE_DIR/$S_DIR_1/$S_FILE_1"
"$SOURCE_DIR/$RECURSIVE_DIR/$N_FILE_1"
"$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_1/$R_FILE_1"
"$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_2/$R_FILE_2"
"$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_3/$R_FILE_3"
)
DatabasePresence=(
"$DATABASE_1"
"$DATABASE_2"
)
FileExcluded=(
"$SOURCE_DIR/$SIMPLE_DIR/$EXCLUDED_FILE"
"$SOURCE_DIR/$RECURSIVE_DIR/$R_DIR_3/$R_EXCLUDED_FILE"
)
DatabaseExcluded=(
"$DATABASE_EXCLUDED"
)
DirectoriesExcluded=(
"$RECURSIVE_DIR/$R_EXCLUDED_DIR"
)
}
function test_Merge () {
cd "$DEV_DIR"
./merge.sh
assertEquals "Merging code" "0" $?
# Set obackup version to stable while testing to avoid warning message
SetConfFileValue "$OBACKUP_DIR/$OBACKUP_EXECUTABLE" "IS_STABLE" "yes"
}
# Keep this function to check GPG behavior depending on OS. (GPG 2.1 / GPG 2.0x / GPG 1.4 don't behave the same way)
function test_GPG () {
echo "Encrypting file"
$CRYPT_TOOL --out "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION" --recipient "John Doe" --batch --yes --encrypt "$TESTS_DIR/$PASSFILE"
assertEquals "Encrypt file" "0" $?
# Detect whether GnuPG >= 2.1 is used, since it does not allow automatic pin entry anymore
cryptToolVersion=$($CRYPT_TOOL --version | head -1 | awk '{print $3}')
cryptToolMajorVersion=${cryptToolVersion%%.*}
cryptToolSubVersion=${cryptToolVersion#*.}
cryptToolSubVersion=${cryptToolSubVersion%.*}
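# e.g. "gpg (GnuPG) 2.1.18" yields cryptToolVersion=2.1.18, hence major=2 and sub=1, and the loopback pinentry parameter below gets added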
if [ $cryptToolMajorVersion -eq 2 ] && [ $cryptToolSubVersion -ge 1 ]; then
additionalParameters="--pinentry-mode loopback"
fi
if [ "$CRYPT_TOOL" == "gpg2" ]; then
options="--batch --yes"
elif [ "$CRYPT_TOOL" == "gpg" ]; then
options="--no-use-agent --batch"
fi
echo "Decrypt using passphrase file"
$CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase-file="$TESTS_DIR/$PASSFILE" --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION"
assertEquals "Decrypt file using passfile" "0" $?
echo "Decrypt using passphrase"
$CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase PassPhrase123 --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION"
assertEquals "Decrypt file using passphrase" "0" $?
echo "Decrypt using passphrase file with cat"
$CRYPT_TOOL $options --out "$TESTS_DIR/$CRYPT_TESTFILE" --batch --yes $additionalParameters --passphrase $(cat "$TESTS_DIR/$PASSFILE") --decrypt "$TESTS_DIR/$CRYPT_TESTFILE$CRYPT_EXTENSION"
assertEquals "Decrypt file using passphrase" "0" $?
}
function test_LocalRun () {
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
assertEquals "Return code" "0" $?
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_FILE_LOCAL/$file" ]
assertEquals "File Presence [$TARGET_DIR_FILE_LOCAL/$file]" "0" $?
done
for file in "${FileExcluded[@]}"; do
[ -f "$TARGET_DIR_FILE_LOCAL/$file" ]
assertEquals "File Excluded [$TARGET_DIR_FILE_LOCAL/$file]" "1" $?
done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_LOCAL/$file]" "0" $?
done
for file in "${DatabaseExcluded[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file" ]
assertEquals "Database Excluded [$TARGET_DIR_SQL_LOCAL/$file]" "1" $?
done
for directory in "${DirectoriesExcluded[@]}"; do
[ -d "$TARGET_DIR_FILE_LOCAL/$directory" ]
assertEquals "Directory Excluded [$TARGET_DIR_FILE_LOCAL/$directory]" "1" $?
done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file]" "0" $?
done
[ -d "$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_LOCAL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
}
function test_PullRun () {
if [ "$SKIP_REMOTE" == "yes" ]; then
return 0
fi
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $?
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_FILE_PULL/$file" ]
assertEquals "File Presence [$TARGET_DIR_FILE_PULL/$file]" "0" $?
done
for file in "${FileExcluded[@]}"; do
[ -f "$TARGET_DIR_FILE_PULL/$file" ]
assertEquals "File Excluded [$TARGET_DIR_FILE_PULL/$file]" "1" $?
done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_PULL/$file]" "0" $?
done
for file in "${DatabaseExcluded[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file" ]
assertEquals "Database Excluded [$TARGET_DIR_SQL_PULL/$file]" "1" $?
done
for directory in "${DirectoriesExcluded[@]}"; do
[ -d "$TARGET_DIR_FILE_PULL/$directory" ]
assertEquals "Directory Excluded [$TARGET_DIR_FILE_PULL/$directory]" "1" $?
done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PULL/$file$ROTATE_1_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PULL/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
}
function test_PushRun () {
if [ "$SKIP_REMOTE" == "yes" ]; then
return 0
fi
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "no"
# Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $?
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_FILE_PUSH/$file" ]
assertEquals "File Presence [$TARGET_DIR_FILE_PUSH/$file]" "0" $?
done
for file in "${FileExcluded[@]}"; do
[ -f "$TARGET_DIR_FILE_PUSH/$file" ]
assertEquals "File Excluded [$TARGET_DIR_FILE_PUSH/$file]" "1" $?
done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_PUSH/$file]" "0" $?
done
for file in "${DatabaseExcluded[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file" ]
assertEquals "Database Excluded [$TARGET_DIR_SQL_PUSH/$file]" "1" $?
done
for directory in "${DirectoriesExcluded[@]}"; do
[ -d "$TARGET_DIR_FILE_PUSH/$directory" ]
assertEquals "Directory Excluded [$TARGET_DIR_FILE_PUSH/$directory]" "1" $?
done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PUSH/$file$ROTATE_1_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$ROTATE_1_EXTENSION]" "0" $?
}
function test_EncryptLocalRun () {
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_CRYPT_LOCAL/$file$CRYPT_EXTENSION" ]
assertEquals "File Presence [$TARGET_DIR_CRYPT_LOCAL/$file$CRYPT_EXTENSION]" "0" $?
done
# TODO: Exclusion lists don't work with encrypted files yet
# for file in "${FileExcluded[@]}"; do
# [ -f "$TARGET_DIR_CRYPT_LOCAL/$file$CRYPT_EXTENSION" ]
# assertEquals "File Excluded [$TARGET_DIR_CRYPT_LOCAL/$file$CRYPT_EXTENSION]" "1" $?
# done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION]" "0" $?
done
# for file in "${DatabaseExcluded[@]}"; do
# [ -f "$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION" ]
# assertEquals "Database Excluded [$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION]" "1" $?
# done
# for directory in "${DirectoriesExcluded[@]}"; do
# [ -d "$TARGET_DIR_CRYPT_LOCAL/$directory" ]
# assertEquals "Directory Excluded [$TARGET_DIR_CRYPT_LOCAL/$directory]" "1" $?
# done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-local/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 5 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$LOCAL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_LOCAL/$file$CRYPT_EXTENSION$ROTATE_1_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_CRYPT_LOCAL/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_CRYPT_LOCAL/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_SQL_LOCAL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt sql storage in [$TARGET_DIR_SQL_LOCAL]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_LOCAL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_LOCAL]" "0" $?
SetConfFileValue "$CONF_DIR/$LOCAL_CONF" "ENCRYPTION" "no"
}
function test_EncryptPullRun () {
if [ "$SKIP_REMOTE" == "yes" ]; then
return 0
fi
# Basic return code tests. Need to go deep into file presence testing
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $?
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_CRYPT_PULL/$file$CRYPT_EXTENSION" ]
assertEquals "File Presence [$TARGET_DIR_CRYPT_PULL/$file$CRYPT_EXTENSION]" "0" $?
done
# for file in "${FileExcluded[@]}"; do
# [ -f "$TARGET_DIR_CRYPT_PULL/$file$CRYPT_EXTENSION" ]
# assertEquals "File Excluded [$TARGET_DIR_CRYPT_PULL/$file$CRYPT_EXTENSION]" "1" $?
# done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION]" "0" $?
done
# for file in "${DatabaseExcluded[@]}"; do
# [ -f "$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION" ]
# assertEquals "Database Excluded [$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION]" "1" $?
# done
# for directory in "${DirectoriesExcluded[@]}"; do
# [ -d "$TARGET_DIR_FILE_PULL/$directory" ]
# assertEquals "Directory Excluded [$TARGET_DIR_FILE_PULL/$directory]" "1" $?
# done
# Only excluded files should be listed here
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-pull/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 2 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PULL_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PULL/$file$CRYPT_EXTENSION$ROTATE_1_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_CRYPT/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_CRYPT/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_SQL_PULL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt sql storage in [$TARGET_DIR_SQL_PULL]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_CRYPT_PULL" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_CRYPT_PULL]" "0" $?
SetConfFileValue "$CONF_DIR/$PULL_CONF" "ENCRYPTION" "no"
}
function test_EncryptPushRun () {
if [ "$SKIP_REMOTE" == "yes" ]; then
return 0
fi
# Basic return code tests. Need to go deep into file presence testing
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "yes"
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $?
# Same here: why do we check for the crypt extension in file_push instead of file_crypt?
for file in "${FilePresence[@]}"; do
[ -f "$TARGET_DIR_FILE_PUSH/$file$CRYPT_EXTENSION" ]
assertEquals "File Presence [$TARGET_DIR_FILE_PUSH/$file$CRYPT_EXTENSION]" "0" $?
done
# for file in "${FileExcluded[@]}"; do
# [ -f "$TARGET_DIR_FILE_PUSH/$file$CRYPT_EXTENSION" ]
# assertEquals "File Excluded [$TARGET_DIR_FILE_PUSH/$file$CRYPT_EXTENSION]" "1" $?
# done
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION" ]
assertEquals "Database Presence [$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION]" "0" $?
done
# for file in "${DatabaseExcluded[@]}"; do
# [ -f "$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION" ]
# assertEquals "Database Excluded [$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION]" "1" $?
# done
# for directory in "${DirectoriesExcluded[@]}"; do
# [ -d "$TARGET_DIR_FILE_PUSH/$directory" ]
# assertEquals "Directory Excluded [$TARGET_DIR_FILE_PUSH/$directory]" "1" $?
# done
diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu
[ $(diff -qr "$SOURCE_DIR" "$TARGET_DIR/files-push/$SOURCE_DIR" | grep -i Exclu | wc -l) -eq 5 ]
assertEquals "Diff should only output excluded files" "0" $?
# Tests presence of rotated files
cd "$OBACKUP_DIR"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$PUSH_CONF"
assertEquals "Return code" "0" $?
for file in "${DatabasePresence[@]}"; do
[ -f "$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "Database rotated Presence [$TARGET_DIR_SQL_PUSH/$file$CRYPT_EXTENSION]" "0" $?
done
[ -d "$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION" ]
assertEquals "File rotated Presence [$TARGET_DIR_FILE_PUSH/$(dirname $SOURCE_DIR)$CRYPT_EXTENSION$ROTATE_1_EXTENSION]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_SQL_PUSH" --passphrase-file="$TESTS_DIR/$PASSFILE" --verbose
assertEquals "Decrypt sql storage in [$TARGET_DIR_SQL_PUSH]" "0" $?
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE --decrypt="$TARGET_DIR_FILE_PUSH" --passphrase-file="$TESTS_DIR/$PASSFILE"
assertEquals "Decrypt file storage in [$TARGET_DIR_FILE_PUSH]" "0" $?
SetConfFileValue "$CONF_DIR/$PUSH_CONF" "ENCRYPTION" "no"
}
function test_missing_databases () {
cd "$OBACKUP_DIR"
# Prepare files for missing databases
for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "no"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "\"zorglub;mysql\""
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "no"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i"
assertEquals "Missing databases should trigger error with [$i]" "1" $?
SetConfFileValue "$CONF_DIR/$i" "DATABASES_ALL" "yes"
SetConfFileValue "$CONF_DIR/$i" "DATABASES_LIST" "mysql"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
done
for i in "$LOCAL_CONF" "$PUSH_CONF" "$PULL_CONF"; do
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/nonPresentDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "no"
SetConfFileValue "$CONF_DIR/$i" "FILE_BACKUP" "yes"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$i"
assertEquals "Missing files should trigger error with [$i]" "1" $?
echo "glob"
return
echo "nope"
SetConfFileValue "$CONF_DIR/$i" "DIRECTORY_LIST" "${HOME}/obackup-testdata/testData"
SetConfFileValue "$CONF_DIR/$i" "RECURSIVE_DIRECTORY_LIST" "${HOME}/obackup-testdata/testDataRecursive"
SetConfFileValue "$CONF_DIR/$i" "SQL_BACKUP" "yes"
done
}
function test_timed_execution () {
cd "$OBACKUP_DIR"
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1000
SLEEP_TIME=2 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Soft max exec time db reached in obackup Return code" "2" $?
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1000
SLEEP_TIME=2 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Hard max exec time db reached in obackup Return code" "1" $?
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1000
SLEEP_TIME=2 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Soft max exec time file reached in obackup Return code" "2" $?
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1000
SLEEP_TIME=2 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Hard max exec time file reached in obackup Return code" "1" $?
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1000
SLEEP_TIME=1.5 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Soft max exec time total reached in obackup Return code" "2" $?
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_DB_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_FILE_TASK" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "SOFT_MAX_EXEC_TIME_TOTAL" 1000
SetConfFileValue "$CONF_DIR/$MAX_EXEC_CONF" "HARD_MAX_EXEC_TIME_TOTAL" 1
SLEEP_TIME=2 REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$MAX_EXEC_CONF"
assertEquals "Hard max exec time total reached in obackup Return code" "1" $?
}
function test_WaitForTaskCompletion () {
local pids
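# Positional argument layout inferred from the calls below (not a documented signature): after
# the semicolon-separated pid list come a soft and a hard max execution time (0 = unlimited),
# the sleep interval, the keep-logging interval, then counting / spinner / noError flags and
# the caller name used for logging.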
# Standard wait
sleep 1 &
pids="$!"
sleep 2 &
pids="$pids;$!"
WaitForTaskCompletion $pids 0 0 $SLEEP_TIME $KEEP_LOGGING true true false ${FUNCNAME[0]}
assertEquals "WaitForTaskCompletion test 1" "0" $?
# Standard wait with warning
sleep 2 &
pids="$!"
sleep 5 &
pids="$pids;$!"
WaitForTaskCompletion $pids 3 0 $SLEEP_TIME $KEEP_LOGGING true true false ${FUNCNAME[0]}
assertEquals "WaitForTaskCompletion test 2" "0" $?
# Both pids are killed
sleep 5 &
pids="$!"
sleep 5 &
pids="$pids;$!"
WaitForTaskCompletion $pids 0 2 $SLEEP_TIME $KEEP_LOGGING true true false ${FUNCNAME[0]}
assertEquals "WaitForTaskCompletion test 3" "2" $?
# One of two pids are killed
sleep 2 &
pids="$!"
sleep 10 &
pids="$pids;$!"
WaitForTaskCompletion $pids 0 3 $SLEEP_TIME $KEEP_LOGGING true true false ${FUNCNAME[0]}
assertEquals "WaitForTaskCompletion test 4" "1" $?
# Count since script begin, the following should output two warnings and both pids should get killed
sleep 20 &
pids="$!"
sleep 20 &
pids="$pids;$!"
WaitForTaskCompletion $pids 3 5 $SLEEP_TIME $KEEP_LOGGING false true false ${FUNCNAME[0]}
assertEquals "WaitForTaskCompletion test 5" "2" $?
}
function test_ParallelExec () {
local cmd
# Test if parallelExec works correctly in array mode
cmd="sleep 2;sleep 2;sleep 2;sleep 2"
ParallelExec 4 "$cmd"
assertEquals "ParallelExec test 1" "0" $?
cmd="sleep 2;du /none;sleep 2"
ParallelExec 2 "$cmd"
assertEquals "ParallelExec test 2" "1" $?
cmd="sleep 4;du /none;sleep 3;du /none;sleep 2"
ParallelExec 3 "$cmd"
assertEquals "ParallelExec test 3" "2" $?
# Test if parallelExec works correctly in file mode
echo "sleep 2" > "$TMP_FILE"
echo "sleep 2" >> "$TMP_FILE"
echo "sleep 2" >> "$TMP_FILE"
echo "sleep 2" >> "$TMP_FILE"
ParallelExec 4 "$TMP_FILE" true
assertEquals "ParallelExec test 4" "0" $?
echo "sleep 2" > "$TMP_FILE"
echo "du /nome" >> "$TMP_FILE"
echo "sleep 2" >> "$TMP_FILE"
ParallelExec 2 "$TMP_FILE" true
assertEquals "ParallelExec test 5" "1" $?
echo "sleep 4" > "$TMP_FILE"
echo "du /none" >> "$TMP_FILE"
echo "sleep 3" >> "$TMP_FILE"
echo "du /none" >> "$TMP_FILE"
echo "sleep 2" >> "$TMP_FILE"
ParallelExec 3 "$TMP_FILE" true
assertEquals "ParallelExec test 6" "2" $?
#function ParallelExec $numberOfProcesses $commandsArg $readFromFile $softTime $HardTime $sleepTime $keepLogging $counting $Spinner $noError $callerName
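# A hedged reading of how the full time control calls below map onto that signature (inferred
# from the comment above, not checked against the ParallelExec implementation): in
# ParallelExec 4 "$cmd" false 1 0 .05 3600 true true false ${FUNCNAME[0]}
# "4" is numberOfProcesses, "$cmd" is commandsArg, "false" disables readFromFile, "1" and "0"
# are the soft and hard time limits, ".05" is sleepTime, "3600" is keepLogging, the following
# "true true false" map to counting, Spinner and noError, and ${FUNCNAME[0]} is callerName.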
# Test if parallelExec works correctly in array mode with full time control
cmd="sleep 5;sleep 5;sleep 5;sleep 5;sleep 5"
ParallelExec 4 "$cmd" false 1 0 .05 3600 true true false ${FUNCNAME[0]}
assertEquals "ParallelExec full test 1" "0" $?
cmd="sleep 2;du /none;sleep 2;sleep 2;sleep 4"
ParallelExec 2 "$cmd" false 0 0 .1 2 true false false ${FUNCNAME[0]}
assertEquals "ParallelExec full test 2" "1" $?
cmd="sleep 4;du /none;sleep 3;du /none;sleep 2"
ParallelExec 3 "$cmd" false 1 2 .05 7000 true true false ${FUNCNAME[0]}
assertNotEquals "ParallelExec full test 3" "0" $?
}
function test_UpgradeConfPullRun () {
# Basic return code tests. Need to go deep into file presence testing
cd "$OBACKUP_DIR"
# Make a security copy of the old config file
cp "$CONF_DIR/$OLD_CONF" "$CONF_DIR/$TMP_OLD_CONF"
./$OBACKUP_UPGRADE "$CONF_DIR/$TMP_OLD_CONF"
assertEquals "Conf file upgrade" "0" $?
# Update remote conf files with SSH port
sed -i.tmp 's#ssh://.*@localhost:[0-9]*/#ssh://'$REMOTE_USER'@localhost:'$SSH_PORT'/#' "$CONF_DIR/$TMP_OLD_CONF"
REMOTE_HOST_PING=$RHOST_PING ./$OBACKUP_EXECUTABLE "$CONF_DIR/$TMP_OLD_CONF"
assertEquals "Upgraded conf file execution test" "0" $?
rm -f "$CONF_DIR/$TMP_OLD_CONF"
rm -f "$CONF_DIR/$TMP_OLD_CONF.save"
}
. "$TESTS_DIR/shunit2/shunit2"

dev/tests/shunit2/shunit2 (executable file, 1067 lines): file diff suppressed because it is too large.

dev/tests/shunit2/shunit2_test.sh (executable file, 124 lines):

@@ -0,0 +1,124 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test suite runner.
#
# This script runs all the unit tests that can be found, and generates a nice
# report of the tests.
MY_NAME=`basename $0`
MY_PATH=`dirname $0`
PREFIX='shunit2_test_'
SHELLS='/bin/sh /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
TESTS=''
for test in ${PREFIX}[a-z]*.sh; do
TESTS="${TESTS} ${test}"
done
# load common unit test functions
. ../lib/versions
. ./shunit2_test_helpers
usage()
{
echo "usage: ${MY_NAME} [-e key=val ...] [-s shell(s)] [-t test(s)]"
}
env=''
# process command line flags
while getopts 'e:hs:t:' opt; do
case ${opt} in
e) # set an environment variable
key=`expr "${OPTARG}" : '\([^=]*\)='`
val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
if [ -z "${key}" -o -z "${val}" ]; then
usage
exit 1
fi
eval "${key}='${val}'"
export ${key}
env="${env:+${env} }${key}"
;;
h) usage; exit 0 ;; # output help
s) shells=${OPTARG} ;; # list of shells to run
t) tests=${OPTARG} ;; # list of tests to run
*) usage; exit 1 ;;
esac
done
shift `expr ${OPTIND} - 1`
# fill shells and/or tests
shells=${shells:-${SHELLS}}
tests=${tests:-${TESTS}}
# error checking
if [ -z "${tests}" ]; then
th_error 'no tests found to run; exiting'
exit 1
fi
cat <<EOF
#------------------------------------------------------------------------------
# System data
#
# test run info
shells: ${shells}
tests: ${tests}
EOF
for key in ${env}; do
eval "echo \"${key}=\$${key}\""
done
echo
# output system data
echo "# system info"
echo "$ date"
date
echo
echo "$ uname -mprsv"
uname -mprsv
#
# run tests
#
for shell in ${shells}; do
echo
# check for existence of shell
if [ ! -x ${shell} ]; then
th_warn "unable to run tests with the ${shell} shell"
continue
fi
cat <<EOF
#------------------------------------------------------------------------------
# Running the test suite with ${shell}
#
EOF
SHUNIT_SHELL=${shell} # pass shell onto tests
shell_name=`basename ${shell}`
shell_version=`versions_shellVersion "${shell}"`
echo "shell name: ${shell_name}"
echo "shell version: ${shell_version}"
# execute the tests
for suite in ${tests}; do
suiteName=`expr "${suite}" : "${PREFIX}\(.*\).sh"`
echo
echo "--- Executing the '${suiteName}' test suite ---"
( exec ${shell} ./${suite} 2>&1; )
done
done


@@ -0,0 +1,206 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test for assert functions
# load test helpers
. ./shunit2_test_helpers
#------------------------------------------------------------------------------
# suite tests
#
commonEqualsSame()
{
fn=$1
( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'equal' $? "${stdoutF}" "${stderrF}"
( ${fn} "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'equal; with msg' $? "${stdoutF}" "${stderrF}"
( ${fn} 'abc def' 'abc def' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'equal with spaces' $? "${stdoutF}" "${stderrF}"
( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'not equal' $? "${stdoutF}" "${stderrF}"
( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'null values' $? "${stdoutF}" "${stderrF}"
( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
commonNotEqualsSame()
{
fn=$1
( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not same' $? "${stdoutF}" "${stderrF}"
( ${fn} "${MSG}" 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not same, with msg' $? "${stdoutF}" "${stderrF}"
( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testAssertEquals()
{
commonEqualsSame 'assertEquals'
}
testAssertNotEquals()
{
commonNotEqualsSame 'assertNotEquals'
}
testAssertSame()
{
commonEqualsSame 'assertSame'
}
testAssertNotSame()
{
commonNotEqualsSame 'assertNotSame'
}
testAssertNull()
{
( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
( assertNull "${MSG}" '' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'null, with msg' $? "${stdoutF}" "${stderrF}"
( assertNull 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'not null' $? "${stdoutF}" "${stderrF}"
( assertNull >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( assertNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testAssertNotNull()
{
( assertNotNull 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null' $? "${stdoutF}" "${stderrF}"
( assertNotNull "${MSG}" 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with msg' $? "${stdoutF}" "${stderrF}"
( assertNotNull 'x"b' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with double-quote' $? \
"${stdoutF}" "${stderrF}"
( assertNotNull "x'b" >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with single-quote' $? \
"${stdoutF}" "${stderrF}"
( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with dollar' $? \
"${stdoutF}" "${stderrF}"
( assertNotNull 'x`b' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'not null, with backtick' $? \
"${stdoutF}" "${stderrF}"
( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
# there is no test for too few arguments as $1 might actually be null
( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testAssertTrue()
{
( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
( assertTrue "${MSG}" 0 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'true, with msg' $? "${stdoutF}" "${stderrF}"
( assertTrue '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'true condition' $? "${stdoutF}" "${stderrF}"
( assertTrue 1 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'false' $? "${stdoutF}" "${stderrF}"
( assertTrue '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'false condition' $? "${stdoutF}" "${stderrF}"
( assertTrue '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
( assertTrue >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( assertTrue arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testAssertFalse()
{
( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
( assertFalse "${MSG}" 1 >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'false, with msg' $? "${stdoutF}" "${stderrF}"
( assertFalse '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
th_assertTrueWithNoOutput 'false condition' $? "${stdoutF}" "${stderrF}"
( assertFalse 0 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'true' $? "${stdoutF}" "${stderrF}"
( assertFalse '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
( assertFalse '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
( assertFalse >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( assertFalse arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
#------------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp
MSG='This is a test message'
}
# load and run shUnit2
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}


@@ -0,0 +1,86 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test for failure functions
# load common unit-test functions
. ./shunit2_test_helpers
#-----------------------------------------------------------------------------
# suite tests
#
testFail()
{
( fail >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
( fail "${MSG}" >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'fail with msg' $? "${stdoutF}" "${stderrF}"
( fail arg1 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testFailNotEquals()
{
( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
( failNotEquals "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
( failNotEquals 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
( failNotEquals '' '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
( failNotEquals >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( failNotEquals arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
testFailSame()
{
( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
( failSame "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
( failSame 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
( failSame '' '' >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
( failSame >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
( failSame arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
}
#-----------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp
MSG='This is a test message'
}
# load and run shUnit2
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}


@@ -0,0 +1,229 @@
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test common functions
# treat unset variables as an error when performing parameter expansion
set -u
# set shwordsplit for zsh
[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
#
# constants
#
# path to shUnit2 library. can be overridden by setting SHUNIT_INC
TH_SHUNIT=${SHUNIT_INC:-./shunit2}
# configure debugging. set the DEBUG environment variable to any
# non-empty value to enable debug output, or TRACE to enable trace
# output.
TRACE=${TRACE:+'th_trace '}
[ -n "${TRACE}" ] && DEBUG=1
[ -z "${TRACE}" ] && TRACE=':'
DEBUG=${DEBUG:+'th_debug '}
[ -z "${DEBUG}" ] && DEBUG=':'
#
# variables
#
th_RANDOM=0
#
# functions
#
# message functions
th_trace() { echo "${MY_NAME}:TRACE $@" >&2; }
th_debug() { echo "${MY_NAME}:DEBUG $@" >&2; }
th_info() { echo "${MY_NAME}:INFO $@" >&2; }
th_warn() { echo "${MY_NAME}:WARN $@" >&2; }
th_error() { echo "${MY_NAME}:ERROR $@" >&2; }
th_fatal() { echo "${MY_NAME}:FATAL $@" >&2; }
# output subtest name
th_subtest() { echo " $@" >&2; }
th_oneTimeSetUp()
{
# these files will be cleaned up automatically by shUnit2
stdoutF="${SHUNIT_TMPDIR}/stdout"
stderrF="${SHUNIT_TMPDIR}/stderr"
returnF="${SHUNIT_TMPDIR}/return"
expectedF="${SHUNIT_TMPDIR}/expected"
}
# generate a random number
th_generateRandom()
{
tfgr_random=${th_RANDOM}
while [ "${tfgr_random}" = "${th_RANDOM}" ]; do
if [ -n "${RANDOM:-}" ]; then
# $RANDOM works
tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
elif [ -r '/dev/urandom' ]; then
tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
else
tfgr_date=`date '+%H%M%S'`
tfgr_random=`expr ${tfgr_date} \* $$`
unset tfgr_date
fi
[ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
done
th_RANDOM=${tfgr_random}
unset tfgr_random
}
# this function returns the data section from the specified section of a file. a
# data section is defined by a [header], one or more lines of data, and then a
# blank line.
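# Illustrative example (layout assumed from the description above, file name hypothetical):
# given a file containing
# [mysection]
# value one
# value two
# followed by a blank line, `th_getDataSect 'mysection' datafile` should print "value one"
# and "value two".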
th_getDataSect()
{
th_sgrep "\\[$1\\]" "$2" |sed '1d'
}
# this function greps a section from a file. a section is defined as a group of
# lines preceded and followed by blank lines.
th_sgrep()
{
th_pattern_=$1
shift
sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
unset th_pattern_
}
# Custom assert that checks for true return value (0), and no output to STDOUT
# or STDERR. If a non-zero return value is encountered, the output of STDERR
# will be output.
#
# Args:
# th_test_: string: name of the subtest
# th_rtrn_: integer: the return value of the subtest performed
# th_stdout_: string: filename where stdout was redirected to
# th_stderr_: string: filename where stderr was redirected to
th_assertTrueWithNoOutput()
{
th_test_=$1
th_rtrn_=$2
th_stdout_=$3
th_stderr_=$4
assertTrue "${th_test_}; expected return value of zero" ${th_rtrn_}
[ ${th_rtrn_} -ne ${SHUNIT_TRUE} ] && cat "${th_stderr_}"
assertFalse "${th_test_}; expected no output to STDOUT" \
"[ -s '${th_stdout_}' ]"
assertFalse "${th_test_}; expected no output to STDERR" \
"[ -s '${th_stderr_}' ]"
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
}
# Custom assert that checks for non-zero return value, output to STDOUT, but no
# output to STDERR.
#
# Args:
# th_test_: string: name of the subtest
# th_rtrn_: integer: the return value of the subtest performed
# th_stdout_: string: filename where stdout was redirected to
# th_stderr_: string: filename where stderr was redirected to
th_assertFalseWithOutput()
{
th_test_=$1
th_rtrn_=$2
th_stdout_=$3
th_stderr_=$4
assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
assertTrue "${th_test_}; expected output to STDOUT" \
"[ -s '${th_stdout_}' ]"
assertFalse "${th_test_}; expected no output to STDERR" \
"[ -s '${th_stderr_}' ]"
[ -s "${th_stdout_}" -a ! -s "${th_stderr_}" ] || \
_th_showOutput ${SHUNIT_FALSE} "${th_stdout_}" "${th_stderr_}"
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
}
# Custom assert that checks for non-zero return value, no output to STDOUT, but
# output to STDERR.
#
# Args:
# th_test_: string: name of the subtest
# th_rtrn_: integer: the return value of the subtest performed
# th_stdout_: string: filename where stdout was redirected to
# th_stderr_: string: filename where stderr was redirected to
th_assertFalseWithError()
{
th_test_=$1
th_rtrn_=$2
th_stdout_=$3
th_stderr_=$4
assertFalse "${th_test_}; expected non-zero return value" ${th_rtrn_}
assertFalse "${th_test_}; expected no output to STDOUT" \
"[ -s '${th_stdout_}' ]"
assertTrue "${th_test_}; expected output to STDERR" \
"[ -s '${th_stderr_}' ]"
[ ! -s "${th_stdout_}" -a -s "${th_stderr_}" ] || \
_th_showOutput ${SHUNIT_FALSE} "${th_stdout_}" "${th_stderr_}"
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
}
# Some shells, zsh on Solaris in particular, return immediately from a sub-shell
# when a non-zero return value is encountered. To properly catch these values,
# they are either written to disk, or recognized as an error if the file is empty.
th_clearReturn() { cp /dev/null "${returnF}"; }
th_queryReturn()
{
if [ -s "${returnF}" ]; then
th_return=`cat "${returnF}"`
else
th_return=${SHUNIT_ERROR}
fi
}
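# Typical usage sketch (assuming the semantics described above rather than copied from an
# existing test): clear the return file, run the assertion in a sub-shell that records its
# exit code, then query and assert on it:
# th_clearReturn
# ( assertTrue ${SHUNIT_TRUE}; echo $? >"${returnF}" ) >"${stdoutF}" 2>"${stderrF}"
# th_queryReturn
# assertEquals ${SHUNIT_TRUE} ${th_return}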
# Providing external and internal calls to the showOutput helper function.
th_showOutput() { _th_showOutput $@; }
_th_showOutput()
{
_th_return_=$1
_th_stdout_=$2
_th_stderr_=$3
isSkipping
if [ $? -eq ${SHUNIT_FALSE} -a ${_th_return_} != ${SHUNIT_TRUE} ]; then
if [ -n "${_th_stdout_}" -a -s "${_th_stdout_}" ]; then
echo '>>> STDOUT' >&2
cat "${_th_stdout_}" >&2
fi
if [ -n "${_th_stderr_}" -a -s "${_th_stderr_}" ]; then
echo '>>> STDERR' >&2
cat "${_th_stderr_}" >&2
fi
if [ -n "${_th_stdout_}" -o -n "${_th_stderr_}" ]; then
echo '<<< end output' >&2
fi
fi
unset _th_return_ _th_stdout_ _th_stderr_
}
#
# main
#
${TRACE} 'trace output enabled'
${DEBUG} 'debug output enabled'


@@ -0,0 +1,246 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test for macros.
# load test helpers
. ./shunit2_test_helpers
#------------------------------------------------------------------------------
# suite tests
#
testAssertEquals()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testAssertNotEquals()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testSame()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_SAME_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testNotSame()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testNull()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NULL_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testNotNull()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_NOT_NULL_} '' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stdoutF}" "${stderrF}" >&2
}
testAssertTrue()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_TRUE_} ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_TRUE_} '"some msg"' ${SHUNIT_FALSE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testAssertFalse()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_ASSERT_FALSE_} ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_ASSERT_FALSE_} '"some msg"' ${SHUNIT_TRUE} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testFail()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testFailNotEquals()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_NOT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testFailSame()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_SAME_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testFailNotSame()
{
# start skipping if LINENO not available
[ -z "${LINENO:-}" ] && startSkipping
( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
rtrn=$?
assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
#------------------------------------------------------------------------------
# suite functions
#
oneTimeSetUp()
{
th_oneTimeSetUp
}
# load and run shUnit2
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}


@@ -0,0 +1,160 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2008 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
#
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit tests of miscellaneous things
# load test helpers
. ./shunit2_test_helpers
#------------------------------------------------------------------------------
# suite tests
#
# Note: the test script is prefixed with '#' chars so that shUnit2 does not
# incorrectly interpret the embedded functions as real functions.
testUnboundVariable()
{
unittestF="${SHUNIT_TMPDIR}/unittest"
sed 's/^#//' >"${unittestF}" <<EOF
## treat unset variables as an error when performing parameter expansion
#set -u
#
#boom() { x=\$1; } # this function goes boom if no parameters are passed!
#test_boom()
#{
# assertEquals 1 1
# boom # No parameter given
# assertEquals 0 \$?
#}
#. ${TH_SHUNIT}
EOF
( exec ${SHUNIT_SHELL:-sh} "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
assertFalse 'expected a non-zero exit value' $?
grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
assertTrue 'assert message was not generated' $?
grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
assertTrue 'test count message was not generated' $?
grep '^FAILED' "${stdoutF}" >/dev/null
assertTrue 'failure message was not generated' $?
}
testIssue7()
{
( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
diff "${stdoutF}" - >/dev/null <<EOF
ASSERT:Some message. expected:<1> but was:<2>
EOF
rtrn=$?
assertEquals ${SHUNIT_TRUE} ${rtrn}
[ ${rtrn} -ne ${SHUNIT_TRUE} ] && cat "${stderrF}" >&2
}
testPrepForSourcing()
{
assertEquals '/abc' `_shunit_prepForSourcing '/abc'`
assertEquals './abc' `_shunit_prepForSourcing './abc'`
assertEquals './abc' `_shunit_prepForSourcing 'abc'`
}
testEscapeCharInStr()
{
actual=`_shunit_escapeCharInStr '\' ''`
assertEquals '' "${actual}"
assertEquals 'abc\\' `_shunit_escapeCharInStr '\' 'abc\'`
assertEquals 'abc\\def' `_shunit_escapeCharInStr '\' 'abc\def'`
assertEquals '\\def' `_shunit_escapeCharInStr '\' '\def'`
actual=`_shunit_escapeCharInStr '"' ''`
assertEquals '' "${actual}"
assertEquals 'abc\"' `_shunit_escapeCharInStr '"' 'abc"'`
assertEquals 'abc\"def' `_shunit_escapeCharInStr '"' 'abc"def'`
assertEquals '\"def' `_shunit_escapeCharInStr '"' '"def'`
actual=`_shunit_escapeCharInStr '$' ''`
assertEquals '' "${actual}"
assertEquals 'abc\$' `_shunit_escapeCharInStr '$' 'abc$'`
assertEquals 'abc\$def' `_shunit_escapeCharInStr '$' 'abc$def'`
assertEquals '\$def' `_shunit_escapeCharInStr '$' '$def'`
# actual=`_shunit_escapeCharInStr "'" ''`
# assertEquals '' "${actual}"
# assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
# assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
# assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
# # must put the backtick in a variable so the shell doesn't misinterpret it
# # while inside a backticked sequence (e.g. `echo '`'` would fail).
# backtick='`'
# actual=`_shunit_escapeCharInStr ${backtick} ''`
# assertEquals '' "${actual}"
# assertEquals '\`abc' \
# `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
# assertEquals 'abc\`' \
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
# assertEquals 'abc\`def' \
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
}
testEscapeCharInStr_specialChars()
{
# make sure our forward slash doesn't upset sed
assertEquals '/' `_shunit_escapeCharInStr '\' '/'`
# some shells escape these differently
#assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
#assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
}
# Test the various ways of declaring functions.
#
# Prefixing (then stripping) with comment symbol so these functions aren't
# treated as real functions by shUnit2.
testExtractTestFunctions()
{
f="${SHUNIT_TMPDIR}/extract_test_functions"
sed 's/^#//' <<EOF >"${f}"
#testABC() { echo 'ABC'; }
#test_def() {
# echo 'def'
#}
#testG3 ()
#{
# echo 'G3'
#}
#function test4() { echo '4'; }
# test5() { echo '5'; }
#some_test_function() { echo 'some func'; }
#func_with_test_vars() {
# testVariable=1234
#}
EOF
actual=`_shunit_extractTestFunctions "${f}"`
assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
}
#------------------------------------------------------------------------------
# suite functions
#
setUp()
{
for f in ${expectedF} ${stdoutF} ${stderrF}; do
cp /dev/null ${f}
done
}
oneTimeSetUp()
{
th_oneTimeSetUp
}
# load and run shUnit2
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
. ${TH_SHUNIT}


@@ -0,0 +1,41 @@
#! /bin/sh
# $Id$
# vim:et:ft=sh:sts=2:sw=2
#
# Copyright 2010 Kate Ward. All Rights Reserved.
# Released under the LGPL (GNU Lesser General Public License)
# Author: kate.ward@forestent.com (Kate Ward)
#
# shUnit2 unit test for standalone operation.
#
# This unit test is purely to test that calling shunit2 directly, while passing
# the name of a unit test script, works. When run, this script determines if it
# is running as a standalone program, and calls main() if it is.
ARGV0=`basename "$0"`
# load test helpers
. ./shunit2_test_helpers
#------------------------------------------------------------------------------
# suite tests
#
testStandalone()
{
assertTrue ${SHUNIT_TRUE}
}
#------------------------------------------------------------------------------
# main
#
main()
{
${TH_SHUNIT} "${ARGV0}"
}
# are we running as a standalone?
if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
if [ $# -gt 0 ]; then main "$@"; else main; fi
fi


@@ -2,7 +2,7 @@
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
###### obackup v2.x config file rev 2016052501
###### obackup v2.1x config file rev 2017010201
###### GENERAL BACKUP OPTIONS
@@ -22,13 +22,25 @@ BACKUP_TYPE=local
###### BACKUP STORAGE
## Storage paths of the backups (absolute paths of the local or remote system)
## Storage paths of the backups (absolute paths of the local or remote system). Please use ${HOME} instead of ~ if needed.
SQL_STORAGE="/home/storage/backup/sql"
FILE_STORAGE="/home/storage/backup/files"
## Backup encryption using GPG and duplicity. Feature not ready yet.
## Backup encryption using GPG and rsync.
## Push backups get encrypted locally in CRYPT_STORAGE before they are sent to the remote system
## Local and pull backups get encrypted after backup, in CRYPT_STORAGE
ENCRYPTION=no
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
## In case of a pull backup, an encrypted copy of FILE_BACKUP goes here
CRYPT_STORAGE=/home/storage/backup/crypt
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
GPG_RECIPIENT="John Doe"
## Use n CPUs for encryption / decryption where n is an integer
PARALLEL_ENCRYPTION_PROCESSES=
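## Example (hypothetical value, based on the description above): PARALLEL_ENCRYPTION_PROCESSES=2
## would run two encryption / decryption processes in parallel; leaving it empty presumably uses a single process.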
## Create backup directories if they do not exist
CREATE_DIRS=yes
@@ -53,12 +65,16 @@ FILE_WARN_MIN_SPACE=1048576
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
SSH_RSA_PRIVATE_KEY="/root/.ssh/id_rsa"
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
SSH_PASSWORD_FILE=""
## ssh compression should be used unless your remote connection is good enough (LAN)
SSH_COMPRESSION=yes
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
## Works on Redhat / CentOS, doesn't work on Debian / Ubuntu
SSH_IGNORE_KNOWN_HOSTS=no
## Remote rsync executable path. Leave this empty in most cases
@@ -75,14 +91,14 @@ SUDO_EXEC=no
###### DATABASE SPECIFIC OPTIONS
## Database backup user
SQL_USER=backupuser
## Database backup user (should be the same user you are running obackup with)
SQL_USER=root
## Enabling the following option will save all databases on the given local or remote SQL instance, except the ones specified in the exclude list.
## Every database found will be backed up as a separate backup task.
DATABASES_ALL=yes
DATABASES_ALL_EXCLUDE_LIST="test"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
DATABASES_ALL_EXCLUDE_LIST="test;mysql"
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by semi-colons.
#DATABASES_LIST="somedatabase"
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
@@ -90,8 +106,13 @@ DATABASES_ALL_EXCLUDE_LIST="test"
SOFT_MAX_EXEC_TIME_DB_TASK=3600
HARD_MAX_EXEC_TIME_DB_TASK=7200
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
## default option: --opt
MYSQLDUMP_OPTIONS="--opt --single-transaction"
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability)
## Generally, level 5 is a good compromise between CPU usage, memory consumption and compression ratio. Gzipped files are set to be rsyncable.
## If you use encryption, compression will only bring small benefits as GPG already has pretty good compression included
COMPRESSION_LEVEL=3
###### FILES SPECIFIC OPTIONS
@@ -100,6 +121,7 @@ COMPRESSION_LEVEL=3
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
## Please use ${HOME} instead of ~ if needed.
## Directories backup list. List of semicolon separated directories that will be backed up.
DIRECTORY_LIST="/var/named"
@@ -125,6 +147,17 @@ RSYNC_EXCLUDE_FROM=""
## List separator char. You may set an alternative separator char for your directories lists above.
PATH_SEPARATOR_CHAR=";"
## Optional arguments passed to rsync executable. The following are already managed by the program and should never be passed here
## -rltD -n -P -o -g --executability -A -X -zz -L -K -H -8 -u -i --stats --checksum --bwlimit --partial --partial-dir --exclude --exclude-from --include-from --no-whole-file --whole-file --list-only
RSYNC_OPTIONAL_ARGS=""
## Preserve basic linux permissions
PRESERVE_PERMISSIONS=yes
PRESERVE_OWNER=yes
PRESERVE_GROUP=yes
## On Mac OS X, this does not work and will be ignored
PRESERVE_EXECUTABILITY=yes
## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors.
PRESERVE_ACL=no
## Preserve Xattr. Make sure source and target FS can hold same Xattr or you'll get loads of errors.
@@ -165,7 +198,7 @@ RSYNC_EXECUTABLE=rsync
## Alert email addresses separated by a space character
DESTINATION_MAILS="your@mail.address"
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/
## Environment specific mail options (used with busybox sendemail, mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail)
SENDER_MAIL="alert@your.system.tld"
SMTP_SERVER=smtp.your.isp.tld
SMTP_PORT=25
@@ -181,6 +214,9 @@ SMTP_PASSWORD=
SOFT_MAX_EXEC_TIME_TOTAL=30000
HARD_MAX_EXEC_TIME_TOTAL=36000
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
KEEP_LOGGING=1801
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
ROTATE_SQL_BACKUPS=no
ROTATE_SQL_COPIES=7


@@ -1,23 +1,26 @@
#!/usr/bin/env bash
_OFUNCTIONS_BOOTSTRAP=true
PROGRAM=obackup
PROGRAM_VERSION=2.0
PROGRAM_VERSION=2.1-beta1
PROGRAM_BINARY=$PROGRAM".sh"
PROGRAM_BATCH=$PROGRAM"-batch.sh"
SCRIPT_BUILD=2016052601
SCRIPT_BUILD=2016122701
## osync / obackup / pmocr / zsnap install script
## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8 & 10
## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8, 10 and 11
## Please adapt this to fit your distro needs
#TODO: silent mode and no stats mode
# Get current install.sh path from http://stackoverflow.com/a/246128/2635443
SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONF_DIR=/etc/$PROGRAM
BIN_DIR=/usr/local/bin
SERVICE_DIR_INIT=/etc/init.d
CONF_DIR=$FAKEROOT/etc/$PROGRAM
BIN_DIR="$FAKEROOT/usr/local/bin"
SERVICE_DIR_INIT=$FAKEROOT/etc/init.d
# Should be /usr/lib/systemd/system, but /lib/systemd/system exists on debian & rhel / fedora
SERVICE_DIR_SYSTEMD_SYSTEM=/lib/systemd/system
SERVICE_DIR_SYSTEMD_USER=/etc/systemd/user
SERVICE_DIR_SYSTEMD_SYSTEM=$FAKEROOT/lib/systemd/system
SERVICE_DIR_SYSTEMD_USER=$FAKEROOT/etc/systemd/user
## osync specific code
OSYNC_SERVICE_FILE_INIT="osync-srv"
@@ -26,20 +29,20 @@ OSYNC_SERVICE_FILE_SYSTEMD_USER="osync-srv@.service.user"
## pmocr specific code
PMOCR_SERVICE_FILE_INIT="pmocr-srv"
PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM="pmocr-srv.service"
PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM="pmocr-srv@.service"
## Generic code
## Default log file
if [ -w /var/log ]; then
LOG_FILE="/var/log/$PROGRAM-install.log"
if [ -w $FAKEROOT/var/log ]; then
LOG_FILE="$FAKEROOT/var/log/$PROGRAM-install.log"
elif ([ "$HOME" != "" ] && [ -w "$HOME" ]); then
LOG_FILE="$HOME/$PROGRAM-install.log"
else
LOG_FILE="./$PROGRAM-install.log"
fi
# Generic quick logging function
# QuickLogger subfunction, can be called directly
function _QuickLogger {
local value="${1}"
local destination="${2}" # Destination: stdout, log, both
@@ -51,65 +54,130 @@ function _QuickLogger {
fi
}
# Generic quick logging function
function QuickLogger {
local value="${1}"
if [ "$_SILENT" -eq 1 ]; then
if [ "$_LOGGER_SILENT" == true ]; then
_QuickLogger "$value" "log"
else
_QuickLogger "$value" "stdout"
fi
}
function urlencode() {
# urlencode <string>
## from https://gist.github.com/cdown/1163649
function UrlEncode {
local length="${#1}"
local LANG=C
local length="${#1}"
for (( i = 0; i < length; i++ )); do
local c="${1:i:1}"
case $c in
[a-zA-Z0-9.~_-]) printf "$c" ;;
*) printf '%%%02X' "'$c" ;;
[a-zA-Z0-9.~_-])
printf "$c"
;;
*)
printf '%%%02X' "'$c"
;;
esac
done
}
function GetLocalOS {
local localOsVar
function SetOSSettings {
# There's no good way to tell if we are currently running in a BusyBox shell. Using a sluggish detection method.
if ls --help 2>&1 | grep -i "BusyBox" > /dev/null; then
localOsVar="BusyBox"
else
# Detecting the special ubuntu userland in Windows 10 bash
if grep -i Microsoft /proc/sys/kernel/osrelease > /dev/null 2>&1; then
localOsVar="Microsoft"
else
localOsVar="$(uname -spior 2>&1)"
if [ $? != 0 ]; then
localOsVar="$(uname -v 2>&1)"
if [ $? != 0 ]; then
localOsVar="$(uname)"
fi
fi
fi
fi
case $localOsVar in
# Android uname contains both linux and android, keep it before linux entry
*"Android"*)
LOCAL_OS="Android"
;;
*"Linux"*)
LOCAL_OS="Linux"
;;
*"BSD"*)
LOCAL_OS="BSD"
;;
*"MINGW32"*|*"MSYS"*)
LOCAL_OS="msys"
;;
*"CYGWIN"*)
LOCAL_OS="Cygwin"
;;
*"Microsoft"*)
LOCAL_OS="WinNT10"
;;
*"Darwin"*)
LOCAL_OS="MacOSX"
;;
*"BusyBox"*)
LOCAL_OS="BusyBox"
;;
*)
if [ "$IGNORE_OS_TYPE" == "yes" ]; then
Logger "Running on unknown local OS [$localOsVar]." "WARN"
return
fi
if [ "$_OFUNCTIONS_VERSION" != "" ]; then
Logger "Running on >> $localOsVar << not supported. Please report to the author." "ERROR"
fi
exit 1
;;
esac
if [ "$_OFUNCTIONS_VERSION" != "" ]; then
Logger "Local OS: [$localOsVar]." "DEBUG"
fi
# Add a global variable for statistics in installer
LOCAL_OS_FULL="$localOsVar"
}
function SetLocalOSSettings {
USER=root
local local_os_var
# LOCAL_OS and LOCAL_OS_FULL are global variables set at GetLocalOS
local_os_var="$(uname -spio 2>&1)"
if [ $? != 0 ]; then
local_os_var="$(uname -v 2>&1)"
if [ $? != 0 ]; then
local_os_var="$(uname)"
fi
fi
case $local_os_var in
case $LOCAL_OS in
*"BSD"*)
GROUP=wheel
;;
*"Darwin"*)
*"MacOSX"*)
GROUP=admin
;;
*"msys"*|*"Cygwin"*)
USER=""
GROUP=""
;;
*)
GROUP=root
;;
*"MINGW32"*|*"CYGWIN"*)
USER=""
GROUP=""
;;
esac
if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ]); then
if [ "$LOCAL_OS" == "Android" ] || [ "$LOCAL_OS" == "MacOSX" ] || [ "$LOCAL_OS" == "BusyBox" ]; then
QuickLogger "Cannot be installed on [$LOCAL_OS]. Please use $PROGRAM.sh directly."
exit 1
fi
if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ] && [ "$FAKEROOT" == "" ]); then
QuickLogger "Must be run as $USER."
exit 1
fi
OS=$(urlencode "$local_os_var")
OS=$(UrlEncode "$LOCAL_OS_FULL")
}
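# Hedged walk-through, assuming GetLocalOS detected a BSD host (LOCAL_OS="BSD",
# LOCAL_OS_FULL holding the full uname string): the case statement sets GROUP=wheel,
# USER keeps its default of root, the whoami check then requires running as root
# unless FAKEROOT is set, and OS becomes the percent-encoded LOCAL_OS_FULL string.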
function GetInit {
@@ -140,25 +208,34 @@ function CreateConfDir {
}
function CopyExampleFiles {
if [ -f "./sync.conf.example" ]; then
cp "./sync.conf.example" "/etc/$PROGRAM/sync.conf.example"
if [ -f "$SCRIPT_PATH/sync.conf.example" ]; then
cp "$SCRIPT_PATH/sync.conf.example" "$CONF_DIR/sync.conf.example"
fi
if [ -f "./host_backup.conf.example" ]; then
cp "./host_backup.conf.example" "/etc/$PROGRAM/host_backup.conf.example"
if [ -f "$SCRIPT_PATH/host_backup.conf.example" ]; then
cp "$SCRIPT_PATH/host_backup.conf.example" "$CONF_DIR/host_backup.conf.example"
fi
if [ -f "./exlude.list.example" ]; then
cp "./exclude.list.example" "/etc/$PROGRAM"
if [ -f "$SCRIPT_PATH/exlude.list.example" ]; then
cp "$SCRIPT_PATH/exclude.list.example" "$CONF_DIR/exclude.list.example"
fi
if [ -f "./snapshot.conf.example" ]; then
cp "./snapshot.conf.example" "/etc/$PROGRAM/snapshot.conf.example"
if [ -f "$SCRIPT_PATH/snapshot.conf.example" ]; then
cp "$SCRIPT_PATH/snapshot.conf.example" "$CONF_DIR/snapshot.conf.example"
fi
if [ -f "$SCRIPT_PATH/default.conf" ]; then
if [ -f "$CONF_DIR/default.conf" ]; then
cp "$SCRIPT_PATH/default.conf" "$CONF_DIR/default.conf.new"
QuickLogger "Copied default.conf to [$CONF_DIR/default.conf.new]."
else
cp "$SCRIPT_PATH/default.conf" "$CONF_DIR/default.conf"
fi
fi
}
function CopyProgram {
cp "./$PROGRAM_BINARY" "$BIN_DIR"
cp "$SCRIPT_PATH/$PROGRAM_BINARY" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy $PROGRAM_BINARY to [$BIN_DIR]. Make sure to run install script in the directory containing all other files."
QuickLogger "Also make sure you have permissions to write to [$BIN_DIR]."
@@ -168,8 +245,8 @@ function CopyProgram {
QuickLogger "Copied $PROGRAM_BINARY to [$BIN_DIR]."
fi
if [ -f "./$PROGRAM_BATCH" ]; then
cp "./$PROGRAM_BATCH" "$BIN_DIR"
if [ -f "$SCRIPT_PATH/$PROGRAM_BATCH" ]; then
cp "$SCRIPT_PATH/$PROGRAM_BATCH" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy $PROGRAM_BATCH to [$BIN_DIR]."
else
@@ -178,13 +255,13 @@ function CopyProgram {
fi
fi
if [ -f "./ssh_filter.sh" ]; then
cp "./ssh_filter.sh" "$BIN_DIR"
if [ -f "$SCRIPT_PATH/ssh_filter.sh" ]; then
cp "$SCRIPT_PATH/ssh_filter.sh" "$BIN_DIR"
if [ $? != 0 ]; then
QuickLogger "Cannot copy ssh_filter.sh to [$BIN_DIR]."
else
chmod 755 "$BIN_DIR/ssh_filter.sh"
if ([ "$USER" != "" ] && [ "$GROUP" != "" ]); then
if ([ "$USER" != "" ] && [ "$GROUP" != "" ] && [ "$FAKEROOT" == "" ]); then
chown $USER:$GROUP "$BIN_DIR/ssh_filter.sh"
fi
QuickLogger "Copied ssh_filter.sh to [$BIN_DIR]."
@@ -194,18 +271,18 @@ function CopyProgram {
function CopyServiceFiles {
# OSYNC SPECIFIC
if ([ "$init" == "systemd" ] && [ -f "./$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "./$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM" && cp "./$OSYNC_SERVICE_FILE_SYSTEMD_USER" "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_SYSTEM"
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM" && cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_SYSTEMD_USER" "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_SYSTEM"
if [ $? != 0 ]; then
QuickLogger "Cannot copy the systemd file to [$SERVICE_DIR_SYSTEMD_SYSTEM] or [$SERVICE_DIR_SYSTEMD_USER]."
else
QuickLogger "Created osync-srv service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]."
QuickLogger "Can be activated with [systemctl start osync-srv@instance.conf] where instance.conf is the name of the config file in /etc/osync."
QuickLogger "Can be activated with [systemctl start osync-srv@instance.conf] where instance.conf is the name of the config file in $CONF_DIR."
QuickLogger "Can be enabled on boot with [systemctl enable osync-srv@instance.conf]."
QuickLogger "In userland, active with [systemctl --user start osync-srv@instance.conf]."
fi
elif ([ "$init" == "initV" ] && [ -f "./$OSYNC_SERVICE_FILE_INIT" ]); then
cp "./$OSYNC_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_INIT" ]); then
cp "$SCRIPT_PATH/$OSYNC_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
if [ $? != 0 ]; then
QuickLogger "Cannot copy osync-srv to [$SERVICE_DIR_INIT]."
else
@@ -217,17 +294,17 @@ function CopyServiceFiles {
fi
# PMOCR SPECIFIC
if ([ "$init" == "systemd" ] && [ -f "./$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "./$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM"
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
cp "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_SYSTEM"
if [ $? != 0 ]; then
QuickLogger "Cannot copy the systemd file to [$SERVICE_DIR_SYSTEMD_SYSTEM] or [$SERVICE_DIR_SYSTEMD_USER]."
else
QuickLogger "Created pmocr-srv service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]."
QuickLogger "Can be activated with [systemctl start pmocr-srv] after configuring file options in [$BIN_DIR/$PROGRAM]."
QuickLogger "Can be enabled on boot with [systemctl enable pmocr-srv]."
QuickLogger "Can be activated with [systemctl start pmocr-srv@default.conf] where default.conf is the name of the config file in $CONF_DIR."
QuickLogger "Can be enabled on boot with [systemctl enable pmocr-srv@default.conf]."
fi
elif ([ "$init" == "initV" ] && [ -f "./$PMOCR_SERVICE_FILE_INIT" ]); then
cp "./$PMOCR_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_INIT" ]); then
cp "$SCRIPT_PATH/$PMOCR_SERVICE_FILE_INIT" "$SERVICE_DIR_INIT"
if [ $? != 0 ]; then
QuickLogger "Cannot copy pmoct-srv to [$SERVICE_DIR_INIT]."
else
@@ -266,13 +343,13 @@ function Usage {
exit 127
}
_SILENT=0
_LOGGER_SILENT=false
_STATS=1
for i in "$@"
do
case $i in
--silent)
_SILENT=1
_LOGGER_SILENT=true
;;
--no-stats)
_STATS=0
@@ -282,7 +359,12 @@ do
esac
done
SetOSSettings
if [ "$FAKEROOT" != "" ]; then
mkdir -p "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_DIR_SYSTEMD_USER" "$BIN_DIR"
fi
GetLocalOS
SetLocalOSSettings
CreateConfDir
CopyExampleFiles
CopyProgram
@@ -293,7 +375,7 @@ STATS_LINK="http://instcount.netpower.fr?program=$PROGRAM&version=$PROGRAM_VERSI
QuickLogger "$PROGRAM installed. Use with $BIN_DIR/$PROGRAM"
if [ $_STATS -eq 1 ]; then
if [ $_SILENT -eq 1 ]; then
if [ $_LOGGER_SILENT == true ]; then
Statistics
else
QuickLogger "In order to make install statistics, the script would like to connect to $STATS_LINK"

View File

@@ -3,19 +3,21 @@ SUBPROGRAM=obackup
PROGRAM="$SUBPROGRAM-batch" # Batch program to run osync / obackup instances sequentially and rerun failed ones
AUTHOR="(L) 2013-2016 by Orsiris de Jong"
CONTACT="http://www.netpower.fr - ozy@netpower.fr"
PROGRAM_BUILD=2016052501
PROGRAM_BUILD=2016120401
## Runs an osync /obackup instance for every conf file found
## If an instance fails, run it again if time permits
## Configuration file path. The path where all the osync / obackup conf files are, usually /etc/osync or /etc/obackup
CONF_FILE_PATH=/etc/$SUBPROGRAM
if ! type "$BASH" > /dev/null; then
echo "Please run this script only with bash shell. Tested on bash >= 3.2"
exit 127
fi
## If maximum execution time is not reached, failed instances will be rerun. Max exec time is in seconds. Example is set to 10 hours.
MAX_EXECUTION_TIME=36000
## Specifies the number of reruns an instance may get
MAX_RERUNS=3
## Specifies the number of total runs an instance may get
MAX_RUNS=3
## Log file path
if [ -w /var/log ]; then
@@ -63,54 +65,63 @@ function CheckEnvironment {
then
SUBPROGRAM_EXECUTABLE=/usr/local/bin/$SUBPROGRAM.sh
else
Logger "Could not find $SUBPROGRAM.sh" "CRITICAL"
Logger "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" "CRITICAL"
( >&2 echo "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" )
exit 1
fi
else
SUBPROGRAM_EXECUTABLE=$(type -p $SUBPROGRAM.sh)
fi
## Check for CONF_FILE_PATH
if [ ! -d "$CONF_FILE_PATH" ]; then
Logger "Cannot find conf file path $CONF_FILE_PATH" "CRITICAL"
if [ "$CONF_FILE_PATH" == "" ]; then
Usage
fi
}
function Batch {
## Get list of .conf files
for i in $CONF_FILE_PATH/*.conf
do
if [ "$RUN" == "" ]; then
RUN="$i"
else
RUN=$RUN" $i"
fi
done
local runs=1 # Number of batch runs
local runList # Actual conf file list to run
local runAgainList # List of failed conf files to run again
RERUNS=0
while ([ $MAX_EXECUTION_TIME -gt $SECONDS ] || [ $MAX_EXECUTION_TIME -eq 0 ]) && [ "$RUN" != "" ] && [ $MAX_RERUNS -gt $RERUNS ]
do
Logger "$SUBPROGRAM instances will be run for: $RUN" "NOTICE"
for i in $RUN
do
$SUBPROGRAM_EXECUTABLE "$i" $opts &
local confFile
local result
local i
# Using -e because find will accept directories or files
if [ ! -e "$CONF_FILE_PATH" ]; then
Logger "Cannot find conf file path [$CONF_FILE_PATH]." "CRITICAL"
Usage
else
# Ugly hack to read files into an array while preserving special characters
runList=()
while IFS= read -d $'\0' -r file; do runList+=("$file"); done < <(find "$CONF_FILE_PATH" -maxdepth 1 -iname "*.conf" -print0)
while ([ $MAX_EXECUTION_TIME -gt $SECONDS ] || [ $MAX_EXECUTION_TIME -eq 0 ]) && [ "${#runList[@]}" -gt 0 ] && [ $runs -le $MAX_RUNS ]; do
runAgainList=()
Logger "Sequential run n°$runs of $SUBPROGRAM instances for:" "NOTICE"
for confFile in "${runList[@]}"; do
Logger "$(basename $confFile)" "NOTICE"
done
for confFile in "${runList[@]}"; do
$SUBPROGRAM_EXECUTABLE "$confFile" --silent $opts &
wait $!
if [ $? != 0 ]; then
Logger "Run instance $(basename $i) failed" "ERROR"
if [ "$RUN_AGAIN" == "" ]; then
RUN_AGAIN="$i"
else
RUN_AGAIN=$RUN_AGAIN" $i"
result=$?
if [ $result != 0 ]; then
if [ $result == 1 ] || [ $result == 128 ]; then # Do not handle exit code 128 because it is already handled here
Logger "Instance $(basename $confFile) failed with exit code [$result]." "ERROR"
runAgainList+=("$confFile")
elif [ $result == 2 ]; then
Logger "Instance $(basename $confFile) finished with warnings." "WARN"
fi
else
Logger "Run instance $(basename $i) succeed." "NOTICE"
Logger "Instance $(basename $confFile) succeed." "NOTICE"
fi
done
RUN="$RUN_AGAIN"
RUN_AGAIN=""
RERUNS=$(($RERUNS + 1))
runList=("${runAgainList[@]}")
runs=$(($runs + 1))
done
fi
}
function Usage {
@@ -119,16 +130,18 @@ function Usage {
echo $CONTACT
echo ""
echo "Batch script to sequentially run osync or obackup instances and rerun failed ones."
echo "Usage: $SUBPROGRAM-batch.sh [OPTIONS]"
echo "Usage: $PROGRAM.sh [OPTIONS] [$SUBPROGRAM OPTIONS]"
echo ""
echo "[OPTIONS]"
echo "--path=/path/to/conf Path to osync / obackup conf files, defaults to /etc/osync or /etc/obackup"
echo "--max-reruns=X Number of runs max for failed instances, (defaults to 3)"
echo "--max-exec-time=X Retry failed instances only if max execution time not reached (defaults to 36000 seconds). Set to 0 to bypass execution time check."
echo "--no-maxtime Run osync / obackup without honoring conf file defined timeouts"
echo "--dry Will run osync / obackup without actually doing anything; just testing"
echo "--silent Will run osync / obackup without any output to stdout, used for cron jobs"
echo "--verbose Increases output"
echo "--max-runs=X Number of max runs per instance, (defaults to 3)"
echo "--max-exec-time=X Retry failed instances only if max execution time not reached (defaults to 36000 seconds). Set to 0 to bypass execution time check"
echo "[$SUBPROGRAM OPTIONS]"
echo "Specify whatever options $PROGRAM accepts. Example"
echo "$PROGRAM.sh --path=/etc/$SUBPROGRAM --no-maxtime"
echo ""
echo "No output will be written to stdout/stderr."
echo "Verify log file in [$LOG_FILE]."
exit 128
}
@@ -136,23 +149,11 @@ opts=""
for i in "$@"
do
case $i in
--silent)
opts=$opts" --silent"
;;
--dry)
opts=$opts" --dry"
;;
--verbose)
opts=$opts" --verbose"
;;
--no-maxtime)
opts=$opts" --no-maxtime"
;;
--path=*)
CONF_FILE_PATH=${i##*=}
;;
--max-reruns=*)
MAX_RERUNS=${i##*=}
--max-runs=*)
MAX_RUNS=${i##*=}
;;
--max-exec-time=*)
MAX_EXECUTION_TIME=${i##*=}
@@ -161,8 +162,7 @@ do
Usage
;;
*)
Logger "Unknown param '$i'" "CRITICAL"
Usage
opts="$opts$i "
;;
esac
done
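# Hypothetical invocation: --path and --max-runs are consumed by the batch script,
# while --dry is unknown here, so it is appended to $opts and forwarded to every
# obackup.sh instance started by Batch.
#   ./obackup-batch.sh --path=/etc/obackup --max-runs=2 --dry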

obackup.sh (3702): file diff suppressed because it is too large.

View File

@@ -1,19 +1,195 @@
#!/usr/bin/env bash
PROGRAM="obackup config file upgrade script"
PROGRAM="obackup.upgrade"
SUBPROGRAM="obackup"
AUTHOR="(C) 2015 by Orsiris \"Ozy\" de Jong"
AUTHOR="(C) 2016 by Orsiris de Jong"
CONTACT="http://www.netpower.fr/obacup - ozy@netpower.fr"
OLD_PROGRAM_VERSION="v1.x"
NEW_PROGRAM_VERSION="v2.x"
PROGRAM_BUILD=2016041201
NEW_PROGRAM_VERSION="v2.1x"
CONFIG_FILE_VERSION=2017010201
PROGRAM_BUILD=2016113001
## type -p does not work on platforms other than linux (bash). If it does not work, always as$
if ! type "$BASH" > /dev/null; then
echo "Please run this script only with bash shell. Tested on bash >= 3.2"
exit 127
fi
# Defines all keywords / value sets in obackup configuration files
# bash does not support two dimensional arrays, so we declare two arrays:
# ${KEYWORDS[index]}=${VALUES[index]}
KEYWORDS=(
INSTANCE_ID
LOGFILE
SQL_BACKUP
FILE_BACKUP
BACKUP_TYPE
SQL_STORAGE
FILE_STORAGE
ENCRYPTION
CRYPT_STORAGE
GPG_RECIPIENT
PARALLEL_ENCRYPTION_PROCESSES
CREATE_DIRS
KEEP_ABSOLUTE_PATHS
BACKUP_SIZE_MINIMUM
GET_BACKUP_SIZE
SQL_WARN_MIN_SPACE
FILE_WARN_MIN_SPACE
REMOTE_SYSTEM_URI
SSH_RSA_PRIVATE_KEY
SSH_PASSWORD_FILE
SSH_COMPRESSION
SSH_IGNORE_KNOWN_HOSTS
RSYNC_REMOTE_PATH
REMOTE_HOST_PING
REMOTE_3RD_PARTY_HOSTS
SUDO_EXEC
SQL_USER
DATABASES_ALL
DATABASES_ALL_EXCLUDE_LIST
DATABASES_LIST
SOFT_MAX_EXEC_TIME_DB_TASK
HARD_MAX_EXEC_TIME_DB_TASK
MYSQLDUMP_OPTIONS
COMPRESSION_LEVEL
DIRECTORY_LIST
RECURSIVE_DIRECTORY_LIST
RECURSIVE_EXCLUDE_LIST
RSYNC_PATTERN_FIRST
RSYNC_INCLUDE_PATTERN
RSYNC_EXCLUDE_PATTERN
RSYNC_INCLUDE_FROM
RSYNC_EXCLUDE_FROM
PATH_SEPARATOR_CHAR
RSYNC_OPTIONAL_ARGS
PRESERVE_PERMISSIONS
PRESERVE_OWNER
PRESERVE_GROUP
PRESERVE_EXECUTABILITY
PRESERVE_ACL
PRESERVE_XATTR
COPY_SYMLINKS
KEEP_DIRLINKS
PRESERVE_HARDLINKS
RSYNC_COMPRESS
SOFT_MAX_EXEC_TIME_FILE_TASK
HARD_MAX_EXEC_TIME_FILE_TASK
PARTIAL
DELETE_VANISHED_FILES
DELTA_COPIES
BANDWIDTH
RSYNC_EXECUTABLE
DESTINATION_MAILS
SENDER_MAIL
SMTP_SERVER
SMTP_PORT
SMTP_ENCRYPTION
SMTP_USER
SMTP_PASSWORD
SOFT_MAX_EXEC_TIME_TOTAL
HARD_MAX_EXEC_TIME_TOTAL
KEEP_LOGGING
ROTATE_SQL_BACKUPS
ROTATE_SQL_COPIES
ROTATE_FILE_BACKUPS
ROTATE_FILE_COPIES
LOCAL_RUN_BEFORE_CMD
LOCAL_RUN_AFTER_CMD
REMOTE_RUN_BEFORE_CMD
REMOTE_RUN_AFTER_CMD
MAX_EXEC_TIME_PER_CMD_BEFORE
MAX_EXEC_TIME_PER_CMD_AFTER
STOP_ON_CMD_ERROR
RUN_AFTER_CMD_ON_ERROR
)
VALUES=(
test-backup
''
yes
yes
local
/home/storage/sql
/home/storage/files
no
/home/storage/crypt
'Your Name used with GPG signature'
''
yes
yes
1024
yes
1048576
1048576
ssh://backupuser@remote.system.tld:22/
${HOME}/.ssh/id_rsa
''
yes
no
''
yes
'www.kernel.org www.google.com'
no
root
yes
test
''
3600
7200
'--opt --single-transaction'
3
/some/path
/home
/home/backupuser\;/host/lost+found
include
''
''
''
''
\;
''
yes
yes
yes
yes
no
no
yes
yes
no
no
3600
7200
no
no
yes
0
rsync
infrastructure@example.com
sender@example.com
smtp.isp.tld
25
none
''
''
30000
36000
1801
no
7
no
7
''
''
''
''
0
0
no
no
)
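# Illustration of the index pairing described above (bash arrays are zero-based):
#   echo "${KEYWORDS[0]}=${VALUES[0]}"   # INSTANCE_ID=test-backup
#   echo "${KEYWORDS[2]}=${VALUES[2]}"   # SQL_BACKUP=yes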
function Usage {
echo "$PROGRAM $PROGRAM_BUILD"
echo $AUTHOR
@@ -42,10 +218,10 @@ function LoadConfigFile {
fi
}
function RewriteConfigFiles {
function RewriteOldConfigFiles {
local config_file="${1}"
if ((! grep "BACKUP_ID=" $config_file > /dev/null) && ( ! grep "INSTANCE_ID=" $config_file > /dev/null)); then
if ! grep "BACKUP_ID=" $config_file > /dev/null && ! grep "INSTANCE_ID=" $config_file > /dev/null; then
echo "File [$config_file] does not seem to be a obackup config file."
exit 1
fi
@@ -64,9 +240,7 @@ function RewriteConfigFiles {
sed -i'.tmp' 's/^BACKUP_FILES=/FILE_BACKUP=/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_SQL_STORAGE=/SQL_STORAGE=/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_FILE_STORAGE=/FILE_STORAGE=/g' "$config_file"
if ! grep "^ENCRYPTION=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^FILE_STORAGE=*/a\'$'\n''ENCRYPTION=no\'$'\n''' "$config_file"
fi
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=no/GET_BACKUP_SIZE=yes/g' "$config_file"
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=yes/GET_BACKUP_SIZE=no/g' "$config_file"
sed -i'.tmp' 's/^LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=/KEEP_ABSOLUTE_PATHS=/g' "$config_file"
@@ -104,9 +278,9 @@ function RewriteConfigFiles {
REMOTE_SYSTEM_URI="ssh://$REMOTE_USER@$REMOTE_HOST:$REMOTE_PORT/"
sed -i'.tmp' 's#^REMOTE_BACKUP=yes#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file"
sed -i'.tmp' '/^REMOTE_USER=*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_HOST=*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_PORT=*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_USER==*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_HOST==*/d' "$config_file"
sed -i'.tmp' '/^REMOTE_PORT==*/d' "$config_file"
sed -i'.tmp' '/^INSTANCE_ID=*/a\'$'\n''BACKUP_TYPE=pull\'$'\n''' "$config_file"
else
@@ -114,79 +288,36 @@ function RewriteConfigFiles {
sed -i'.tmp' '/^INSTANCE_ID=*/a\'$'\n''BACKUP_TYPE=local\'$'\n''' "$config_file"
fi
fi
sed -i'.tmp' 's/^REMOTE_3RD_PARTY_HOST=/REMOTE_3RD_PARTY_HOSTS=/g' "$config_file"
}
# Add new config values from v1.1 if they don't exist
if ! grep "^ENCRYPTION=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^FILE_STORAGE=*/a\'$'\n''ENCRYPTION=no\'$'\n''' "$config_file"
fi
function AddMissingConfigOptions {
local config_file="${1}"
local counter=0
if ! grep "^CREATE_DIRS=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^ENCRYPTION=*/a\'$'\n''CREATE_DIRS=yes\'$'\n''' "$config_file"
while [ $counter -lt ${#KEYWORDS[@]} ]; do
if ! grep "^${KEYWORDS[$counter]}=" > /dev/null "$config_file"; then
echo "${KEYWORDS[$counter]} not found"
if [ $counter -gt 0 ]; then
sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
if [ $? -ne 0 ]; then
echo "Cannot add missing ${[KEYWORDS[$counter]}."
exit 1
fi
else
sed -i'.tmp' '/onfig file rev*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
fi
echo "Added missing ${KEYWORDS[$counter]} config option with default option [${VALUES[$counter]}]"
fi
counter=$((counter+1))
done
}
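# Hedged example of the insertion behaviour, using the defaults listed above: if an
# old config file lacks PARTIAL, the loop appends it right after the line starting
# with the previous keyword, HARD_MAX_EXEC_TIME_FILE_TASK:
#   HARD_MAX_EXEC_TIME_FILE_TASK=7200
#   PARTIAL="no"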
if ! grep "^GET_BACKUP_SIZE=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^BACKUP_SIZE_MINIMUM=*/a\'$'\n''GET_BACKUP_SIZE=yes\'$'\n''' "$config_file"
fi
function UpdateConfigHeader {
local config_file="${1}"
if ! grep "^RSYNC_REMOTE_PATH=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^SSH_COMPRESSION=*/a\'$'\n''RSYNC_REMOTE_PATH=\'$'\n''' "$config_file"
fi
if ! grep "^SSH_IGNORE_KNOWN_HOSTS=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^SSH_COMPRESSION=*/a\'$'\n''SSH_IGNORE_KNOWN_HOSTS=no\'$'\n''' "$config_file"
fi
if ! grep "^REMOTE_HOST_PING=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^RSYNC_REMOTE_PATH=*/a\'$'\n''REMOTE_HOST_PING=yes\'$'\n''' "$config_file"
fi
if ! grep "^COPY_SYMLINKS=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^PRESERVE_XATTR=*/a\'$'\n''COPY_SYMLINKS=yes\'$'\n''' "$config_file"
fi
if ! grep "^KEEP_DIRLINKS=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^COPY_SYMLINKS=*/a\'$'\n''KEEP_DIRLINKS=yes\'$'\n''' "$config_file"
fi
if ! grep "^PRESERVE_HARDLINKS=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^KEEP_DIRLINKS=*/a\'$'\n''PRESERVE_HARDLINKS=no\'$'\n''' "$config_file"
fi
if ! grep "^RSYNC_PATTERN_FIRST=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^LOGFILE=*/a\'$'\n''RSYNC_PATTERN_FIRST=include\'$'\n''' "$config_file"
fi
if ! grep "^RSYNC_INCLUDE_PATTERN=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^RSYNC_EXCLUDE_PATTERN=*/a\'$'\n''RSYNC_INCLUDE_PATTERN=""\'$'\n''' "$config_file"
fi
if ! grep "^RSYNC_INCLUDE_FROM=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^RSYNC_EXCLUDE_FROM=*/a\'$'\n''RSYNC_INCLUDE_FROM=""\'$'\n''' "$config_file"
fi
if ! grep "^PARTIAL=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^HARD_MAX_EXEC_TIME_FILE_TASK==*/a\'$'\n''PARTIAL=no\'$'\n''' "$config_file"
fi
if ! grep "^DELETE_VANISHED_FILES=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^PARTIAL=*/a\'$'\n''DELETE_VANISHED_FILES=no\'$'\n''' "$config_file"
fi
if ! grep "^DELTA_COPIES=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^PARTIAL=*/a\'$'\n''DELTA_COPIES=yes\'$'\n''' "$config_file"
fi
if ! grep "^BANDWIDTH=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^DELTA_COPIES=*/a\'$'\n''BANDWIDTH=0\'$'\n''' "$config_file"
fi
if ! grep "^STOP_ON_CMD_ERROR=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^MAX_EXEC_TIME_PER_CMD_AFTER=*/a\'$'\n''STOP_ON_CMD_ERROR=no\'$'\n''' "$config_file"
fi
if ! grep "^RUN_AFTER_CMD_ON_ERROR=" "$config_file" > /dev/null; then
sed -i'.tmp' '/^STOP_ON_CMD_ERROR=*/a\'$'\n''RUN_AFTER_CMD_ON_ERROR=no\'$'\n''' "$config_file"
fi
# "onfig file rev" to deal with earlier variants of the file
sed -i'.tmp' 's/.*onfig file rev.*/##### '$SUBPROGRAM' config file rev '$CONFIG_FILE_VERSION' '$NEW_PROGRAM_VERSION'/' "$config_file"
rm -f "$config_file.tmp"
}
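# Hedged illustration: any header line containing "onfig file rev" (covering both
# "Config file rev" and "config file rev" variants) is rewritten in place, so a
# hypothetical v1.x header line becomes:
#   ##### obackup config file rev 2017010201 v2.1x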
@@ -196,7 +327,9 @@ if [ "$1" != "" ] && [ -f "$1" ] && [ -w "$1" ]; then
# Make sure there is no ending slash
CONF_FILE="${CONF_FILE%/}"
LoadConfigFile "$CONF_FILE"
RewriteConfigFiles "$CONF_FILE"
RewriteOldConfigFiles "$CONF_FILE"
AddMissingConfigOptions "$CONF_FILE"
UpdateConfigHeader "$CONF_FILE"
else
Usage
fi