Mirror of https://github.com/deajan/obackup.git (synced 2025-05-10 20:23:41 +02:00)

Compare commits (659 commits)
Author | SHA1 | Date
---|---|---
[659 commits listed in this comparison, from 8ccf304a20 through 197f3036d1; only the SHA1 column survived the mirror export, the author, date and commit message columns are empty.]
.travis.yml (new normal file, 16 lines)
@ -0,0 +1,16 @@

# Necessary evil: if 'bash' is selected as language, travis will try to install ruby and fails
language: php

sudo: required

services:
- mysql

os:
linux

before_script:
mysql -e 'CREATE DATABASE travistest;'

script:
TRAVIS_RUN=true dev/tests/run_tests.sh
CHANGELOG.md (187 lines)
@ -1,26 +1,163 @@
|
||||
SHORT FUTURE IMPROVEMENTS
|
||||
-------------------------
|
||||
|
||||
- Rewrite rsync exclude patterns using \"pattern\" instead of escaped chars
|
||||
- Clean most of recursive task creation code
|
||||
|
||||
KNOWN ISSUES
|
||||
------------
|
||||
|
||||
- Backup size check does not honor rsync exclude patterns
|
||||
- Encryption does not honor rsync exclude patterns
|
||||
- Bandwidth parameter is ignored for SQL backups
|
||||
- Missing symlink support when run from MSYS environment
|
||||
|
||||
UNDER WORK
|
||||
----------
|
||||
|
||||
- Commands like cp should have their stderr redirected to log file
|
||||
- Mysqldump must be checked so it does not report success when a table is damaged (also check for the event table error)
|
||||
- Mysqldump commands error msg must be logged
|
||||
|
||||
|
||||
CHANGELOG
|
||||
---------
|
||||
|
||||
dd Mmm YYYY: obackup v2.1 RC2 released
|
||||
--------------------------------------
|
||||
|
||||
- Added a default required config file revision
|
||||
- ! Update script updated accordingly
|
||||
- Updated ofunctions to use booleans instead of yes/no syntax which still works
|
||||
- Fixed verbose rsync output not working
|
||||
- Fixed a potential bash buffer overflow when very large file lists are logged
|
||||
|
||||
03 Jan 2019: obackup v2.1 RC1 released
|
||||
--------------------------------------
|
||||
|
||||
- File backup rotation will always make copies instead of moving files, in order to let rsync do deltas even in local backup scheme
|
||||
- Fixed non recursive backups being recursive (bug introduced in a8b6bcb)
|
||||
- Fixed multiple trailing slashes when backing-up '/'
|
||||
- Updated ofunctions
|
||||
- Upgraded shunit2 test framework to v2.1.8pre (git commit 07bb329)
|
||||
- Minor fixes
|
||||
|
||||
20 Jun 2017: obackup v2.1 beta3 released
|
||||
----------------------------------------
|
||||
|
||||
- Fixed regression where some commandline arguments weren't honored anymore since 2.1 beta1 (--delete, --stats, --dontgetsize)
|
||||
- Fixed commandline arguments not being checked against a valid list
|
||||
|
||||
14 Mar 2017: obackup v2.1 beta2 released
|
||||
----------------------------------------
|
||||
|
||||
- Fixed remote commands being run on local runs and obviously failing
|
||||
- Uninstall leaves ssh_filter if needed by other programs
|
||||
- Logger now obfuscates _REMOTE_TOKEN
|
||||
- Improved sudo privilege run
|
||||
- Brand new ssh filter from osync project
|
||||
- Better installer with --remove option from osync project
|
||||
- Updated ofunctions from osync project
|
||||
- Fixes UTF-8 escaped characters in log files due to LC_ALL=C
|
||||
- Optional MAIL_BODY_CHARSET so destination mails aren't sent as UTF-8 anymore depending on systems
|
||||
- Minor fixes
|
||||
|
||||
04 Jan 2017: obackup v2.1 beta1 released
|
||||
----------------------------------------
|
||||
|
||||
- Fixed wrong file size fetched remotely since v2.1 rewrite
|
||||
- Fixed missing databases in manual list failing to trigger an alert
|
||||
- Improved support for GPG ver >= 2.1
|
||||
- Added encryption / decryption parallel execution support
|
||||
- Improved compatibility for RotateCopies
|
||||
- Unit tests now run on CentOS 5,6
|
||||
- Added optional rsync arguments configuration value
|
||||
- Forced bash usage on remote connections in order to be FreeBSD 11 compatible
|
||||
- Spinner is less prone to move logging on screen
|
||||
- Fixed another random error involving warns and errors triggered by earlier runs with same PID flag files
|
||||
- Added more preflight checks (pgrep presence)
|
||||
- Added --no-prefix, --error-only and --summary switches
|
||||
- Updated installer from osync
|
||||
- Updated merge.sh script to handle includes
|
||||
- Improved remote logging
|
||||
- Simplified osync-batch runner (internally and for user)
|
||||
- Better filename handling
|
||||
- Easier to read log output
|
||||
- Always passes --silent to obackup
|
||||
- All options that do not belong to obackup-batch are automatically passed to obackup
|
||||
- Improved installer OS detection
|
||||
- Fixed upgrade script cannot update header on BSD / MacOS X
|
||||
- Fixed SendEmail function on MacOS X
|
||||
- Fixed MAX_SOFT_EXEC_TIME_PER_XX_TASK not enforced bug introduced with newer ofunctions from v2.1
|
||||
- PRESERVE_ACL and PRESERVE_XATTR are ignored when local or remote OS is MacOS or msys or Cygwin
|
||||
- Fixed PRESERVE_EXECUTABILITY being voluntarily omitted on MacOS X because of rsync syntax
|
||||
- merge.sh is now BSD and Mac compatible
|
||||
- Unit tests are now BSD and Mac compatible
|
||||
- Local runs should not check for remote connectivity
|
||||
- Fixed error alerts cannot be triggered from subprocesses
|
||||
- Fixed error flags
|
||||
- Faster remote OS detection
|
||||
- Added busybox (and Android Termux) support
|
||||
- More portable file size functions
|
||||
- More portable compression program commands
|
||||
- More paranoia checks
|
||||
- Added busybox sendmail support
|
||||
- Added tls and ssl support for sendmail
|
||||
- Added ssh password file support
|
||||
- Added unit tests
|
||||
- Added basic unit tests for all three operation modes
|
||||
- Added process management function tests
|
||||
- Added file rotation tests
|
||||
- Added upgrade script test
|
||||
- Added encryption tests
|
||||
- Added missing files / databases test
|
||||
- Added timed execution tests
|
||||
- Implemented backup encryption using GPG (see documentation for advantages and caveats)
|
||||
- Backup encrypted but still use differential engine :)
|
||||
- Database backup improvements
|
||||
- Added mysqldump options to config file
|
||||
- Improved unit tests
|
||||
- Added more preflight checks
|
||||
- Logs sent by mail are easier to read
|
||||
- Better subject (currently running or finished run)
|
||||
- Fixed bogus double log sent in alert mails
|
||||
- Only current run log is now sent
|
||||
- Alert sending is now triggered after last action
|
||||
- Made unix signals posix compliant
|
||||
- Improved upgrade script
|
||||
- Upgrade script now updates header
|
||||
- Can add any missing value now
|
||||
- Added encryption support
|
||||
- Fixed problem with spaces in directories to backup (again !)
|
||||
- Added options to ignore permissions, ownership and groups
|
||||
- Improved batch runner
|
||||
- Batch runner works for directories and direct paths
|
||||
- Fixed batch runner not rerunning obackup on warnings only
|
||||
- Code compliance
|
||||
- More clear semantic
|
||||
- Made keep logging value configurable and not mandatory
|
||||
- Fixed handling of processes in uninterruptible sleep state
|
||||
- Code cleanup
|
||||
- Refactored waiting functions
|
||||
- Fixed double RunAfterHook launch
|
||||
|
||||
06 Aug 2016: obackup v2.0 released
|
||||
----------------------------------
|
||||
|
||||
- Made logging begin before remote checks for sanity purposes
|
||||
- RunAfterCommands can get executed when trapquit
|
||||
- Improved process killing and process time control
|
||||
- Added optional statistics for installer
|
||||
- Added an option to ignore knownhosts for ssh connections (use with caution, this can lead to a security issue)
|
||||
- Improved mail fallback
|
||||
- More logging enhancements
|
||||
- Improved upgrade script
|
||||
- Revamped rsync patterns to allow include and exclude patterns
|
||||
- Better SQL and file backup task separation (rotate copies and warnings are defined for sql and/or file)
|
||||
- Added reverse backup, now backups can be local, pushed or pulled to or from a remote system
|
||||
- Better fallback for SendAlert even if disk full
|
||||
- Added an alert email sent on warnings while backup script is running
|
||||
- Way better logging of errors in _GetDirectoriesSizeX, _BackupDatabaseX, _CreateStorageDirectoriesX
|
||||
- Added bogus config file checks & environment checks
|
||||
- Full code refactoring to use local and remote code once
|
||||
- Fully merged codebase with osync
|
||||
- Added (much) more verbose debugging (and possibility to remove debug code to gain speed)
|
||||
- Replace child_pid by $? directly, add a better sub process killer in TrapQuit
|
||||
- Added some automatic checks in code, for _DEBUG mode (and _PARANOIA_DEBUG now)
|
||||
- Improved Logging
|
||||
- Updated obackup to be fully compliant with coding style
|
||||
- Fixed creation of bogus subdirectories in some cases
|
||||
- A long list of minor improvements and bug fixes
|
||||
|
||||
v0-1.x - Jan 2013 - Oct 2015
|
||||
----------------------------
|
||||
|
||||
- New function to kill child processes
|
||||
- Fixed no_maxtime not honored
|
||||
- Improved some logging, also added highlighting to stdout errors
|
||||
@ -79,7 +216,10 @@ CHANGELOG
|
||||
- Improved OS detection and added preliminary MacOS X support
|
||||
- Improved execution hook logs
|
||||
- Improved RunLocalCommand execution hook
|
||||
- 02 Nov. 2013: v1.84 RC3
|
||||
|
||||
02 Nov. 2013: obackup v1.84RC3 released
|
||||
---------------------------------------
|
||||
|
||||
- Updated documentation
|
||||
- Minor rewrites in recursive backup code
|
||||
- Added base directory files backup for recursive directories backup
|
||||
@ -106,7 +246,10 @@ CHANGELOG
|
||||
- Improved dryrun output
|
||||
- Improved remote connectivity detection
|
||||
- Fixed a typo in configuration file
|
||||
- 18 Aug. 2013: Now v1.84 RC2
|
||||
|
||||
18 Aug. 2013: obackup v1.84RC2 released
|
||||
---------------------------------------
|
||||
|
||||
- Added possibility to change default logfile
|
||||
- Simplified dryrun (removed dryrun function and merged it with main function)
|
||||
- Simplified Init function
|
||||
@ -118,14 +261,17 @@ CHANGELOG
|
||||
- Added --verbose switch (will add databases list, rsync commands, and file backup list)
|
||||
- Improved task execution checks and more code cleanup
|
||||
- Fixed CleanUp function if DEBUG=yes, also function is now launched from TrapQuit
|
||||
- 16 Jul. 2013: version tagged as v1.84 RC1
|
||||
|
||||
16 Jul. 2013: obackup v1.84RC1 released
|
||||
---------------------------------------
|
||||
|
||||
- Code cleanup
|
||||
- Uploaded first documentation
|
||||
- Fixed an issue with RotateBackups
|
||||
- Updated obackup to log failed ssh command results
|
||||
- Updated ssh command filter to log failed commands
|
||||
- Updated ssh command filter to accept personalized commands
|
||||
- 23 Jun. 2013 v 1.84 RC1 approaching
|
||||
- 23 Jun. 2013: v1.84 RC1 approaching
|
||||
- Added ssh commands filter, updated documentation
|
||||
- Rewrote local space check function
|
||||
- Added ability to run another executable than rsync (see documentation on sudo execution)
|
||||
@ -135,5 +281,8 @@ CHANGELOG
|
||||
- Updated command line argument --silent processing
|
||||
- Added remote before and after command execution hook
|
||||
- Added local before and after command execution hook
|
||||
- 14 Jun 2013
|
||||
|
||||
14 Jun 2013
|
||||
-----------
|
||||
|
||||
- Initial public release, fully functional
|
||||
|
CODING_STYLE.TXT (new normal file, 147 lines)
@ -0,0 +1,147 @@
|
||||
Coding style used for my bash projects (v2.1 Oct 2015)
|
||||
|
||||
++++++ Header
|
||||
|
||||
Always use the following header
|
||||
|
||||
----BEGIN HEADER
|
||||
#!/usr/bin/env bash
|
||||
|
||||
PROGRAM="program-name" # Long description
|
||||
AUTHOR="(L) 20XX-20YY by Orsiris \"Ozy\" de Jong"
|
||||
CONTACT="http://www.example.com me@example.com"
|
||||
PROGRAM_BUILD=YYYYMMDDVV
|
||||
|
||||
## Optional instructions
|
||||
----END HEADER
|
||||
|
||||
Using bind style versioning:
|
||||
YYYYMMDDVV (Year, Month, Day, Revision): Example: 2015012402 = 2nd revision of 24 Jan 2015
|
||||
|
||||
#!/usr/bin/env bash instead of #!/bin/bash
|
||||
|
||||
Change old scripts with
|
||||
for i in $(grep -r '#!/bin/bash' * |cut -f1 -d':'); do sed -i 's&#!/bin/bash&#!/usr/bin/env bash&g' $i; done
|
||||
|
||||
|
||||
type instead of type -p for bash test (other shells don't know -p)
|
||||
++++++ Indentation
|
||||
|
||||
Using tabs
|
||||
Transform old shell scripts using unexpand command
|
||||
|
||||
++++++ Comments
|
||||
|
||||
Some command # comment
|
||||
## Some comment on a new line
|
||||
################################################# Some separation
|
||||
|
||||
++++++ Work comments
|
||||
|
||||
Whenever there is some idea to postpone, use #TODO[-version]:[dev-name:] some remark
|
||||
A marker must be left where on the line a dev is working (when the work isn't finished). Marker is #WIP:dev-name: some remark
|
||||
dev-name is mandatory if more than one person is coding
|
||||
Example: #TODO-v2.1:deajan: need to do something
|
||||
|
||||
++++++ Variables
|
||||
|
||||
All local variables are lowercase, separated by _ (ex: low_wait)
|
||||
All global variables are full uppercase, separated by _ (ex: EXEC_TIME)
|
||||
All environment variables (verbose, silent, debug, etc) have a _ prefix and are full uppercase, separated by _ (ex: _PARANOIA_DEBUG). See the short example below.
|
||||
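A short illustrative sketch of the three conventions, reusing the example names above (the assigned values are arbitrary and only serve the illustration):

	low_wait=0                # local variable inside a function: lowercase, '_' separated
	EXEC_TIME=$SECONDS        # global variable: full uppercase
	_PARANOIA_DEBUG=yes       # environment variable: leading '_', full uppercase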
|
||||
++++++ Functions
|
||||
|
||||
Every word in a function begins with an uppercase (ex: SomeFunctionDoesThings)
|
||||
|
||||
Define functions this way. Use sed ':a;N;$!ba;s/\n{\n/ {\n/g' to adapt when opening bracket is on a new line.
|
||||
|
||||
function something {
|
||||
|
||||
}
|
||||
|
||||
If function has some arguments, use local variable names that are more readable than $1...$n. Explain via comments what those variables contain if needed.
|
||||
|
||||
function anotherthing {
|
||||
local var_name="${1}"
|
||||
local other_var_name="${2}" # This variable contains stuff
|
||||
}
|
||||
|
||||
Functions should always have return status
|
||||
function thirdthing {
|
||||
some_command
|
||||
return $?
|
||||
}
|
||||
|
||||
++++++ Sub functions
|
||||
|
||||
When a function is a subroutine of another function, it is called _SomethingAsSubFunction
|
||||
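A hedged sketch of the convention; the function names below are plausible examples only, not necessarily the real obackup functions:

	function BackupDatabase {
		_BackupDatabaseLocal
		return $?
	}

	function _BackupDatabaseLocal {
		echo "local flavor of the task"        # placeholder body for the illustration
		return $?
	}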
|
||||
++++++ Function argument check
|
||||
|
||||
Bash does not provide any checks against missing function arguments. Also, missing quotes can lead to an inconsistent number of arguments.
|
||||
Every function call will be checked by __CheckArguments which takes the number of arguments, $# (the real number of args given), the parent function name and the parent function's arguments.
|
||||
__CheckArguments will trigger a critical error if the number of arguments is incorrect. This will also prevent silent typo errors.
|
||||
Ex:
|
||||
|
||||
function Something {
|
||||
local some="${1}"
|
||||
local other="${2}"
|
||||
local args="${3}"
|
||||
__CheckArguments 3 $# $FUNCNAME "$*"
}
|
||||
|
||||
__CheckArguments will only trigger if script is called with DEBUG=yes
|
||||
Also, with PARANOIA_DEBUG=yes, __CheckArguments will recount all arguments given by "$*" and compare. This can mislead if arguments contain spaces.
|
||||
|
||||
++++++ If statements
|
||||
|
||||
If statements will be fully written (the word "if" must be used); "then" is written on the same line.
|
||||
(Use sed ':a;N;$!ba;s/]\n\t*then/]; then/g' to convert files to this format... Replace "],new line, zero or more tabs, then" by "; then")
|
||||
if [ something ]; then
|
||||
stuff
|
||||
else
|
||||
other stuff
|
||||
fi
|
||||
|
||||
++++++ Logging
|
||||
|
||||
A logging function is available with the following levels of logging (a short example call is shown after this list):
|
||||
|
||||
- DEBUG: Only log this when the DEBUG flag is set in the program. Any command forged for eval should be logged by this.
|
||||
- NOTICE: Standard messages
|
||||
- WARN: Requires attention
|
||||
- ERROR: Program produced an error but continues execution
|
||||
- CRITICAL: Program execution is halted
|
||||
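A minimal example of calling the logging function at two of these levels; the Logger name and the "message" "LEVEL" argument order match its use elsewhere in this repository, while the message texts are invented for the illustration:

	Logger "Starting database backup task." "NOTICE"
	Logger "Cannot create storage directory [$directory]." "CRITICAL"        # $directory is a hypothetical variable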
|
||||
++++++ Eval
|
||||
|
||||
Most commands should be logged to a tmp file.
|
||||
The basic way of doing is:
|
||||
|
||||
cmd='"something '$somevar'" > some_file 2>&1'
|
||||
eval $cmd &
|
||||
WaitForTaskCompletion $! 0 0 $FUNCNAME
|
||||
|
||||
Remote commands should exist as:
|
||||
|
||||
cmd=$SSH_CMD' "some; commands \"'$VARIABLE'\" some; other; commands" > some_file 2>&1'
|
||||
|
||||
++++++ File variables
|
||||
|
||||
All eval cmds should redirect their output to a file called "$RUNDIR/osync.$FUNCNAME.$SCRIPT_PID"
|
||||
Dots are used instead of '_' so the name parts are separated by a character that is forbidden in variable names, which makes them easy to detect.
|
||||
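A minimal sketch tying this to the eval pattern above, with the program name substituted into the file name pattern; BackupDatabase and $DATABASE are hypothetical names used only for the illustration:

	function BackupDatabase {
		cmd='mysqldump "'$DATABASE'" > "$RUNDIR/obackup.$FUNCNAME.$SCRIPT_PID" 2>&1'
		eval $cmd &
		WaitForTaskCompletion $! 0 0 $FUNCNAME
	}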
|
||||
++++++ Finding code errors
|
||||
|
||||
Use shellcheck.net now and then (ignore SC2086 in our case)
|
||||
|
||||
Use a low tech approach to find an uneven number of quotes per line
|
||||
|
||||
tr -cd "'\n" < my_bash_file.sh | awk 'length%2==1 {print NR, $0}'
|
||||
tr -cd "\"\n" < my_bash_file.sh | awk 'length%2==1 {print NR, $0}'
|
||||
|
||||
++++++ ofunctions
|
||||
|
||||
As obackup and osync share a lot of common functions, ofunctions.sh will host all shared code.
|
||||
Dev programs n_osync.sh and n_obackup.sh will source ofunctions.sh
|
||||
Release programs will still include ofunctions.sh in order to enhance ease of use.
|
||||
|
(license file)
@ -1,4 +1,4 @@
Copyright (c) 2013, Orsiris "Ozy" de Jong. ozy@netpower.fr
Copyright (c) 2013-2016, Orsiris "Ozy" de Jong. ozy@netpower.fr
All rights reserved.

Redistribution and use in source and binary forms, with or without
README.md (39 lines)
@ -1,12 +1,12 @@
|
||||
obackup
|
||||
=======
|
||||
# obackup [](https://travis-ci.org/deajan/obackup) [](https://opensource.org/licenses/BSD-3-Clause) [](https://github.com/deajan/obackup/releases/latest)
|
||||
|
||||
A small robust file & database backup script for local to local or remote to local backups via ssh.
|
||||
Works especially well for multiple virtualhost backups with 'backup divide task' functionality.
|
||||
A robust file & database backup script that works for local and remote push or pull backups via ssh.
|
||||
Designed to backup multiple subdirectories with a timeslot for each.
|
||||
Supports encryption while still using rsync to lower transferred data (see advantages and caveats below).
|
||||
|
||||
## About
|
||||
|
||||
OBackup is designed from the ground up to make the backup process as reliable as possible.
|
||||
obackup is designed to make the backup process as reliable as possible.
|
||||
It divides the whole backup process into tasks, allowing each task to execute for a certain amount of time.
|
||||
If a task doesn't finish in time, it's stopped and the next task in the list is processed.
|
||||
Before a task gets stopped, a first warning message is generated, telling that the task is taking too long.
|
||||
@ -14,22 +14,19 @@ Every action gets logged, and if a warning has been generated, a task gets stopp
|
||||
|
||||
Remote backups are initiated from the backup server instead of the production server, so hacked servers won't get ssh access to the backup server.
|
||||
|
||||
OBackup can enumerate and backup all MariaDB / MySQL databases present on a server.
|
||||
obackup can enumerate and backup all MariaDB / MySQL databases present on a server.
|
||||
It can also enumerate all subdirectories of a given path and process them as separate tasks (useful for multiple vhosts).
|
||||
It will do several checks before launching a backup like execution checks, dryruns, checking backup size and available local disk space.
|
||||
|
||||
Obackup can execute local and remote commands before and after backup execution,
|
||||
obackup can execute local and remote commands before and after backup execution,
|
||||
thus providing an easy way to handle snapshots (see https://github.com/deajan/zsnap for a zfs snapshot management script).
|
||||
It may also rotate backups for you.
|
||||
|
||||
As of today, obackup has been tested successfully on RHEL / CentOS 5, CentOS 6, Debian 6.0.7 and Linux Mint 14.
|
||||
Currently, Obackup also runs on FreeBSD and Windows MSYS environment, although it is not fully tested yet.
|
||||
|
||||
Feel free to drop me a mail for limited support in my free time.
|
||||
As of today, obackup has been tested successfully on RHEL / CentOS 5, 6 and 7, Debian 6 and 7, Linux Mint 14 and 17, FreeBSD 8.3 and 10.3.
|
||||
Currently, obackup also runs on MacOSX and Windows MSYS environment.
|
||||
|
||||
## Warning
|
||||
|
||||
Starting with Obackup 1.84RC4, the default behavior is modified.
|
||||
Obackup now follows symlinks and treats them as the referent files / dirs, following symlinks even outside the backup root, which IMHO is more secure in terms of backups.
|
||||
You may disable this behavior in the config file.
|
||||
|
||||
@ -38,10 +35,11 @@ You may disable this behavior in the config file.
|
||||
You can download the latest obackup script from the author's website.
|
||||
You may also clone the following git repository, which may have some more recent builds.
|
||||
|
||||
$ git clone git://github.com/deajan/obackup.git
|
||||
$ chmod +x ./obackup.sh
|
||||
$ git clone -b "v2.1-maint" git://github.com/deajan/obackup.git
|
||||
$ cd obackup
|
||||
$ ./install.sh
|
||||
|
||||
Obackup needs to run with bash shell, using any other shell will most probably fail.
|
||||
obackup needs to run with the bash shell; using any other shell will most probably fail.
|
||||
Once you have grabbed a copy, just edit the config file with your favorite text editor to setup your environment and you're ready to run.
|
||||
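For example, once installed, a run could look like the following, assuming you copied the provided host_backup.conf.example to /etc/obackup/host_backup.conf; the path and option shown are only an illustration:

    $ obackup.sh /etc/obackup/host_backup.conf --verbose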
Detailed documentation can be found on the author's site.
|
||||
You can run multiple instances of obackup scripts with different backup environments. Just create another configuration file,
|
||||
@ -72,10 +70,14 @@ All backup activity is logged to "/var/log/obackup_backupname.log" or current di
|
||||
|
||||
You may mix "--silent" and "--verbose" parameters to output verbose input only in the log files.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter rsync zombie processes and/or processes stuck in uninterruptible sleep state, you should force unmounting the network drives obackup is supposed to deal with (see the example below).
|
||||
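A force unmount of such a network mount could look like this (the mount point is purely illustrative):

    # umount -f /mnt/backup_share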
|
||||
## Final words
|
||||
|
||||
Backup tasks aren't always reliable: connectivity loss, insufficient disk space, hacked servers with tons of useless stuff to back up... Anything can happen.
|
||||
Obackup will send you a warning email for every issue it can handle.
|
||||
obackup will send you a warning email for every issue it can handle.
|
||||
Nevertheless, you should assure yourself that your backup tasks will get done the way you meant it. Also, a backup isn't valuable until you're sure
|
||||
you can successfully restore. Try to restore your backups to check whether everything is okay. Backups will keep file permissions and owners,
|
||||
but may lose ACLs if the destination file system won't handle them.
|
||||
@ -83,7 +85,4 @@ but may loose ACLs if destination file system won't handle them.
|
||||
## Author
|
||||
|
||||
Feel free to mail me for limited support in my free time :)
|
||||
Orsiris "Ozy" de Jong | ozy@netpower.fr
|
||||
|
||||
|
||||
|
||||
Orsiris de Jong | ozy@netpower.fr
|
||||
|
dev/bootstrap.sh (new executable file, 75 lines)
@ -0,0 +1,75 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
## dev pre-processor bootstrap rev 2019052001
|
||||
## Yeah !!! A really tech sounding name... In fact it's just include emulation in bash
|
||||
|
||||
function Usage {
|
||||
echo "$0 - Quick and dirty preprocessor for including ofunctions into programs"
|
||||
echo "Creates and executes $0.tmp.sh"
|
||||
echo "Usage:"
|
||||
echo ""
|
||||
echo "$0 --program=osync|obackup|pmocr [options to pass to program]"
|
||||
echo "Can also be run with BASHVERBOSE=yes environment variable in order to prefix program with bash -x"
|
||||
}
|
||||
|
||||
|
||||
if [ ! -f "./merge.sh" ]; then
|
||||
echo "Plrase run bootstrap.sh from osync/dev directory."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
bootstrapProgram=""
|
||||
opts=()
|
||||
outputFileName="$0"
|
||||
|
||||
for i in "${@}"; do
|
||||
case "$i" in
|
||||
--program=*)
|
||||
bootstrapProgram="${i##*=}"
|
||||
;;
|
||||
*)
|
||||
opts+=("$i")
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [ "$bootstrapProgram" == "" ]; then
|
||||
Usage
|
||||
exit 128
|
||||
else
|
||||
source "merge.sh"
|
||||
|
||||
__PREPROCESSOR_PROGRAM=$bootstrapProgram
|
||||
__PREPROCESSOR_PROGRAM_EXEC="n_$bootstrapProgram.sh"
|
||||
__PREPROCESSOR_Constants
|
||||
|
||||
if [ ! -f "$__PREPROCESSOR_PROGRAM_EXEC" ]; then
|
||||
echo "Cannot find file $__PREPROCESSOR_PROGRAM executable [n_$bootstrapProgram.sh]."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
cp "$__PREPROCESSOR_PROGRAM_EXEC" "$outputFileName.tmp.sh"
|
||||
if [ $? != 0 ]; then
|
||||
echo "Cannot copy original file [$__PREPROCESSOR_PROGRAM_EXEC] to [$outputFileName.tmp.sh]."
|
||||
exit 1
|
||||
fi
|
||||
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
|
||||
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "$outputFileName.tmp.sh"
|
||||
done
|
||||
chmod +x "$outputFileName.tmp.sh"
|
||||
if [ $? != 0 ]; then
|
||||
echo "Cannot make [$outputFileName] executable."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Termux fix
|
||||
if type termux-fix-shebang > /dev/null 2>&1; then
|
||||
termux-fix-shebang "$outputFileName.tmp.sh"
|
||||
fi
|
||||
|
||||
if [ "$BASHVERBOSE" == "yes" ]; then
|
||||
bash -x "$outputFileName.tmp.sh" "${opts[@]}"
|
||||
else
|
||||
"$outputFileName.tmp.sh" "${opts[@]}"
|
||||
fi
|
dev/common_batch.sh (new executable file, 157 lines)
@ -0,0 +1,157 @@
|
||||
#!/usr/bin/env bash
|
||||
SUBPROGRAM=[prgname]
|
||||
PROGRAM="$SUBPROGRAM-batch" # Batch program to run osync / obackup instances sequentially and rerun failed ones
|
||||
AUTHOR="(L) 2013-2020 by Orsiris de Jong"
|
||||
CONTACT="http://www.netpower.fr - ozy@netpower.fr"
|
||||
PROGRAM_BUILD=2020031502
|
||||
|
||||
## Runs an osync /obackup instance for every conf file found
|
||||
## If an instance fails, run it again if time permits
|
||||
|
||||
if ! type "$BASH" > /dev/null; then
|
||||
echo "Please run this script only with bash shell. Tested on bash >= 3.2"
|
||||
exit 127
|
||||
fi
|
||||
|
||||
## If maximum execution time is not reached, failed instances will be rerun. Max exec time is in seconds. Example is set to 10 hours.
|
||||
MAX_EXECUTION_TIME=36000
|
||||
|
||||
## Specifies the number of total runs an instance may get
|
||||
MAX_RUNS=3
|
||||
|
||||
## Log file path
|
||||
if [ -w /var/log ]; then
|
||||
LOG_FILE=/var/log/$SUBPROGRAM-batch.log
|
||||
else
|
||||
LOG_FILE=./$SUBPROGRAM-batch.log
|
||||
fi
|
||||
|
||||
## Default directory where to store temporary run files
|
||||
if [ -w /tmp ]; then
|
||||
RUN_DIR=/tmp
|
||||
elif [ -w /var/tmp ]; then
|
||||
RUN_DIR=/var/tmp
|
||||
else
|
||||
RUN_DIR=.
|
||||
fi
|
||||
# No need to edit under this line ##############################################################
|
||||
|
||||
include #### Logger SUBSET ####
|
||||
include #### CleanUp SUBSET ####
|
||||
include #### GenericTrapQuit SUBSET ####
|
||||
|
||||
function CheckEnvironment {
|
||||
## osync / obackup executable full path can be set here if it cannot be found on the system
|
||||
if ! type $SUBPROGRAM.sh > /dev/null 2>&1
|
||||
then
|
||||
if [ -f /usr/local/bin/$SUBPROGRAM.sh ]
|
||||
then
|
||||
SUBPROGRAM_EXECUTABLE=/usr/local/bin/$SUBPROGRAM.sh
|
||||
else
|
||||
Logger "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" "CRITICAL"
|
||||
( >&2 echo "Could not find [/usr/local/bin/$SUBPROGRAM.sh]" )
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
SUBPROGRAM_EXECUTABLE=$(type -p $SUBPROGRAM.sh)
|
||||
fi
|
||||
|
||||
if [ "$CONF_FILE_PATH" == "" ]; then
|
||||
Usage
|
||||
fi
|
||||
}
|
||||
|
||||
function Batch {
|
||||
local runs=1 # Number of batch runs
|
||||
local runList # Actual conf file list to run
|
||||
local runAgainList # List of failed conf files to run again
|
||||
|
||||
local confFile
|
||||
local result
|
||||
|
||||
local i
|
||||
|
||||
# Using -e because find will accept directories or files
|
||||
if [ ! -e "$CONF_FILE_PATH" ]; then
|
||||
Logger "Cannot find conf file path [$CONF_FILE_PATH]." "CRITICAL"
|
||||
Usage
|
||||
else
|
||||
# Ugly hack to read files into an array while preserving special characters
|
||||
runList=()
|
||||
while IFS= read -d $'\0' -r file; do runList+=("$file"); done < <(find "$CONF_FILE_PATH" -maxdepth 1 -iname "*.conf" -print0)
|
||||
|
||||
while ([ $MAX_EXECUTION_TIME -gt $SECONDS ] || [ $MAX_EXECUTION_TIME -eq 0 ]) && [ "${#runList[@]}" -gt 0 ] && [ $runs -le $MAX_RUNS ]; do
|
||||
runAgainList=()
|
||||
Logger "Sequential run n°$runs of $SUBPROGRAM instances for:" "NOTICE"
|
||||
for confFile in "${runList[@]}"; do
|
||||
Logger "$(basename $confFile)" "NOTICE"
|
||||
done
|
||||
for confFile in "${runList[@]}"; do
|
||||
$SUBPROGRAM_EXECUTABLE "$confFile" --silent $opts &
|
||||
wait $!
|
||||
result=$?
|
||||
if [ $result != 0 ]; then
|
||||
if [ $result == 1 ] || [ $result == 128 ]; then # Do not handle exit code 128 because it is already handled here
|
||||
Logger "Instance $(basename $confFile) failed with exit code [$result]." "ERROR"
|
||||
runAgainList+=("$confFile")
|
||||
elif [ $result == 2 ]; then
|
||||
Logger "Instance $(basename $confFile) finished with warnings." "WARN"
|
||||
fi
|
||||
else
|
||||
Logger "Instance $(basename $confFile) succeed." "NOTICE"
|
||||
fi
|
||||
done
|
||||
runList=("${runAgainList[@]}")
|
||||
runs=$((runs + 1))
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
function Usage {
|
||||
echo "$PROGRAM $PROGRAM_BUILD"
|
||||
echo "$AUTHOR"
|
||||
echo "$CONTACT"
|
||||
echo ""
|
||||
echo "Batch script to sequentially run osync or obackup instances and rerun failed ones."
|
||||
echo "Usage: $PROGRAM.sh [OPTIONS] [$SUBPROGRAM OPTIONS]"
|
||||
echo ""
|
||||
echo "[OPTIONS]"
|
||||
echo "--path=/path/to/conf Path to osync / obackup conf files, defaults to /etc/osync or /etc/obackup"
|
||||
echo "--max-runs=X Number of max runs per instance, (defaults to 3)"
|
||||
echo "--max-exec-time=X Retry failed instances only if max execution time not reached (defaults to 36000 seconds). Set to 0 to bypass execution time check"
|
||||
echo "[$SUBPROGRAM OPTIONS]"
|
||||
echo "Specify whatever options $PROGRAM accepts. Example"
|
||||
echo "$PROGRAM.sh --path=/etc/$SUBPROGRAM --no-maxtime"
|
||||
echo ""
|
||||
echo "No output will be written to stdout/stderr."
|
||||
echo "Verify log file in [$LOG_FILE]."
|
||||
exit 128
|
||||
}
|
||||
|
||||
trap GenericTrapQuit TERM EXIT HUP QUIT
|
||||
|
||||
opts=""
|
||||
for i in "$@"
|
||||
do
|
||||
case $i in
|
||||
--path=*)
|
||||
CONF_FILE_PATH=${i##*=}
|
||||
;;
|
||||
--max-runs=*)
|
||||
MAX_RUNS=${i##*=}
|
||||
;;
|
||||
--max-exec-time=*)
|
||||
MAX_EXECUTION_TIME=${i##*=}
|
||||
;;
|
||||
--help|-h|-?)
|
||||
Usage
|
||||
;;
|
||||
*)
|
||||
opts="$opts$i "
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
CheckEnvironment
|
||||
Logger "$(date) $SUBPROGRAM batch run" "NOTICE"
|
||||
Batch
|
dev/common_install.sh (new executable file, 459 lines)
@ -0,0 +1,459 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
## Installer script suitable for osync / obackup / pmocr
|
||||
|
||||
PROGRAM=[prgname]
|
||||
|
||||
PROGRAM_VERSION=$(grep "PROGRAM_VERSION=" $PROGRAM.sh)
|
||||
PROGRAM_VERSION=${PROGRAM_VERSION#*=}
|
||||
PROGRAM_BINARY=$PROGRAM".sh"
|
||||
PROGRAM_BATCH=$PROGRAM"-batch.sh"
|
||||
SSH_FILTER="ssh_filter.sh"
|
||||
|
||||
SCRIPT_BUILD=2020042901
|
||||
INSTANCE_ID="installer-$SCRIPT_BUILD"
|
||||
|
||||
## osync / obackup / pmocr / zsnap install script
|
||||
## Tested on RHEL / CentOS 6 & 7, Fedora 23, Debian 7 & 8, Mint 17 and FreeBSD 8, 10 and 11
|
||||
## Please adapt this to fit your distro needs
|
||||
|
||||
include #### OFUNCTIONS MICRO SUBSET ####
|
||||
|
||||
# Get current install.sh path from http://stackoverflow.com/a/246128/2635443
|
||||
SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
|
||||
|
||||
_LOGGER_SILENT=false
|
||||
_STATS=1
|
||||
ACTION="install"
|
||||
FAKEROOT=""
|
||||
|
||||
## Default log file
|
||||
if [ -w "$FAKEROOT/var/log" ]; then
|
||||
LOG_FILE="$FAKEROOT/var/log/$PROGRAM-install.log"
|
||||
elif ([ "$HOME" != "" ] && [ -w "$HOME" ]); then
|
||||
LOG_FILE="$HOME/$PROGRAM-install.log"
|
||||
else
|
||||
LOG_FILE="./$PROGRAM-install.log"
|
||||
fi
|
||||
|
||||
include #### UrlEncode SUBSET ####
|
||||
include #### GetLocalOS SUBSET ####
|
||||
include #### GetConfFileValue SUBSET ####
|
||||
include #### CleanUp SUBSET ####
|
||||
include #### GenericTrapQuit SUBSET ####
|
||||
|
||||
function SetLocalOSSettings {
|
||||
USER=root
|
||||
DO_INIT=true
|
||||
|
||||
# LOCAL_OS and LOCAL_OS_FULL are global variables set at GetLocalOS
|
||||
|
||||
case $LOCAL_OS in
|
||||
*"BSD"*)
|
||||
GROUP=wheel
|
||||
;;
|
||||
*"MacOSX"*)
|
||||
GROUP=admin
|
||||
DO_INIT=false
|
||||
;;
|
||||
*"Cygwin"*|*"Android"*|*"msys"*|*"BusyBox"*)
|
||||
USER=""
|
||||
GROUP=""
|
||||
DO_INIT=false
|
||||
;;
|
||||
*)
|
||||
GROUP=root
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$LOCAL_OS" == "Android" ] || [ "$LOCAL_OS" == "BusyBox" ]; then
|
||||
Logger "Cannot be installed on [$LOCAL_OS]. Please use $PROGRAM.sh directly." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ([ "$USER" != "" ] && [ "$(whoami)" != "$USER" ] && [ "$FAKEROOT" == "" ]); then
|
||||
Logger "Must be run as $USER." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
OS=$(UrlEncode "$LOCAL_OS_FULL")
|
||||
}
|
||||
|
||||
function GetInit {
|
||||
if [ -f /sbin/openrc-run ]; then
|
||||
init="openrc"
|
||||
Logger "Detected openrc." "NOTICE"
|
||||
elif [ -f /sbin/init ]; then
|
||||
if file /sbin/init | grep systemd > /dev/null; then
|
||||
init="systemd"
|
||||
Logger "Detected systemd." "NOTICE"
|
||||
else
|
||||
init="initV"
|
||||
Logger "Detected initV." "NOTICE"
|
||||
fi
|
||||
else
|
||||
Logger "Can't detect initV, systemd or openRC. Service files won't be installed. You can still run $PROGRAM manually or via cron." "WARN"
|
||||
init="none"
|
||||
fi
|
||||
}
|
||||
|
||||
function CreateDir {
|
||||
local dir="${1}"
|
||||
local dirMask="${2}"
|
||||
local dirUser="${3}"
|
||||
local dirGroup="${4}"
|
||||
|
||||
if [ ! -d "$dir" ]; then
|
||||
(
|
||||
if [ $(IsInteger $dirMask) -eq 1 ]; then
|
||||
umask $dirMask
|
||||
fi
|
||||
mkdir -p "$dir"
|
||||
)
|
||||
if [ $? == 0 ]; then
|
||||
Logger "Created directory [$dir]." "NOTICE"
|
||||
else
|
||||
Logger "Cannot create directory [$dir]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$dirUser" != "" ]; then
|
||||
userGroup="$dirUser"
|
||||
if [ "$dirGroup" != "" ]; then
|
||||
userGroup="$userGroup"":$dirGroup"
|
||||
fi
|
||||
chown "$userGroup" "$dir"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Could not set directory ownership on [$dir] to [$userGroup]." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
Logger "Set file ownership on [$dir] to [$userGroup]." "NOTICE"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function CopyFile {
|
||||
local sourcePath="${1}"
|
||||
local destPath="${2}"
|
||||
local sourceFileName="${3}"
|
||||
local destFileName="${4}"
|
||||
local fileMod="${5}"
|
||||
local fileUser="${6}"
|
||||
local fileGroup="${7}"
|
||||
local overwrite="${8:-false}"
|
||||
|
||||
local userGroup=""
|
||||
|
||||
if [ "$destFileName" == "" ]; then
|
||||
destFileName="$sourceFileName"
|
||||
fi
|
||||
|
||||
if [ -f "$destPath/$destFileName" ] && [ $overwrite == false ]; then
|
||||
destFileName="$sourceFileName.new"
|
||||
Logger "Copying [$sourceFileName] to [$destPath/$destFileName]." "NOTICE"
|
||||
fi
|
||||
|
||||
cp "$sourcePath/$sourceFileName" "$destPath/$destFileName"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot copy [$sourcePath/$sourceFileName] to [$destPath/$destFileName]. Make sure to run install script in the directory containing all other files." "CRITICAL"
|
||||
Logger "Also make sure you have permissions to write to [$BIN_DIR]." "ERROR"
|
||||
exit 1
|
||||
else
|
||||
Logger "Copied [$sourcePath/$sourceFileName] to [$destPath/$destFileName]." "NOTICE"
|
||||
if [ "$(IsInteger $fileMod)" -eq 1 ]; then
|
||||
chmod "$fileMod" "$destPath/$destFileName"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot set file permissions of [$destPath/$destFileName] to [$fileMod]." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
Logger "Set file permissions to [$fileMod] on [$destPath/$destFileName]." "NOTICE"
|
||||
fi
|
||||
elif [ "$fileMod" != "" ]; then
|
||||
Logger "Bogus filemod [$fileMod] for [$destPath] given." "WARN"
|
||||
fi
|
||||
|
||||
if [ "$fileUser" != "" ]; then
|
||||
userGroup="$fileUser"
|
||||
|
||||
if [ "$fileGroup" != "" ]; then
|
||||
userGroup="$userGroup"":$fileGroup"
|
||||
fi
|
||||
|
||||
chown "$userGroup" "$destPath/$destFileName"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Could not set file ownership on [$destPath/$destFileName] to [$userGroup]." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
Logger "Set file ownership on [$destPath/$destFileName] to [$userGroup]." "NOTICE"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
function CopyExampleFiles {
|
||||
exampleFiles=()
|
||||
exampleFiles[0]="sync.conf.example" # osync
|
||||
exampleFiles[1]="host_backup.conf.example" # obackup
|
||||
exampleFiles[2]="exclude.list.example" # osync & obackup
|
||||
exampleFiles[3]="snapshot.conf.example" # zsnap
|
||||
exampleFiles[4]="default.conf" # pmocr
|
||||
|
||||
for file in "${exampleFiles[@]}"; do
|
||||
if [ -f "$SCRIPT_PATH/$file" ]; then
|
||||
CopyFile "$SCRIPT_PATH" "$CONF_DIR" "$file" "$file" "" "" "" false
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
function CopyProgram {
|
||||
binFiles=()
|
||||
binFiles[0]="$PROGRAM_BINARY"
|
||||
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then
|
||||
binFiles[1]="$PROGRAM_BATCH"
|
||||
binFiles[2]="$SSH_FILTER"
|
||||
fi
|
||||
|
||||
local user=""
|
||||
local group=""
|
||||
|
||||
if ([ "$USER" != "" ] && [ "$FAKEROOT" == "" ]); then
|
||||
user="$USER"
|
||||
fi
|
||||
if ([ "$GROUP" != "" ] && [ "$FAKEROOT" == "" ]); then
|
||||
group="$GROUP"
|
||||
fi
|
||||
|
||||
for file in "${binFiles[@]}"; do
|
||||
CopyFile "$SCRIPT_PATH" "$BIN_DIR" "$file" "$file" 755 "$user" "$group" true
|
||||
done
|
||||
}
|
||||
|
||||
function CopyServiceFiles {
|
||||
if ([ "$init" == "systemd" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_SYSTEM" ]); then
|
||||
CreateDir "$SERVICE_DIR_SYSTEMD_SYSTEM"
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_SYSTEM" "$SERVICE_FILE_SYSTEMD_SYSTEM" "$SERVICE_FILE_SYSTEMD_SYSTEM" "" "" "" true
|
||||
if [ -f "$SCRIPT_PATH/$SERVICE_FILE_SYSTEMD_USER" ]; then
|
||||
CreateDir "$SERVICE_DIR_SYSTEMD_USER"
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_USER" "$SERVICE_FILE_SYSTEMD_USER" "$SERVICE_FILE_SYSTEMD_USER" "" "" "" true
|
||||
fi
|
||||
|
||||
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" ]; then
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_SYSTEM" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM" "" "" "" true
|
||||
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE"
|
||||
fi
|
||||
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" ]; then
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_SYSTEMD_USER" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" "$TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER" "" "" "" true
|
||||
fi
|
||||
|
||||
|
||||
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_SYSTEMD_SYSTEM] and [$SERVICE_DIR_SYSTEMD_USER]." "NOTICE"
|
||||
Logger "Can be activated with [systemctl start SERVICE_NAME@instance.conf] where instance.conf is the name of the config file in $CONF_DIR." "NOTICE"
|
||||
Logger "Can be enabled on boot with [systemctl enable $SERVICE_NAME@instance.conf]." "NOTICE"
|
||||
Logger "In userland, active with [systemctl --user start $SERVICE_NAME@instance.conf]." "NOTICE"
|
||||
elif ([ "$init" == "initV" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_INIT" ] && [ -d "$SERVICE_DIR_INIT" ]); then
|
||||
#CreateDir "$SERVICE_DIR_INIT"
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_INIT" "$SERVICE_FILE_INIT" "$SERVICE_FILE_INIT" "755" "" "" true
|
||||
if [ -f "$SCRIPT_PATH/$TARGET_HELPER_SERVICE_FILE_INIT" ]; then
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_INIT" "$TARGET_HELPER_SERVICE_FILE_INIT" "$TARGET_HELPER_SERVICE_FILE_INIT" "755" "" "" true
|
||||
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE"
|
||||
fi
|
||||
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_INIT]." "NOTICE"
|
||||
Logger "Can be activated with [service $SERVICE_FILE_INIT start]." "NOTICE"
|
||||
Logger "Can be enabled on boot with [chkconfig $SERVICE_FILE_INIT on]." "NOTICE"
|
||||
elif ([ "$init" == "openrc" ] && [ -f "$SCRIPT_PATH/$SERVICE_FILE_OPENRC" ] && [ -d "$SERVICE_DIR_OPENRC" ]); then
|
||||
# Rename service to usual service file
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_OPENRC" "$SERVICE_FILE_OPENRC" "$SERVICE_FILE_INIT" "755" "" "" true
|
||||
if [ -f "$SCRPT_PATH/$TARGET_HELPER_SERVICE_FILE_OPENRC" ]; then
|
||||
CopyFile "$SCRIPT_PATH" "$SERVICE_DIR_OPENRC" "$TARGET_HELPER_SERVICE_FILE_OPENRC" "$TARGET_HELPER_SERVICE_FILE_OPENRC" "755" "" "" true
|
||||
Logger "Created optional service [$TARGET_HELPER_SERVICE_NAME] with same specifications as below." "NOTICE"
|
||||
fi
|
||||
Logger "Created [$SERVICE_NAME] service in [$SERVICE_DIR_OPENRC]." "NOTICE"
|
||||
Logger "Can be activated with [rc-update add $SERVICE_NAME.instance] where instance is a configuration file found in /etc/osync." "NOTICE"
|
||||
else
|
||||
Logger "Cannot properly find how to deal with init on this system. Skipping service file installation." "NOTICE"
|
||||
fi
|
||||
}
|
||||
|
||||
function Statistics {
|
||||
if type wget > /dev/null; then
|
||||
wget -qO- "$STATS_LINK" > /dev/null 2>&1
|
||||
if [ $? == 0 ]; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
if type curl > /dev/null; then
|
||||
curl "$STATS_LINK" -o /dev/null > /dev/null 2>&1
|
||||
if [ $? == 0 ]; then
|
||||
return 0
|
||||
fi
|
||||
fi
|
||||
|
||||
Logger "Neiter wget nor curl could be used for. Cannot run statistics. Use the provided link please." "WARN"
|
||||
return 1
|
||||
}
|
||||
|
||||
function RemoveFile {
|
||||
local file="${1}"
|
||||
|
||||
if [ -f "$file" ]; then
|
||||
rm -f "$file"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Could not remove file [$file]." "ERROR"
|
||||
else
|
||||
Logger "Removed file [$file]." "NOTICE"
|
||||
fi
|
||||
else
|
||||
Logger "File [$file] not found. Skipping." "NOTICE"
|
||||
fi
|
||||
}
|
||||
|
||||
function RemoveAll {
|
||||
RemoveFile "$BIN_DIR/$PROGRAM_BINARY"
|
||||
|
||||
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then
|
||||
RemoveFile "$BIN_DIR/$PROGRAM_BATCH"
|
||||
fi
|
||||
|
||||
if [ ! -f "$BIN_DIR/osync.sh" ] && [ ! -f "$BIN_DIR/obackup.sh" ]; then # Check if any other program requiring ssh filter is present before removal
|
||||
RemoveFile "$BIN_DIR/$SSH_FILTER"
|
||||
else
|
||||
Logger "Skipping removal of [$BIN_DIR/$SSH_FILTER] because other programs present that need it." "NOTICE"
|
||||
fi
|
||||
RemoveFile "$SERVICE_DIR_SYSTEMD_SYSTEM/$SERVICE_FILE_SYSTEMD_SYSTEM"
|
||||
RemoveFile "$SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_USER"
|
||||
RemoveFile "$SERVICE_DIR_INIT/$SERVICE_FILE_INIT"
|
||||
|
||||
RemoveFile "$TARGET_HELPER_SERVICE_DIR_SYSTEMD_SYSTEM/$SERVICE_FILE_SYSTEMD_SYSTEM"
|
||||
RemoveFile "$TARGET_HELPER_SERVICE_DIR_SYSTEMD_USER/$SERVICE_FILE_SYSTEMD_USER"
|
||||
RemoveFile "$TARGET_HELPER_SERVICE_DIR_INIT/$SERVICE_FILE_INIT"
|
||||
|
||||
Logger "Skipping configuration files in [$CONF_DIR]. You may remove this directory manually." "NOTICE"
|
||||
}
|
||||
|
||||
function Usage {
|
||||
echo "Installs $PROGRAM into $BIN_DIR"
|
||||
echo "options:"
|
||||
echo "--silent Will log and bypass user interaction."
|
||||
echo "--no-stats Used with --silent in order to refuse sending anonymous install stats."
|
||||
echo "--remove Remove the program."
|
||||
echo "--prefix=/path Use prefix to install path."
|
||||
exit 127
|
||||
}
|
||||
|
||||
############################## Script entry point
|
||||
|
||||
function GetCommandlineArguments {
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
--prefix=*)
|
||||
FAKEROOT="${i##*=}"
|
||||
;;
|
||||
--silent)
|
||||
_LOGGER_SILENT=true
|
||||
;;
|
||||
--no-stats)
|
||||
_STATS=0
|
||||
;;
|
||||
--remove)
|
||||
ACTION="uninstall"
|
||||
;;
|
||||
--help|-h|-?)
|
||||
Usage
|
||||
;;
|
||||
*)
|
||||
Logger "Unknown option '$i'" "ERROR"
|
||||
Usage
|
||||
exit
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
GetCommandlineArguments "$@"
|
||||
|
||||
CONF_DIR=$FAKEROOT/etc/$PROGRAM
|
||||
BIN_DIR="$FAKEROOT/usr/local/bin"
|
||||
SERVICE_DIR_INIT=$FAKEROOT/etc/init.d
|
||||
# Should be /usr/lib/systemd/system, but /lib/systemd/system exists on debian & rhel / fedora
|
||||
SERVICE_DIR_SYSTEMD_SYSTEM=$FAKEROOT/lib/systemd/system
|
||||
SERVICE_DIR_SYSTEMD_USER=$FAKEROOT/etc/systemd/user
|
||||
SERVICE_DIR_OPENRC=$FAKEROOT/etc/init.d
|
||||
|
||||
if [ "$PROGRAM" == "osync" ]; then
|
||||
SERVICE_NAME="osync-srv"
|
||||
TARGET_HELPER_SERVICE_NAME="osync-target-helper-srv"
|
||||
|
||||
TARGET_HELPER_SERVICE_FILE_INIT="$TARGET_HELPER_SERVICE_NAME"
|
||||
TARGET_HELPER_SERVICE_FILE_SYSTEMD_SYSTEM="$TARGET_HELPER_SERVICE_NAME@.service"
|
||||
TARGET_HELPER_SERVICE_FILE_SYSTEMD_USER="$TARGET_HELPER_SERVICE_NAME@.service.user"
|
||||
TARGET_HELPER_SERVICE_FILE_OPENRC="$TARGET_HELPER_SERVICE_NAME-openrc"
|
||||
elif [ "$PROGRAM" == "pmocr" ]; then
|
||||
SERVICE_NAME="pmocr-srv"
|
||||
fi
|
||||
|
||||
SERVICE_FILE_INIT="$SERVICE_NAME"
|
||||
SERVICE_FILE_SYSTEMD_SYSTEM="$SERVICE_NAME@.service"
|
||||
SERVICE_FILE_SYSTEMD_USER="$SERVICE_NAME@.service.user"
|
||||
SERVICE_FILE_OPENRC="$SERVICE_NAME-openrc"
|
||||
|
||||
## Generic code
|
||||
|
||||
trap GenericTrapQuit TERM EXIT HUP QUIT
|
||||
|
||||
if [ ! -w "$(dirname $LOG_FILE)" ]; then
|
||||
echo "Cannot write to log [$(dirname $LOG_FILE)]."
|
||||
else
|
||||
Logger "Script begin, logging to [$LOG_FILE]." "DEBUG"
|
||||
fi
|
||||
|
||||
# Set default umask
|
||||
umask 0022
|
||||
|
||||
GetLocalOS
|
||||
SetLocalOSSettings
|
||||
# On Mac OS this always produces a warning which causes the installer to fail with exit code 2
|
||||
# Since we know it won't work anyway, and that's fine, just skip this step
|
||||
if $DO_INIT; then
|
||||
GetInit
|
||||
fi
|
||||
|
||||
STATS_LINK="http://instcount.netpower.fr?program=$PROGRAM&version=$PROGRAM_VERSION&os=$OS&action=$ACTION"
|
||||
|
||||
if [ "$ACTION" == "uninstall" ]; then
|
||||
RemoveAll
|
||||
Logger "$PROGRAM uninstalled." "NOTICE"
|
||||
else
|
||||
CreateDir "$CONF_DIR"
|
||||
CreateDir "$BIN_DIR"
|
||||
CopyExampleFiles
|
||||
CopyProgram
|
||||
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "pmocr" ]; then
|
||||
CopyServiceFiles
|
||||
fi
|
||||
Logger "$PROGRAM installed. Use with $BIN_DIR/$PROGRAM_BINARY" "NOTICE"
|
||||
if [ "$PROGRAM" == "osync" ] || [ "$PROGRAM" == "obackup" ]; then
|
||||
echo ""
|
||||
Logger "If connecting remotely, consider setup ssh filter to enhance security." "NOTICE"
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $_STATS -eq 1 ]; then
|
||||
if [ $_LOGGER_SILENT == true ]; then
|
||||
Statistics
|
||||
else
|
||||
Logger "In order to make usage statistics, the script would like to connect to $STATS_LINK" "NOTICE"
|
||||
read -r -p "No data except those in the url will be send. Allow [Y/n] " response
|
||||
case $response in
|
||||
[nN])
|
||||
exit
|
||||
;;
|
||||
*)
|
||||
Statistics
|
||||
exit $?
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
fi
|
dev/debug_obackup.sh (new executable file, 5080 lines; diff suppressed because it is too large)

dev/merge.sh (new executable file, 204 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
## MERGE 2020031501
|
||||
|
||||
## Merges ofunctions.sh and n_program.sh into program.sh
|
||||
## Adds installer
|
||||
|
||||
PROGRAM=merge
|
||||
INSTANCE_ID=dev
|
||||
|
||||
function Usage {
|
||||
echo "Merges ofunctions.sh and n_program.sh into debug_program.sh and ../program.sh"
|
||||
echo "Usage"
|
||||
echo "$0 osync|obackup|pmocr"
|
||||
}
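# Illustrative usage (see Usage above): run from the dev directory, next to ofunctions.sh, e.g.:
#   ./merge.sh obackup
# This builds dev/debug_obackup.sh and ../obackup.sh, then regenerates ../install.sh
# (and ../obackup-batch.sh if common_batch.sh is present).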
|
||||
|
||||
function __PREPROCESSOR_Merge {
|
||||
local nPROGRAM="$1"
|
||||
|
||||
if [ -f "$nPROGRAM" ]; then
|
||||
Logger "$nPROGRAM is not found in local path." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
VERSION=$(grep "PROGRAM_VERSION=" n_$nPROGRAM.sh)
|
||||
VERSION=${VERSION#*=}
|
||||
__PREPROCESSOR_Constants
|
||||
|
||||
__PREPROCESSOR_Unexpand "n_$nPROGRAM.sh" "debug_$nPROGRAM.sh"
|
||||
|
||||
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
|
||||
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "debug_$nPROGRAM.sh"
|
||||
done
|
||||
|
||||
__PREPROCESSOR_CleanDebug "debug_$nPROGRAM.sh" "../$nPROGRAM.sh"
|
||||
}
|
||||
|
||||
function __PREPROCESSOR_Constants {
|
||||
PARANOIA_DEBUG_LINE="#__WITH_PARANOIA_DEBUG"
|
||||
PARANOIA_DEBUG_BEGIN="#__BEGIN_WITH_PARANOIA_DEBUG"
|
||||
PARANOIA_DEBUG_END="#__END_WITH_PARANOIA_DEBUG"
|
||||
|
||||
__PREPROCESSOR_SUBSETS=(
|
||||
'#### OFUNCTIONS FULL SUBSET ####'
|
||||
'#### OFUNCTIONS MINI SUBSET ####'
|
||||
'#### OFUNCTIONS MICRO SUBSET ####'
|
||||
'#### PoorMansRandomGenerator SUBSET ####'
|
||||
'#### _OFUNCTIONS_BOOTSTRAP SUBSET ####'
|
||||
'#### RUN_DIR SUBSET ####'
|
||||
'#### DEBUG SUBSET ####'
|
||||
'#### TrapError SUBSET ####'
|
||||
'#### RemoteLogger SUBSET ####'
|
||||
'#### Logger SUBSET ####'
|
||||
'#### GetLocalOS SUBSET ####'
|
||||
'#### IsInteger SUBSET ####'
|
||||
'#### UrlEncode SUBSET ####'
|
||||
'#### HumanToNumeric SUBSET ####'
|
||||
'#### ArrayContains SUBSET ####'
|
||||
'#### VerComp SUBSET ####'
|
||||
'#### GetConfFileValue SUBSET ####'
|
||||
'#### SetConfFileValue SUBSET ####'
|
||||
'#### CheckRFC822 SUBSET ####'
|
||||
'#### CleanUp SUBSET ####'
|
||||
'#### GenericTrapQuit SUBSET ####'
|
||||
'#### FileMove SUBSET ####'
|
||||
)
|
||||
}
|
||||
|
||||
function __PREPROCESSOR_Unexpand {
|
||||
local source="${1}"
|
||||
local destination="${2}"
|
||||
|
||||
unexpand "$source" > "$destination"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot unexpand [$source] to [$destination]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function __PREPROCESSOR_MergeSubset {
|
||||
local subsetBegin="${1}"
|
||||
local subsetEnd="${2}"
|
||||
local subsetFile="${3}"
|
||||
local mergedFile="${4}"
|
||||
|
||||
sed -n "/$subsetBegin/,/$subsetEnd/p" "$subsetFile" > "$subsetFile.$subsetBegin"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot sed subset [$subsetBegin -- $subsetEnd] in [$subsetFile]." "CRTICIAL"
|
||||
exit 1
|
||||
fi
|
||||
sed "/include $subsetBegin/r $subsetFile.$subsetBegin" "$mergedFile" | grep -v -E "$subsetBegin\$|$subsetEnd\$" > "$mergedFile.tmp"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot add subset [$subsetBegin] to [$mergedFile]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
rm -f "$subsetFile.$subsetBegin"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot remove temporary subset [$subsetFile.$subsetBegin]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -f "$mergedFile"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot remove merged original file [$mergedFile]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
mv "$mergedFile.tmp" "$mergedFile"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot move merged tmp file to original [$mergedFile]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
}
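# Illustrative sketch of the marker layout __PREPROCESSOR_MergeSubset expects (assumed
# example, shown only to clarify the sed calls above). In ofunctions.sh a subset is
# delimited by a begin and an end marker:
#   #### Logger SUBSET ####
#   function Logger { ... }
#   #### Logger SUBSET END ####
# In the merged file, a line reading "include #### Logger SUBSET ####" marks where that
# subset is inserted; the marker lines themselves are then dropped by the grep -v filter.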
|
||||
|
||||
function __PREPROCESSOR_CleanDebug {
|
||||
local source="${1}"
|
||||
local destination="${2:-$source}"
|
||||
|
||||
sed '/'$PARANOIA_DEBUG_BEGIN'/,/'$PARANOIA_DEBUG_END'/d' "$source" | grep -v "$PARANOIA_DEBUG_LINE" > "$destination.tmp"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot remove PARANOIA_DEBUG code from standard build." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
mv -f "$destination.tmp" "$destination"
|
||||
if [ $? -ne 0 ]; then
|
||||
Logger "Cannot move [$destination.tmp] to [$destination]." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
chmod +x "$source"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot chmod [$source]." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
Logger "Prepared [$source]." "NOTICE"
|
||||
fi
|
||||
|
||||
if [ "$source" != "$destination" ]; then
|
||||
|
||||
chmod +x "$destination"
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot chmod [$destination]." "CRITICAL"
|
||||
exit 1
|
||||
else
|
||||
Logger "Prepared [$destination]." "NOTICE"
|
||||
fi
|
||||
fi
|
||||
}
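# Illustrative sketch (assumed layout) of the PARANOIA_DEBUG markers handled above:
#   #__BEGIN_WITH_PARANOIA_DEBUG
#   Logger "Entering function" "PARANOIA_DEBUG"    #__WITH_PARANOIA_DEBUG
#   #__END_WITH_PARANOIA_DEBUG
# Blocks between the BEGIN/END markers and single lines tagged #__WITH_PARANOIA_DEBUG
# are kept in debug_program.sh but stripped from the standard ../program.sh build.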
|
||||
|
||||
function __PREPROCESSOR_CopyCommons {
|
||||
local nPROGRAM="$1"
|
||||
|
||||
sed "s/\[prgname\]/$nPROGRAM/g" common_install.sh > ../install.sh
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot assemble install." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
|
||||
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../install.sh"
|
||||
done
|
||||
|
||||
__PREPROCESSOR_CleanDebug "../install.sh"
|
||||
|
||||
if [ -f "common_batch.sh" ]; then
|
||||
sed "s/\[prgname\]/$nPROGRAM/g" common_batch.sh > ../$nPROGRAM-batch.sh
|
||||
if [ $? != 0 ]; then
|
||||
Logger "Cannot assemble batch runner." "CRITICAL"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for subset in "${__PREPROCESSOR_SUBSETS[@]}"; do
|
||||
__PREPROCESSOR_MergeSubset "$subset" "${subset//SUBSET/SUBSET END}" "ofunctions.sh" "../$nPROGRAM-batch.sh"
|
||||
done
|
||||
|
||||
__PREPROCESSOR_CleanDebug "../$nPROGRAM-batch.sh"
|
||||
fi
|
||||
}
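# Illustrative sketch (assumed content) of the placeholder replaced by the sed calls above:
# common_install.sh and common_batch.sh are expected to contain lines such as
#   PROGRAM=[prgname]
# which "s/\[prgname\]/$nPROGRAM/g" turns into, e.g., PROGRAM=obackup before the
# ofunctions.sh subsets are merged in.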
|
||||
|
||||
# If sourced don't do anything
|
||||
if [ "$(basename $0)" == "merge.sh" ]; then
|
||||
source "./ofunctions.sh"
|
||||
if [ $? != 0 ]; then
|
||||
echo "Please run $0 in dev directory with ofunctions.sh"
|
||||
exit 1
|
||||
fi
|
||||
trap GenericTrapQuit TERM EXIT HUP QUIT
|
||||
|
||||
if [ "$1" == "osync" ]; then
|
||||
__PREPROCESSOR_Merge osync
|
||||
__PREPROCESSOR_CopyCommons osync
|
||||
elif [ "$1" == "obackup" ]; then
|
||||
__PREPROCESSOR_Merge obackup
|
||||
__PREPROCESSOR_CopyCommons obackup
|
||||
elif [ "$1" == "pmocr" ]; then
|
||||
__PREPROCESSOR_Merge pmocr
|
||||
__PREPROCESSOR_CopyCommons pmocr
|
||||
else
|
||||
echo "No valid program given."
|
||||
Usage
|
||||
exit 1
|
||||
fi
|
||||
fi
|
dev/n_obackup.sh (new file, 2123 lines; diff suppressed because it is too large)
dev/ofunctions.sh (new file, 2449 lines; diff suppressed because it is too large)

dev/shellcheck.sh (new executable file, 10 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
#SC1090 = not following non constants source
|
||||
#SC1091 = not following source
|
||||
#SC2086 = quoting errors (shellcheck is way too picky about quoting)
|
||||
#SC2120 = only for debug version
|
||||
#SC2034 = unused variables (can be ignored in ofunctions.sh)
|
||||
#SC2068 = bad array usage (can be ignored in ofunctions.sh)
|
||||
|
||||
shellcheck -e SC1090,SC1091,SC2086,SC2119,SC2120 "$@"
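# Illustrative usage (run from the dev directory); extra arguments are passed straight
# to shellcheck together with the exclusions above, e.g.:
#   ./shellcheck.sh n_obackup.sh ofunctions.sh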
|
dev/tests/conf/local.conf (new file, 238 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
|
||||
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
|
||||
|
||||
[GENERAL]
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
|
||||
## Backup identification string.
|
||||
INSTANCE_ID="local-test"
|
||||
|
||||
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
|
||||
LOGFILE=""
|
||||
|
||||
## Elements to backup
|
||||
SQL_BACKUP=no
|
||||
FILE_BACKUP=yes
|
||||
|
||||
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
|
||||
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
|
||||
BACKUP_TYPE=local
|
||||
|
||||
[BACKUP STORAGE]
|
||||
|
||||
## Storage paths of the backups (absolute paths of the local or remote system)
|
||||
SQL_STORAGE="${HOME}/obackup-storage/sql-local"
|
||||
FILE_STORAGE="${HOME}/obackup-storage/files-local"
|
||||
|
||||
## Encryption
|
||||
ENCRYPTION=no
|
||||
|
||||
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
|
||||
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-local"
|
||||
|
||||
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
|
||||
GPG_RECIPIENT="John Doe"
|
||||
PARALLEL_ENCRYPTION_PROCESSES=""
|
||||
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=yes
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
|
||||
KEEP_ABSOLUTE_PATHS=yes
|
||||
|
||||
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
|
||||
## Check backup size before proceeding
|
||||
GET_BACKUP_SIZE=yes
|
||||
|
||||
## Generate an alert if storage free space is lower than given value in Kb.
|
||||
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
|
||||
SQL_WARN_MIN_SPACE=1048576
|
||||
FILE_WARN_MIN_SPACE=1048576
|
||||
|
||||
[REMOTE_OPTIONS]
|
||||
|
||||
## In case of pulled or pushed backups, remote system URI needs to be supplied.
|
||||
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
|
||||
|
||||
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
|
||||
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
|
||||
SSH_PASSWORD_FILE=""
|
||||
_REMOTE_TOKEN="SomeAlphaNumericToken9"
|
||||
|
||||
|
||||
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
|
||||
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
|
||||
SSH_IGNORE_KNOWN_HOSTS=no
|
||||
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
|
||||
|
||||
[DATABASE BACKUP SETTINGS]
|
||||
|
||||
## Database backup user
|
||||
SQL_USER=root
|
||||
|
||||
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
|
||||
## Every found database will be backed up as separate backup task.
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod"
|
||||
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
|
||||
DATABASES_LIST=mysql
|
||||
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
|
||||
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
|
||||
## default option: --opt
|
||||
MYSQLDUMP_OPTIONS="--opt --single-transaction"
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability)
|
||||
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_LEVEL=3
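## Rough illustration only (the actual command is built by obackup.sh and may differ):
## with the settings above, one database task amounts to something like
##   mysqldump -u root --opt --single-transaction somedb | xz -3 > "$SQL_STORAGE/somedb.sql.xz"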
|
||||
|
||||
[FILE BACKUP SETTINGS]
|
||||
|
||||
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
|
||||
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
|
||||
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up.
|
||||
DIRECTORY_LIST=/root/obackup-testdata/nonPresentData
|
||||
RECURSIVE_DIRECTORY_LIST=/root/obackup-testdata/nonPresentDataRecursive
|
||||
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
|
||||
|
||||
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
|
||||
RSYNC_PATTERN_FIRST=include
|
||||
|
||||
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
|
||||
## Paths are relative to sync dirs. List elements are separated by a semicolon.
|
||||
RSYNC_INCLUDE_PATTERN=""
|
||||
RSYNC_EXCLUDE_PATTERN="*.ded"
|
||||
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
|
||||
|
||||
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_INCLUDE_FROM=""
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
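## Illustrative sketch only (not part of obackup): how a PATH_SEPARATOR_CHAR separated list
## such as DIRECTORY_LIST can be split into individual backup tasks:
##   IFS="$PATH_SEPARATOR_CHAR" read -r -a tasks <<< "$DIRECTORY_LIST"
##   for task in "${tasks[@]}"; do echo "backup task: $task"; done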
|
||||
RSYNC_OPTIONAL_ARGS=""
|
||||
|
||||
|
||||
## Preserve basic linux permissions
|
||||
PRESERVE_PERMISSIONS=yes
|
||||
PRESERVE_OWNER=yes
|
||||
PRESERVE_GROUP=yes
|
||||
## On MACOS X, does not work and will be ignored
|
||||
PRESERVE_EXECUTABILITY=yes
|
||||
|
||||
## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr. Make sure source and target FS can hold same Xattr or you'll get loads of errors.
|
||||
PRESERVE_XATTR=no
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=yes
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=yes
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=no
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=no
|
||||
|
||||
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one in file list. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Keep partial uploads that can be resumed on next run, experimental feature
|
||||
PARTIAL=no
|
||||
|
||||
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=no
|
||||
|
||||
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
|
||||
DELTA_COPIES=yes
|
||||
|
||||
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
|
||||
BANDWIDTH=0
|
||||
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
[ALERT_OPTIONS]
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS=""
|
||||
MAIL_BODY_CHARSET=""
|
||||
|
||||
|
||||
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
|
||||
SENDER_MAIL="alert@your.system.tld"
|
||||
SMTP_SERVER=smtp.your.isp.tld
|
||||
SMTP_PORT=25
|
||||
# encryption can be tls, ssl or none
|
||||
SMTP_ENCRYPTION=none
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
[BACKUP SETTINGS]
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
|
||||
KEEP_LOGGING=1801
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_SQL_BACKUPS=yes
|
||||
ROTATE_SQL_COPIES=7
|
||||
ROTATE_FILE_BACKUPS=yes
|
||||
ROTATE_FILE_COPIES=7
|
||||
|
||||
[EXECUTION_HOOKS]
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops whole backup execution if one of the above commands fail
|
||||
STOP_ON_CMD_ERROR=no
|
||||
|
||||
## Run local and remote after backup cmd's even on failure
|
||||
RUN_AFTER_CMD_ON_ERROR=yes
|
dev/tests/conf/max-exec-time.conf (new file, 236 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
|
||||
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
|
||||
|
||||
[GENERAL]
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
|
||||
## Backup identification string.
|
||||
INSTANCE_ID="local-test"
|
||||
|
||||
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
|
||||
LOGFILE=""
|
||||
|
||||
## Elements to backup
|
||||
SQL_BACKUP=yes
|
||||
FILE_BACKUP=yes
|
||||
|
||||
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
|
||||
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
|
||||
BACKUP_TYPE=local
|
||||
|
||||
[BACKUP STORAGE]
|
||||
|
||||
## Storage paths of the backups (absolute paths of the local or remote system)
|
||||
SQL_STORAGE="${HOME}/obackup-storage/sql"
|
||||
FILE_STORAGE="${HOME}/obackup-storage/files"
|
||||
|
||||
## Backup encryption using GPG and duplicity. Feature not ready yet.
|
||||
ENCRYPTION=no
|
||||
CRYPT_STORAGE="/home/storage/crypt"
|
||||
GPG_RECIPIENT="Your Name used with GPG signature"
|
||||
PARALLEL_ENCRYPTION_PROCESSES=""
|
||||
|
||||
|
||||
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=yes
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
|
||||
KEEP_ABSOLUTE_PATHS=yes
|
||||
|
||||
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
|
||||
## Check backup size before proceeding
|
||||
GET_BACKUP_SIZE=yes
|
||||
|
||||
## Generate an alert if storage free space is lower than given value in Kb.
|
||||
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
|
||||
SQL_WARN_MIN_SPACE=1048576
|
||||
FILE_WARN_MIN_SPACE=1048576
|
||||
|
||||
[REMOTE_OPTIONS]
|
||||
|
||||
## In case of pulled or pushed backups, remote system URI needs to be supplied.
|
||||
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
|
||||
|
||||
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
|
||||
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
|
||||
SSH_PASSWORD_FILE=""
|
||||
_REMOTE_TOKEN="SomeAlphaNumericToken9"
|
||||
|
||||
|
||||
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
|
||||
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
|
||||
SSH_IGNORE_KNOWN_HOSTS=no
|
||||
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
|
||||
|
||||
[DATABASE BACKUP SETTINGS]
|
||||
|
||||
## Database backup user
|
||||
SQL_USER=root
|
||||
|
||||
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
|
||||
## Every found database will be backed up as separate backup task.
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;information_schema"
|
||||
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
|
||||
DATABASES_LIST="mysql"
|
||||
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=1000
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=1000
|
||||
|
||||
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
|
||||
## default option: --opt
|
||||
MYSQLDUMP_OPTIONS="--opt --single-transaction"
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability)
|
||||
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_LEVEL=3
|
||||
|
||||
[FILE BACKUP SETTINGS]
|
||||
|
||||
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
|
||||
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
|
||||
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up.
|
||||
DIRECTORY_LIST="${HOME}/obackup-testdata/testData"
|
||||
RECURSIVE_DIRECTORY_LIST="${HOME}/obackup-testdata/testDataRecursive"
|
||||
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
|
||||
|
||||
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
|
||||
RSYNC_PATTERN_FIRST=include
|
||||
|
||||
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
|
||||
## Paths are relative to sync dirs. List elements are separated by a semicolon.
|
||||
RSYNC_INCLUDE_PATTERN=""
|
||||
RSYNC_EXCLUDE_PATTERN="*.ded"
|
||||
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
|
||||
|
||||
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_INCLUDE_FROM=""
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
RSYNC_OPTIONAL_ARGS=""
|
||||
|
||||
|
||||
## Preserve basic linux permissions
|
||||
PRESERVE_PERMISSIONS=yes
|
||||
PRESERVE_OWNER=yes
|
||||
PRESERVE_GROUP=yes
|
||||
## On MACOS X, does not work and will be ignored
|
||||
PRESERVE_EXECUTABILITY=yes
|
||||
|
||||
## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr. Make sure source and target FS can hold same Xattr or you'll get loads of errors.
|
||||
PRESERVE_XATTR=no
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=yes
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=yes
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=no
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=no
|
||||
|
||||
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one in file list. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=1000
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=1000
|
||||
|
||||
## Keep partial uploads that can be resumed on next run, experimental feature
|
||||
PARTIAL=no
|
||||
|
||||
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=no
|
||||
|
||||
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
|
||||
DELTA_COPIES=yes
|
||||
|
||||
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
|
||||
BANDWIDTH=0
|
||||
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
[ALERT_OPTIONS]
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS=""
|
||||
MAIL_BODY_CHARSET=""
|
||||
|
||||
|
||||
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
|
||||
SENDER_MAIL="alert@your.system.tld"
|
||||
SMTP_SERVER=smtp.your.isp.tld
|
||||
SMTP_PORT=25
|
||||
# encryption can be tls, ssl or none
|
||||
SMTP_ENCRYPTION=none
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
[BACKUP SETTINGS]
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=1000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=1
|
||||
|
||||
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
|
||||
KEEP_LOGGING=1801
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_SQL_BACKUPS=no
|
||||
ROTATE_SQL_COPIES=7
|
||||
ROTATE_FILE_BACKUPS=no
|
||||
ROTATE_FILE_COPIES=7
|
||||
|
||||
[EXECUTION_HOOKS]
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops whole backup execution if one of the above commands fail
|
||||
STOP_ON_CMD_ERROR=no
|
||||
|
||||
## Run local and remote after backup cmd's even on failure
|
||||
RUN_AFTER_CMD_ON_ERROR=yes
|
dev/tests/conf/old.conf (new file, 101 lines):
|
||||
#!/bin/bash
|
||||
|
||||
###### Remote (or local) backup script for files & databases
|
||||
###### (C) 2013 by Ozy de Jong (www.badministrateur.com)
|
||||
###### Config file rev 0408201301
|
||||
|
||||
## Backup identification, any string you want
|
||||
BACKUP_ID="really-old-config-file"
|
||||
|
||||
## General backup options
|
||||
BACKUP_SQL=yes
|
||||
BACKUP_FILES=yes
|
||||
|
||||
## Local storage paths
|
||||
LOCAL_SQL_STORAGE="${HOME}/obackup-storage/sql-old"
|
||||
LOCAL_FILE_STORAGE="${HOME}/obackup-storage/files-old"
|
||||
## Keep the absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you use recursive directories backup lists or they'll all end in the same path.
|
||||
LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=yes
|
||||
## Generate an alert if backup size is lower than given value in Kb, useful for local mounted backups.
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
## Generate an alert if local storage free space is lower than given value in Kb.
|
||||
LOCAL_STORAGE_WARN_MIN_SPACE=1048576
|
||||
|
||||
## If enabled, file backups will be processed with sudo command. See documentation for /etc/sudoers configuration ("find", "du" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
|
||||
## Paranoia option. Don't change this unless you read the documentation and still feel concerned about security issues.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
## Remote options (will make backups of remote system through ssh tunnel, public RSA key needs to be put into /home/.ssh/authorized_keys in the remote user's home directory)
|
||||
REMOTE_BACKUP=yes
|
||||
SSH_RSA_PRIVATE_KEY=${HOME}/.ssh/id_rsa_local_obackup_tests
|
||||
REMOTE_USER=root
|
||||
REMOTE_HOST=localhost
|
||||
REMOTE_PORT=22
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
|
||||
REMOTE_3RD_PARTY_HOST="www.kernel.org www.google.com"
|
||||
|
||||
## Databases options
|
||||
SQL_USER=root
|
||||
## Save all databases except the ones specified in the exclude list. Every found database will be backed up as separate task (see documentation for explanation about tasks)
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod"
|
||||
# Alternatively, you can specify a manual list of databases to backup separated by spaces
|
||||
DATABASES_LIST=""
|
||||
## Max backup execution time per DB task. Soft is warning only. Hard is warning, stopping backup task and processing next one. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
## Preferred sql dump compression. Can be set to xz, lzma or gzip.
|
||||
## Generally, xz level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_PROGRAM=xz
|
||||
COMPRESSION_LEVEL=3
|
||||
## Dump compression should be done on remote side but can also be done locally to lower remote system usage (will take more bandwidth, check for ssh compression)
|
||||
COMPRESSION_REMOTE=yes
|
||||
|
||||
## Path separator. You can set whatever separator you want in your directories list below. You may change this in case you have some esoteric filenames (try to use unconventional separators like | ).
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
## File backup lists. Double quoted directory list separated by the $PATH_SEPARATOR_CHAR. Every directory will be processed as task (see documentation for explanation about tasks)
|
||||
DIRECTORIES_SIMPLE_LIST="${HOME}/obackup-testdata/testData"
|
||||
## Recurse directory list separated by the $PATH_SEPARATOR_CHAR. Will create a backup task per subdirectory (one level only), eg RECURSE_LIST="/home /var" will create tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/whatever"
|
||||
DIRECTORIES_RECURSE_LIST="${HOME}/obackup-testdata/testDataRecursive"
|
||||
## You can optionally exclude directories from RECURSE_LIST tasks, eg on the above example you could exclude /home/dir2 by adding it to RECURSE_EXCLUDE_LIST
|
||||
DIRECTORIES_RECURSE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
|
||||
## Be aware that every recurse list will have its own root (exclude pattern is relative from /home/web for /home/web/{recursedir})
|
||||
RSYNC_EXCLUDE_PATTERN="*.ded"
|
||||
## Preserve ACLS. Make sure target FS can hold ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr
|
||||
PRESERVE_XATTR=no
|
||||
## Let RSYNC compress file transfers. Do not use if you already enabled SSH compression.
|
||||
RSYNC_COMPRESS=yes
|
||||
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one in file list. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS="your@mail.tld"
|
||||
|
||||
## Max execution time of whole backup process. Soft is warning only. Hard is warning and stopping whole backup process.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Backup Rotation in case you don't use a snapshot aware file system like zfs or btrfs to perform a snapshot before every backup
|
||||
ROTATE_BACKUPS=yes
|
||||
ROTATE_COPIES=7
|
||||
|
||||
## Commands that will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set to yes). Very useful to initiate snapshots.
|
||||
## Set max execution time to 0 if you want these commands not to get stopped, else set a value in seconds after which execution will be stopped.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
dev/tests/conf/pull.conf (new file, 239 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
|
||||
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
|
||||
|
||||
[GENERAL]
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
|
||||
## Backup identification string.
|
||||
INSTANCE_ID="pull-test"
|
||||
|
||||
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
|
||||
LOGFILE=""
|
||||
|
||||
## Elements to backup
|
||||
SQL_BACKUP=yes
|
||||
FILE_BACKUP=yes
|
||||
|
||||
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
|
||||
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
|
||||
BACKUP_TYPE=pull
|
||||
|
||||
[BACKUP STORAGE]
|
||||
|
||||
## Storage paths of the backups (absolute paths of the local or remote system)
|
||||
SQL_STORAGE="${HOME}/obackup-storage/sql-pull"
|
||||
FILE_STORAGE="${HOME}/obackup-storage/files-pull"
|
||||
|
||||
## Encryption
|
||||
ENCRYPTION=no
|
||||
|
||||
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
|
||||
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-pull"
|
||||
|
||||
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
|
||||
GPG_RECIPIENT="John Doe"
|
||||
PARALLEL_ENCRYPTION_PROCESSES=""
|
||||
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=yes
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
|
||||
KEEP_ABSOLUTE_PATHS=yes
|
||||
|
||||
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
|
||||
## Check backup size before proceeding
|
||||
GET_BACKUP_SIZE=yes
|
||||
|
||||
## Generate an alert if storage free space is lower than given value in Kb.
|
||||
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
|
||||
SQL_WARN_MIN_SPACE=1048576
|
||||
FILE_WARN_MIN_SPACE=1048576
|
||||
|
||||
[REMOTE_OPTIONS]
|
||||
|
||||
## In case of pulled or pushed backups, remote system URI needs to be supplied.
|
||||
REMOTE_SYSTEM_URI="ssh://root@localhost:22/"
|
||||
|
||||
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
|
||||
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local_obackup_tests"
|
||||
|
||||
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
|
||||
SSH_PASSWORD_FILE=""
|
||||
|
||||
_REMOTE_TOKEN=SomeAlphaNumericToken9
|
||||
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
|
||||
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
|
||||
SSH_IGNORE_KNOWN_HOSTS=no
|
||||
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
|
||||
|
||||
[DATABASE BACKUP SETTINGS]
|
||||
|
||||
## Database backup user
|
||||
SQL_USER=root
|
||||
|
||||
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
|
||||
## Every found database will be backed up as separate backup task.
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod"
|
||||
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
|
||||
DATABASES_LIST=mysql
|
||||
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
|
||||
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
|
||||
## default option: --opt
|
||||
MYSQLDUMP_OPTIONS="--opt --single-transaction"
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability)
|
||||
## Generally, level 5 is a good compromise between cpu, memory hunger and compress ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_LEVEL=3
|
||||
|
||||
[FILE BACKUP SETTINGS]
|
||||
|
||||
## File backups are divided in tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
|
||||
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
|
||||
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up.
|
||||
DIRECTORY_LIST=/root/obackup-testdata/testData
|
||||
RECURSIVE_DIRECTORY_LIST=/root/obackup-testdata/testDataRecursive
|
||||
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
|
||||
|
||||
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
|
||||
RSYNC_PATTERN_FIRST=include
|
||||
|
||||
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
|
||||
## Paths are relative to sync dirs. List elements are separated by a semicolon.
|
||||
RSYNC_INCLUDE_PATTERN=""
|
||||
RSYNC_EXCLUDE_PATTERN="*.ded"
|
||||
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
|
||||
|
||||
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_INCLUDE_FROM=""
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
RSYNC_OPTIONAL_ARGS=""
|
||||
|
||||
|
||||
## Preserve basic linux permissions
|
||||
PRESERVE_PERMISSIONS=yes
|
||||
PRESERVE_OWNER=yes
|
||||
PRESERVE_GROUP=yes
|
||||
## On MACOS X, does not work and will be ignored
|
||||
PRESERVE_EXECUTABILITY=yes
|
||||
|
||||
## Preserve ACLS. Make sure source and target FS can hold same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr. Make sure source and target FS can hold same Xattr or you'll get loads of errors.
|
||||
PRESERVE_XATTR=no
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=yes
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=yes
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=no
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=no
|
||||
|
||||
## Max execution time per file backup task. Soft is warning only. Hard is warning, stopping backup and processing next one in file list. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Keep partial uploads that can be resumed on next run, experimental feature
|
||||
PARTIAL=no
|
||||
|
||||
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=no
|
||||
|
||||
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
|
||||
DELTA_COPIES=yes
|
||||
|
||||
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
|
||||
BANDWIDTH=0
|
||||
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
[ALERT_OPTIONS]
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS=""
|
||||
MAIL_BODY_CHARSET=""
|
||||
|
||||
|
||||
## Windows specific (msys / cygwin environment) only mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend or from sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/)
|
||||
SENDER_MAIL="alert@your.system.tld"
|
||||
SMTP_SERVER=smtp.your.isp.tld
|
||||
SMTP_PORT=25
|
||||
# encryption can be tls, ssl or none
|
||||
SMTP_ENCRYPTION=none
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
[BACKUP SETTINGS]
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
|
||||
KEEP_LOGGING=1801
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_SQL_BACKUPS=yes
|
||||
ROTATE_SQL_COPIES=7
|
||||
ROTATE_FILE_BACKUPS=yes
|
||||
ROTATE_FILE_COPIES=7
|
||||
|
||||
[EXECUTION_HOOKS]
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops whole backup execution if one of the above commands fail
|
||||
STOP_ON_CMD_ERROR=no
|
||||
|
||||
## Run local and remote after backup cmd's even on failure
|
||||
RUN_AFTER_CMD_ON_ERROR=yes
|
dev/tests/conf/push.conf (new file, 239 lines):
|
||||
#!/usr/bin/env bash
|
||||
|
||||
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
|
||||
###### (C) 2013-2016 by Orsiris de Jong (www.netpower.fr)
|
||||
|
||||
[GENERAL]
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
|
||||
## Backup identification string.
|
||||
INSTANCE_ID="push-test"
|
||||
|
||||
## Log file location. Leaving this empty will create log file at /var/log/obackup.INSTANCE_ID.log (or current directory if /var/log doesn't exist).
|
||||
LOGFILE=""
|
||||
|
||||
## Elements to backup
|
||||
SQL_BACKUP=yes
|
||||
FILE_BACKUP=yes
|
||||
|
||||
## Backups can be done local, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
|
||||
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
|
||||
BACKUP_TYPE=push
|
||||
|
||||
[BACKUP STORAGE]
|
||||
|
||||
## Storage paths of the backups (absolute paths of the local or remote system)
|
||||
SQL_STORAGE="${HOME}/obackup-storage/sql-push"
|
||||
FILE_STORAGE="${HOME}/obackup-storage/files-push"
|
||||
|
||||
## Encryption
|
||||
ENCRYPTION=no
|
||||
|
||||
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
|
||||
CRYPT_STORAGE="${HOME}/obackup-storage/crypt-push"
|
||||
|
||||
## GPG recipient (pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
|
||||
GPG_RECIPIENT="John Doe"
|
||||
PARALLEL_ENCRYPTION_PROCESSES=""
|
||||
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=yes
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use 'backup task division' functionality of oBackup, or everything will end up in the same directory.
|
||||
KEEP_ABSOLUTE_PATHS=yes
|
||||
|
||||
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs).
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
|
||||
## Check backup size before proceeding
|
||||
GET_BACKUP_SIZE=yes
|
||||
|
||||
## Generate an alert if storage free space is lower than given value in Kb.
|
||||
## Keep in mind that disabling backup file size test will only test min space against SQL backup size.
|
||||
SQL_WARN_MIN_SPACE=1048576
|
||||
FILE_WARN_MIN_SPACE=1048576
|
||||
|
||||
[REMOTE_OPTIONS]
|
||||
|
||||
## In case of pulled or pushed backups, remote system URI needs to be supplied.
|
||||
REMOTE_SYSTEM_URI="ssh://root@localhost:22/"
|
||||
|
||||
## You can specify a RSA key (please use full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
|
||||
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa_local_obackup_tests"
|
||||
|
||||
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
|
||||
SSH_PASSWORD_FILE=""
|
||||
|
||||
_REMOTE_TOKEN=SomeAlphaNumericToken9
|
||||
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
|
||||
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
|
||||
SSH_IGNORE_KNOWN_HOSTS=no
|
||||
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to remote host before launching remote backup tasks. Be sure the hosts responds to ping. Failing to ping will skip current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
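## Hedged example (assumption): /etc/sudoers entries allowing a dedicated backup user to run the
## required commands; the user name and binary paths are placeholders to adapt to your system:
##   backupuser ALL = NOPASSWD: /usr/bin/rsync, /usr/bin/find, /usr/bin/du, /usr/bin/tee
##   Defaults:backupuser !requiretty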
|
||||
|
||||
[DATABASE BACKUP SETTINGS]
|
||||
|
||||
## Database backup user
|
||||
SQL_USER=root
|
||||
|
||||
## Enabling the following option will back up all databases on the given local or remote SQL instance, except the ones specified in the exclude list.
|
||||
## Every database found will be backed up as a separate backup task.
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;information_schema;kopano_prod"
|
||||
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to back up, separated by spaces.
|
||||
DATABASES_LIST=mysql
|
||||
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
|
||||
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
|
||||
## default option: --opt
|
||||
MYSQLDUMP_OPTIONS="--opt --single-transaction"
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability).
|
||||
## Generally, level 5 is a good compromise between CPU usage, memory consumption and compression ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_LEVEL=3
|
||||
|
||||
[FILE BACKUP SETTINGS]
|
||||
|
||||
## File backups are divided into tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
|
||||
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
|
||||
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib", ... "/var/something".
|
||||
## You can exclude directories from the above backup task creation, e.g. avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up.
|
||||
DIRECTORY_LIST=/root/obackup-testdata/testData
|
||||
RECURSIVE_DIRECTORY_LIST=/root/obackup-testdata/testDataRecursive
|
||||
RECURSIVE_EXCLUDE_LIST="${HOME}/obackup-testdata/testDataRecursive/Excluded"
|
||||
|
||||
## Rsync exclude / include order (the option set here will be applied first, e.g. include will apply include patterns first, then exclude patterns).
|
||||
RSYNC_PATTERN_FIRST=include
|
||||
|
||||
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
|
||||
## Paths are relative to sync dirs. List elements are separated by a semicolon.
|
||||
RSYNC_INCLUDE_PATTERN=""
|
||||
RSYNC_EXCLUDE_PATTERN="*.ded"
|
||||
#RSYNC_EXCLUDE_PATTERN="tmp;archives"
|
||||
|
||||
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_INCLUDE_FROM=""
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
RSYNC_OPTIONAL_ARGS=""
|
||||
|
||||
|
||||
## Preserve basic linux permissions
|
||||
PRESERVE_PERMISSIONS=yes
|
||||
PRESERVE_OWNER=yes
|
||||
PRESERVE_GROUP=yes
|
||||
## On macOS, this option does not work and will be ignored.
|
||||
PRESERVE_EXECUTABILITY=yes
|
||||
|
||||
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr. Make sure source and target FS can hold the same Xattrs or you'll get loads of errors.
|
||||
PRESERVE_XATTR=no
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=yes
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=yes
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=no
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=no
|
||||
|
||||
## Max execution time per file backup task. Soft max exec time generates a warning only. Hard max exec time generates a warning, stops the current backup task and proceeds to the next one in the file list. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Keep partial uploads that can be resumed on the next run (experimental feature).
|
||||
PARTIAL=no
|
||||
|
||||
## Delete files on the destination that have vanished from the source. Do not turn this on unless you have enabled backup rotation or a snapshotting FS like ZFS to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=no
|
||||
|
||||
## Use the delta copy algorithm (useful when local paths are network drives); defaults to yes.
|
||||
DELTA_COPIES=yes
|
||||
|
||||
## Bandwidth limit in KB/s for file backups. Leave at 0 to disable the limit.
|
||||
BANDWIDTH=0
|
||||
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
[ALERT_OPTIONS]
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS=""
|
||||
MAIL_BODY_CHARSET=""
|
||||
|
||||
|
||||
## Windows-specific (msys / cygwin environment) mail options (used with mailsend.exe from muquit, http://github.com/muquit/mailsend, or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail/).
|
||||
SENDER_MAIL="alert@your.system.tld"
|
||||
SMTP_SERVER=smtp.your.isp.tld
|
||||
SMTP_PORT=25
|
||||
# encryption can be tls, ssl or none
|
||||
SMTP_ENCRYPTION=none
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
[BACKUP SETTINGS]
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
|
||||
KEEP_LOGGING=1801
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_SQL_BACKUPS=yes
|
||||
ROTATE_SQL_COPIES=7
|
||||
ROTATE_FILE_BACKUPS=yes
|
||||
ROTATE_FILE_COPIES=7
|
||||
|
||||
[EXECUTION_HOOKS]
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
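## Hypothetical example (assumption, not part of the original config): take a ZFS snapshot of the backed-up dataset before the run.
#LOCAL_RUN_BEFORE_CMD="zfs snapshot tank/data@before-obackup"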
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops the whole backup execution if one of the above commands fails.
|
||||
STOP_ON_CMD_ERROR=no
|
||||
|
||||
## Run local and remote after-backup commands even on failure.
|
||||
RUN_AFTER_CMD_ON_ERROR=yes
|
1015 dev/tests/run_tests.sh (Executable file)
File diff suppressed because it is too large

46 dev/tests/shunit2/CODE_OF_CONDUCT.md (Normal file)
@@ -0,0 +1,46 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at kate.ward@forestent.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
201 dev/tests/shunit2/LICENSE (Normal file)
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
434 dev/tests/shunit2/README.md (Normal file)
@@ -0,0 +1,434 @@
|
||||
# shUnit2
|
||||
|
||||
shUnit2 is a [xUnit](http://en.wikipedia.org/wiki/XUnit) unit test framework for Bourne based shell scripts, and it is designed to work in a similar manner to [JUnit](http://www.junit.org), [PyUnit](http://pyunit.sourceforge.net), etc.. If you have ever had the desire to write a unit test for a shell script, shUnit2 can do the job.
|
||||
|
||||
[](https://travis-ci.org/kward/shunit2)
|
||||
|
||||
## Table of Contents
|
||||
* [Introduction](#introduction)
|
||||
* [Credits / Contributors](#credits-contributors)
|
||||
* [Feedback](#feedback)
|
||||
* [Quickstart](#quickstart)
|
||||
* [Function Reference](#function-reference)
|
||||
* [General Info](#general-info)
|
||||
* [Asserts](#asserts)
|
||||
* [Failures](#failures)
|
||||
* [Setup/Teardown](#setup-teardown)
|
||||
* [Skipping](#skipping)
|
||||
* [Suites](#suites)
|
||||
* [Advanced Usage](#advanced-usage)
|
||||
* [Some constants you can use](#some-constants-you-can-use)
|
||||
* [Error Handling](#error-handling)
|
||||
* [Including Line Numbers in Asserts (Macros)](#including-line-numbers-in-asserts-macros)
|
||||
* [Test Skipping](#test-skipping)
|
||||
* [Appendix](#appendix)
|
||||
* [Getting help](#getting-help)
|
||||
* [Zsh](#zsh)
|
||||
|
||||
---
|
||||
## <a name="introduction"></a> Introduction
|
||||
shUnit2 was originally developed to provide a consistent testing solution for [log4sh][log4sh], a shell based logging framework similar to [log4j](http://logging.apache.org). During the development of that product, a repeated problem of having things work just fine under one shell (`/bin/bash` on Linux to be specific), and then not working under another shell (`/bin/sh` on Solaris) kept coming up. Although several simple tests were run, they were not adequate and did not catch some corner cases. The decision was finally made to write a proper unit test framework after multiple brown-bag releases were made. _Research was done to look for an existing product that met the testing requirements, but no adequate product was found._
|
||||
|
||||
Tested Operating Systems (varies over time)
|
||||
|
||||
* Cygwin
|
||||
* FreeBSD (user supported)
|
||||
* Linux (Gentoo, Ubuntu)
|
||||
* Mac OS X
|
||||
* Solaris 8, 9, 10 (inc. OpenSolaris)
|
||||
|
||||
Tested Shells
|
||||
|
||||
* Bourne Shell (__sh__)
|
||||
* BASH - GNU Bourne Again SHell (__bash__)
|
||||
* DASH (__dash__)
|
||||
* Korn Shell (__ksh__)
|
||||
* pdksh - Public Domain Korn Shell (__pdksh__)
|
||||
* zsh - Zsh (__zsh__) (since 2.1.2) _please see the Zsh shell errata for more information_
|
||||
|
||||
See the appropriate Release Notes for this release (`doc/RELEASE_NOTES-X.X.X.txt`) for the list of actual versions tested.
|
||||
|
||||
### <a name="credits-contributors"></a> Credits / Contributors
|
||||
A list of contributors to shUnit2 can be found in `doc/contributors.md`. Many thanks go out to all those who have contributed to make this a better tool.
|
||||
|
||||
shUnit2 is the original product of many hours of work by Kate Ward, the primary author of the code. For related software, check out https://github.com/kward.
|
||||
|
||||
### <a name="feedback"></a> Feedback
|
||||
Feedback is most certainly welcome for this document. Send your additions, comments and criticisms to the shunit2-users@google.com mailing list.
|
||||
|
||||
---
|
||||
|
||||
## <a name="quickstart"></a> Quickstart
|
||||
This section will give a very quick start to running unit tests with shUnit2. More information is located in later sections.
|
||||
|
||||
Here is a quick sample script to show how easy it is to write a unit test in shell. _Note: the script as it stands expects that you are running it from the "examples" directory._
|
||||
|
||||
```sh
|
||||
#! /bin/sh
|
||||
# file: examples/equality_test.sh
|
||||
|
||||
testEquality() {
|
||||
assertEquals 1 1
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. ./shunit2
|
||||
```
|
||||
|
||||
Running the unit test should give results similar to the following.
|
||||
|
||||
```console
|
||||
$ cd examples
|
||||
$ ./equality_test.sh
|
||||
testEquality
|
||||
|
||||
Ran 1 test.
|
||||
|
||||
OK
|
||||
```
|
||||
|
||||
W00t! You've just run your first successful unit test. So, what just happened? Quite a bit really, and it all happened simply by sourcing the `shunit2` library. The basic functionality for the script above goes like this:
|
||||
|
||||
* When shUnit2 is sourced, it will walk through any functions defined whose name starts with the string `test`, and add those to an internal list of tests to execute. Once a list of test functions to be run has been determined, shunit2 will go to work.
|
||||
* Before any tests are executed, shUnit2 again looks for a function, this time one named `oneTimeSetUp()`. If it exists, it will be run. This function is normally used to setup the environment for all tests to be run. Things like creating directories for output or setting environment variables are good to place here. Just so you know, you can also declare a corresponding function named `oneTimeTearDown()` function that does the same thing, but once all the tests have been completed. It is good for removing temporary directories, etc.
|
||||
* shUnit2 is now ready to run tests. Before doing so though, it again looks for another function that might be declared, one named `setUp()`. If the function exists, it will be run before each test. It is good for resetting the environment so that each test starts with a clean slate. **At this stage, the first test is finally run.** The success of the test is recorded for a report that will be generated later. After the test is run, shUnit2 looks for a final function that might be declared, one named `tearDown()`. If it exists, it will be run after each test. It is a good place for cleaning up after each test, maybe doing things like removing files that were created, or removing directories. This set of steps, `setUp() > test() > tearDown()`, is repeated for all of the available tests.
|
||||
* Once all the work is done, shUnit2 will generate the nice report you saw above. A summary of all the successes and failures will be given so that you know how well your code is doing.
|
||||
|
||||
We should now try adding a test that fails. Change your unit test to look like this.
|
||||
|
||||
```sh
|
||||
#! /bin/sh
|
||||
# file: examples/party_test.sh
|
||||
|
||||
testEquality() {
|
||||
assertEquals 1 1
|
||||
}
|
||||
|
||||
testPartyLikeItIs1999() {
|
||||
year=`date '+%Y'`
|
||||
assertEquals "It's not 1999 :-(" '1999' "${year}"
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. ./shunit2
|
||||
```
|
||||
|
||||
So, what did you get? I guess it told you that this isn't 1999. Bummer, eh? Hopefully, you noticed a couple of things that were different about the second test. First, we added an optional message that the user will see if the assert fails. Second, we did comparisons of strings instead of integers as in the first test. It doesn't matter whether you are testing for equality of strings or integers. Both work equally well with shUnit2.
|
||||
|
||||
Hopefully, this is enough to get you started with unit testing. If you want a ton more examples, take a look at the tests provided with [log4sh][log4sh] or [shFlags][shflags]. Both provide excellent examples of more advanced usage. shUnit2 was after all written to meet the unit testing need that [log4sh][log4sh] had.
|
||||
|
||||
---
|
||||
|
||||
## <a name="function-reference"></a> Function Reference
|
||||
|
||||
### <a name="general-info"></a> General Info
|
||||
|
||||
Any string values passed should be properly quoted -- they must be surrounded by single-quote (`'`) or double-quote (`"`) characters -- so that the shell will properly parse them.
|
||||
|
||||
### <a name="asserts"></a> Asserts
|
||||
|
||||
`assertEquals [message] expected actual`
|
||||
|
||||
Asserts that _expected_ and _actual_ are equal to one another. The _expected_ and _actual_ values can be either strings or integer values as both will be treated as strings. The _message_ is optional, and must be quoted.
|
||||
|
||||
`assertNotEquals [message] unexpected actual`
|
||||
|
||||
Asserts that _unexpected_ and _actual_ are not equal to one another. The _unexpected_ and _actual_ values can be either strings or integer values as both will be treated as strings. The _message_ is optional, and must be quoted.
|
||||
|
||||
`assertSame [message] expected actual`
|
||||
|
||||
This function is functionally equivalent to `assertEquals`.
|
||||
|
||||
`assertNotSame [message] unexpected actual`
|
||||
|
||||
This function is functionally equivalent to `assertNotEquals`.
|
||||
|
||||
`assertNull [message] value`
|
||||
|
||||
Asserts that _value_ is _null_, or in shell terms, a zero-length string. The _value_ must be a string as an integer value does not translate into a zero-length string. The _message_ is optional, and must be quoted.
|
||||
|
||||
`assertNotNull [message] value`
|
||||
|
||||
Asserts that _value_ is _not null_, or in shell terms, a non-empty string. The _value_ may be a string or an integer as the latter will be parsed as a non-empty string value. The _message_ is optional, and must be quoted.
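As a minimal sketch (not taken from the shUnit2 sources), both null asserts could be exercised in a hypothetical test function like this:

```sh
testNullAsserts() {
  empty=''
  filled='value'
  assertNull 'expected an empty string' "${empty}"
  assertNotNull 'expected a non-empty string' "${filled}"
}
```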
|
||||
|
||||
`assertTrue [message] condition`
|
||||
|
||||
Asserts that a given shell test _condition_ is _true_. The condition can be as simple as a shell _true_ value (the value `0` -- equivalent to `${SHUNIT_TRUE}`), or a more sophisticated shell conditional expression. The _message_ is optional, and must be quoted.
|
||||
|
||||
A sophisticated shell conditional expression is equivalent to what the __if__ or __while__ shell built-ins would use (more specifically, what the __test__ command would use). Testing for example whether some value is greater than another value can be done this way.
|
||||
|
||||
`assertTrue "[ 34 -gt 23 ]"`
|
||||
|
||||
Testing for the ability to read a file can also be done. This particular test will fail.
|
||||
|
||||
`assertTrue 'test failed' "[ -r /some/non-existent/file ]"`
|
||||
|
||||
As the expressions are standard shell __test__ expressions, it is possible to string multiple expressions together with `-a` and `-o` in the standard fashion. This test will succeed as the entire expression evaluates to _true_.
|
||||
|
||||
`assertTrue 'test failed' '[ 1 -eq 1 -a 2 -eq 2 ]'`
|
||||
|
||||
_One word of warning: be very careful with your quoting as shell is not the most forgiving of bad quoting, and things will fail in strange ways._
|
||||
|
||||
`assertFalse [message] condition`
|
||||
|
||||
Asserts that a given shell test _condition_ is _false_. The condition can be as simple as a shell _false_ value (the value `1` -- equivalent to `${SHUNIT_FALSE}`), or a more sophisticated shell conditional expression. The _message_ is optional, and must be quoted.
|
||||
|
||||
_For examples of more sophisticated expressions, see `assertTrue`._
|
||||
|
||||
### <a name="failures"></a> Failures
|
||||
|
||||
Just to clarify, failures __do not__ test the various arguments against one another. Failures simply fail, optionally with a message, and that is all they do. If you need to test arguments against one another, use asserts.
|
||||
|
||||
If all failures do is fail, why might one use them? There are times when you may have some very complicated logic that you need to test, and the simple asserts provided are simply not adequate. You can do your own validation of the code, use an `assertTrue ${SHUNIT_TRUE}` if your own tests succeeded, and use a failure to record a failure.
|
||||
|
||||
`fail [message]`
|
||||
|
||||
Fails the test immediately. The _message_ is optional, and must be quoted.
|
||||
|
||||
`failNotEquals [message] unexpected actual`
|
||||
|
||||
Fails the test immediately, reporting that the _unexpected_ and _actual_ values are not equal to one another. The _message_ is optional, and must be quoted.
|
||||
|
||||
_Note: no actual comparison of unexpected and actual is done._
|
||||
|
||||
`failSame [message] expected actual`
|
||||
|
||||
Fails the test immediately, reporting that the _expected_ and _actual_ values are the same. The _message_ is optional, and must be quoted.
|
||||
|
||||
_Note: no actual comparison of expected and actual is done._
|
||||
|
||||
`failNotSame [message] expected actual`
|
||||
|
||||
Fails the test immediately, reporting that the _expected_ and _actual_ values are not the same. The _message_ is optional, and must be quoted.
|
||||
|
||||
_Note: no actual comparison of expected and actual is done._
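Putting this together, here is a small sketch (the `my_complicated_check` helper is hypothetical) that performs its own validation and records the outcome with `assertTrue` or `fail`:

```sh
testComplicatedLogic() {
  # my_complicated_check is a hypothetical helper doing validation the simple asserts cannot express.
  output=`my_complicated_check 2>&1`
  if [ $? -eq 0 ]; then
    assertTrue ${SHUNIT_TRUE}
  else
    fail "complicated check failed: ${output}"
  fi
}
```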
|
||||
|
||||
### <a name="setup-teardown"></a> Setup/Teardown
|
||||
|
||||
`oneTimeSetUp`
|
||||
|
||||
This function can be optionally overridden by the user in their test suite.
|
||||
|
||||
If this function exists, it will be called once before any tests are run. It is useful to prepare a common environment for all tests.
|
||||
|
||||
`oneTimeTearDown`
|
||||
|
||||
This function can be optionally overridden by the user in their test suite.
|
||||
|
||||
If this function exists, it will be called once after all tests are completed. It is useful to clean up the environment after all tests.
|
||||
|
||||
`setUp`
|
||||
|
||||
This function can be optionally overridden by the user in their test suite.
|
||||
|
||||
If this function exists, it will be called before each test is run. It is useful to reset the environment before each test.
|
||||
|
||||
`tearDown`
|
||||
|
||||
This function can be optionally overridden by the user in their test suite.
|
||||
|
||||
If this function exists, it will be called after each test completes. It is useful to clean up the environment after each test.
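For illustration, a sketch (not from the shUnit2 sources) of a suite that uses these hooks to manage a scratch file under `${SHUNIT_TMPDIR}`:

```sh
setUp() {
  # Runs before each test: start from an empty scratch file.
  scratchFile="${SHUNIT_TMPDIR}/scratch.txt"
  : > "${scratchFile}"
}

tearDown() {
  # Runs after each test: remove the scratch file again.
  rm -f "${scratchFile}"
}
```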
|
||||
|
||||
### <a name="skipping"></a> Skipping
|
||||
|
||||
`startSkipping`
|
||||
|
||||
This function forces the remaining _assert_ and _fail_ functions to be "skipped", i.e. they will have no effect. Each function skipped will be recorded so that the total of asserts and fails will not be altered.
|
||||
|
||||
`endSkipping`
|
||||
|
||||
This function returns calls to the _assert_ and _fail_ functions to their default behavior, i.e. they will be called.
|
||||
|
||||
`isSkipping`
|
||||
|
||||
This function returns the current state of skipping. It can be compared against `${SHUNIT_TRUE}` or `${SHUNIT_FALSE}` if desired.
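A brief sketch (hypothetical test, not from the shUnit2 sources) that skips its asserts when the suite is not running on Linux:

```sh
testLinuxOnlyBehaviour() {
  [ "`uname -s`" = 'Linux' ] || startSkipping
  assertTrue 'expected /proc to exist' "[ -d /proc ]"
}
```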
|
||||
|
||||
### <a name="suites"></a> Suites
|
||||
|
||||
The default behavior of shUnit2 is that all tests will be found dynamically. If you have a specific set of tests you want to run, or you don't want to use the standard naming scheme of prefixing your tests with `test`, these functions are for you. Most users will never use them though.
|
||||
|
||||
`suite`
|
||||
|
||||
This function can be optionally overridden by the user in their test suite.
|
||||
|
||||
If this function exists, it will be called when `shunit2` is sourced. If it does not exist, shUnit2 will search the parent script for all functions beginning with the word `test`, and they will be added dynamically to the test suite.
|
||||
|
||||
`suite_addTest name`
|
||||
|
||||
This function adds a function named _name_ to the list of tests scheduled for execution as part of this test suite. This function should only be called from within the `suite()` function.
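For example, an explicit suite could look like the following sketch (the function names are hypothetical):

```sh
suite() {
  suite_addTest testEquality
  suite_addTest checkPartyLikeItIs1999  # no 'test' prefix needed when added explicitly
}
```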
|
||||
|
||||
---
|
||||
|
||||
## <a name="advanced-usage"></a> Advanced Usage
|
||||
|
||||
### <a name="some-constants-you-can-use"></a> Some constants you can use
|
||||
|
||||
There are several constants provided by shUnit2 as variables that might be of use to you.
|
||||
|
||||
*Predefined*
|
||||
|
||||
| Constant | Value |
|
||||
| --------------- | ----- |
|
||||
| SHUNIT\_TRUE | Standard shell `true` value (the integer value 0). |
|
||||
| SHUNIT\_FALSE | Standard shell `false` value (the integer value 1). |
|
||||
| SHUNIT\_ERROR | The integer value 2. |
|
||||
| SHUNIT\_TMPDIR | Path to temporary directory that will be automatically cleaned up upon exit of shUnit2. |
|
||||
| SHUNIT\_VERSION | The version of shUnit2 you are running. |
|
||||
|
||||
*User defined*
|
||||
|
||||
| Constant | Value |
|
||||
| ----------------- | ----- |
|
||||
| SHUNIT\_CMD\_EXPR | Override which `expr` command is used. By default `expr` is used, except on BSD systems where `gexpr` is used. |
|
||||
| SHUNIT\_COLOR | Enable colorized output. Options are 'auto', 'always', or 'never', with 'auto' being the default. |
|
||||
| SHUNIT\_PARENT | The filename of the shell script containing the tests. This is needed specifically for Zsh support. |
|
||||
| SHUNIT\_TEST\_PREFIX | Define this variable to add a prefix in front of each test name that is output in the test report. |
|
||||
|
||||
### <a name="error-handling"></a> Error handling
|
||||
|
||||
The constant values `SHUNIT_TRUE`, `SHUNIT_FALSE`, and `SHUNIT_ERROR` are returned from nearly every function to indicate the success or failure of the function. Additionally the variable `flags_error` is filled with a detailed error message if any function returns with a `SHUNIT_ERROR` value.
|
||||
|
||||
### <a name="including-line-numbers-in-asserts-macros"></a> Including Line Numbers in Asserts (Macros)
|
||||
|
||||
If you include lots of assert statements in an individual test function, it can become difficult to determine exactly which assert was thrown unless your messages are unique. To help somewhat, line numbers can be included in the assert messages. To enable this, a special shell "macro" must be used rather than the standard assert calls. _Shell doesn't actually have macros; the name is used here as the operation is similar to a standard macro._
|
||||
|
||||
For example, to include line numbers for an `assertEquals()` function call, replace the `assertEquals()` with `${_ASSERT_EQUALS_}`.
|
||||
|
||||
_**Example** -- Asserts with and without line numbers_
|
||||
```sh
|
||||
#! /bin/sh
|
||||
# file: examples/lineno_test.sh
|
||||
|
||||
testLineNo() {
|
||||
# This assert will have line numbers included (e.g. "ASSERT:[123] ...").
|
||||
echo "ae: ${_ASSERT_EQUALS_}"
|
||||
${_ASSERT_EQUALS_} 'not equal' 1 2
|
||||
|
||||
# This assert will not have line numbers included (e.g. "ASSERT: ...").
|
||||
assertEquals 'not equal' 1 2
|
||||
}
|
||||
|
||||
# Load shUnit2.
|
||||
. ./shunit2
|
||||
```
|
||||
|
||||
Notes:
|
||||
|
||||
1. Due to how shell parses command-line arguments, all strings used with macros should be quoted twice. Namely, single-quotes must be converted to single-double-quotes, and vice-versa. If the string being passed is absolutely for sure not empty, the extra quoting is not necessary.<br/><br/>Normal `assertEquals` call.<br/>`assertEquals 'some message' 'x' ''`<br/><br/>Macro `_ASSERT_EQUALS_` call. Note the extra quoting around the _message_ and the _null_ value.<br/>`_ASSERT_EQUALS_ '"some message"' 'x' '""'`
|
||||
|
||||
1. Line numbers are not supported in all shells. If a shell does not support them, no errors will be thrown. Supported shells include: __bash__ (>=3.0), __ksh__, __pdksh__, and __zsh__.
|
||||
|
||||
### <a name="test-skipping"></a> Test Skipping
|
||||
|
||||
There are times where the test code you have written is just not applicable to the system you are running on. This section describes how to skip these tests but maintain the total test count.
|
||||
|
||||
Probably the easiest example would be shell code that is meant to run under the __bash__ shell, but the unit test is running under the Bourne shell. There are things that just won't work. The following test code demonstrates two sample functions, one that will run under any shell, and another that will run only under the __bash__ shell.
|
||||
|
||||
_**Example** -- math include_
|
||||
```sh
|
||||
# file: examples/math.inc.
|
||||
|
||||
add_generic() {
|
||||
num_a=$1
|
||||
num_b=$2
|
||||
|
||||
expr $1 + $2
|
||||
}
|
||||
|
||||
add_bash() {
|
||||
num_a=$1
|
||||
num_b=$2
|
||||
|
||||
echo $(($1 + $2))
|
||||
}
|
||||
```
|
||||
|
||||
And here is a corresponding unit test that correctly skips the `add_bash()` function when the unit test is not running under the __bash__ shell.
|
||||
|
||||
_**Example** -- math unit test_
|
||||
```sh
|
||||
#! /bin/sh
|
||||
# file: examples/math_test.sh
|
||||
|
||||
testAdding() {
|
||||
result=`add_generic 1 2`
|
||||
assertEquals \
|
||||
"the result of '${result}' was wrong" \
|
||||
3 "${result}"
|
||||
|
||||
# Disable non-generic tests.
|
||||
[ -z "${BASH_VERSION:-}" ] && startSkipping
|
||||
|
||||
result=`add_bash 1 2`
|
||||
assertEquals \
|
||||
"the result of '${result}' was wrong" \
|
||||
3 "${result}"
|
||||
}
|
||||
|
||||
oneTimeSetUp() {
|
||||
# Load include to test.
|
||||
. ./math.inc
|
||||
}
|
||||
|
||||
# Load and run shUnit2.
|
||||
. ./shunit2
|
||||
```
|
||||
|
||||
Running the above test under the __bash__ shell will result in the following output.
|
||||
|
||||
```console
|
||||
$ /bin/bash math_test.sh
|
||||
testAdding
|
||||
|
||||
Ran 1 test.
|
||||
|
||||
OK
|
||||
```
|
||||
|
||||
But, running the test under any other Unix shell will result in the following output.
|
||||
|
||||
```console
|
||||
$ /bin/ksh math_test.sh
|
||||
testAdding
|
||||
|
||||
Ran 1 test.
|
||||
|
||||
OK (skipped=1)
|
||||
```
|
||||
|
||||
As you can see, the total number of tests has not changed, but the report indicates that some tests were skipped.
|
||||
|
||||
Skipping can be controlled with the following functions: `startSkipping()`, `endSkipping()`, and `isSkipping()`. Once skipping is enabled, it will remain enabled until the end of the current test function call, after which skipping is disabled.
|
||||
|
||||
---
|
||||
|
||||
## <a name="appendix"></a> Appendix
|
||||
|
||||
### <a name="getting-help"></a> Getting Help
|
||||
|
||||
For help, please send requests to either the shunit2-users@googlegroups.com mailing list (archives available on the web at http://groups.google.com/group/shunit2-users) or directly to Kate Ward <kate dot ward at forestent dot com>.
|
||||
|
||||
### <a name="zsh"></a> Zsh
|
||||
|
||||
For compatibility with Zsh, there is one requirement that must be met -- the `shwordsplit` option must be set. There are three ways to accomplish this.
|
||||
|
||||
1. In the unit-test script, add the following shell code snippet before sourcing the `shunit2` library.
|
||||
|
||||
```sh
|
||||
setopt shwordsplit
|
||||
```
|
||||
|
||||
1. When invoking __zsh__ from either the command-line or as a script with `#!`, add the `-y` parameter.
|
||||
|
||||
```sh
|
||||
#! /bin/zsh -y
|
||||
```
|
||||
|
||||
1. When invoking __zsh__ from the command-line, add `-o shwordsplit --` as parameters before the script name.
|
||||
|
||||
```console
|
||||
$ zsh -o shwordsplit -- some_script
|
||||
```
|
||||
|
||||
[log4sh]: https://github.com/kward/log4sh
|
||||
[shflags]: https://github.com/kward/shflags
|
88 dev/tests/shunit2/gen_test_report.sh (Executable file)
@@ -0,0 +1,88 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# This script runs the provided unit tests and sends the output to the
|
||||
# appropriate file.
|
||||
#
|
||||
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
# Source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
# FLAGS variables are dynamically created.
|
||||
# shellcheck disable=SC2154
|
||||
# Disagree with [ p ] && [ q ] vs [ p -a -q ] recommendation.
|
||||
# shellcheck disable=SC2166
|
||||
|
||||
# Treat unset variables as an error.
|
||||
set -u
|
||||
|
||||
die() {
|
||||
[ $# -gt 0 ] && echo "error: $*" >&2
|
||||
exit 1
|
||||
}
|
||||
|
||||
BASE_DIR=$(dirname "$0")
|
||||
LIB_DIR="${BASE_DIR}/lib"
|
||||
|
||||
### Load libraries.
|
||||
. "${LIB_DIR}/shflags" || die 'unable to load shflags library'
|
||||
. "${LIB_DIR}/shlib" || die 'unable to load shlib library'
|
||||
. "${LIB_DIR}/versions" || die 'unable to load versions library'
|
||||
|
||||
# Redefining BASE_DIR now that we have the shlib functions. We need BASE_DIR so
|
||||
# that we can properly load things, even in the event that this script is called
|
||||
# from a different directory.
|
||||
BASE_DIR=$(shlib_relToAbsPath "${BASE_DIR}")
|
||||
|
||||
# Define flags.
|
||||
os_name=$(versions_osName |sed 's/ /_/g')
|
||||
os_version=$(versions_osVersion)
|
||||
|
||||
DEFINE_boolean force false 'force overwrite' f
|
||||
DEFINE_string output_dir "${TMPDIR}" 'output dir' d
|
||||
DEFINE_string output_file "${os_name}-${os_version}.txt" 'output file' o
|
||||
DEFINE_string runner 'test_runner' 'unit test runner' r
|
||||
DEFINE_boolean dry_run false "suppress logging to a file" n
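# Example invocation (an illustrative assumption, not part of the upstream script; the -d and -o
# short flags come from the shflags definitions above):
#   ./gen_test_report.sh -d /tmp -o my_host.txt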
|
||||
|
||||
main() {
|
||||
# Determine output filename.
|
||||
# shellcheck disable=SC2154
|
||||
output="${FLAGS_output_dir:+${FLAGS_output_dir}/}${FLAGS_output_file}"
|
||||
output=$(shlib_relToAbsPath "${output}")
|
||||
|
||||
# Checks.
|
||||
if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" -a -f "${output}" ]; then
|
||||
if [ "${FLAGS_force}" -eq "${FLAGS_TRUE}" ]; then
|
||||
rm -f "${output}"
|
||||
else
|
||||
echo "not overwriting '${output}'" >&2
|
||||
exit "${FLAGS_ERROR}"
|
||||
fi
|
||||
fi
|
||||
if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
|
||||
touch "${output}" 2>/dev/null || die "unable to write to '${output}'"
|
||||
fi
|
||||
|
||||
# Run tests.
|
||||
(
|
||||
if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
|
||||
"./${FLAGS_runner}" |tee "${output}"
|
||||
else
|
||||
"./${FLAGS_runner}"
|
||||
fi
|
||||
)
|
||||
|
||||
if [ "${FLAGS_dry_run}" -eq "${FLAGS_FALSE}" ]; then
|
||||
echo >&2
|
||||
echo "Output written to '${output}'." >&2
|
||||
fi
|
||||
}
|
||||
|
||||
FLAGS "$@" || exit $?
|
||||
[ "${FLAGS_help}" -eq "${FLAGS_FALSE}" ] || exit
|
||||
eval set -- "${FLAGS_ARGV}"
|
||||
main "${@:-}"
|
1222 dev/tests/shunit2/lib/shflags (Normal file)
File diff suppressed because it is too large

39 dev/tests/shunit2/lib/shlib (Normal file)
@@ -0,0 +1,39 @@
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# Copyright 2008 Kate Ward. All Rights Reserved.
|
||||
# Released under the LGPL (GNU Lesser General Public License).
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
#
|
||||
# Library of shell functions.
|
||||
|
||||
# Convert a relative path into its absolute equivalent.
|
||||
#
|
||||
# This function will automatically prepend the current working directory if the
|
||||
# path is not already absolute. It then removes all parent references (../) to
|
||||
# reconstruct the proper absolute path.
|
||||
#
|
||||
# Args:
|
||||
# shlib_path_: string: relative path
|
||||
# Outputs:
|
||||
# string: absolute path
|
||||
shlib_relToAbsPath()
|
||||
{
|
||||
shlib_path_=$1
|
||||
|
||||
# prepend current directory to relative paths
|
||||
echo "${shlib_path_}" |grep '^/' >/dev/null 2>&1 \
|
||||
|| shlib_path_="${PWD}/${shlib_path_}"
|
||||
|
||||
# clean up the path. if all seds supported true regular expressions, then
|
||||
# this is what it would be:
|
||||
shlib_old_=${shlib_path_}
|
||||
while true; do
|
||||
shlib_new_=`echo "${shlib_old_}" |sed 's/[^/]*\/\.\.\/*//;s/\/\.\//\//'`
|
||||
[ "${shlib_old_}" = "${shlib_new_}" ] && break
|
||||
shlib_old_=${shlib_new_}
|
||||
done
|
||||
echo "${shlib_new_}"
|
||||
|
||||
unset shlib_path_ shlib_old_ shlib_new_
|
||||
}
|
272 dev/tests/shunit2/lib/versions (Executable file)
@@ -0,0 +1,272 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# Versions determines the versions of all installed shells.
|
||||
#
|
||||
# Copyright 2008-2018 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 License.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shlib
|
||||
#
|
||||
# This library provides reusable functions that determine actual names and
|
||||
# versions of installed shells and the OS. The library can also be run as a
|
||||
# script if set executable.
|
||||
#
|
||||
# Disable checks that aren't fully portable (POSIX != portable).
|
||||
# shellcheck disable=SC2006
|
||||
|
||||
ARGV0=`basename "$0"`
|
||||
LSB_RELEASE='/etc/lsb-release'
|
||||
VERSIONS_SHELLS='ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh /bin/sh /usr/xpg4/bin/sh /sbin/sh'
|
||||
|
||||
true; TRUE=$?
|
||||
false; FALSE=$?
|
||||
ERROR=2
|
||||
|
||||
UNAME_R=`uname -r`
|
||||
UNAME_S=`uname -s`
|
||||
|
||||
__versions_haveStrings=${ERROR}
|
||||
|
||||
versions_osName() {
|
||||
os_name_='unrecognized'
|
||||
os_system_=${UNAME_S}
|
||||
os_release_=${UNAME_R}
|
||||
case ${os_system_} in
|
||||
CYGWIN_NT-*) os_name_='Cygwin' ;;
|
||||
Darwin)
|
||||
os_name_=`/usr/bin/sw_vers -productName`
|
||||
os_version_=`versions_osVersion`
|
||||
case ${os_version_} in
|
||||
10.4|10.4.[0-9]*) os_name_='Mac OS X Tiger' ;;
|
||||
10.5|10.5.[0-9]*) os_name_='Mac OS X Leopard' ;;
|
||||
10.6|10.6.[0-9]*) os_name_='Mac OS X Snow Leopard' ;;
|
||||
10.7|10.7.[0-9]*) os_name_='Mac OS X Lion' ;;
|
||||
10.8|10.8.[0-9]*) os_name_='Mac OS X Mountain Lion' ;;
|
||||
10.9|10.9.[0-9]*) os_name_='Mac OS X Mavericks' ;;
|
||||
10.10|10.10.[0-9]*) os_name_='Mac OS X Yosemite' ;;
|
||||
10.11|10.11.[0-9]*) os_name_='Mac OS X El Capitan' ;;
|
||||
10.12|10.12.[0-9]*) os_name_='macOS Sierra' ;;
|
||||
10.13|10.13.[0-9]*) os_name_='macOS High Sierra' ;;
|
||||
*) os_name_='macOS' ;;
|
||||
esac
|
||||
;;
|
||||
FreeBSD) os_name_='FreeBSD' ;;
|
||||
Linux) os_name_='Linux' ;;
|
||||
SunOS)
|
||||
os_name_='SunOS'
|
||||
if [ -r '/etc/release' ]; then
|
||||
if grep 'OpenSolaris' /etc/release >/dev/null; then
|
||||
os_name_='OpenSolaris'
|
||||
else
|
||||
os_name_='Solaris'
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
echo ${os_name_}
|
||||
unset os_name_ os_system_ os_release_ os_version_
|
||||
}
|
||||
|
||||
versions_osVersion() {
|
||||
os_version_='unrecognized'
|
||||
os_system_=${UNAME_S}
|
||||
os_release_=${UNAME_R}
|
||||
case ${os_system_} in
|
||||
CYGWIN_NT-*)
|
||||
os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]\.[0-9]*\).*'`
|
||||
;;
|
||||
Darwin)
|
||||
os_version_=`/usr/bin/sw_vers -productVersion`
|
||||
;;
|
||||
FreeBSD)
|
||||
os_version_=`expr "${os_release_}" : '\([0-9]*\.[0-9]*\)-.*'`
|
||||
;;
|
||||
Linux)
|
||||
if [ -r '/etc/os-release' ]; then
|
||||
os_version_=`awk -F= '$1~/PRETTY_NAME/{print $2}' /etc/os-release \
|
||||
|sed 's/"//g'`
|
||||
elif [ -r '/etc/redhat-release' ]; then
|
||||
os_version_=`cat /etc/redhat-release`
|
||||
elif [ -r '/etc/SuSE-release' ]; then
|
||||
os_version_=`head -n 1 /etc/SuSE-release`
|
||||
elif [ -r "${LSB_RELEASE}" ]; then
|
||||
if grep -q 'DISTRIB_ID=Ubuntu' "${LSB_RELEASE}"; then
|
||||
# shellcheck disable=SC2002
|
||||
os_version_=`cat "${LSB_RELEASE}" \
|
||||
|awk -F= '$1~/DISTRIB_DESCRIPTION/{print $2}' \
|
||||
|sed 's/"//g;s/ /-/g'`
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
SunOS)
|
||||
if [ -r '/etc/release' ]; then
|
||||
if grep 'OpenSolaris' /etc/release >/dev/null; then # OpenSolaris
|
||||
os_version_=`grep 'OpenSolaris' /etc/release |awk '{print $2"("$3")"}'`
|
||||
else # Solaris
|
||||
major_=`echo "${os_release_}" |sed 's/[0-9]*\.\([0-9]*\)/\1/'`
|
||||
minor_=`grep Solaris /etc/release |sed 's/[^u]*\(u[0-9]*\).*/\1/'`
|
||||
os_version_="${major_}${minor_}"
|
||||
fi
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "${os_version_}"
|
||||
unset os_release_ os_system_ os_version_ major_ minor_
|
||||
}
|
||||
|
||||
versions_shellVersion() {
|
||||
shell_=$1
|
||||
|
||||
shell_present_=${FALSE}
|
||||
case "${shell_}" in
|
||||
ash) [ -x '/bin/busybox' ] && shell_present_=${TRUE} ;;
|
||||
*) [ -x "${shell_}" ] && shell_present_=${TRUE} ;;
|
||||
esac
|
||||
if [ ${shell_present_} -eq ${FALSE} ]; then
|
||||
echo 'not installed'
|
||||
return ${FALSE}
|
||||
fi
|
||||
|
||||
version_=''
|
||||
case ${shell_} in
|
||||
/sbin/sh) ;; # SunOS
|
||||
/usr/xpg4/bin/sh)
|
||||
version_=`versions_shell_xpg4 "${shell_}"`
|
||||
;; # SunOS
|
||||
*/sh)
|
||||
# This could be one of any number of shells. Try until one fits.
|
||||
version_=''
|
||||
[ -z "${version_}" ] && version_=`versions_shell_bash "${shell_}"`
|
||||
# dash cannot be self determined yet
|
||||
[ -z "${version_}" ] && version_=`versions_shell_ksh "${shell_}"`
|
||||
# pdksh is covered in versions_shell_ksh()
|
||||
[ -z "${version_}" ] && version_=`versions_shell_xpg4 "${shell_}"`
|
||||
[ -z "${version_}" ] && version_=`versions_shell_zsh "${shell_}"`
|
||||
;;
|
||||
ash) version_=`versions_shell_ash "${shell_}"` ;;
|
||||
*/bash) version_=`versions_shell_bash "${shell_}"` ;;
|
||||
*/dash)
|
||||
# Assuming Ubuntu Linux until somebody comes up with a better test. The
|
||||
# following test will return an empty string if dash is not installed.
|
||||
version_=`versions_shell_dash`
|
||||
;;
|
||||
*/ksh) version_=`versions_shell_ksh "${shell_}"` ;;
|
||||
*/pdksh) version_=`versions_shell_pdksh "${shell_}"` ;;
|
||||
*/zsh) version_=`versions_shell_zsh "${shell_}"` ;;
|
||||
*) version_='invalid'
|
||||
esac
|
||||
|
||||
echo "${version_:-unknown}"
|
||||
unset shell_ version_
|
||||
}
|
||||
|
||||
# The ash shell is included in BusyBox.
|
||||
versions_shell_ash() {
|
||||
busybox --help |head -1 |sed 's/BusyBox v\([0-9.]*\) .*/\1/'
|
||||
}
|
||||
|
||||
versions_shell_bash() {
|
||||
$1 --version : 2>&1 |grep 'GNU bash' |sed 's/.*version \([^ ]*\).*/\1/'
|
||||
}
|
||||
|
||||
versions_shell_dash() {
|
||||
eval dpkg >/dev/null 2>&1
|
||||
[ $? -eq 127 ] && return # Return if dpkg not found.
|
||||
|
||||
dpkg -l |grep ' dash ' |awk '{print $3}'
|
||||
}
|
||||
|
||||
versions_shell_ksh() {
|
||||
versions_shell_=$1
|
||||
versions_version_=''
|
||||
|
||||
# Try a few different ways to figure out the version.
|
||||
versions_version_=`${versions_shell_} --version : 2>&1`
|
||||
# shellcheck disable=SC2181
|
||||
if [ $? -eq 0 ]; then
|
||||
versions_version_=`echo "${versions_version_}" \
|
||||
|sed 's/.*\([0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]\).*/\1/'`
|
||||
else
|
||||
versions_version_=''
|
||||
fi
|
||||
if [ -z "${versions_version_}" ]; then
|
||||
_versions_have_strings
|
||||
versions_version_=`strings "${versions_shell_}" 2>&1 \
|
||||
|grep Version \
|
||||
|sed 's/^.*Version \(.*\)$/\1/;s/ s+ \$$//;s/ /-/g'`
|
||||
fi
|
||||
if [ -z "${versions_version_}" ]; then
|
||||
versions_version_=`versions_shell_pdksh "${versions_shell_}"`
|
||||
fi
|
||||
|
||||
echo "${versions_version_}"
|
||||
unset versions_shell_ versions_version_
|
||||
}
|
||||
|
||||
versions_shell_pdksh() {
|
||||
_versions_have_strings
|
||||
strings "$1" 2>&1 \
|
||||
|grep 'PD KSH' \
|
||||
|sed -e 's/.*PD KSH \(.*\)/\1/;s/ /-/g'
|
||||
}
|
||||
|
||||
versions_shell_xpg4() {
|
||||
_versions_have_strings
|
||||
strings "$1" 2>&1 \
|
||||
|grep 'Version' \
|
||||
|sed -e 's/^@(#)Version //'
|
||||
}
|
||||
|
||||
versions_shell_zsh() {
|
||||
versions_shell_=$1
|
||||
|
||||
# Try a few different ways to figure out the version.
|
||||
# shellcheck disable=SC2016
|
||||
versions_version_=`echo 'echo ${ZSH_VERSION}' |${versions_shell_}`
|
||||
if [ -z "${versions_version_}" ]; then
|
||||
versions_version_=`${versions_shell_} --version : 2>&1`
|
||||
# shellcheck disable=SC2181
|
||||
if [ $? -eq 0 ]; then
|
||||
versions_version_=`echo "${versions_version_}" |awk '{print $2}'`
|
||||
else
|
||||
versions_version_=''
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "${versions_version_}"
|
||||
unset versions_shell_ versions_version_
|
||||
}
|
||||
|
||||
# Determine if the 'strings' binary is installed.
|
||||
_versions_have_strings() {
|
||||
[ ${__versions_haveStrings} -ne ${ERROR} ] && return
|
||||
if eval strings /dev/null >/dev/null 2>&1; then
|
||||
__versions_haveStrings=${TRUE}
|
||||
return
|
||||
fi
|
||||
|
||||
echo 'WARN: strings not installed. try installing binutils?' >&2
|
||||
__versions_haveStrings=${FALSE}
|
||||
}
|
||||
|
||||
versions_main() {
|
||||
# Treat unset variables as an error.
|
||||
set -u
|
||||
|
||||
os_name=`versions_osName`
|
||||
os_version=`versions_osVersion`
|
||||
echo "os: ${os_name} version: ${os_version}"
|
||||
|
||||
for shell in ${VERSIONS_SHELLS}; do
|
||||
shell_version=`versions_shellVersion "${shell}"`
|
||||
echo "shell: ${shell} version: ${shell_version}"
|
||||
done
|
||||
}
|
||||
|
||||
if [ "${ARGV0}" = 'versions' ]; then
|
||||
versions_main "$@"
|
||||
fi
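For orientation, a hedged example of the report this versions library prints when run standalone; the version strings below are purely illustrative, not captured output:

#   os: Linux version: Ubuntu 16.04.5 LTS
#   shell: /bin/bash version: 4.3.48(1)-release
#   shell: /bin/zsh version: not installed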
|
1169
dev/tests/shunit2/shunit2
Executable file
File diff suppressed because it is too large
197
dev/tests/shunit2/shunit2_asserts_test.sh
Executable file
@@ -0,0 +1,197 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shunit2 unit test for assert functions.
|
||||
#
|
||||
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
|
||||
# These variables will be overridden by the test helpers.
|
||||
stdoutF="${TMPDIR:-/tmp}/STDOUT"
|
||||
stderrF="${TMPDIR:-/tmp}/STDERR"
|
||||
|
||||
# Load test helpers.
|
||||
. ./shunit2_test_helpers
|
||||
|
||||
commonEqualsSame() {
|
||||
fn=$1
|
||||
|
||||
( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'equal' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'equal; with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} 'abc def' 'abc def' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'equal with spaces' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'not equal' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'null values' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
commonNotEqualsSame() {
|
||||
fn=$1
|
||||
|
||||
( ${fn} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} "${MSG}" 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not same, with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} '' '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} arg1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( ${fn} arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testAssertEquals() {
|
||||
commonEqualsSame 'assertEquals'
|
||||
}
|
||||
|
||||
testAssertNotEquals() {
|
||||
commonNotEqualsSame 'assertNotEquals'
|
||||
}
|
||||
|
||||
testAssertSame() {
|
||||
commonEqualsSame 'assertSame'
|
||||
}
|
||||
|
||||
testAssertNotSame() {
|
||||
commonNotEqualsSame 'assertNotSame'
|
||||
}
|
||||
|
||||
testAssertNull() {
|
||||
( assertNull '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'null' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNull "${MSG}" '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'null, with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNull 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'not null' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNull >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testAssertNotNull()
|
||||
{
|
||||
( assertNotNull 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNotNull "${MSG}" 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null, with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNotNull 'x"b' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null, with double-quote' $? \
|
||||
"${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNotNull "x'b" >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null, with single-quote' $? \
|
||||
"${stdoutF}" "${stderrF}"
|
||||
|
||||
# shellcheck disable=SC2016
|
||||
( assertNotNull 'x$b' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null, with dollar' $? \
|
||||
"${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNotNull 'x`b' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'not null, with backtick' $? \
|
||||
"${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertNotNull '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
# There is no test for too few arguments as $1 might actually be null.
|
||||
|
||||
( assertNotNull arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testAssertTrue() {
|
||||
( assertTrue 0 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'true' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue "${MSG}" 0 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'true, with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'true condition' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue 1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'false' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'false condition' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'null' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertTrue arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testAssertFalse() {
|
||||
( assertFalse 1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'false' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse "${MSG}" 1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'false, with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse '[ 0 -eq 1 ]' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertTrueWithNoOutput 'false condition' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse 0 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'true' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse '[ 0 -eq 0 ]' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'true condition' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( assertFalse arg1 arg2 arg3 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
oneTimeSetUp() {
|
||||
th_oneTimeSetUp
|
||||
|
||||
MSG='This is a test message'
|
||||
}
|
||||
|
||||
# Load and run shunit2.
|
||||
# shellcheck disable=SC2034
|
||||
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
|
||||
. "${TH_SHUNIT}"
|
82
dev/tests/shunit2/shunit2_failures_test.sh
Executable file
@@ -0,0 +1,82 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shUnit2 unit test for failure functions
|
||||
#
|
||||
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the LGPL (GNU Lesser General Public License)
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
|
||||
# These variables will be overridden by the test helpers.
|
||||
stdoutF="${TMPDIR:-/tmp}/STDOUT"
|
||||
stderrF="${TMPDIR:-/tmp}/STDERR"
|
||||
|
||||
# Load test helpers.
|
||||
. ./shunit2_test_helpers
|
||||
|
||||
testFail() {
|
||||
( fail >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'fail' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( fail "${MSG}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'fail with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( fail arg1 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testFailNotEquals() {
|
||||
( failNotEquals 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failNotEquals "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failNotEquals 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failNotEquals '' '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failNotEquals >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failNotEquals arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
testFailSame() {
|
||||
( failSame 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failSame "${MSG}" 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'same with msg' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failSame 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'not same' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failSame '' '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithOutput 'null values' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failSame >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too few arguments' $? "${stdoutF}" "${stderrF}"
|
||||
|
||||
( failSame arg1 arg2 arg3 arg4 >"${stdoutF}" 2>"${stderrF}" )
|
||||
th_assertFalseWithError 'too many arguments' $? "${stdoutF}" "${stderrF}"
|
||||
}
|
||||
|
||||
oneTimeSetUp() {
|
||||
th_oneTimeSetUp
|
||||
|
||||
MSG='This is a test message'
|
||||
}
|
||||
|
||||
# Load and run shUnit2.
|
||||
# shellcheck disable=SC2034
|
||||
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
|
||||
. "${TH_SHUNIT}"
|
241
dev/tests/shunit2/shunit2_macros_test.sh
Executable file
@@ -0,0 +1,241 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shunit2 unit test for macros.
|
||||
#
|
||||
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
### ShellCheck http://www.shellcheck.net/
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
# Presence of LINENO variable is checked.
|
||||
# shellcheck disable=SC2039
|
||||
|
||||
# These variables will be overridden by the test helpers.
|
||||
stdoutF="${TMPDIR:-/tmp}/STDOUT"
|
||||
stderrF="${TMPDIR:-/tmp}/STDERR"
|
||||
|
||||
# Load test helpers.
|
||||
. ./shunit2_test_helpers
|
||||
|
||||
testAssertEquals() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_EQUALS_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_EQUALS_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testAssertNotEquals() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_NOT_EQUALS_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_EQUALS_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_NOT_EQUALS_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_EQUALS_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testSame() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_SAME_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_SAME_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testNotSame() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_NOT_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_SAME_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_NOT_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_SAME_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testNull() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_NULL_} 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NULL_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_NULL_} '"some msg"' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NULL_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testNotNull()
|
||||
{
|
||||
# start skipping if LINENO not available
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_NOT_NULL_} '' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_NULL_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_NOT_NULL_} '"some msg"' '""' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_NOT_NULL_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stdoutF}" "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testAssertTrue() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_TRUE_} "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_TRUE_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_TRUE_} '"some msg"' "${SHUNIT_FALSE}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_TRUE_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testAssertFalse() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_ASSERT_FALSE_} "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_FALSE_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_ASSERT_FALSE_} '"some msg"' "${SHUNIT_TRUE}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_ASSERT_FALSE_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testFail() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_FAIL_} >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_FAIL_} '"some msg"' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testFailNotEquals()
|
||||
{
|
||||
# start skipping if LINENO not available
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_FAIL_NOT_EQUALS_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_NOT_EQUALS_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_FAIL_NOT_EQUALS_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_NOT_EQUALS_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testFailSame() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_FAIL_SAME_} 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_SAME_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_FAIL_SAME_} '"some msg"' 'x' 'x' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_SAME_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
testFailNotSame() {
|
||||
# Start skipping if LINENO not available.
|
||||
[ -z "${LINENO:-}" ] && startSkipping
|
||||
|
||||
( ${_FAIL_NOT_SAME_} 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_NOT_SAME_ failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
|
||||
( ${_FAIL_NOT_SAME_} '"some msg"' 'x' 'y' >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^ASSERT:\[[0-9]*\] *' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertTrue '_FAIL_NOT_SAME_ w/ msg failure' ${rtrn}
|
||||
[ "${rtrn}" -ne "${SHUNIT_TRUE}" ] && cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
oneTimeSetUp() {
|
||||
th_oneTimeSetUp
|
||||
}
|
||||
|
||||
# Disable output coloring as it breaks the tests.
|
||||
SHUNIT_COLOR='none'; export SHUNIT_COLOR
|
||||
|
||||
# Load and run shUnit2.
|
||||
# shellcheck disable=SC2034
|
||||
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT="$0"
|
||||
. "${TH_SHUNIT}"
|
262
dev/tests/shunit2/shunit2_misc_test.sh
Executable file
@@ -0,0 +1,262 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shUnit2 unit tests of miscellaneous things
|
||||
#
|
||||
# Copyright 2008-2018 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
### ShellCheck http://www.shellcheck.net/
|
||||
# $() are not fully portable (POSIX != portable).
|
||||
# shellcheck disable=SC2006
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
# Not wanting to escape single quotes.
|
||||
# shellcheck disable=SC1003
|
||||
|
||||
# These variables will be overridden by the test helpers.
|
||||
stdoutF="${TMPDIR:-/tmp}/STDOUT"
|
||||
stderrF="${TMPDIR:-/tmp}/STDERR"
|
||||
|
||||
# Load test helpers.
|
||||
. ./shunit2_test_helpers
|
||||
|
||||
# Note: the test script is prefixed with '#' chars so that shUnit2 does not
|
||||
# incorrectly interpret the embedded functions as real functions.
|
||||
testUnboundVariable() {
|
||||
unittestF="${SHUNIT_TMPDIR}/unittest"
|
||||
sed 's/^#//' >"${unittestF}" <<EOF
|
||||
## Treat unset variables as an error when performing parameter expansion.
|
||||
#set -u
|
||||
#
|
||||
#boom() { x=\$1; } # This function goes boom if no parameters are passed!
|
||||
#test_boom() {
|
||||
# assertEquals 1 1
|
||||
# boom # No parameter given
|
||||
# assertEquals 0 \$?
|
||||
#}
|
||||
#SHUNIT_COLOR='none'
|
||||
#. ${TH_SHUNIT}
|
||||
EOF
|
||||
( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
assertFalse 'expected a non-zero exit value' $?
|
||||
grep '^ASSERT:Unknown failure' "${stdoutF}" >/dev/null
|
||||
assertTrue 'assert message was not generated' $?
|
||||
grep '^Ran [0-9]* test' "${stdoutF}" >/dev/null
|
||||
assertTrue 'test count message was not generated' $?
|
||||
grep '^FAILED' "${stdoutF}" >/dev/null
|
||||
assertTrue 'failure message was not generated' $?
|
||||
}
|
||||
|
||||
# assertEquals repeats message argument.
|
||||
# https://github.com/kward/shunit2/issues/7
|
||||
testIssue7() {
|
||||
# Disable coloring so 'ASSERT:' lines can be matched correctly.
|
||||
_shunit_configureColor 'none'
|
||||
|
||||
( assertEquals 'Some message.' 1 2 >"${stdoutF}" 2>"${stderrF}" )
|
||||
diff "${stdoutF}" - >/dev/null <<EOF
|
||||
ASSERT:Some message. expected:<1> but was:<2>
|
||||
EOF
|
||||
rtrn=$?
|
||||
assertEquals "${SHUNIT_TRUE}" "${rtrn}"
|
||||
[ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stderrF}" >&2
|
||||
}
|
||||
|
||||
# Support prefixes on test output.
|
||||
# https://github.com/kward/shunit2/issues/29
|
||||
testIssue29() {
|
||||
unittestF="${SHUNIT_TMPDIR}/unittest"
|
||||
sed 's/^#//' >"${unittestF}" <<EOF
|
||||
## Support test prefixes.
|
||||
#test_assert() { assertTrue ${SHUNIT_TRUE}; }
|
||||
#SHUNIT_COLOR='none'
|
||||
#SHUNIT_TEST_PREFIX='--- '
|
||||
#. ${TH_SHUNIT}
|
||||
EOF
|
||||
( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^--- test_assert' "${stdoutF}" >/dev/null
|
||||
rtrn=$?
|
||||
assertEquals "${SHUNIT_TRUE}" "${rtrn}"
|
||||
[ "${rtrn}" -eq "${SHUNIT_TRUE}" ] || cat "${stdoutF}" >&2
|
||||
}
|
||||
|
||||
# shUnit2 should not exit with 0 when it has syntax errors.
|
||||
# https://github.com/kward/shunit2/issues/69
|
||||
testIssue69() {
|
||||
unittestF="${SHUNIT_TMPDIR}/unittest"
|
||||
|
||||
for t in Equals NotEquals Null NotNull Same NotSame True False; do
|
||||
assert="assert${t}"
|
||||
sed 's/^#//' >"${unittestF}" <<EOF
|
||||
## Asserts with invalid argument counts should be counted as failures.
|
||||
#test_assert() { ${assert}; }
|
||||
#SHUNIT_COLOR='none'
|
||||
#. ${TH_SHUNIT}
|
||||
EOF
|
||||
( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^FAILED' "${stdoutF}" >/dev/null
|
||||
assertTrue "failure message for ${assert} was not generated" $?
|
||||
done
|
||||
}
|
||||
|
||||
# Ensure that test fails if setup/teardown functions fail.
|
||||
testIssue77() {
|
||||
unittestF="${SHUNIT_TMPDIR}/unittest"
|
||||
for func in oneTimeSetUp setUp tearDown oneTimeTearDown; do
|
||||
sed 's/^#//' >"${unittestF}" <<EOF
|
||||
## Environment failure should end test.
|
||||
#${func}() { return ${SHUNIT_FALSE}; }
|
||||
#test_true() { assertTrue ${SHUNIT_TRUE}; }
|
||||
#SHUNIT_COLOR='none'
|
||||
#. ${TH_SHUNIT}
|
||||
EOF
|
||||
( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^FAILED' "${stdoutF}" >/dev/null
|
||||
assertTrue "failure of ${func}() did not end test" $?
|
||||
done
|
||||
}
|
||||
|
||||
# Ensure a test failure is recorded for code containing syntax errors.
|
||||
# https://github.com/kward/shunit2/issues/84
|
||||
testIssue84() {
|
||||
unittestF="${SHUNIT_TMPDIR}/unittest"
|
||||
sed 's/^#//' >"${unittestF}" <<\EOF
|
||||
## Function with syntax error.
|
||||
#syntax_error() { ${!#3442} -334 a$@2[1]; }
|
||||
#test_syntax_error() {
|
||||
# syntax_error
|
||||
# assertTrue ${SHUNIT_TRUE}
|
||||
#}
|
||||
#SHUNIT_COLOR='none'
|
||||
#SHUNIT_TEST_PREFIX='--- '
|
||||
#. ${TH_SHUNIT}
|
||||
EOF
|
||||
( exec "${SHUNIT_SHELL:-sh}" "${unittestF}" >"${stdoutF}" 2>"${stderrF}" )
|
||||
grep '^FAILED' "${stdoutF}" >/dev/null
|
||||
assertTrue "failure message for ${assert} was not generated" $?
|
||||
}
|
||||
|
||||
testPrepForSourcing() {
|
||||
assertEquals '/abc' "`_shunit_prepForSourcing '/abc'`"
|
||||
assertEquals './abc' "`_shunit_prepForSourcing './abc'`"
|
||||
assertEquals './abc' "`_shunit_prepForSourcing 'abc'`"
|
||||
}
|
||||
|
||||
testEscapeCharInStr() {
|
||||
while read -r desc char str want; do
|
||||
got=`_shunit_escapeCharInStr "${char}" "${str}"`
|
||||
assertEquals "${desc}" "${want}" "${got}"
|
||||
done <<'EOF'
|
||||
backslash \ '' ''
|
||||
backslash_pre \ \def \\def
|
||||
backslash_mid \ abc\def abc\\def
|
||||
backslash_post \ abc\ abc\\
|
||||
quote " '' ''
|
||||
quote_pre " "def \"def
|
||||
quote_mid " abc"def abc\"def
|
||||
quote_post " abc" abc\"
|
||||
string $ '' ''
|
||||
string_pre $ $def \$def
|
||||
string_mid $ abc$def abc\$def
|
||||
string_post $ abc$ abc\$
|
||||
EOF
|
||||
|
||||
# TODO(20170924:kward) fix or remove.
|
||||
# actual=`_shunit_escapeCharInStr "'" ''`
|
||||
# assertEquals '' "${actual}"
|
||||
# assertEquals "abc\\'" `_shunit_escapeCharInStr "'" "abc'"`
|
||||
# assertEquals "abc\\'def" `_shunit_escapeCharInStr "'" "abc'def"`
|
||||
# assertEquals "\\'def" `_shunit_escapeCharInStr "'" "'def"`
|
||||
|
||||
# # Must put the backtick in a variable so the shell doesn't misinterpret it
|
||||
# # while inside a backticked sequence (e.g. `echo '`'` would fail).
|
||||
# backtick='`'
|
||||
# actual=`_shunit_escapeCharInStr ${backtick} ''`
|
||||
# assertEquals '' "${actual}"
|
||||
# assertEquals '\`abc' \
|
||||
# `_shunit_escapeCharInStr "${backtick}" ${backtick}'abc'`
|
||||
# assertEquals 'abc\`' \
|
||||
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}`
|
||||
# assertEquals 'abc\`def' \
|
||||
# `_shunit_escapeCharInStr "${backtick}" 'abc'${backtick}'def'`
|
||||
}
|
||||
|
||||
testEscapeCharInStr_specialChars() {
|
||||
# Make sure our forward slash doesn't upset sed.
|
||||
assertEquals '/' "`_shunit_escapeCharInStr '\' '/'`"
|
||||
|
||||
# Some shells escape these differently.
|
||||
# TODO(20170924:kward) fix or remove.
|
||||
#assertEquals '\\a' `_shunit_escapeCharInStr '\' '\a'`
|
||||
#assertEquals '\\b' `_shunit_escapeCharInStr '\' '\b'`
|
||||
}
|
||||
|
||||
# Test the various ways of declaring functions.
|
||||
#
|
||||
# Prefixing (then stripping) with comment symbol so these functions aren't
|
||||
# treated as real functions by shUnit2.
|
||||
testExtractTestFunctions() {
|
||||
f="${SHUNIT_TMPDIR}/extract_test_functions"
|
||||
sed 's/^#//' <<EOF >"${f}"
|
||||
## Function on a single line.
|
||||
#testABC() { echo 'ABC'; }
|
||||
## Multi-line function with '{' on next line.
|
||||
#test_def()
|
||||
# {
|
||||
# echo 'def'
|
||||
#}
|
||||
## Multi-line function with '{' on first line.
|
||||
#testG3 () {
|
||||
# echo 'G3'
|
||||
#}
|
||||
## Function with numerical values in name.
|
||||
#function test4() { echo '4'; }
|
||||
## Leading space in front of function.
|
||||
# test5() { echo '5'; }
|
||||
## Function with '_' chars in name.
|
||||
#some_test_function() { echo 'some func'; }
|
||||
## Function that sets variables.
|
||||
#func_with_test_vars() {
|
||||
# testVariable=1234
|
||||
#}
|
||||
EOF
|
||||
|
||||
actual=`_shunit_extractTestFunctions "${f}"`
|
||||
assertEquals 'testABC test_def testG3 test4 test5' "${actual}"
|
||||
}
|
||||
|
||||
# Test that certain external commands sometimes "stubbed" by users
|
||||
# are escaped. See Issue #54.
|
||||
testProtectedCommands() {
|
||||
for c in mkdir rm cat chmod; do
|
||||
grep "^[^#]*${c} " "${TH_SHUNIT}" | grep -qv "command ${c}"
|
||||
assertFalse "external call to ${c} not protected somewhere" $?
|
||||
done
|
||||
grep '^[^#]*[^ ] *\[' "${TH_SHUNIT}" | grep -qv 'command \['
|
||||
assertFalse "call to [ ... ] not protected somewhere" $?
|
||||
grep '^[^#]* *\.' "${TH_SHUNIT}" | grep -qv 'command \.'
|
||||
assertFalse "call to . not protected somewhere" $?
|
||||
}
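To make the intent of the greps above concrete, here is a hedged illustration of the convention being enforced; the file name is a placeholder:

# Within shunit2, external commands are expected to go through the 'command'
# builtin so that user-defined stubs cannot shadow them:
#   command rm -f "${someTmpFile}"   # protected; ignored by the checks above
#   rm -f "${someTmpFile}"           # unprotected; the checks above would flag it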
|
||||
|
||||
setUp() {
|
||||
for f in "${stdoutF}" "${stderrF}"; do
|
||||
cp /dev/null "${f}"
|
||||
done
|
||||
|
||||
# Reconfigure coloring as some tests override default behavior.
|
||||
_shunit_configureColor "${SHUNIT_COLOR_DEFAULT}"
|
||||
}
|
||||
|
||||
oneTimeSetUp() {
|
||||
SHUNIT_COLOR_DEFAULT="${SHUNIT_COLOR}"
|
||||
th_oneTimeSetUp
|
||||
}
|
||||
|
||||
# Load and run shUnit2.
|
||||
# shellcheck disable=SC2034
|
||||
[ -n "${ZSH_VERSION:-}" ] && SHUNIT_PARENT=$0
|
||||
. "${TH_SHUNIT}"
|
38
dev/tests/shunit2/shunit2_standalone_test.sh
Executable file
@@ -0,0 +1,38 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shUnit2 unit test for standalone operation.
|
||||
#
|
||||
# Copyright 2010-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
# This unit test is purely to test that calling shunit2 directly, while passing
|
||||
# the name of a unit test script, works. When run, this script determines if it
|
||||
# is running as a standalone program, and calls main() if it is.
|
||||
#
|
||||
### ShellCheck http://www.shellcheck.net/
|
||||
# $() are not fully portable (POSIX != portable).
|
||||
# shellcheck disable=SC2006
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
|
||||
ARGV0="`basename "$0"`"
|
||||
|
||||
# Load test helpers.
|
||||
. ./shunit2_test_helpers
|
||||
|
||||
testStandalone() {
|
||||
assertTrue "${SHUNIT_TRUE}"
|
||||
}
|
||||
|
||||
main() {
|
||||
${TH_SHUNIT} "${ARGV0}"
|
||||
}
|
||||
|
||||
# Are we running as a standalone?
|
||||
if [ "${ARGV0}" = 'shunit2_test_standalone.sh' ]; then
|
||||
if [ $# -gt 0 ]; then main "$@"; else main; fi
|
||||
fi
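As a rough sketch of what 'standalone' means here (the relative path is an assumption), the shunit2 library is executed directly with the test script name as its argument, which is what main() reproduces via ${TH_SHUNIT}:

#   ./shunit2 shunit2_standalone_test.sh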
|
234
dev/tests/shunit2/shunit2_test_helpers
Normal file
@@ -0,0 +1,234 @@
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# shUnit2 unit test common functions
|
||||
#
|
||||
# Copyright 2008 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shunit2
|
||||
#
|
||||
### ShellCheck (http://www.shellcheck.net/)
|
||||
# Commands are purposely escaped so they can be mocked outside shUnit2.
|
||||
# shellcheck disable=SC1001,SC1012
|
||||
# expr may be antiquated, but it is the only solution in some cases.
|
||||
# shellcheck disable=SC2003
|
||||
# $() are not fully portable (POSIX != portable).
|
||||
# shellcheck disable=SC2006
|
||||
|
||||
# Treat unset variables as an error when performing parameter expansion.
|
||||
set -u
|
||||
|
||||
# Set shwordsplit for zsh.
|
||||
\[ -n "${ZSH_VERSION:-}" ] && setopt shwordsplit
|
||||
|
||||
#
|
||||
# Constants.
|
||||
#
|
||||
|
||||
# Path to shUnit2 library. Can be overridden by setting SHUNIT_INC.
|
||||
TH_SHUNIT=${SHUNIT_INC:-./shunit2}; export TH_SHUNIT
|
||||
|
||||
# Configure debugging. Set the DEBUG environment variable to any
|
||||
# non-empty value to enable debug output, or TRACE to enable trace
|
||||
# output.
|
||||
TRACE=${TRACE:+'th_trace '}
|
||||
\[ -n "${TRACE}" ] && DEBUG=1
|
||||
\[ -z "${TRACE}" ] && TRACE=':'
|
||||
|
||||
DEBUG=${DEBUG:+'th_debug '}
|
||||
\[ -z "${DEBUG}" ] && DEBUG=':'
|
||||
|
||||
#
|
||||
# Variables.
|
||||
#
|
||||
|
||||
th_RANDOM=0
|
||||
|
||||
#
|
||||
# Functions.
|
||||
#
|
||||
|
||||
# Logging functions.
|
||||
th_trace() { echo "${MY_NAME}:TRACE $*" >&2; }
|
||||
th_debug() { echo "${MY_NAME}:DEBUG $*" >&2; }
|
||||
th_info() { echo "${MY_NAME}:INFO $*" >&2; }
|
||||
th_warn() { echo "${MY_NAME}:WARN $*" >&2; }
|
||||
th_error() { echo "${MY_NAME}:ERROR $*" >&2; }
|
||||
th_fatal() { echo "${MY_NAME}:FATAL $*" >&2; }
|
||||
|
||||
# Output subtest name.
|
||||
th_subtest() { echo " $*" >&2; }
|
||||
|
||||
th_oneTimeSetUp() {
|
||||
# These files will be cleaned up automatically by shUnit2.
|
||||
stdoutF="${SHUNIT_TMPDIR}/stdout"
|
||||
stderrF="${SHUNIT_TMPDIR}/stderr"
|
||||
returnF="${SHUNIT_TMPDIR}/return"
|
||||
expectedF="${SHUNIT_TMPDIR}/expected"
|
||||
export stdoutF stderrF returnF expectedF
|
||||
}
|
||||
|
||||
# Generate a random number.
|
||||
th_generateRandom() {
|
||||
tfgr_random=${th_RANDOM}
|
||||
|
||||
while \[ "${tfgr_random}" = "${th_RANDOM}" ]; do
|
||||
# shellcheck disable=SC2039
|
||||
if \[ -n "${RANDOM:-}" ]; then
|
||||
# $RANDOM works
|
||||
# shellcheck disable=SC2039
|
||||
tfgr_random=${RANDOM}${RANDOM}${RANDOM}$$
|
||||
elif \[ -r '/dev/urandom' ]; then
|
||||
tfgr_random=`od -vAn -N4 -tu4 </dev/urandom |sed 's/^[^0-9]*//'`
|
||||
else
|
||||
tfgr_date=`date '+%H%M%S'`
|
||||
tfgr_random=`expr "${tfgr_date}" \* $$`
|
||||
unset tfgr_date
|
||||
fi
|
||||
\[ "${tfgr_random}" = "${th_RANDOM}" ] && sleep 1
|
||||
done
|
||||
|
||||
th_RANDOM=${tfgr_random}
|
||||
unset tfgr_random
|
||||
}
|
||||
|
||||
# This function returns the data portion of the specified section of a file. A
|
||||
# data section is defined by a [header], one or more lines of data, and then a
|
||||
# blank line.
|
||||
th_getDataSect() {
|
||||
th_sgrep "\\[$1\\]" "$2" |sed '1d'
|
||||
}
|
||||
|
||||
# This function greps a section from a file. A section is defined as a group of
|
||||
# lines preceded and followed by blank lines.
|
||||
th_sgrep() {
|
||||
th_pattern_=$1
|
||||
shift
|
||||
|
||||
# shellcheck disable=SC2068
|
||||
sed -e '/./{H;$!d;}' -e "x;/${th_pattern_}/"'!d;' $@ |sed '1d'
|
||||
|
||||
unset th_pattern_
|
||||
}
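For context, a minimal sketch (not part of the helpers file) of the data-section format these two helpers operate on; the section name and file path are made up for illustration:

# A data section is a [header], one or more data lines, then a blank line.
cat >"${SHUNIT_TMPDIR}/data" <<'EOF'
[my section]
alpha
beta

EOF
th_getDataSect 'my section' "${SHUNIT_TMPDIR}/data"   # prints 'alpha' and 'beta'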
|
||||
|
||||
# Custom assert that checks for true return value (0), and no output to STDOUT
|
||||
# or STDERR. If a non-zero return value is encountered, the output of STDERR
|
||||
# will be output.
|
||||
#
|
||||
# Args:
|
||||
# th_test_: string: name of the subtest
|
||||
# th_rtrn_: integer: the return value of the subtest performed
|
||||
# th_stdout_: string: filename where stdout was redirected to
|
||||
# th_stderr_: string: filename where stderr was redirected to
|
||||
th_assertTrueWithNoOutput() {
|
||||
th_test_=$1
|
||||
th_rtrn_=$2
|
||||
th_stdout_=$3
|
||||
th_stderr_=$4
|
||||
|
||||
assertTrue "${th_test_}; expected return value of zero" "${th_rtrn_}"
|
||||
\[ "${th_rtrn_}" -ne "${SHUNIT_TRUE}" ] && \cat "${th_stderr_}"
|
||||
assertFalse "${th_test_}; expected no output to STDOUT" \
|
||||
"[ -s '${th_stdout_}' ]"
|
||||
assertFalse "${th_test_}; expected no output to STDERR" \
|
||||
"[ -s '${th_stderr_}' ]"
|
||||
|
||||
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
|
||||
}
|
||||
|
||||
# Custom assert that checks for non-zero return value, output to STDOUT, but no
|
||||
# output to STDERR.
|
||||
#
|
||||
# Args:
|
||||
# th_test_: string: name of the subtest
|
||||
# th_rtrn_: integer: the return value of the subtest performed
|
||||
# th_stdout_: string: filename where stdout was redirected to
|
||||
# th_stderr_: string: filename where stderr was redirected to
|
||||
th_assertFalseWithOutput()
|
||||
{
|
||||
th_test_=$1
|
||||
th_rtrn_=$2
|
||||
th_stdout_=$3
|
||||
th_stderr_=$4
|
||||
|
||||
assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}"
|
||||
assertTrue "${th_test_}; expected output to STDOUT" \
|
||||
"[ -s '${th_stdout_}' ]"
|
||||
assertFalse "${th_test_}; expected no output to STDERR" \
|
||||
"[ -s '${th_stderr_}' ]"
|
||||
\[ -s "${th_stdout_}" -a ! -s "${th_stderr_}" ] || \
|
||||
_th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}"
|
||||
|
||||
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
|
||||
}
|
||||
|
||||
# Custom assert that checks for non-zero return value, no output to STDOUT, but
|
||||
# output to STDERR.
|
||||
#
|
||||
# Args:
|
||||
# th_test_: string: name of the subtest
|
||||
# th_rtrn_: integer: the return value of the subtest performed
|
||||
# th_stdout_: string: filename where stdout was redirected to
|
||||
# th_stderr_: string: filename where stderr was redirected to
|
||||
th_assertFalseWithError() {
|
||||
th_test_=$1
|
||||
th_rtrn_=$2
|
||||
th_stdout_=$3
|
||||
th_stderr_=$4
|
||||
|
||||
assertFalse "${th_test_}; expected non-zero return value" "${th_rtrn_}"
|
||||
assertFalse "${th_test_}; expected no output to STDOUT" \
|
||||
"[ -s '${th_stdout_}' ]"
|
||||
assertTrue "${th_test_}; expected output to STDERR" \
|
||||
"[ -s '${th_stderr_}' ]"
|
||||
\[ ! -s "${th_stdout_}" -a -s "${th_stderr_}" ] || \
|
||||
_th_showOutput "${SHUNIT_FALSE}" "${th_stdout_}" "${th_stderr_}"
|
||||
|
||||
unset th_test_ th_rtrn_ th_stdout_ th_stderr_
|
||||
}
|
||||
|
||||
# Some shells, zsh on Solaris in particular, return immediately from a sub-shell
|
||||
# when a non-zero return value is encountered. To properly catch these values,
|
||||
# they are either written to disk, or recognized as an error if the file is empty.
|
||||
th_clearReturn() { cp /dev/null "${returnF}"; }
|
||||
th_queryReturn() {
|
||||
if \[ -s "${returnF}" ]; then
|
||||
th_return=`\cat "${returnF}"`
|
||||
else
|
||||
th_return=${SHUNIT_ERROR}
|
||||
fi
|
||||
export th_return
|
||||
}
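A hedged usage sketch of how th_clearReturn and th_queryReturn pair up in a test (someCommand is a placeholder): the sub-shell records its exit status in ${returnF}, and th_queryReturn then exposes it as ${th_return} even on shells that abort the sub-shell early:

th_clearReturn
( someCommand >"${stdoutF}" 2>"${stderrF}"; echo $? >"${returnF}" )
th_queryReturn
assertEquals 'expected success' "${SHUNIT_TRUE}" "${th_return}"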
|
||||
|
||||
# Providing external and internal calls to the showOutput helper function.
|
||||
th_showOutput() { _th_showOutput "$@"; }
|
||||
_th_showOutput() {
|
||||
_th_return_=$1
|
||||
_th_stdout_=$2
|
||||
_th_stderr_=$3
|
||||
|
||||
isSkipping
|
||||
if \[ $? -eq "${SHUNIT_FALSE}" -a "${_th_return_}" != "${SHUNIT_TRUE}" ]; then
|
||||
if \[ -n "${_th_stdout_}" -a -s "${_th_stdout_}" ]; then
|
||||
echo '>>> STDOUT' >&2
|
||||
\cat "${_th_stdout_}" >&2
|
||||
fi
|
||||
if \[ -n "${_th_stderr_}" -a -s "${_th_stderr_}" ]; then
|
||||
echo '>>> STDERR' >&2
|
||||
\cat "${_th_stderr_}" >&2
|
||||
fi
|
||||
if \[ -n "${_th_stdout_}" -o -n "${_th_stderr_}" ]; then
|
||||
echo '<<< end output' >&2
|
||||
fi
|
||||
fi
|
||||
|
||||
unset _th_return_ _th_stdout_ _th_stderr_
|
||||
}
|
||||
|
||||
#
|
||||
# Main.
|
||||
#
|
||||
|
||||
${TRACE} 'trace output enabled'
|
||||
${DEBUG} 'debug output enabled'
|
165
dev/tests/shunit2/test_runner
Executable file
@@ -0,0 +1,165 @@
|
||||
#! /bin/sh
|
||||
# vim:et:ft=sh:sts=2:sw=2
|
||||
#
|
||||
# Unit test suite runner.
|
||||
#
|
||||
# Copyright 2008-2017 Kate Ward. All Rights Reserved.
|
||||
# Released under the Apache 2.0 license.
|
||||
#
|
||||
# Author: kate.ward@forestent.com (Kate Ward)
|
||||
# https://github.com/kward/shlib
|
||||
#
|
||||
# This script runs all the unit tests that can be found, and generates a nice
|
||||
# report of the tests.
|
||||
#
|
||||
### ShellCheck (http://www.shellcheck.net/)
|
||||
# Disable source following.
|
||||
# shellcheck disable=SC1090,SC1091
|
||||
# expr may be antiquated, but it is the only solution in some cases.
|
||||
# shellcheck disable=SC2003
|
||||
# $() are not fully portable (POSIX != portable).
|
||||
# shellcheck disable=SC2006
|
||||
|
||||
# Return if test_runner already loaded.
|
||||
[ -z "${RUNNER_LOADED:-}" ] || return 0
|
||||
RUNNER_LOADED=0
|
||||
|
||||
RUNNER_ARGV0=`basename "$0"`
|
||||
RUNNER_SHELLS='/bin/sh ash /bin/bash /bin/dash /bin/ksh /bin/pdksh /bin/zsh'
|
||||
RUNNER_TEST_SUFFIX='_test.sh'
|
||||
|
||||
runner_warn() { echo "runner:WARN $*" >&2; }
|
||||
runner_error() { echo "runner:ERROR $*" >&2; }
|
||||
runner_fatal() { echo "runner:FATAL $*" >&2; exit 1; }
|
||||
|
||||
runner_usage() {
|
||||
echo "usage: ${RUNNER_ARGV0} [-e key=val ...] [-s shell(s)] [-t test(s)]"
|
||||
}
|
||||
|
||||
_runner_tests() { echo ./*${RUNNER_TEST_SUFFIX} |sed 's#./##g'; }
|
||||
_runner_testName() {
|
||||
# shellcheck disable=SC1117
|
||||
_runner_testName_=`expr "${1:-}" : "\(.*\)${RUNNER_TEST_SUFFIX}"`
|
||||
if [ -n "${_runner_testName_}" ]; then
|
||||
echo "${_runner_testName_}"
|
||||
else
|
||||
echo 'unknown'
|
||||
fi
|
||||
unset _runner_testName_
|
||||
}
|
||||
|
||||
main() {
|
||||
# Find and load versions library.
|
||||
for _runner_dir_ in . ${LIB_DIR:-lib}; do
|
||||
if [ -r "${_runner_dir_}/versions" ]; then
|
||||
_runner_lib_dir_="${_runner_dir_}"
|
||||
break
|
||||
fi
|
||||
done
|
||||
[ -n "${_runner_lib_dir_}" ] || runner_fatal 'Unable to find versions library.'
|
||||
. "${_runner_lib_dir_}/versions" || runner_fatal 'Unable to load versions library.'
|
||||
unset _runner_dir_ _runner_lib_dir_
|
||||
|
||||
# Process command line flags.
|
||||
env=''
|
||||
while getopts 'e:hs:t:' opt; do
|
||||
case ${opt} in
|
||||
e) # set an environment variable
|
||||
key=`expr "${OPTARG}" : '\([^=]*\)='`
|
||||
val=`expr "${OPTARG}" : '[^=]*=\(.*\)'`
|
||||
# shellcheck disable=SC2166
|
||||
if [ -z "${key}" -o -z "${val}" ]; then
|
||||
runner_usage
|
||||
exit 1
|
||||
fi
|
||||
eval "${key}='${val}'"
|
||||
eval "export ${key}"
|
||||
env="${env:+${env} }${key}"
|
||||
;;
|
||||
h) runner_usage; exit 0 ;; # help output
|
||||
s) shells=${OPTARG} ;; # list of shells to run
|
||||
t) tests=${OPTARG} ;; # list of tests to run
|
||||
*) runner_usage; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
shift "`expr ${OPTIND} - 1`"
|
||||
|
||||
# Fill shells and/or tests.
|
||||
shells=${shells:-${RUNNER_SHELLS}}
|
||||
[ -z "${tests}" ] && tests=`_runner_tests`
|
||||
|
||||
# Error checking.
|
||||
if [ -z "${tests}" ]; then
|
||||
runner_error 'no tests found to run; exiting'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
#------------------------------------------------------------------------------
|
||||
# System data.
|
||||
#
|
||||
|
||||
$ uname -mprsv
|
||||
`uname -mprsv`
|
||||
|
||||
OS Name: `versions_osName`
|
||||
OS Version: `versions_osVersion`
|
||||
|
||||
### Test run info.
|
||||
shells: ${shells}
|
||||
tests: ${tests}
|
||||
EOF
|
||||
for key in ${env}; do
|
||||
eval "echo \"${key}=\$${key}\""
|
||||
done
|
||||
|
||||
# Run tests.
|
||||
for shell in ${shells}; do
|
||||
echo
|
||||
|
||||
cat <<EOF
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# Running the test suite with ${shell}.
|
||||
#
|
||||
EOF
|
||||
|
||||
# Check for existence of shell.
|
||||
shell_bin=${shell}
|
||||
shell_name=''
|
||||
shell_present=${FALSE}
|
||||
case ${shell} in
|
||||
ash)
|
||||
shell_bin=`which busybox |grep -v '^no busybox'`
|
||||
[ $? -eq "${TRUE}" -a -n "${shell_bin}" ] && shell_present="${TRUE}"
|
||||
shell_bin="${shell_bin} ash"
|
||||
shell_name=${shell}
|
||||
;;
|
||||
*)
|
||||
[ -x "${shell_bin}" ] && shell_present="${TRUE}"
|
||||
shell_name=`basename "${shell}"`
|
||||
;;
|
||||
esac
|
||||
if [ "${shell_present}" -eq "${FALSE}" ]; then
|
||||
runner_warn "unable to run tests with the ${shell_name} shell"
|
||||
continue
|
||||
fi
|
||||
|
||||
shell_version=`versions_shellVersion "${shell}"`
|
||||
|
||||
echo "shell name: ${shell_name}"
|
||||
echo "shell version: ${shell_version}"
|
||||
|
||||
# Execute the tests.
|
||||
for t in ${tests}; do
|
||||
echo
|
||||
echo "--- Executing the '`_runner_testName "${t}"`' test suite. ---"
|
||||
# ${shell_bin} needs word splitting.
|
||||
# shellcheck disable=SC2086
|
||||
( exec ${shell_bin} "./${t}" 2>&1; )
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
# Execute main() if this is run in standalone mode (i.e. not from a unit test).
|
||||
[ -z "${SHUNIT_VERSION}" ] && main "$@"
|
187
host_backup.conf
@@ -1,187 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
###### Remote (or local) backup script for files & databases
|
||||
###### (L) 2013 by Ozy de Jong (www.netpower.fr)
|
||||
###### Config file rev 2015090801
|
||||
|
||||
## ---------- GENERAL BACKUP OPTIONS
|
||||
|
||||
## Backup identification name.
|
||||
BACKUP_ID="your backup name"
|
||||
|
||||
## Log file location. Leaving this empty will create log file at /var/log/obackup_version_BACKUP_ID.log (or current directory if /var/log doesn't exist)
|
||||
LOGFILE=""
|
||||
|
||||
## Backup databases
|
||||
BACKUP_SQL=no
|
||||
## Backup files
|
||||
BACKUP_FILES=yes
|
||||
|
||||
## ---------- LOCAL BACKUP STORAGE OPTIONS
|
||||
|
||||
## Local storage paths where to put backups
|
||||
LOCAL_SQL_STORAGE="/home/storage/backup/sql"
|
||||
LOCAL_FILE_STORAGE="/home/storage/backup/files"
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=yes
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use 'backup task division' functionality of OBackup, or everything will end up in the same directory.
|
||||
LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=yes
|
||||
|
||||
## Generate an alert if backup size is lower than given value in Kb (this can also help identifying empty mount dirs)
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
## You may disable testing backup size
|
||||
DISABLE_GET_BACKUP_FILE_SIZE=no
|
||||
|
||||
## Generate an alert if local storage free space is lower than given value in Kb. Keep in mind that disabling backup file size test will only test min space against SQL backup size.
|
||||
LOCAL_STORAGE_WARN_MIN_SPACE=1048576
|
||||
|
||||
## ---------- MISC OPTIONS
|
||||
|
||||
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
|
||||
BANDWIDTH=0
|
||||
|
||||
## If enabled, file backups will be processed as superuser. See documentation for /etc/sudoers configuration ("find", "du" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=no
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
## ---------- REMOTE BACKUP OPTIONS
|
||||
|
||||
## The following options allow this Obackup instance to connect to a remote system via an ssh tunnel.
|
||||
## The public RSA key needs to be put into ~/.ssh/authorized_keys in the remote user's home directory.
|
||||
REMOTE_BACKUP=no
|
||||
SSH_RSA_PRIVATE_KEY=~/.ssh/id_rsa
|
||||
REMOTE_USER=backupuser
|
||||
REMOTE_HOST=yourhost.local
|
||||
REMOTE_PORT=22
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=yes
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to the remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip the current task.
|
||||
REMOTE_HOST_PING=yes
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip the current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## ---------- DATABASE BACKUP OPTIONS
|
||||
|
||||
## Database backup user
|
||||
SQL_USER=backupuser
|
||||
|
||||
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
|
||||
## Every found database will be backed up as separate backup task.
|
||||
DATABASES_ALL=yes
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;mysql"
|
||||
## Alternatively, if DATABASES_ALL=no, you can specify a list of databases to backup separated by spaces.
|
||||
DATABASES_LIST=""
|
||||
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on what is available)
|
||||
## Generally, level 5 is a good compromise between CPU usage, memory hunger and compression ratio. Gzipped files are set to be rsyncable.
|
||||
COMPRESSION_LEVEL=3
|
||||
|
||||
## SQL Dump compression should be done on remote side but can also be done locally to lower remote system usage (will take more bandwidth, check for ssh compression)
|
||||
COMPRESSION_REMOTE=yes
|
||||
|
||||
## ---------- FILES BACKUP OPTIONS
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up recursively. Every directory will be processed as one backup task.
|
||||
DIRECTORIES_SIMPLE_LIST="/var/named;/var/lib"
|
||||
|
||||
## There's a special backup schema in Obackup called 'backup task division' which creates one backup task per level 1 subdirectory of a directory.
|
||||
## This is VERY useful to backup multiple virtualhosts as separate tasks without having to specify each one separately.
|
||||
## This may also be useful for dividing big data directories into subdirectory tasks.
|
||||
|
||||
## Directories backup task division backup: Semicolon separated directories of which every level 1 subdirectory will be backed up recursively as a separate backup task.
|
||||
## Example: "/home;/var" will create tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/whatever"
|
||||
DIRECTORIES_RECURSE_LIST="/home"
|
||||
## You may optionally exclude subdirectories from task division. In the above example you could exclude /home/dir2 by adding it to DIRECTORIES_RECURSE_EXCLUDE_LIST
|
||||
DIRECTORIES_RECURSE_EXCLUDE_LIST="/home/backupuser;/home/lost+found"
|
||||
|
||||
## Rsync exclude patterns, used by simple and division lists
|
||||
RSYNC_EXCLUDE_PATTERN="*/tmp;*/ftp/www/cache/cachefs;*/sessions"
|
||||
|
||||
## File that contains the list of directories or files to exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
|
||||
## Preserve ACLs. Make sure the source and target FS can hold the same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=no
|
||||
## Preserve Xattr. Make sure the source and target FS can hold the same Xattr or you'll get loads of errors.
|
||||
PRESERVE_XATTR=no
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=yes
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=yes
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=no
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=no
|
||||
|
||||
## Max execution time per file backup task. Soft max generates a warning only. Hard max generates a warning, stops the current backup task and processes the next one in the file list. Time is specified in seconds
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Keep partial uploads that can be resumed on next run, experimental feature
|
||||
PARTIAL=no
|
||||
|
||||
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=no
|
||||
|
||||
## Use delta copy algorithm (useful when local paths are network drives), defaults to yes
|
||||
DELTA_COPIES=yes
|
||||
|
||||
## ---------- ALERT OPTIONS
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS="your@mail.address"
|
||||
|
||||
## Windows (MSYS environment) only mail options (used by sendemail.exe)
|
||||
SENDER_MAIL="alert@your.system"
|
||||
SMTP_SERVER=smtp.your.isp.com
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
## ---------- GENERAL BACKUP OPTIONS
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_BACKUPS=no
|
||||
ROTATE_COPIES=7
|
||||
|
||||
## ---------- EXECUTION HOOKS
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops whole backup execution if one of the above commands fails
|
||||
STOP_ON_CMD_ERROR=no
|
host_backup.conf.example (normal file, 256 lines)
@ -0,0 +1,256 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
###### obackup - Local or Remote, push or pull backup script for files & mysql databases
|
||||
###### (C) 2013-2019 by Orsiris de Jong (www.netpower.fr)
|
||||
|
||||
[GENERAL]
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
|
||||
## Backup identification string.
|
||||
INSTANCE_ID="test-backup"
|
||||
|
||||
## Log file location. Leaving this empty will create the log file at /var/log/obackup.INSTANCE_ID.log (or in the current directory if /var/log doesn't exist).
|
||||
LOGFILE=""
|
||||
|
||||
## Elements to backup
|
||||
SQL_BACKUP=true
|
||||
FILE_BACKUP=true
|
||||
|
||||
## Backups can be done locally, pulled from another server or pushed to a backup server. Available options are [local,pull,push].
|
||||
## Pulled backups are the safest option, as the backup server contains the RSA key and cannot be compromised by another server.
|
||||
BACKUP_TYPE=local
|
||||
|
||||
[BACKUP STORAGE]
|
||||
|
||||
## Storage paths of the backups (absolute paths of the local or remote system). Please use ${HOME} instead of ~ if needed.
|
||||
SQL_STORAGE="/home/storage/backup/sql"
|
||||
FILE_STORAGE="/home/storage/backup/files"
|
||||
|
||||
## Backup encryption using GPG and rsync.
|
||||
## Push backups get encrypted locally in CRYPT_STORAGE before they are sent to the remote system
|
||||
## Local and pull backups get encrypted after backup, in CRYPT_STORAGE
|
||||
ENCRYPTION=false
|
||||
|
||||
## Backup encryption needs a temporary storage space in order to encrypt files before sending them (absolute paths of the local or remote system)
|
||||
## In case of a pull backup, an encrypted copy of FILE_BACKUP goes here
|
||||
CRYPT_STORAGE=/home/storage/backup/crypt
|
||||
|
||||
## GPG recipient (a pubkey for this recipient must exist, see gpg2 --list-keys or gpg --list-keys)
|
||||
GPG_RECIPIENT="John Doe"
|
||||
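## Illustrative check (assuming gpg2 is the binary in use): verify that a public key exists for the
## recipient before enabling encryption, e.g.
##   gpg2 --list-keys "John Doe"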
|
||||
## Use n CPUs for encryption / decryption where n is an integer. Defaults to 1
|
||||
PARALLEL_ENCRYPTION_PROCESSES=
|
||||
|
||||
## Create backup directories if they do not exist
|
||||
CREATE_DIRS=true
|
||||
|
||||
## Keep absolute source path in your backup, eg: /your/backup/storage/the/remote/server/files
|
||||
## You should leave this enabled if you intend to use the 'backup task division' functionality of oBackup, or everything will end up in the same directory.
|
||||
KEEP_ABSOLUTE_PATHS=true
|
||||
|
||||
## Generate an alert if backup size is lower than the given value in Kb (this can also help identify empty mount dirs).
|
||||
BACKUP_SIZE_MINIMUM=1024
|
||||
|
||||
## Check backup size before proceeding
|
||||
GET_BACKUP_SIZE=true
|
||||
|
||||
## Generate an alert if storage free space is lower than given value in Kb.
|
||||
## Keep in mind that disabling the backup file size test will only check minimum space against the SQL backup size.
|
||||
SQL_WARN_MIN_SPACE=1048576
|
||||
FILE_WARN_MIN_SPACE=1048576
|
||||
|
||||
[REMOTE_OPTIONS]
|
||||
|
||||
## In case of pulled or pushed backups, remote system URI needs to be supplied.
|
||||
REMOTE_SYSTEM_URI="ssh://backupuser@remote.system.tld:22/"
|
||||
|
||||
## You can specify an RSA key (please use the full path). If not defined, the default ~/.ssh/id_rsa will be used. See documentation for further information.
|
||||
SSH_RSA_PRIVATE_KEY="${HOME}/.ssh/id_rsa"
|
||||
|
||||
## Alternatively, you may specify an SSH password file (less secure). Needs sshpass utility installed.
|
||||
SSH_PASSWORD_FILE=""
|
||||
|
||||
## When using the ssh filter, you must specify a remote token matching the one set up in authorized_keys
|
||||
_REMOTE_TOKEN=SomeAlphaNumericToken9
|
||||
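## Illustrative sketch (assuming ssh_filter.sh is installed as /usr/local/bin/ssh_filter.sh on the
## remote system); the matching authorized_keys entry could look like:
##   command="/usr/local/bin/ssh_filter.sh SomeAlphaNumericToken9",no-port-forwarding,no-X11-forwarding ssh-rsa AAAA... backupuser@backupserver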
|
||||
## ssh compression should be used unless your remote connection is good enough (LAN)
|
||||
SSH_COMPRESSION=true
|
||||
|
||||
## Ignore ssh known hosts verification. DANGER WILL ROBINSON DANGER: This can lead to security risks. Only enable if you know what you're doing.
|
||||
## Works on Red Hat / CentOS, doesn't work on Debian / Ubuntu
|
||||
SSH_IGNORE_KNOWN_HOSTS=false
|
||||
|
||||
## Remote rsync executable path. Leave this empty in most cases
|
||||
RSYNC_REMOTE_PATH=""
|
||||
|
||||
## Check for connectivity to the remote host before launching remote backup tasks. Be sure the host responds to ping. Failing to ping will skip the current task.
|
||||
REMOTE_HOST_PING=true
|
||||
|
||||
## Check for internet access by pinging one or more 3rd party hosts before remote backup tasks. Leave empty if you don't want this check to be performed. Failing to ping will skip the current task.
|
||||
REMOTE_3RD_PARTY_HOSTS="www.kernel.org www.google.com"
|
||||
|
||||
## If enabled, commands will be executed as superuser on remote side. See documentation for /etc/sudoers configuration ("find", "du", "tee" and "rsync" need to be allowed). Requiretty needs to be disabled.
|
||||
SUDO_EXEC=false
|
||||
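## Illustrative /etc/sudoers sketch matching the requirements above (the user name "backupuser" and
## the binary paths are assumptions, adjust to your system):
##   Defaults:backupuser !requiretty
##   backupuser ALL = NOPASSWD: /usr/bin/rsync, /usr/bin/find, /usr/bin/du, /usr/bin/tee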
|
||||
[DATABASE BACKUP SETTINGS]
|
||||
|
||||
## Database backup user (should be the same user you are running obackup with)
|
||||
SQL_USER=root
|
||||
|
||||
## Enabling the following option will save all databases on local or remote given SQL instance except the ones specified in the exclude list.
|
||||
## Every found database will be backed up as a separate backup task.
|
||||
DATABASES_ALL=true
|
||||
DATABASES_ALL_EXCLUDE_LIST="test;mysql"
|
||||
DATABASES_LIST=""
|
||||
|
||||
## Alternatively, if DATABASES_ALL=false, you can specify a list of databases to back up, separated by semicolons.
|
||||
#DATABASES_LIST="somedatabase"
|
||||
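## e.g. several databases separated by the list separator char (database names are placeholders):
#DATABASES_LIST="somedatabase;anotherdatabase"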
|
||||
## Max backup execution time per Database task. Soft max exec time generates a warning only. Hard max exec time generates a warning and stops current backup task.
|
||||
## If a task gets stopped, next one in the task list gets executed. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_DB_TASK=7200
|
||||
|
||||
## mysqldump options (ex: --extended-insert, --single-transaction, --quick...). See MySQL / MariaDB manual
|
||||
## default option: --opt
|
||||
MYSQLDUMP_OPTIONS="--opt --single-transaction"
|
||||
|
||||
## Preferred SQL dump compression. Compression methods can be xz, lzma, pigz or gzip (will fall back from xz to gzip depending on availability)
|
||||
## Generally, level 5 is a good compromise between CPU usage, memory hunger and compression ratio. Gzipped files are set to be rsyncable.
|
||||
## If you use encryption, compression will only bring small benefits as GPG already has pretty good compression included
|
||||
COMPRESSION_LEVEL=3
|
||||
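## Roughly speaking, with the settings above each database task boils down to a pipeline of this
## shape (illustrative sketch only; the actual command line is built by obackup.sh):
##   mysqldump --opt --single-transaction somedatabase | xz -3 > "$SQL_STORAGE/somedatabase.sql.xz"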
|
||||
[FILE BACKUP SETTINGS]
|
||||
|
||||
## File backups are divided into tasks. Every directory in DIRECTORY_LIST will be processed as a unique task.
|
||||
## Every subdirectory of each directory in RECURSIVE_DIRECTORY_LIST will be processed as a unique task.
|
||||
## Example: RECURSIVE_DIRECTORY_LIST="/home;/var" will create backup tasks "/home/dir1", "/home/dir2", ... "/home/dirN", "/var/log", "/var/lib"... "/var/something".
|
||||
## You can exclude directories from the above backup task creation, ex: avoid backing up "/home/dir2" by adding it to RECURSIVE_EXCLUDE_LIST.
|
||||
## Note that since we recurse only one level deep, excluding /home/dir2/somedir won't have any effect.
|
||||
## Please use ${HOME} instead of ~ if needed.
|
||||
|
||||
## Directories backup list. List of semicolon separated directories that will be backed up.
|
||||
DIRECTORY_LIST="/var/named"
|
||||
RECURSIVE_DIRECTORY_LIST="/home"
|
||||
RECURSIVE_EXCLUDE_LIST="/home/backupuser;/home/lost+found"
|
||||
|
||||
## Rsync exclude / include order (the option set here will be set first, eg: include will make include then exclude patterns)
|
||||
RSYNC_PATTERN_FIRST=include
|
||||
|
||||
## List of files / directories to include / exclude from sync on both sides (see rsync patterns, wildcards work).
|
||||
## Paths are relative to sync dirs. List elements are separated by a semicolon. Specifying "cache" will exclude every cache subdirectory found.
|
||||
RSYNC_INCLUDE_PATTERN=""
|
||||
RSYNC_EXCLUDE_PATTERN=""
|
||||
#RSYNC_EXCLUDE_PATTERN="tmp;archives;cache"
|
||||
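## For illustration only: a pattern list like "tmp;archives;cache" is roughly turned into rsync
## arguments of the form --exclude="tmp" --exclude="archives" --exclude="cache" (or --include=...),
## and RSYNC_PATTERN_FIRST decides whether the include or the exclude arguments are emitted first.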
|
||||
## Files that contain lists of files / directories to include / exclude from sync on both sides. Leave this empty if you don't want to use an exclusion file.
|
||||
## This file has to be in the same directory as the config file
|
||||
## Paths are relative to sync dirs. One element per line.
|
||||
RSYNC_INCLUDE_FROM=""
|
||||
RSYNC_EXCLUDE_FROM=""
|
||||
#RSYNC_EXCLUDE_FROM="exclude.list"
|
||||
|
||||
## List separator char. You may set an alternative separator char for your directories lists above.
|
||||
PATH_SEPARATOR_CHAR=";"
|
||||
|
||||
## Optional arguments passed to the rsync executable. The following are already managed by the program and should never be passed here
|
||||
## -rltD -n -P -o -g --executability -A -X -zz -L -K -H -8 -u -i --stats --checksum --bwlimit --partial --partial-dir --exclude --exclude-from --include-from --no-whole-file --whole-file --list-only
|
||||
## When dealing with different filesystems for sync, or using SMB mountpoints, try adding --modify-window=2 --omit-dir-times as optional arguments
|
||||
RSYNC_OPTIONAL_ARGS=""
|
||||
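## Example for SMB mountpoints / mixed filesystems, as suggested above:
#RSYNC_OPTIONAL_ARGS="--modify-window=2 --omit-dir-times"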
|
||||
## Preserve basic Linux permissions
|
||||
PRESERVE_PERMISSIONS=true
|
||||
PRESERVE_OWNER=true
|
||||
PRESERVE_GROUP=true
|
||||
## On macOS, this does not work and will be ignored
|
||||
PRESERVE_EXECUTABILITY=true
|
||||
|
||||
## Preserve ACLs. Make sure source and target FS can hold the same ACLs or you'll get loads of errors.
|
||||
PRESERVE_ACL=false
|
||||
## Preserve Xattr. Make sure source and target FS can hold the same Xattr or you'll get loads of errors.
|
||||
PRESERVE_XATTR=false
|
||||
|
||||
## Transforms symlinks into referent files/dirs
|
||||
COPY_SYMLINKS=true
|
||||
## Treat symlinked dirs as dirs. CAUTION: This also follows symlinks outside of the replica root.
|
||||
KEEP_DIRLINKS=true
|
||||
## Preserve hard links. Make sure source and target FS can manage hard links or you will lose them.
|
||||
PRESERVE_HARDLINKS=false
|
||||
|
||||
|
||||
## Let RSYNC compress file transfers. Do not use this on local-local backup schemes. Also, this is not useful if SSH compression is enabled.
|
||||
RSYNC_COMPRESS=false
|
||||
|
||||
## Max execution time per file backup task. Soft is a warning only. Hard is a warning plus stopping the current backup task and processing the next one in the file list. Time is specified in seconds.
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK=3600
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK=7200
|
||||
|
||||
## Keep partial uploads that can be resumed on next run, experimental feature
|
||||
PARTIAL=false
|
||||
|
||||
## Delete files on destination that vanished from source. Do not turn this on unless you enabled backup rotation or a snapshotting FS like zfs to keep those vanished files on the destination.
|
||||
DELETE_VANISHED_FILES=false
|
||||
|
||||
## Use delta copy algorithm (useful when local paths are network drives), defaults to true
|
||||
DELTA_COPIES=true
|
||||
|
||||
## Bandwidth limit Kbytes / second for file backups. Leave 0 to disable limitation.
|
||||
BANDWIDTH=0
|
||||
|
||||
## Paranoia option. Don't change this unless you read the documentation.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
|
||||
[ALERT_OPTIONS]
|
||||
|
||||
## Alert email addresses separated by a space character
|
||||
DESTINATION_MAILS="your@mail.address"
|
||||
|
||||
## Optional change of mail body encoding (using iconv)
|
||||
## By default, all mails are sent in UTF-8 format without a header (for maximum compatibility across platforms)
|
||||
## You may specify an optional encoding here (like "ISO-8859-1" or whatever iconv can handle)
|
||||
MAIL_BODY_CHARSET=""
|
||||
|
||||
## Environment specific mail options (used with busybox sendemail, mailsend.exe from muquit, http://github.com/muquit/mailsend or sendemail.exe from Brandon Zehm, http://caspian.dotconf.net/menu/Software/SendEmail)
|
||||
SENDER_MAIL="alert@your.system.tld"
|
||||
SMTP_SERVER=smtp.your.isp.tld
|
||||
SMTP_PORT=25
|
||||
# encryption can be tls, ssl or none
|
||||
SMTP_ENCRYPTION=none
|
||||
SMTP_USER=
|
||||
SMTP_PASSWORD=
|
||||
|
||||
[BACKUP SETTINGS]
|
||||
|
||||
## Max execution time of whole backup process. Soft max exec time generates a warning only.
|
||||
## Hard max exec time generates a warning and stops the whole backup execution.
|
||||
SOFT_MAX_EXEC_TIME_TOTAL=30000
|
||||
HARD_MAX_EXEC_TIME_TOTAL=36000
|
||||
|
||||
## Log a message every KEEP_LOGGING seconds just to know the task is still alive
|
||||
KEEP_LOGGING=1801
|
||||
|
||||
## Backup Rotation. You may rotate backups if you don't use snapshots on your backup server.
|
||||
ROTATE_SQL_BACKUPS=false
|
||||
ROTATE_SQL_COPIES=7
|
||||
ROTATE_FILE_BACKUPS=false
|
||||
ROTATE_FILE_COPIES=7
|
||||
|
||||
[EXECUTION_HOOKS]
|
||||
|
||||
## Commands will be run before and / or after backup execution (remote execution will only happen if REMOTE_BACKUP is set).
|
||||
## This is useful to make a snapshot before backing up data, or even handle snapshots of backed up data.
|
||||
LOCAL_RUN_BEFORE_CMD=""
|
||||
LOCAL_RUN_AFTER_CMD=""
|
||||
|
||||
REMOTE_RUN_BEFORE_CMD=""
|
||||
REMOTE_RUN_AFTER_CMD=""
|
||||
|
||||
## Max execution time of commands before they get force killed. Leave 0 if you don't want this to happen. Time is specified in seconds.
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE=0
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER=0
|
||||
|
||||
## Stops whole backup execution if one of the above commands fails
|
||||
STOP_ON_CMD_ERROR=false
|
||||
|
||||
## Run local and remote after-backup commands even on failure
|
||||
RUN_AFTER_CMD_ON_ERROR=false
|
install.sh (2941 lines): file diff suppressed because it is too large
obackup-batch.sh (2295 lines): file diff suppressed because it is too large
obackup.sh (5857 lines): file diff suppressed because it is too large
ssh_filter.sh (118 lines)
@ -1,107 +1,53 @@
|
||||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
##### Osync ssh command filter build 2015070203
|
||||
##### osync / obackup ssh command filter
|
||||
##### This script should be located in /usr/local/bin in the remote system to sync / backup
|
||||
##### It will filter the commands that can be run remotely via ssh.
|
||||
##### Please chmod 755 and chown root:root this file
|
||||
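##### Illustrative install commands matching the instructions above (paths assumed, adjust as needed):
#####   cp ssh_filter.sh /usr/local/bin/ssh_filter.sh
#####   chmod 755 /usr/local/bin/ssh_filter.sh && chown root:root /usr/local/bin/ssh_filter.sh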
|
||||
##### Obackup needed commands: rsync find du mysql mysqldump (sudo)
|
||||
##### Osync needed commands: rsync find du echo mkdir rm if df (sudo)
|
||||
##### Any command that has env _REMOTE_TOKEN= with the corresponding token in it will be run
|
||||
##### Any other command will return a "syntax error"
|
||||
##### For details, see ssh_filter.log
|
||||
|
||||
## If enabled, execution of "sudo" command will be allowed.
|
||||
SCRIPT_BUILD=2017020802
|
||||
|
||||
## Allow sudo
|
||||
SUDO_EXEC=yes
|
||||
## Paranoia option. Don't change this unless you read the documentation and still feel concerned about security issues.
|
||||
RSYNC_EXECUTABLE=rsync
|
||||
## Enable other commands, useful for remote execution hooks like remotely creating snapshots.
|
||||
CMD1=
|
||||
CMD2=
|
||||
CMD3=
|
||||
|
||||
LOG_FILE=~/.ssh/ssh_filter.log
|
||||
## Log all valid commands too
|
||||
_DEBUG=no
|
||||
|
||||
function Log
|
||||
{
|
||||
## Set remote token in authorized_keys
|
||||
if [ "$1" != "" ]; then
|
||||
_REMOTE_TOKEN="${1}"
|
||||
fi
|
||||
|
||||
LOG_FILE="${HOME}/.ssh/ssh_filter.log"
|
||||
|
||||
function Log {
|
||||
DATE=$(date)
|
||||
echo "$DATE - $1" >> $LOG_FILE
|
||||
echo "$DATE - $1" >> "$LOG_FILE"
|
||||
}
|
||||
|
||||
function Go
|
||||
{
|
||||
eval $SSH_ORIGINAL_COMMAND
|
||||
function Go {
|
||||
if [ "$_DEBUG" == "yes" ]; then
|
||||
Log "Executing [$SSH_ORIGINAL_COMMAND]."
|
||||
fi
|
||||
eval "$SSH_ORIGINAL_COMMAND"
|
||||
}
|
||||
|
||||
case ${SSH_ORIGINAL_COMMAND%% *} in
|
||||
"$RSYNC_EXECUTABLE")
|
||||
Go ;;
|
||||
"find")
|
||||
Go ;;
|
||||
"du")
|
||||
Go ;;
|
||||
"mysql")
|
||||
Go ;;
|
||||
"mysqldump")
|
||||
Go ;;
|
||||
"$CMD1")
|
||||
if [ "$CMD1" != "" ]
|
||||
then
|
||||
Go ;;
|
||||
fi
|
||||
"$CMD2")
|
||||
if [ "$CMD2" != "" ]
|
||||
then
|
||||
Go ;;
|
||||
fi
|
||||
"$CMD3")
|
||||
if [ "$CMD3" != "" ]
|
||||
then
|
||||
Go ;;
|
||||
fi
|
||||
"sudo")
|
||||
if [ "$SUDO_EXEC" == "yes" ]
|
||||
then
|
||||
if [[ "$SSH_ORIGINAL_COMMAND" == "sudo $RSYNC_EXECUTABLE"* ]]
|
||||
then
|
||||
Go
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo du"* ]]
|
||||
then
|
||||
Go
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo find"* ]]
|
||||
then
|
||||
Go
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo mysql"* ]]
|
||||
then
|
||||
Go
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo mysqldump"* ]]
|
||||
then
|
||||
Go
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo $CMD1"* ]]
|
||||
then
|
||||
if [ "$CMD1" != "" ]
|
||||
then
|
||||
Go
|
||||
fi
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo $CMD2"* ]]
|
||||
then
|
||||
if [ "$CMD2" != "" ]
|
||||
then
|
||||
Go
|
||||
fi
|
||||
elif [[ "$SSH_ORIGINAL_COMMAND" == "sudo $CMD3"* ]]
|
||||
then
|
||||
if [ "$CMD3" != "" ]
|
||||
then
|
||||
Go
|
||||
fi
|
||||
else
|
||||
Log "Command [$SSH_ORIGINAL_COMMAND] not allowed."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
Log "Command [$SSH_ORIGINAL_COMMAND] not allowed. sudo not enabled."
|
||||
case "${SSH_ORIGINAL_COMMAND}" in
|
||||
*"env _REMOTE_TOKEN=$_REMOTE_TOKEN"*)
|
||||
if [ "$SUDO_EXEC" != "yes" ] && [[ $SSH_ORIGINAL_COMMAND == *"sudo "* ]]; then
|
||||
Log "Command [$SSH_ORIGINAL_COMMAND] contains sudo which is not allowed."
|
||||
echo "Syntax error unexpected end of file"
|
||||
exit 1
|
||||
fi
|
||||
Go
|
||||
;;
|
||||
*)
|
||||
Log "Command [$SSH_ORIGINAL_COMMAND] not allowed."
|
||||
echo "Syntax error near unexpected token"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
upgrade-v1.x-2.1x.sh (executable file, 397 lines)
@ -0,0 +1,397 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
PROGRAM="obackup.upgrade"
|
||||
SUBPROGRAM="obackup"
|
||||
AUTHOR="(C) 2016 by Orsiris de Jong"
|
||||
CONTACT="http://www.netpower.fr/obacup - ozy@netpower.fr"
|
||||
OLD_PROGRAM_VERSION="v1.x"
|
||||
NEW_PROGRAM_VERSION="v2.1x"
|
||||
CONFIG_FILE_REVISION=2.1
|
||||
PROGRAM_BUILD=2019102101
|
||||
|
||||
if ! type "$BASH" > /dev/null; then
|
||||
echo "Please run this script only with bash shell. Tested on bash >= 3.2"
|
||||
exit 127
|
||||
fi
|
||||
|
||||
# Defines all keywords / value sets in obackup configuration files
|
||||
# bash does not support two dimensional arrays, so we declare two arrays:
|
||||
# ${KEYWORDS[index]}=${VALUES[index]}
|
||||
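# For instance (illustrative note only), index 0 pairs KEYWORDS[0]=INSTANCE_ID with VALUES[0]=test-backup;
# AddMissingConfigOptions below walks both arrays with the same counter to add missing options.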
|
||||
KEYWORDS=(
|
||||
INSTANCE_ID
|
||||
LOGFILE
|
||||
SQL_BACKUP
|
||||
FILE_BACKUP
|
||||
BACKUP_TYPE
|
||||
SQL_STORAGE
|
||||
FILE_STORAGE
|
||||
ENCRYPTION
|
||||
CRYPT_STORAGE
|
||||
GPG_RECIPIENT
|
||||
PARALLEL_ENCRYPTION_PROCESSES
|
||||
CREATE_DIRS
|
||||
KEEP_ABSOLUTE_PATHS
|
||||
BACKUP_SIZE_MINIMUM
|
||||
GET_BACKUP_SIZE
|
||||
SQL_WARN_MIN_SPACE
|
||||
FILE_WARN_MIN_SPACE
|
||||
REMOTE_SYSTEM_URI
|
||||
SSH_RSA_PRIVATE_KEY
|
||||
SSH_PASSWORD_FILE
|
||||
_REMOTE_TOKEN
|
||||
SSH_COMPRESSION
|
||||
SSH_IGNORE_KNOWN_HOSTS
|
||||
RSYNC_REMOTE_PATH
|
||||
REMOTE_HOST_PING
|
||||
REMOTE_3RD_PARTY_HOSTS
|
||||
SUDO_EXEC
|
||||
SQL_USER
|
||||
DATABASES_ALL
|
||||
DATABASES_ALL_EXCLUDE_LIST
|
||||
DATABASES_LIST
|
||||
SOFT_MAX_EXEC_TIME_DB_TASK
|
||||
HARD_MAX_EXEC_TIME_DB_TASK
|
||||
MYSQLDUMP_OPTIONS
|
||||
COMPRESSION_LEVEL
|
||||
DIRECTORY_LIST
|
||||
RECURSIVE_DIRECTORY_LIST
|
||||
RECURSIVE_EXCLUDE_LIST
|
||||
RSYNC_PATTERN_FIRST
|
||||
RSYNC_INCLUDE_PATTERN
|
||||
RSYNC_EXCLUDE_PATTERN
|
||||
RSYNC_INCLUDE_FROM
|
||||
RSYNC_EXCLUDE_FROM
|
||||
PATH_SEPARATOR_CHAR
|
||||
RSYNC_OPTIONAL_ARGS
|
||||
PRESERVE_PERMISSIONS
|
||||
PRESERVE_OWNER
|
||||
PRESERVE_GROUP
|
||||
PRESERVE_EXECUTABILITY
|
||||
PRESERVE_ACL
|
||||
PRESERVE_XATTR
|
||||
COPY_SYMLINKS
|
||||
KEEP_DIRLINKS
|
||||
PRESERVE_HARDLINKS
|
||||
RSYNC_COMPRESS
|
||||
SOFT_MAX_EXEC_TIME_FILE_TASK
|
||||
HARD_MAX_EXEC_TIME_FILE_TASK
|
||||
PARTIAL
|
||||
DELETE_VANISHED_FILES
|
||||
DELTA_COPIES
|
||||
BANDWIDTH
|
||||
RSYNC_EXECUTABLE
|
||||
DESTINATION_MAILS
|
||||
MAIL_BODY_CHARSET
|
||||
SENDER_MAIL
|
||||
SMTP_SERVER
|
||||
SMTP_PORT
|
||||
SMTP_ENCRYPTION
|
||||
SMTP_USER
|
||||
SMTP_PASSWORD
|
||||
SOFT_MAX_EXEC_TIME_TOTAL
|
||||
HARD_MAX_EXEC_TIME_TOTAL
|
||||
KEEP_LOGGING
|
||||
ROTATE_SQL_BACKUPS
|
||||
ROTATE_SQL_COPIES
|
||||
ROTATE_FILE_BACKUPS
|
||||
ROTATE_FILE_COPIES
|
||||
LOCAL_RUN_BEFORE_CMD
|
||||
LOCAL_RUN_AFTER_CMD
|
||||
REMOTE_RUN_BEFORE_CMD
|
||||
REMOTE_RUN_AFTER_CMD
|
||||
MAX_EXEC_TIME_PER_CMD_BEFORE
|
||||
MAX_EXEC_TIME_PER_CMD_AFTER
|
||||
STOP_ON_CMD_ERROR
|
||||
RUN_AFTER_CMD_ON_ERROR
|
||||
)
|
||||
|
||||
VALUES=(
|
||||
test-backup
|
||||
''
|
||||
true
|
||||
true
|
||||
local
|
||||
/home/storage/sql
|
||||
/home/storage/files
|
||||
false
|
||||
/home/storage/crypt
|
||||
'Your Name used with GPG signature'
|
||||
''
|
||||
true
|
||||
true
|
||||
1024
|
||||
true
|
||||
1048576
|
||||
1048576
|
||||
ssh://backupuser@remote.system.tld:22/
|
||||
${HOME}/.ssh/id_rsa
|
||||
''
|
||||
SomeAlphaNumericToken9
|
||||
true
|
||||
false
|
||||
''
|
||||
true
|
||||
'www.kernel.org www.google.com'
|
||||
false
|
||||
root
|
||||
true
|
||||
test
|
||||
''
|
||||
3600
|
||||
7200
|
||||
'--opt --single-transaction'
|
||||
3
|
||||
''
|
||||
''
|
||||
'/lost+found;/tmp'
|
||||
include
|
||||
''
|
||||
''
|
||||
''
|
||||
''
|
||||
\;
|
||||
''
|
||||
true
|
||||
true
|
||||
true
|
||||
true
|
||||
false
|
||||
false
|
||||
true
|
||||
true
|
||||
false
|
||||
false
|
||||
3600
|
||||
7200
|
||||
false
|
||||
false
|
||||
true
|
||||
0
|
||||
rsync
|
||||
infrastructure@example.com
|
||||
''
|
||||
sender@example.com
|
||||
smtp.isp.tld
|
||||
25
|
||||
none
|
||||
''
|
||||
''
|
||||
30000
|
||||
36000
|
||||
1801
|
||||
false
|
||||
7
|
||||
false
|
||||
7
|
||||
''
|
||||
''
|
||||
''
|
||||
''
|
||||
0
|
||||
0
|
||||
false
|
||||
false
|
||||
)
|
||||
|
||||
function Usage {
|
||||
echo "$PROGRAM $PROGRAM_BUILD"
|
||||
echo $AUTHOR
|
||||
echo $CONTACT
|
||||
echo ""
|
||||
echo "This script migrates $SUBPROGRAM $OLD_PROGRAM_VERSION config files to $NEW_PROGRAM_VERSION."
|
||||
echo ""
|
||||
echo "Usage: $0 /path/to/config_file.conf"
|
||||
echo "Please make sure the config file is writable."
|
||||
exit 128
|
||||
}
|
||||
|
||||
function LoadConfigFile {
|
||||
local config_file="${1}"
|
||||
|
||||
if [ ! -f "$config_file" ]; then
|
||||
echo "Cannot load configuration file [$config_file]. Sync cannot start."
|
||||
exit 1
|
||||
elif [[ "$1" != *".conf" ]]; then
|
||||
echo "Wrong configuration file supplied [$config_file]. Sync cannot start."
|
||||
exit 1
|
||||
else
|
||||
egrep '^#|^[^ ]*=[^;&]*' "$config_file" > "./$SUBPROGRAM.$FUNCNAME.$$"
|
||||
source "./$SUBPROGRAM.$FUNCNAME.$$"
|
||||
rm -f "./$SUBPROGRAM.$FUNCNAME.$$"
|
||||
fi
|
||||
}
|
||||
|
||||
function CheckAndBackup {
|
||||
local config_file="${1}"
|
||||
|
||||
if ! grep "BACKUP_ID=" $config_file > /dev/null && ! grep "INSTANCE_ID=" $config_file > /dev/null; then
|
||||
echo "File [$config_file] does not seem to be a obackup config file."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Backing up [$config_file] as [$config_file.save]"
|
||||
cp -p "$config_file" "$config_file.save"
|
||||
if [ $? != 0 ]; then
|
||||
echo "Cannot backup config file."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
function RewriteOldConfigFiles {
|
||||
local config_file="${1}"
|
||||
|
||||
echo "Rewriting config file $config_file"
|
||||
|
||||
sed -i'.tmp' 's/^BACKUP_ID=/INSTANCE_ID=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^BACKUP_SQL=/SQL_BACKUP=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^BACKUP_FILES=/FILE_BACKUP=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^LOCAL_SQL_STORAGE=/SQL_STORAGE=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^LOCAL_FILE_STORAGE=/FILE_STORAGE=/g' "$config_file"
|
||||
|
||||
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=no/GET_BACKUP_SIZE=true/g' "$config_file"
|
||||
sed -i'.tmp' 's/^DISABLE_GET_BACKUP_FILE_SIZE=yes/GET_BACKUP_SIZE=false/g' "$config_file"
|
||||
sed -i'.tmp' 's/^LOCAL_STORAGE_KEEP_ABSOLUTE_PATHS=/KEEP_ABSOLUTE_PATHS=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^LOCAL_STORAGE_WARN_MIN_SPACE=/SQL_WARN_MIN_SPACE=/g' "$config_file"
|
||||
if ! grep "^FILE_WARN_MIN_SPACE=" "$config_file" > /dev/null; then
|
||||
VALUE=$(grep "SQL_WARN_MIN_SPACE=" "$config_file")
|
||||
VALUE="${VALUE#*=}"
|
||||
sed -i'.tmp' '/^SQL_WARN_MIN_SPACE=*/a\'$'\n''FILE_WARN_MIN_SPACE='$VALUE'\'$'\n''' "$config_file"
|
||||
fi
|
||||
sed -i'.tmp' 's/^DIRECTORIES_SIMPLE_LIST=/DIRECTORY_LIST=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^DIRECTORIES_RECURSE_LIST=/RECURSIVE_DIRECTORY_LIST=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^DIRECTORIES_RECURSE_EXCLUDE_LIST=/RECURSIVE_EXCLUDE_LIST=/g' "$config_file"
|
||||
sed -i'.tmp' 's/^ROTATE_BACKUPS=/ROTATE_SQL_BACKUPS=/g' "$config_file"
|
||||
if ! grep "^ROTATE_FILE_BACKUPS=" "$config_file" > /dev/null; then
|
||||
VALUE=$(grep "ROTATE_SQL_BACKUPS=" "$config_file")
|
||||
VALUE="${VALUE#*=}"
|
||||
sed -i'.tmp' '/^ROTATE_SQL_BACKUPS=*/a\'$'\n''ROTATE_FILE_BACKUPS='$VALUE'\'$'\n''' "$config_file"
|
||||
fi
|
||||
sed -i'.tmp' 's/^ROTATE_COPIES=/ROTATE_SQL_COPIES=/g' "$config_file"
|
||||
if ! grep "^ROTATE_FILE_COPIES=" "$config_file" > /dev/null; then
|
||||
VALUE=$(grep "ROTATE_SQL_COPIES=" "$config_file")
|
||||
VALUE="${VALUE#*=}"
|
||||
sed -i'.tmp' '/^ROTATE_SQL_COPIES=*/a\'$'\n''ROTATE_FILE_COPIES='$VALUE'\'$'\n''' "$config_file"
|
||||
fi
|
||||
REMOTE_BACKUP=$(grep "REMOTE_BACKUP=" "$config_file")
|
||||
REMOTE_BACKUP="${REMOTE_BACKUP#*=}"
|
||||
if [ "$REMOTE_BACKUP" == "yes" ]; then
|
||||
REMOTE_USER=$(grep "REMOTE_USER=" "$config_file")
|
||||
REMOTE_USER="${REMOTE_USER#*=}"
|
||||
REMOTE_HOST=$(grep "REMOTE_HOST=" "$config_file")
|
||||
REMOTE_HOST="${REMOTE_HOST#*=}"
|
||||
REMOTE_PORT=$(grep "REMOTE_PORT=" "$config_file")
|
||||
REMOTE_PORT="${REMOTE_PORT#*=}"
|
||||
|
||||
REMOTE_SYSTEM_URI="ssh://$REMOTE_USER@$REMOTE_HOST:$REMOTE_PORT/"
|
||||
|
||||
sed -i'.tmp' 's#^REMOTE_BACKUP=true#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file"
|
||||
sed -i'.tmp' 's#^REMOTE_BACKUP=yes#REMOTE_SYSTEM_URI='$REMOTE_SYSTEM_URI'#g' "$config_file"
|
||||
sed -i'.tmp' '/^REMOTE_USER==*/d' "$config_file"
|
||||
sed -i'.tmp' '/^REMOTE_HOST==*/d' "$config_file"
|
||||
sed -i'.tmp' '/^REMOTE_PORT==*/d' "$config_file"
|
||||
|
||||
sed -i'.tmp' '/^INSTANCE_ID=*/a\'$'\n''BACKUP_TYPE=pull\'$'\n''' "$config_file"
|
||||
else
|
||||
if ! grep "^BACKUP_TYPE=" "$config_file" > /dev/null; then
|
||||
sed -i'.tmp' '/^INSTANCE_ID=*/a\'$'\n''BACKUP_TYPE=local\'$'\n''' "$config_file"
|
||||
fi
|
||||
fi
|
||||
sed -i'.tmp' 's/^REMOTE_3RD_PARTY_HOST=/REMOTE_3RD_PARTY_HOSTS=/g' "$config_file"
|
||||
}
|
||||
|
||||
function AddMissingConfigOptions {
|
||||
local config_file="${1}"
|
||||
local counter=0
|
||||
|
||||
while [ $counter -lt ${#KEYWORDS[@]} ]; do
|
||||
if ! grep "^${KEYWORDS[$counter]}=" > /dev/null "$config_file"; then
|
||||
echo "${KEYWORDS[$counter]} not found"
|
||||
if [ $counter -gt 0 ]; then
|
||||
if [ "${VALUES[$counter]}" == true ] || [ "${VALUES[$counter]}" == false ]; then
|
||||
sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'='"${VALUES[$counter]}"'\'$'\n''' "$config_file"
|
||||
else
|
||||
sed -i'.tmp' '/^'${KEYWORDS[$((counter-1))]}'=*/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Cannot add missing ${[KEYWORDS[$counter]}."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
if [ "${VALUES[$counter]}" == true ] || [ "${VALUES[$counter]}" == false ]; then
|
||||
sed -i'.tmp' '/^\[GENERAL\]$/a\'$'\n'${KEYWORDS[$counter]}'='"${VALUES[$counter]}"'\'$'\n''' "$config_file"
|
||||
else
|
||||
sed -i'.tmp' '/^\[GENERAL\]$/a\'$'\n'${KEYWORDS[$counter]}'="'"${VALUES[$counter]}"'"\'$'\n''' "$config_file"
|
||||
fi
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Cannot add missing ${[KEYWORDS[$counter]}."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
echo "Added missing ${KEYWORDS[$counter]} config option with default option [${VALUES[$counter]}]"
|
||||
else
|
||||
# Not the most elegant but the quickest way :)
|
||||
if grep "^${KEYWORDS[$counter]}=yes$" > /dev/null "$config_file"; then
|
||||
sed -i'.tmp' 's/^'${KEYWORDS[$counter]}'=.*/'${KEYWORDS[$counter]}'=true/g' "$config_file"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Cannot rewrite ${[KEYWORDS[$counter]} boolean to true."
|
||||
exit 1
|
||||
fi
|
||||
elif grep "^${KEYWORDS[$counter]}=no$" > /dev/null "$config_file"; then
|
||||
sed -i'.tmp' 's/^'${KEYWORDS[$counter]}'=.*/'${KEYWORDS[$counter]}'=false/g' "$config_file"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Cannot rewrite ${[KEYWORDS[$counter]} boolean to false."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
counter=$((counter+1))
|
||||
done
|
||||
}
|
||||
|
||||
function RewriteSections {
|
||||
local config_file="${1}"
|
||||
|
||||
# Earlier config files have GENERAL BACKUP OPTIONS set twice. Let's replace the first one only. sed does a horrible job doing this, at least if portability is required
|
||||
awk '/###### GENERAL BACKUP OPTIONS/ && !done { gsub(/###### GENERAL BACKUP OPTIONS/, "[GENERAL]"); done=1}; 1' "$config_file" >> "$config_file.tmp"
|
||||
#sed -i'.tmp' 's/###### GENERAL BACKUP OPTIONS/[GENERAL]/' "$config_file"
|
||||
# Fix using earlier tmp file from awk
|
||||
sed 's/###### BACKUP STORAGE/[BACKUP STORAGE]/g' "$config_file.tmp" > "$config_file"
|
||||
sed -i'.tmp' 's/###### REMOTE ONLY OPTIONS/[REMOTE_OPTIONS]/g' "$config_file"
|
||||
sed -i'.tmp' 's/###### DATABASE SPECIFIC OPTIONS/[DATABASE BACKUP SETTINGS]/g' "$config_file"
|
||||
sed -i'.tmp' 's/###### FILES SPECIFIC OPTIONS/[FILE BACKUP SETTINGS]/g' "$config_file"
|
||||
sed -i'.tmp' 's/###### ALERT OPTIONS/[ALERT_OPTIONS]/g' "$config_file"
|
||||
sed -i'.tmp' 's/###### GENERAL BACKUP OPTIONS/[BACKUP SETTINGS]/g' "$config_file"
|
||||
sed -i'.tmp' 's/###### EXECUTION HOOKS/[EXECUTION_HOOKS]/g' "$config_file"
|
||||
}
|
||||
|
||||
function UpdateConfigHeader {
|
||||
local config_file="${1}"
|
||||
|
||||
if ! grep "^CONFIG_FILE_REVISION=" > /dev/null "$config_file"; then
|
||||
if grep "\[GENERAL\]" > /dev/null "$config_file"; then
|
||||
sed -i'.tmp' '/^\[GENERAL\]$/a\'$'\n'CONFIG_FILE_REVISION=$CONFIG_FILE_REVISION$'\n''' "$config_file"
|
||||
else
|
||||
sed -i'.tmp' '/.*onfig file rev.*/a\'$'\n'CONFIG_FILE_REVISION=$CONFIG_FILE_REVISION$'\n''' "$config_file"
|
||||
fi
|
||||
# "onfig file rev" to deal with earlier variants of the file
|
||||
#sed -i'.tmp' 's/.*onfig file rev.*//' "$config_file"
|
||||
|
||||
fi
|
||||
}
|
||||
|
||||
if [ "$1" != "" ] && [ -f "$1" ] && [ -w "$1" ]; then
|
||||
CONF_FILE="$1"
|
||||
# Make sure there is no ending slash
|
||||
CONF_FILE="${CONF_FILE%/}"
|
||||
LoadConfigFile "$CONF_FILE"
|
||||
CheckAndBackup "$CONF_FILE"
|
||||
RewriteSections "$CONF_FILE"
|
||||
RewriteOldConfigFiles "$CONF_FILE"
|
||||
AddMissingConfigOptions "$CONF_FILE"
|
||||
UpdateConfigHeader "$CONF_FILE"
|
||||
rm -f "$CONF_FILE.tmp"
|
||||
echo "Configuration file upgrade finished"
|
||||
else
|
||||
Usage
|
||||
fi
|