Compare commits

isoc_f1__s ... main (49 commits)

| Author | SHA1 |
|---|---|
|  | e2d38521f2 |
|  | 572253936f |
| pedro | cf066b0b11 |
| Cayo Puigdefabregas | 82e60dff24 |
| Cayo Puigdefabregas | 4d2905799f |
| Cayo Puigdefabregas | 7f224618ee |
| Cayo Puigdefabregas | ea6fcf174b |
| Cayo Puigdefabregas | 2eb6154267 |
| Cayo Puigdefabregas | f90d67ff4d |
| Cayo Puigdefabregas | 69654d1e7f |
| Cayo Puigdefabregas | 7c1e0a4870 |
| Cayo Puigdefabregas | ad2b375346 |
|  | 8914c309ec |
| Cayo Puigdefabregas | b4e18e52da |
| Cayo Puigdefabregas | 68a2df1b28 |
| Cayo Puigdefabregas | b3003a4e00 |
| Cayo Puigdefabregas | 83e717fb5e |
| Cayo Puigdefabregas | 9832cbf5f2 |
| Cayo Puigdefabregas | 57deefdcf4 |
| Cayo Puigdefabregas | 5a50be493b |
| Cayo Puigdefabregas | 03a25a4a63 |
| Cayo Puigdefabregas | 56f15f6eae |
|  | b255dddff7 |
|  | fb7e768229 |
|  | 768851090a |
|  | 4cb3e34b6b |
|  | b4d86fcc12 |
|  | a857db5de1 |
|  | 192861b47e |
|  | f7c3e138f1 |
|  | 13f38dacb8 |
|  | cf81579819 |
|  | 2d0048433b |
|  | 3fb1cd19dc |
|  | d9e85dca36 |
|  | 12209c84fa |
|  | 55eec35d58 |
| pedro | 5889e81f04 |
|  | c1867d1ce3 |
|  | 8ac0da99fe |
| Cayo Puigdefabregas | 71e06b13a4 |
| Cayo Puigdefabregas | 82f93a9446 |
| Cayo Puigdefabregas | b1136e3dd8 |
|  | af780b1247 |
|  | da7b78eae3 |
|  | e8b1d62290 |
|  | 3e5e151bef |
| Cayo Puigdefabregas | 81a3c5240b |
|  | 09b7f085ef |
Makefile (20 changed lines)
```diff
@@ -44,20 +44,14 @@ boot_iso_uefi_secureboot:
 		-drive file=deploy/iso/workbench_debug.iso,cache=none,if=virtio,format=raw,index=0,media=disk \
 		-boot menu=on
 
-test_usody_sanitize:
-	# TODO adapt settings accordingly for this test
-	# ERASE=y ./deploy-workbench.sh
-	# create 3 disks for testing
-	qemu-img create -f raw test_sanitize_disk1.img 1G
-	qemu-img create -f raw test_sanitize_disk2.img 1G
-	qemu-img create -f raw test_sanitize_disk3.img 1G
-	sudo qemu-system-x86_64 \
-		-enable-kvm -m 2G -vga qxl -netdev user,id=wan -device virtio-net,netdev=wan,id=nic1 \
-		-drive format=raw,file=iso/workbench_debug.iso,cache=none,if=virtio \
-		-drive format=raw,file=test_sanitize_disk1.img,cache=none,if=virtio \
-		-drive format=raw,file=test_sanitize_disk2.img,cache=none,if=virtio \
-		-drive format=raw,file=test_sanitize_disk3.img,cache=none,if=virtio
+# when you change something, you need to refresh it this way
+regenerate_pxe_install:
+	./deploy-workbench.sh
+	pxe/install-pxe.sh
+
+es_gen:
+	$(MAKE) es_gen_po
+	$(MAKE) es_gen_mo
 
 es_gen_po:
 	cp locale/es/LC_MESSAGES/messages.po locale/es/LC_MESSAGES/messages.pot.bak
```
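The new `es_gen` target chains the two gettext steps (`es_gen_po`, `es_gen_mo`); only `es_gen_po` is visible in this hunk. As a rough sketch of what the compile step typically amounts to (assuming the stock GNU gettext tools are installed; this helper is not part of the repository):

```python
# Hypothetical helper: compiles the .po catalog into the binary .mo that
# gettext loads at runtime, as an `es_gen_mo`-style make target usually does.
import subprocess

PO = "locale/es/LC_MESSAGES/messages.po"
MO = "locale/es/LC_MESSAGES/messages.mo"

def compile_catalog(po: str = PO, mo: str = MO) -> None:
    # msgfmt ships with GNU gettext; --check validates the catalog while compiling.
    subprocess.run(["msgfmt", "--check", "-o", mo, po], check=True)

if __name__ == "__main__":
    compile_catalog()
```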
```diff
@@ -199,12 +199,12 @@ create_persistence_partition() {
     mkdir -p "${tmp_rw_mount}"
     ${SUDO} mount "$(pwd)/${rw_img_path}" "${tmp_rw_mount}"
     ${SUDO} mkdir -p "${tmp_rw_mount}"
-    if [ -f "settings.ini" ]; then
-      ${SUDO} cp -v settings.ini "${tmp_rw_mount}/settings.ini"
-    else
-      echo "ERROR: settings.ini does not exist yet, cannot read config from there. You can take inspiration with file settings.ini.example"
-      exit 1
+    if [ ! -f "settings.ini" ]; then
+      ${SUDO} cp -v settings.ini.example settings.ini
+      echo "WARNING: settings.ini was not there, settings.ini.example was copied, this only happens once"
     fi
+    ${SUDO} cp -v settings.ini "${tmp_rw_mount}/settings.ini"
 
     ${SUDO} umount "${tmp_rw_mount}"
 
     uuid="$(blkid "${rw_img_path}" | awk '{ print $3; }')"
```
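The change above flips the logic from "fail when settings.ini is missing" to "seed it from settings.ini.example once, then copy it into the persistence partition". The same copy-the-example-once pattern reappears below for the NFS share, `pxe-menu.cfg` and `.env`. A minimal Python sketch of the idea (file names are only illustrative):

```python
# Sketch only: the "copy the example once" bootstrap used by the shell
# scripts, expressed in Python.
import shutil
from pathlib import Path

def ensure_config(path: str = "settings.ini",
                  example: str = "settings.ini.example") -> Path:
    cfg = Path(path)
    if not cfg.exists():
        # First run: seed the real config from the tracked example file.
        shutil.copy(example, cfg)
        print(f"WARNING: {path} was not there, {example} was copied, "
              "this only happens once")
    return cfg
```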
```diff
@@ -253,6 +253,27 @@ END2
 END
 )"
 
+# thanks https://wiki.debian.org/Keyboard
+chroot_kbd_conf_str="$(cat<<END
+chroot_kbd_conf() {
+  ###################
+  # configure keyboard
+  cat > /etc/default/keyboard <<END2
+# KEYBOARD CONFIGURATION FILE
+# generated by deploy-workbench.sh
+
+# Consult the keyboard(5) manual page.
+
+XKBMODEL="pc105"
+XKBLAYOUT="\${CUSTOM_LANG}"
+
+BACKSPACE="guess"
+END2
+}
+END
+)"
+
 prepare_app() {
   # prepare app during prepare_chroot_env
   workbench_dir="${ISO_PATH}/chroot/opt/workbench"
```
```diff
@@ -264,8 +285,6 @@ prepare_app() {
 
   # startup script execution
   cat > "${ISO_PATH}/chroot/root/.profile" <<END
-# pipx path for usody-sanitize
-PATH="${PATH}:/root/.local/bin"
 if [ -f /tmp/workbench_lock ]; then
   return 0
 else
@@ -282,13 +301,15 @@ if [ "\${nfs_host}" ]; then
   mount --bind /run/live/medium /mnt
   # debian live nfs path is readonly, do a trick
   # to make snapshots subdir readwrite
-  mount \${nfs_host}:/snapshots /run/live/medium/snapshots
+  mount -v \${nfs_host}:/snapshots /run/live/medium/snapshots
   # reload mounts on systemd
   systemctl daemon-reload
 fi
 # clearly specify the right working directory, used in the python script as os.getcwd()
 cd /mnt
-pipenv run python /opt/workbench/workbench-script.py --config /mnt/settings.ini
+#pipenv run python /opt/workbench/workbench-script.py --config /mnt/settings.ini
+# works meanwhile this project is vanilla python
+python /opt/workbench/workbench-script.py --config /mnt/settings.ini
 
 stty echo
 set +x
```
```diff
@@ -304,18 +325,16 @@ echo 'Install requirements'
 
 # Install debian requirements
 apt-get install -y --no-install-recommends \
-  sudo locales \
+  sudo locales keyboard-configuration console-setup qrencode \
   python-is-python3 python3 python3-dev python3-pip pipenv \
-  dmidecode smartmontools hwinfo pciutils lshw nfs-common pipx < /dev/null
+  dmidecode smartmontools hwinfo pciutils lshw nfs-common inxi < /dev/null
 
-pipx install usody-sanitize
-
 # Install lshw B02.19 utility using backports (DEPRECATED in Debian 12)
 #apt install -y -t ${VERSION_CODENAME}-backports lshw < /dev/null
 
-echo 'Install usody-sanitize requirements'
+echo 'Install sanitize requirements'
 
-# Install usody-sanitize debian requirements
+# Install sanitize debian requirements
 apt-get install -y --no-install-recommends \
   hdparm nvme-cli < /dev/null
 
```
```diff
@@ -364,8 +383,15 @@ ${install_app_str}
 # thanks src https://serverfault.com/questions/362903/how-do-you-set-a-locale-non-interactively-on-debian-ubuntu
 export LANG=${LANG}
 export LC_ALL=${LANG}
+echo "${MYLOCALE}" > /etc/locale.gen
+# Generate the locale
+locale-gen
+# feeds /etc/default/locale for the shell env var
+update-locale LANG=${LANG} LC_ALL=${LANG}
 # this is a high level command that does locale-gen and update-locale altogether
-dpkg-reconfigure --frontend=noninteractive locales
+# but it is too interactive
+#dpkg-reconfigure --frontend=noninteractive locales
+# DEBUG
 locale -a
 
 # Autologin root user
```
```diff
@@ -390,6 +416,9 @@ apt-get install -y --no-install-recommends \
   < /dev/null
 
 ${chroot_netdns_conf_str}
+CUSTOM_LANG=${CUSTOM_LANG}
+${chroot_kbd_conf_str}
+chroot_kbd_conf
 
 # Set up root user
 # this is the root password
```
```diff
@@ -409,7 +438,19 @@ CHROOT
 }
 
 prepare_chroot_env() {
-  LANG="${CUSTOM_LANG:-es_ES.UTF-8}"
+  CUSTOM_LANG="${CUSTOM_LANG:-es}"
+  case "${CUSTOM_LANG}" in
+    es)
+      export LANG="es_ES.UTF-8"
+      export MYLOCALE="${LANG} UTF-8"
+      ;;
+    en)
+      export LANG="en_US.UTF-8"
+      ;;
+    *)
+      echo "ERROR: CUSTOM_LANG not supported. Available: es"
+      exit 1
+  esac
   # version of debian the bootstrap is going to build
   # if no VERSION_CODENAME is specified we assume that the bootstrap is going to
   # be build with the same version of debian being executed because some files
```
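`CUSTOM_LANG` now selects the build locale (for example `CUSTOM_LANG=en ./deploy-workbench.sh`, following the `${CUSTOM_LANG:-es}` default above). A small sketch of the same selection expressed as a lookup table; note it assumes `en` would get an equivalent `MYLOCALE` value, which the shell `case` above does not actually set:

```python
# Illustrative only: the CUSTOM_LANG -> locale selection as a lookup table.
import os
import sys

LOCALES = {
    "es": ("es_ES.UTF-8", "es_ES.UTF-8 UTF-8"),
    "en": ("en_US.UTF-8", "en_US.UTF-8 UTF-8"),  # MYLOCALE value assumed here
}

def pick_locale() -> str:
    lang = os.environ.get("CUSTOM_LANG", "es")
    if lang not in LOCALES:
        sys.exit("ERROR: CUSTOM_LANG not supported. "
                 f"Available: {', '.join(sorted(LOCALES))}")
    os.environ["LANG"], os.environ["MYLOCALE"] = LOCALES[lang]
    return lang
```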
```diff
@@ -433,6 +474,7 @@ prepare_chroot_env() {
   prepare_app
 }
 
+
 # thanks https://willhaley.com/blog/custom-debian-live-environment/
 install_requirements() {
   # Install requirements
```
docs/dev-es.md (112 changed lines, file removed)

@@ -1,112 +0,0 @@

The whole file is deleted. It documented, in Spanish, the project's initial minimalist disk-erase approach ("borrado minimalista"): a first set of erase functions (erase_basic, erase_baseline, erase_enhanced, ata_secure_erase_null, ata_secure_erase_enhanced and nvme_secure_erase) built on shred, badblocks, hdparm and nvme-cli, an approach that had been set aside in favour of the more advanced [usody-sanitize](https://github.com/usody/sanitize/) tool. The same function bodies and docstrings are re-added verbatim to workbench-script.py further down in this compare.
```diff
@@ -9,7 +9,7 @@ set -u
 set -x
 
 main() {
-    sudo apt install smartmontools lshw hwinfo dmidecode
+    sudo apt install qrencode smartmontools lshw hwinfo dmidecode inxi
 }
 
 main "${@}"
```

Binary file not shown.
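`qrencode` joins the package list here because the workbench script later renders the snapshot's public URL as a terminal QR code with `echo <url> | qrencode -t ANSI` (see the send_snapshot_to_devicehub changes below). A small sketch of the same call from Python; the URL is only a placeholder:

```python
# Sketch: render a URL as an ANSI QR code in the terminal, the way the
# workbench script shells out to qrencode. Requires the qrencode package.
import subprocess

def print_qr(url: str) -> None:
    out = subprocess.run(
        ["qrencode", "-t", "ANSI"],   # same output type the script uses
        input=url.encode(),           # qrencode reads the data from stdin
        capture_output=True,
        check=True,
    )
    print(out.stdout.decode())

print_qr("http://localhost:8000/device/example")  # placeholder URL
```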
|
@ -8,7 +8,7 @@ msgid ""
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Project-Id-Version: PACKAGE VERSION\n"
|
"Project-Id-Version: PACKAGE VERSION\n"
|
||||||
"Report-Msgid-Bugs-To: \n"
|
"Report-Msgid-Bugs-To: \n"
|
||||||
"POT-Creation-Date: 2024-10-15 21:15+0200\n"
|
"POT-Creation-Date: 2024-11-08 18:25+0100\n"
|
||||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||||
|
@ -17,19 +17,19 @@ msgstr ""
|
||||||
"Content-Type: text/plain; charset=UTF-8\n"
|
"Content-Type: text/plain; charset=UTF-8\n"
|
||||||
"Content-Transfer-Encoding: 8bit\n"
|
"Content-Transfer-Encoding: 8bit\n"
|
||||||
|
|
||||||
#: workbench-script.py:48 workbench-script.py:53
|
#: workbench-script.py:49 workbench-script.py:54
|
||||||
msgid "Running command `%s`"
|
msgid "Running command `%s`"
|
||||||
msgstr "Ejecutando comando `%s`"
|
msgstr "Ejecutando comando `%s`"
|
||||||
|
|
||||||
#: workbench-script.py:284
|
#: workbench-script.py:279
|
||||||
msgid "Created snapshots directory at '%s'"
|
msgid "Created snapshots directory at '%s'"
|
||||||
msgstr "Creado directorio de snapshots en '%s'"
|
msgstr "Creado directorio de snapshots en '%s'"
|
||||||
|
|
||||||
#: workbench-script.py:287
|
#: workbench-script.py:282
|
||||||
msgid "Snapshot written in path '%s'"
|
msgid "Snapshot written in path '%s'"
|
||||||
msgstr "Snapshot escrito en ruta '%s'"
|
msgstr "Snapshot escrito en ruta '%s'"
|
||||||
|
|
||||||
#: workbench-script.py:290
|
#: workbench-script.py:285
|
||||||
msgid ""
|
msgid ""
|
||||||
"Attempting to save file in actual path. Reason: Failed to write in snapshots "
|
"Attempting to save file in actual path. Reason: Failed to write in snapshots "
|
||||||
"directory:\n"
|
"directory:\n"
|
||||||
|
@ -39,11 +39,11 @@ msgstr ""
|
||||||
"escribir en el directorio de snapshots:\n"
|
"escribir en el directorio de snapshots:\n"
|
||||||
" %s."
|
" %s."
|
||||||
|
|
||||||
#: workbench-script.py:297
|
#: workbench-script.py:292
|
||||||
msgid "Snapshot written in fallback path '%s'"
|
msgid "Snapshot written in fallback path '%s'"
|
||||||
msgstr "Snapshot escrito en ruta alternativa '%s'"
|
msgstr "Snapshot escrito en ruta alternativa '%s'"
|
||||||
|
|
||||||
#: workbench-script.py:299
|
#: workbench-script.py:294
|
||||||
msgid ""
|
msgid ""
|
||||||
"Could not save snapshot locally. Reason: Failed to write in fallback path:\n"
|
"Could not save snapshot locally. Reason: Failed to write in fallback path:\n"
|
||||||
" %s"
|
" %s"
|
||||||
|
@ -52,49 +52,53 @@ msgstr ""
|
||||||
"alternativa:\n"
|
"alternativa:\n"
|
||||||
" %s"
|
" %s"
|
||||||
|
|
||||||
#: workbench-script.py:316
|
#: workbench-script.py:317
|
||||||
msgid "Snapshot successfully sent to '%s'"
|
msgid "Snapshot successfully sent to '%s'"
|
||||||
msgstr "Snapshot enviado con éxito a '%s'"
|
msgstr "Snapshot enviado con éxito a '%s'"
|
||||||
|
|
||||||
#: workbench-script.py:331
|
#: workbench-script.py:335
|
||||||
|
msgid "Snapshot %s could not be sent to URL '%s'"
|
||||||
|
msgstr "Snapshot %s no se pudo enviar a la URL '%s'"
|
||||||
|
|
||||||
|
#: workbench-script.py:338
|
||||||
msgid ""
|
msgid ""
|
||||||
"Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server "
|
"Snapshot %s not remotely sent to URL '%s'. Do you have internet? Is your "
|
||||||
"up & running? Is the url token authorized?\n"
|
"server up & running? Is the url token authorized?\n"
|
||||||
" %s"
|
" %s"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Snapshot no enviado remotamente a la URL '%s'. Tienes internet? Está el "
|
"Snapshot %s no enviado remotamente a la URL '%s'. Tienes internet? Está el "
|
||||||
"servidor en marcha? Está autorizado el url token?\n"
|
"servidor en marcha? Está autorizado el url token?\n"
|
||||||
" %s"
|
" %s"
|
||||||
|
|
||||||
#: workbench-script.py:342
|
#: workbench-script.py:350
|
||||||
msgid "Found config file in path: %s."
|
msgid "Found config file in path: %s."
|
||||||
msgstr "Encontrado fichero de configuración en ruta: %s."
|
msgstr "Encontrado fichero de configuración en ruta: %s."
|
||||||
|
|
||||||
#: workbench-script.py:353
|
#: workbench-script.py:361
|
||||||
msgid "Config file '%s' not found. Using default values."
|
msgid "Config file '%s' not found. Using default values."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"Fichero de configuración '%s' no encontrado. Utilizando valores por defecto."
|
"Fichero de configuración '%s' no encontrado. Utilizando valores por defecto."
|
||||||
|
|
||||||
#: workbench-script.py:373
|
#: workbench-script.py:379
|
||||||
msgid "workbench-script.py [-h] [--config CONFIG]"
|
msgid "workbench-script.py [-h] [--config CONFIG]"
|
||||||
msgstr ""
|
msgstr ""
|
||||||
|
|
||||||
#: workbench-script.py:374
|
#: workbench-script.py:380
|
||||||
msgid "Optional config loader for workbench."
|
msgid "Optional config loader for workbench."
|
||||||
msgstr "Cargador opcional de configuración para workbench"
|
msgstr "Cargador opcional de configuración para workbench"
|
||||||
|
|
||||||
#: workbench-script.py:377
|
#: workbench-script.py:383
|
||||||
msgid ""
|
msgid ""
|
||||||
"path to the config file. Defaults to 'settings.ini' in the current directory."
|
"path to the config file. Defaults to 'settings.ini' in the current directory."
|
||||||
msgstr ""
|
msgstr ""
|
||||||
"ruta al fichero de configuración. Por defecto es 'settings.ini' en el "
|
"ruta al fichero de configuración. Por defecto es 'settings.ini' en el "
|
||||||
"directorio actual"
|
"directorio actual"
|
||||||
|
|
||||||
#: workbench-script.py:410
|
#: workbench-script.py:416
|
||||||
msgid "START"
|
msgid "START"
|
||||||
msgstr "INICIO"
|
msgstr "INICIO"
|
||||||
|
|
||||||
#: workbench-script.py:423
|
#: workbench-script.py:430
|
||||||
msgid ""
|
msgid ""
|
||||||
"This script must be run as root. Collected data will be incomplete or "
|
"This script must be run as root. Collected data will be incomplete or "
|
||||||
"unusable"
|
"unusable"
|
||||||
|
@ -102,6 +106,6 @@ msgstr ""
|
||||||
"Es conveniente que este script sea ejecutado como administrador (root). Los "
|
"Es conveniente que este script sea ejecutado como administrador (root). Los "
|
||||||
"datos recopilados serán incompletos o no usables."
|
"datos recopilados serán incompletos o no usables."
|
||||||
|
|
||||||
#: workbench-script.py:441
|
#: workbench-script.py:448
|
||||||
msgid "END"
|
msgid "END"
|
||||||
msgstr "FIN"
|
msgstr "FIN"
|
||||||
|
|
|
```diff
@@ -1,4 +1,5 @@
-server_ip=192.168.1.2
-nfs_allowed_lan=192.168.1.0/24
+# assuming server_ip using qemu
+server_ip=10.0.2.1
+nfs_allowed_lan=10.0.2.0/24
 tftp_path='/srv/pxe-tftp'
 nfs_path='/srv/pxe-nfs'
```
```diff
@@ -37,7 +37,7 @@ backup_file() {
 
   if [ -f "${target}" ]; then
     if ! grep -q 'we should do a backup' "${target}"; then
-      ${SUDO} cp -a "${target}" "${target}-bak_${ts}"
+      ${SUDO} cp -v -a "${target}" "${target}-bak_${ts}"
     fi
   fi
 }
```
```diff
@@ -69,14 +69,14 @@ END
   # reload nfs exports
   ${SUDO} exportfs -vra
 
+  if [ ! -f ./settings.ini ]; then
+    cp -v ./settings.ini.example ./settings.ini
+    echo "WARNING: settings.ini was not there, settings.ini.example was copied, this only happens once"
+  fi
+
   if [ ! -f "${nfs_path}/settings.ini" ]; then
-    if [ -f "settings.ini" ]; then
-      ${SUDO} cp settings.ini "${nfs_path}/settings.ini"
-    else
-      echo "ERROR: $(pwd)/settings.ini does not exist yet, cannot read config from there. You can take inspiration with file $(pwd)/settings.ini.example"
-      exit 1
-    fi
+    ${SUDO} cp -v settings.ini "${nfs_path}/settings.ini"
+    echo "WARNING: ${nfs_path}/settings.ini was not there, ./settings.ini was copied, this only happens once"
   fi
 }
```
```diff
@@ -93,6 +93,7 @@ pxe-service=x86PC,"Network Boot",pxelinux
 enable-tftp
 tftp-root=${tftp_path}
 END
+  sudo systemctl restart dnsmasq || true
 }
 
 install_netboot() {
```
```diff
@@ -110,8 +111,12 @@ install_netboot() {
     ${SUDO} cp -fv "${PXE_DIR}/../iso/staging/live/vmlinuz" "${tftp_path}/"
     ${SUDO} cp -fv "${PXE_DIR}/../iso/staging/live/initrd" "${tftp_path}/"
 
-    ${SUDO} cp /usr/lib/syslinux/memdisk "${tftp_path}/"
-    ${SUDO} cp /usr/lib/syslinux/modules/bios/* "${tftp_path}/"
+    ${SUDO} cp -v /usr/lib/syslinux/memdisk "${tftp_path}/"
+    ${SUDO} cp -v /usr/lib/syslinux/modules/bios/* "${tftp_path}/"
+    if [ ! -f ./pxe-menu.cfg ]; then
+      ${SUDO} cp -v ./pxe-menu.cfg.example pxe-menu.cfg
+      echo "WARNING: pxe-menu.cfg was not there, pxe-menu.cfg.example was copied, this only happens once"
+    fi
     envsubst < ./pxe-menu.cfg | ${SUDO} tee "${tftp_path}/pxelinux.cfg/default"
   fi
 
```
```diff
@@ -128,11 +133,11 @@ init_config() {
 
   PXE_DIR="$(pwd)"
 
-  if [ -f ./.env ]; then
-    . ./.env
-  else
-    echo "PXE: WARNING: $(pwd)/.env does not exist yet, cannot read config from there. You can take inspiration with file $(pwd)/.env.example"
+  if [ ! -f ./.env ]; then
+    cp -v ./.env.example ./.env
+    echo "WARNING: .env was not there, .env.example was copied, this only happens once"
   fi
+  . ./.env
   VERSION_CODENAME="${VERSION_CODENAME:-bookworm}"
   tftp_path="${tftp_path:-/srv/pxe-tftp}"
   # vars used in envsubst require to be exported:
```
```diff
@@ -1,7 +1,14 @@
 [settings]
-url = http://localhost:8000/api/snapshot/
-token = '1234'
+url = http://localhost:8000/api/v1/snapshot/
+#url = https://demo.ereuse.org/api/v1/snapshot/
+# sample token that works with default deployment such as the previous two urls
+token = 5018dd65-9abd-4a62-8896-80f34ac66150
+
+# Idhub
+# wb_sign_token = "27de6ad7-cee2-4fe8-84d4-c7eea9c969c8"
+# url_wallet = "http://localhost"
+
 # path = /path/to/save
 # device = your_device_name
 # # erase = basic
-# legacy = true
+# legacy = True
```
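These keys are read by `load_config()` in workbench-script.py (shown further down in this compare). A minimal sketch of how the new `url_wallet` / `wb_sign_token` options are picked up, assuming the same configparser layout as the example file above:

```python
# Sketch of the settings.ini parsing done by load_config(); the section and
# key names follow the example file above.
import configparser

config = configparser.ConfigParser()
config.read("settings.ini")

url = config.get("settings", "url", fallback=None)
token = config.get("settings", "token", fallback=None)
# New in this change set: optional IdHub signing endpoint and token.
url_wallet = config.get("settings", "url_wallet", fallback=None)
wb_sign_token = config.get("settings", "wb_sign_token", fallback=None)

print(url, bool(url_wallet and wb_sign_token))
```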
```diff
@@ -6,6 +6,7 @@ import uuid
 import hashlib
 import argparse
 import configparser
+import urllib.parse
 import urllib.request
 
 import gettext
```
```diff
@@ -15,20 +16,17 @@ import logging
 from datetime import datetime
 
 
-## Legacy Functions ##
-def convert_to_legacy_snapshot(snapshot):
-    snapshot["sid"] = str(uuid.uuid4()).split("-")[0]
-    snapshot["software"] = "workbench-script"
-    snapshot["version"] = "dev"
-    snapshot["schema_api"] = "1.0.0"
-    snapshot["settings_version"] = "No Settings Version (NaN)"
-    snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
-    snapshot["data"]["smart"] = snapshot["data"]["disks"]
-    snapshot["data"].pop("disks")
-    snapshot.pop("code")
-    snapshot.pop("erase")
-
-## End Legacy Functions ##
+SNAPSHOT_BASE = {
+    'timestamp': str(datetime.now()),
+    'type': 'Snapshot',
+    'uuid': str(uuid.uuid4()),
+    'software': "workbench-script",
+    'version': "0.0.1",
+    'operator_id': "",
+    'data': {},
+    'erase': []
+}
 
 
 ## Utility Functions ##
```
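`SNAPSHOT_BASE` replaces the removed legacy converter at module level (the converter itself is re-added further down with the new `smartctl`/`inxi` fields). The real `gen_snapshot()` is not shown in this hunk; as a hedged sketch of how such a template is typically instantiated per run:

```python
# Sketch only: building a fresh snapshot from a template dict like the one
# above. The actual gen_snapshot() may differ.
import copy
import uuid
from datetime import datetime

def new_snapshot(base: dict) -> dict:
    snapshot = copy.deepcopy(base)              # never mutate the shared template
    snapshot["uuid"] = str(uuid.uuid4())        # fresh id per run
    snapshot["timestamp"] = str(datetime.now()) # template value is set at import time
    return snapshot
```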
@ -48,32 +46,151 @@ def exec_cmd(cmd):
|
||||||
logger.info(_('Running command `%s`'), cmd)
|
logger.info(_('Running command `%s`'), cmd)
|
||||||
return os.popen(cmd).read()
|
return os.popen(cmd).read()
|
||||||
|
|
||||||
|
|
||||||
@logs
|
@logs
|
||||||
def exec_cmd_erase(cmd):
|
def exec_cmd_erase(cmd):
|
||||||
logger.info(_('Running command `%s`'), cmd)
|
logger.info(_('Running command `%s`'), cmd)
|
||||||
return ''
|
return ''
|
||||||
# return os.popen(cmd).read()
|
# return os.popen(cmd).read()
|
||||||
|
|
||||||
|
|
||||||
def gen_code():
|
|
||||||
uid = str(uuid.uuid4()).encode('utf-8')
|
|
||||||
return hashlib.shake_256(uid).hexdigest(3)
|
|
||||||
|
|
||||||
## End Utility functions ##
|
## End Utility functions ##
|
||||||
|
|
||||||
|
|
||||||
SNAPSHOT_BASE = {
|
## Legacy Functions ##
|
||||||
'timestamp': str(datetime.now()),
|
|
||||||
'type': 'Snapshot',
|
def convert_to_legacy_snapshot(snapshot):
|
||||||
'uuid': str(uuid.uuid4()),
|
snapshot["sid"] = str(uuid.uuid4()).split("-")[1]
|
||||||
'code': gen_code(),
|
snapshot["software"] = "workbench-script"
|
||||||
'software': "workbench-script",
|
snapshot["version"] = "dev"
|
||||||
'version': "0.0.1",
|
snapshot["schema_api"] = "1.0.0"
|
||||||
'data': {},
|
snapshot["settings_version"] = "No Settings Version (NaN)"
|
||||||
'erase': []
|
snapshot["timestamp"] = snapshot["timestamp"].replace(" ", "T")
|
||||||
}
|
snapshot["data"]["smart"] = json.loads(snapshot["data"]["smartctl"])
|
||||||
|
snapshot["data"].pop("smartctl")
|
||||||
|
snapshot["data"].pop("inxi")
|
||||||
|
snapshot.pop("operator_id")
|
||||||
|
snapshot.pop("erase")
|
||||||
|
|
||||||
|
lshw = 'sudo lshw -json'
|
||||||
|
hwinfo = 'sudo hwinfo --reallyall'
|
||||||
|
lspci = 'sudo lspci -vv'
|
||||||
|
|
||||||
|
data = {
|
||||||
|
'lshw': exec_cmd(lshw) or "{}",
|
||||||
|
'hwinfo': exec_cmd(hwinfo),
|
||||||
|
'lspci': exec_cmd(lspci)
|
||||||
|
}
|
||||||
|
snapshot['data'].update(data)
|
||||||
|
|
||||||
|
## End Legacy Functions ##
|
||||||
|
|
||||||
|
|
||||||
## Command Functions ##
|
## Command Functions ##
|
||||||
|
## Erase Functions ##
|
||||||
|
## Xavier Functions ##
|
||||||
|
def erase_basic(disk):
|
||||||
|
"""
|
||||||
|
Basic Erasure
|
||||||
|
https://tsapps.nist.gov/publication/get_pdf.cfm?pub_id=917935
|
||||||
|
|
||||||
|
Settings for basic data erasure using shred Linux command.
|
||||||
|
A software-based fast non-100%-secured way of erasing data storage.
|
||||||
|
|
||||||
|
Performs 1 pass overwriting one round using all zeros.
|
||||||
|
Compliant with NIST SP-800-8y8.
|
||||||
|
|
||||||
|
In settings appear:
|
||||||
|
|
||||||
|
WB_ERASE = EraseBasic
|
||||||
|
WB_ERASE_STEPS = 1
|
||||||
|
WB_ERASE_LEADING_ZEROS = False
|
||||||
|
|
||||||
|
"""
|
||||||
|
cmd = f'shred -vn 1 /dev/{disk}'
|
||||||
|
return [exec_cmd_erase(cmd)]
|
||||||
|
|
||||||
|
|
||||||
|
def erase_baseline(disk):
|
||||||
|
"""
|
||||||
|
Baseline Secure Erasure
|
||||||
|
Settings for advanced data erasure using badblocks Linux software.
|
||||||
|
A secured-way of erasing data storages, erase hidden areas,
|
||||||
|
checking the erase sector by sector.
|
||||||
|
|
||||||
|
Performs 1 pass overwriting each sector with zeros and a final verification.
|
||||||
|
Compliant with HMG Infosec Standard 5 Baseline.
|
||||||
|
|
||||||
|
In settings appear:
|
||||||
|
|
||||||
|
WB_ERASE = EraseSectors
|
||||||
|
WB_ERASE_STEPS = 1
|
||||||
|
WB_ERASE_LEADING_ZEROS = True
|
||||||
|
|
||||||
|
WB_ERASE_1_METHOD = EraseBasic
|
||||||
|
WB_ERASE_1_STEP_TYPE = 0
|
||||||
|
WB_ERASE_2_METHOD = EraseSectors
|
||||||
|
WB_ERASE_2_STEP_TYPE = 1
|
||||||
|
"""
|
||||||
|
result = []
|
||||||
|
cmd = f'shred -zvn 0 /dev/{disk}'
|
||||||
|
result.append(exec_cmd_erase(cmd))
|
||||||
|
cmd = f'badblocks -st random -w /dev/{disk}'
|
||||||
|
result.append(exec_cmd_erase(cmd))
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def erase_enhanced(disk):
|
||||||
|
"""
|
||||||
|
Enhanced Secure Erasure
|
||||||
|
Settings for advanced data erasure using badblocks Linux software.
|
||||||
|
A secured-way of erasing data storages, erase hidden areas,
|
||||||
|
checking the erase sector by sector.
|
||||||
|
|
||||||
|
Performs 3 passes overwriting every sector with zeros and ones,
|
||||||
|
and final verification. Compliant with HMG Infosec Standard 5 Enhanced.
|
||||||
|
|
||||||
|
In settings appear:
|
||||||
|
|
||||||
|
WB_ERASE = EraseSectors
|
||||||
|
WB_ERASE_LEADING_ZEROS = True
|
||||||
|
|
||||||
|
WB_ERASE_1_METHOD = EraseBasic
|
||||||
|
WB_ERASE_1_STEP_TYPE = 1
|
||||||
|
WB_ERASE_2_METHOD = EraseBasic
|
||||||
|
WB_ERASE_2_STEP_TYPE = 0
|
||||||
|
WB_ERASE_3_METHOD = EraseSectors
|
||||||
|
WB_ERASE_3_STEP_TYPE = 1
|
||||||
|
"""
|
||||||
|
result = []
|
||||||
|
cmd = f'shred -vn 1 /dev/{disk}'
|
||||||
|
result.append(exec_cmd_erase(cmd))
|
||||||
|
cmd = f'shred -zvn 0 /dev/{disk}'
|
||||||
|
result.append(exec_cmd_erase(cmd))
|
||||||
|
## creo que realmente seria asi (3 pases y una extra poniendo a ceros):
|
||||||
|
# shred -zvn 3 /def/{disk}
|
||||||
|
# tampoco estoy seguro que el badblocks haga un proceso de verificacion.
|
||||||
|
cmd = f'badblocks -st random -w /dev/{disk}'
|
||||||
|
result.append(exec_cmd_erase(cmd))
|
||||||
|
return result
|
||||||
|
|
||||||
|
## End Xavier Functions ##
|
||||||
|
|
||||||
|
def ata_secure_erase_null(disk):
|
||||||
|
cmd_baseline = f'hdparm --user-master u --security-erase NULL /dev/{disk}'
|
||||||
|
return [exec_cmd_erase(cmd_baseline)]
|
||||||
|
|
||||||
|
|
||||||
|
def ata_secure_erase_enhanced(disk):
|
||||||
|
cmd_enhanced = f'hdparm --user-master u --security-erase-enhanced /dev/{disk}'
|
||||||
|
return [exec_cmd_erase(cmd_enhanced)]
|
||||||
|
|
||||||
|
|
||||||
|
def nvme_secure_erase(disk):
|
||||||
|
cmd_encrypted = f'nvme format /dev/{disk} --ses=1'
|
||||||
|
return [exec_cmd_erase(cmd_encrypted)]
|
||||||
|
|
||||||
|
|
||||||
|
## End Erase Functions ##
|
||||||
|
|
||||||
@logs
|
@logs
|
||||||
def get_disks():
|
def get_disks():
|
||||||
|
```diff
@@ -82,13 +199,39 @@ def get_disks():
     )
     return disks.get('blockdevices', [])
 
 
 @logs
-def gen_erase(type_erase, user_disk=None):
-    if user_disk:
-        return exec_cmd(f"sanitize -d {user_disk} -m {type_erase}")
-    return exec_cmd(f"sanitize -a -m {type_erase}")
-    # return exec_cmd(f"sanitize -a -m {type_erase} --confirm")
+def gen_erase(all_disks, type_erase, user_disk=None):
+    erase = []
+    for disk in all_disks:
+        if user_disk and disk['name'] not in user_disk:
+            continue
+
+        if disk['type'] != 'disk':
+            continue
+
+        if 'boot' in disk['mountpoints']:
+            continue
+
+        if not disk['rota']:
+            # if soport nvme erase
+            erase.append(nvme_secure_erase(disk['name']))
+        elif disk['tran'] in ['ata', 'sata']:
+            # if soport ata erase
+            if type_erase == 'basic':
+                erase.append(ata_secure_erase_null(disk['name']))
+            elif type_erase == 'baseline':
+                erase.append(ata_secure_erase_null(disk['name']))
+            elif type_erase == 'enhanced':
+                erase.append(ata_secure_erase_enhanced(disk['name']))
+        else:
+            # For old disks
+            if type_erase == 'basic':
+                erase.append(erase_basic(disk['name']))
+            elif type_erase == 'baseline':
+                erase.append(erase_baseline(disk['name']))
+            elif type_erase == 'enhanced':
+                erase.append(erase_enhanced(disk['name']))
+    return erase
 
 
 @logs
```
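The new dispatch is driven by lsblk-style JSON fields (`type`, `rota`, `tran`, `mountpoints`) coming from `get_disks()`. A toy, self-contained mirror of that decision table, with hand-written entries standing in for real lsblk output; the real script calls its erase helpers instead of returning labels:

```python
# Toy version of the dispatch above, to show which fields drive it.
def pick_method(disk: dict, type_erase: str = "basic"):
    if disk["type"] != "disk" or "boot" in disk["mountpoints"]:
        return None                       # skip partitions and the boot medium
    if not disk["rota"]:
        return "nvme_secure_erase"        # non-rotational: NVMe secure erase
    if disk["tran"] in ("ata", "sata"):
        return ("ata_secure_erase_enhanced" if type_erase == "enhanced"
                else "ata_secure_erase_null")
    return f"erase_{type_erase}"          # old disks: shred/badblocks fallback

disks = [
    {"name": "sda", "type": "disk", "rota": True, "tran": "sata", "mountpoints": [None]},
    {"name": "nvme0n1", "type": "disk", "rota": False, "tran": "nvme", "mountpoints": [None]},
]
print({d["name"]: pick_method(d) for d in disks})
```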
```diff
@@ -109,7 +252,7 @@ def smartctl(all_disks, disk=None):
             data = exec_smart(disk['name'])
             data_list.append(data)
 
-    return data_list
+    return json.dumps(data_list)
 
 ## End Command Functions ##
```
```diff
@@ -117,16 +260,13 @@ def smartctl(all_disks, disk=None):
 # TODO permitir selección
 # TODO permitir que vaya más rápido
 def get_data(all_disks):
-    lshw = 'sudo lshw -json'
-    hwinfo = 'sudo hwinfo --reallyall'
     dmidecode = 'sudo dmidecode'
-    lspci = 'sudo lspci -vv'
+    inxi = "sudo inxi -afmnGEMABD -x 3 --edid --output json --output-file print"
 
     data = {
-        'lshw': exec_cmd(lshw) or "{}",
-        'disks': smartctl(all_disks),
-        'hwinfo': exec_cmd(hwinfo),
+        'smartctl': smartctl(all_disks),
         'dmidecode': exec_cmd(dmidecode),
-        'lspci': exec_cmd(lspci)
+        'inxi': exec_cmd(inxi)
     }
 
     return data
```
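With the `json.dumps(data_list)` change above, `data['smartctl']` is now a JSON string rather than a Python list; consumers such as `convert_to_legacy_snapshot()` decode it again with `json.loads`. A tiny round-trip sketch (the report content is a hand-written stand-in for real smartctl output):

```python
# Minimal illustration of the new encoding: the per-disk SMART reports are
# serialised once inside smartctl() and decoded wherever a list is needed.
import json

data_list = [{"device": "/dev/sda", "smart_status": {"passed": True}}]  # stand-in report
smartctl_field = json.dumps(data_list)          # what get_data() now stores

assert isinstance(smartctl_field, str)
assert json.loads(smartctl_field) == data_list  # what the legacy conversion does
```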
```diff
@@ -138,20 +278,20 @@ def gen_snapshot(all_disks):
     return snapshot
 
 
-def save_snapshot_in_disk(snapshot, path):
+def save_snapshot_in_disk(snapshot, path, snap_uuid):
     snapshot_path = os.path.join(path, 'snapshots')
 
     filename = "{}/{}_{}.json".format(
         snapshot_path,
         datetime.now().strftime("%Y%m%d-%H_%M_%S"),
-        snapshot['uuid'])
+        snap_uuid)
 
     try:
         if not os.path.exists(snapshot_path):
             os.makedirs(snapshot_path)
             logger.info(_("Created snapshots directory at '%s'"), snapshot_path)
         with open(filename, "w") as f:
-            f.write(json.dumps(snapshot))
+            f.write(snapshot)
         logger.info(_("Snapshot written in path '%s'"), filename)
     except Exception as e:
         try:
```
@ -159,22 +299,69 @@ def save_snapshot_in_disk(snapshot, path):
|
||||||
fallback_filename = "{}/{}_{}.json".format(
|
fallback_filename = "{}/{}_{}.json".format(
|
||||||
path,
|
path,
|
||||||
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
|
datetime.now().strftime("%Y%m%d-%H_%M_%S"),
|
||||||
snapshot['uuid'])
|
snap_uuid)
|
||||||
with open(fallback_filename, "w") as f:
|
with open(fallback_filename, "w") as f:
|
||||||
f.write(json.dumps(snapshot))
|
f.write(snapshot)
|
||||||
logger.warning(_("Snapshot written in fallback path '%s'"), fallback_filename)
|
logger.warning(_("Snapshot written in fallback path '%s'"), fallback_filename)
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(_("Could not save snapshot locally. Reason: Failed to write in fallback path:\n %s"), e)
|
logger.error(_("Could not save snapshot locally. Reason: Failed to write in fallback path:\n %s"), e)
|
||||||
|
|
||||||
|
|
||||||
|
def send_to_sign_credential(snapshot, token, url):
|
||||||
|
headers = {
|
||||||
|
"Authorization": f"Bearer {token}",
|
||||||
|
"Content-Type": "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
try:
|
||||||
|
cred = {
|
||||||
|
"type": "DeviceSnapshotV1",
|
||||||
|
"save": False,
|
||||||
|
"data": {
|
||||||
|
"operator_id": snapshot["operator_id"],
|
||||||
|
"dmidecode": snapshot["data"]["dmidecode"],
|
||||||
|
"inxi": snapshot["data"]["inxi"],
|
||||||
|
"smartctl": snapshot["data"]["smartctl"],
|
||||||
|
"uuid": snapshot["uuid"],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
data = json.dumps(cred).encode('utf-8')
|
||||||
|
request = urllib.request.Request(url, data=data, headers=headers)
|
||||||
|
with urllib.request.urlopen(request) as response:
|
||||||
|
status_code = response.getcode()
|
||||||
|
response_text = response.read().decode('utf-8')
|
||||||
|
|
||||||
|
if 200 <= status_code < 300:
|
||||||
|
logger.info(_("Credential successfully signed"))
|
||||||
|
res = json.loads(response_text)
|
||||||
|
if res.get("status") == "success" and res.get("data"):
|
||||||
|
return res["data"]
|
||||||
|
return json.dumps(snapshot)
|
||||||
|
else:
|
||||||
|
logger.error(_("Credential cannot signed in '%s'"), url)
|
||||||
|
return json.dumps(snapshot)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(_("Credential not remotely builded to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
|
||||||
|
return json.dumps(snapshot)
|
||||||
|
|
||||||
|
|
||||||
# TODO sanitize url, if url is like this, it fails
|
# TODO sanitize url, if url is like this, it fails
|
||||||
# url = 'http://127.0.0.1:8000/api/snapshot/'
|
# url = 'http://127.0.0.1:8000/api/snapshot/'
|
||||||
def send_snapshot_to_devicehub(snapshot, token, url):
|
def send_snapshot_to_devicehub(snapshot, token, url, ev_uuid, legacy):
|
||||||
|
url_components = urllib.parse.urlparse(url)
|
||||||
|
ev_path = f"evidence/{ev_uuid}"
|
||||||
|
components = (url_components.scheme, url_components.netloc, ev_path, '', '', '')
|
||||||
|
ev_url = urllib.parse.urlunparse(components)
|
||||||
|
# apt install qrencode
|
||||||
|
|
||||||
headers = {
|
headers = {
|
||||||
"Authorization": f"Bearer {token}",
|
"Authorization": f"Bearer {token}",
|
||||||
"Content-Type": "application/json"
|
"Content-Type": "application/json"
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
data = json.dumps(snapshot).encode('utf-8')
|
data = snapshot.encode('utf-8')
|
||||||
request = urllib.request.Request(url, data=data, headers=headers)
|
request = urllib.request.Request(url, data=data, headers=headers)
|
||||||
with urllib.request.urlopen(request) as response:
|
with urllib.request.urlopen(request) as response:
|
||||||
status_code = response.getcode()
|
status_code = response.getcode()
|
||||||
|
@ -182,22 +369,31 @@ def send_snapshot_to_devicehub(snapshot, token, url):
|
||||||
|
|
||||||
if 200 <= status_code < 300:
|
if 200 <= status_code < 300:
|
||||||
logger.info(_("Snapshot successfully sent to '%s'"), url)
|
logger.info(_("Snapshot successfully sent to '%s'"), url)
|
||||||
|
if legacy:
|
||||||
try:
|
try:
|
||||||
response = json.loads(response_text)
|
response = json.loads(response_text)
|
||||||
if response.get('url'):
|
public_url = response.get('public_url')
|
||||||
|
dhid = response.get('dhid')
|
||||||
|
if public_url:
|
||||||
# apt install qrencode
|
# apt install qrencode
|
||||||
qr = "echo {} | qrencode -t ANSI".format(response['url'])
|
qr = "echo {} | qrencode -t ANSI".format(public_url)
|
||||||
print(exec_cmd(qr))
|
print(exec_cmd(qr))
|
||||||
print("url: {}".format(response['url']))
|
print("url: {}".format(public_url))
|
||||||
if response.get("dhid"):
|
if dhid:
|
||||||
print("dhid: {}".format(response['dhid']))
|
print("dhid: {}".format(dhid))
|
||||||
except Exception:
|
except Exception:
|
||||||
logger.error(response_text)
|
logger.error(response_text)
|
||||||
|
else:
|
||||||
|
qr = "echo {} | qrencode -t ANSI".format(ev_url)
|
||||||
|
print(exec_cmd(qr))
|
||||||
|
print(f"url: {ev_url}")
|
||||||
|
else:
|
||||||
|
logger.error(_("Snapshot %s not remotely sent to URL '%s'. Server responded with error:\n %s"), ev_uuid, url, response_text)
|
||||||
|
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
logger.error(_("Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
|
logger.error(_("Snapshot not remotely sent to URL '%s'. Do you have internet? Is your server up & running? Is the url token authorized?\n %s"), url, e)
|
||||||
|
|
||||||
|
|
||||||
def load_config(config_file="settings.ini"):
|
def load_config(config_file="settings.ini"):
|
||||||
"""
|
"""
|
||||||
Tries to load configuration from a config file.
|
Tries to load configuration from a config file.
|
||||||
|
```diff
@@ -217,10 +413,12 @@ def load_config(config_file="settings.ini"):
         device = config.get('settings', 'device', fallback=None)
         erase = config.get('settings', 'erase', fallback=None)
         legacy = config.get('settings', 'legacy', fallback=None)
+        url_wallet = config.get('settings', 'url_wallet', fallback=None)
+        wb_sign_token = config.get('settings', 'wb_sign_token', fallback=None)
     else:
         logger.error(_("Config file '%s' not found. Using default values."), config_file)
         path = os.path.join(os.getcwd())
-        url, token, device, erase, legacy = None, None, None, None, None
+        url, token, device, erase, legacy, url_wallet, wb_sign_token = (None,)*7
 
     return {
         'path': path,
```
```diff
@@ -228,7 +426,9 @@ def load_config(config_file="settings.ini"):
         'token': token,
         'device': device,
         'erase': erase,
-        'legacy': legacy
+        'legacy': legacy,
+        'wb_sign_token': wb_sign_token,
+        'url_wallet': url_wallet
     }
 
 def parse_args():
```
```diff
@@ -282,6 +482,7 @@ def main():
     config_file = args.config
 
     config = load_config(config_file)
+    legacy = config.get("legacy")
 
     # TODO show warning if non root, means data is not complete
     # if annotate as potentially invalid snapshot (pending the new API to be done)
```
```diff
@@ -290,16 +491,39 @@ def main():
 
     all_disks = get_disks()
     snapshot = gen_snapshot(all_disks)
+    snap_uuid = snapshot["uuid"]
 
-    if config.get("legacy"):
+    if config['erase'] and config['device'] and not legacy:
+        snapshot['erase'] = gen_erase(all_disks, config['erase'], user_disk=config['device'])
+    elif config['erase'] and not legacy:
+        snapshot['erase'] = gen_erase(all_disks, config['erase'])
+
+    if legacy:
         convert_to_legacy_snapshot(snapshot)
+        snapshot = json.dumps(snapshot)
     else:
-        snapshot['erase'] = gen_erase(config['erase'], user_disk=config['device'])
+        url_wallet = config.get("url_wallet")
+        wb_sign_token = config.get("wb_sign_token")
 
-    save_snapshot_in_disk(snapshot, config['path'])
+        if wb_sign_token:
+            tk = wb_sign_token.encode("utf8")
+            snapshot["operator_id"] = hashlib.sha3_256(tk).hexdigest()
+
+        if url_wallet and wb_sign_token:
+            snapshot = send_to_sign_credential(snapshot, wb_sign_token, url_wallet)
+        else:
+            snapshot = json.dumps(snapshot)
+
+    save_snapshot_in_disk(snapshot, config['path'], snap_uuid)
 
     if config['url']:
-        send_snapshot_to_devicehub(snapshot, config['token'], config['url'])
+        send_snapshot_to_devicehub(
+            snapshot,
+            config['token'],
+            config['url'],
+            snap_uuid,
+            legacy
+        )
 
     logger.info(_("END"))
```
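Summarising the new `main()` flow: erase commands are generated up front (unless running in legacy mode), the snapshot is either converted to the legacy shape or optionally signed through the IdHub wallet, and only then written to disk and posted. A compact, hedged sketch of the branch that decides what gets saved; `send_to_sign_credential()` is stubbed out here:

```python
# Sketch of the new decision points in main(); names mirror the diff above.
import hashlib
import json

def finalize(snapshot: dict, config: dict,
             send_to_sign_credential=lambda s, t, u: json.dumps(s)) -> str:
    if config.get("legacy"):
        # The real code also converts the schema first (convert_to_legacy_snapshot).
        return json.dumps(snapshot)
    wb_sign_token = config.get("wb_sign_token")
    url_wallet = config.get("url_wallet")
    if wb_sign_token:
        # operator_id stores a hash of the signing token, not the token itself.
        snapshot["operator_id"] = hashlib.sha3_256(wb_sign_token.encode("utf8")).hexdigest()
    if url_wallet and wb_sign_token:
        return send_to_sign_credential(snapshot, wb_sign_token, url_wallet)
    return json.dumps(snapshot)
```

Either way, the string returned by this step is what `save_snapshot_in_disk()` writes and `send_snapshot_to_devicehub()` posts.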