Merge pull request 'contributions during the stay with ekoa' (#1) from ekoa into main

Reviewed-on: #1
pedro 2024-09-28 02:16:27 +00:00
commit 8e4186d9d4
5 changed files with 99 additions and 58 deletions

.gitignore

@@ -1 +1,4 @@
 iso
+settings.ini
+# ignore all possible snapshots in this dir
+*.json


@@ -22,7 +22,7 @@ install_dependencies:
 boot_iso:
 	sudo qemu-system-x86_64 \
 		-enable-kvm -m 2G -vga qxl -netdev user,id=wan -device virtio-net,netdev=wan,id=nic1 \
-		-drive format=raw,file=iso/workbench.iso,cache=none,if=virtio
+		-drive format=raw,file=iso/workbench_production.iso,cache=none,if=virtio
 # src https://www.ubuntubuzz.com/2021/04/how-to-boot-uefi-on-qemu.html
 # needs `sudo apt-get install ovmf`
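The two comment lines above point at booting the same ISO under UEFI. A minimal sketch of that variant, assuming the ovmf package is installed and its firmware image sits at /usr/share/ovmf/OVMF.fd (that path is an assumption and differs between distributions):

    # UEFI boot of the production ISO via OVMF (firmware path may need adjusting)
    sudo qemu-system-x86_64 \
        -enable-kvm -m 2G -vga qxl \
        -bios /usr/share/ovmf/OVMF.fd \
        -netdev user,id=wan -device virtio-net,netdev=wan,id=nic1 \
        -drive format=raw,file=iso/workbench_production.iso,cache=none,if=virtio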


@@ -98,7 +98,7 @@ LABEL linux
 END
 )"
 # TIMEOUT 60 means 6 seconds :)
-sudo tee "${ISO_PATH}/staging/isolinux/isolinux.cfg" <<EOF
+${SUDO} tee "${ISO_PATH}/staging/isolinux/isolinux.cfg" <<EOF
 ${isolinuxcfg_str}
 EOF
 ${SUDO} cp /usr/lib/ISOLINUX/isolinux.bin "${ISO_PATH}/staging/isolinux/"

@@ -188,8 +188,9 @@ create_persistence_partition() {
 # persistent partition
 rw_img_name="workbench_vfat.img"
 rw_img_path="${ISO_PATH}/staging/${rw_img_name}"
-if [ ! -f "${rw_img_path}" ] || [ "${DEBUG:-}" ]; then
-${SUDO} dd if=/dev/zero of="${rw_img_path}" bs=10M count=1
+if [ ! -f "${rw_img_path}" ] || [ "${DEBUG:-}" ] || [ "${FORCE:-}" ]; then
+persistent_volume_size_MB=100
+${SUDO} dd if=/dev/zero of="${rw_img_path}" bs=1M count=${persistent_volume_size_MB}
 ${SUDO} mkfs.vfat "${rw_img_path}"
 # generate structure on persistent partition

@@ -198,17 +199,7 @@ create_persistence_partition() {
 mkdir -p "${tmp_rw_mount}"
 ${SUDO} mount "$(pwd)/${rw_img_path}" "${tmp_rw_mount}"
 ${SUDO} mkdir -p "${tmp_rw_mount}/settings"
-# TODO without SUDO fails
-${SUDO} cat > "${tmp_rw_mount}/settings/settings.ini" <<END
-[settings]
-DH_TOKEN =
-DH_URL =
-SNAPSHOTS_PATH = /mnt
-LOGS_PATH = /mnt
-VERSION =
-END
+${SUDO} cp -v settings.ini "${tmp_rw_mount}/settings/settings.ini"
 ${SUDO} umount "${tmp_rw_mount}"
 uuid="$(blkid "${rw_img_path}" | awk '{ print $3; }')"

@@ -249,7 +240,7 @@ END2
 ###################
 # configure hosts
 cat > /etc/hosts <<END2
-127.0.0.1 localhost \${hostname}
+127.0.0.1 localhost workbench
 ::1 localhost ip6-localhost ip6-loopback
 ff02::1 ip6-allnodes
 ff02::2 ip6-allrouters

@@ -261,8 +252,9 @@ prepare_app() {
 # prepare app during prepare_chroot_env
 # Install hardware_metadata module
 workbench_dir="${ISO_PATH}/chroot/opt/workbench"
-${SUDO} cp workbench-script.py "${workbench_dir}"
-${SUDO} cp requirements.txt "${workbench_dir}"
+${SUDO} mkdir -p "${workbench_dir}"
+${SUDO} cp workbench-script.py "${workbench_dir}/"
+${SUDO} cp requirements.txt "${workbench_dir}/"
 # startup script execution
 cat > "${ISO_PATH}/chroot/root/.profile" <<END

@@ -272,7 +264,7 @@ stty -echo # Do not show what we type in terminal so it does not meddle with our
 dmesg -n 1 # Do not report *useless* system messages to the terminal
 # clearly specify the right working directory, used in the python script as os.getcwd()
 cd /mnt
-pipenv run python /opt/workbench/workbench-script.py
+pipenv run python /opt/workbench/workbench-script.py --config "/mnt/settings/settings.ini"
 stty echo
 END
 #TODO add some useful commands

@@ -286,6 +278,7 @@ echo 'Install requirements'
 # Install debian requirements
 apt-get install -y --no-install-recommends \
+  sudo \
   python3 python3-dev python3-pip pipenv \
   dmidecode smartmontools hwinfo pciutils lshw < /dev/null
 # Install python requirements using apt instead of pip

@@ -312,7 +305,7 @@ run_chroot() {
 set -x
 set -e
-echo "${hostname}" > /etc/hostname
+echo workbench > /etc/hostname
 # check what linux images are available on the system
 # Figure out which Linux Kernel you want in the live environment.

@@ -389,6 +382,11 @@ prepare_chroot_env() {
 if [ -z "${VERSION_CODENAME:-}" ]; then
 . /etc/os-release
 echo "TAKING OS-RELEASE FILE"
+if [ ! "${ID}" = "debian" ]; then
+echo "ERROR: ubuntu detected, then you are enforced to specify debian variant"
+echo " use for example \`VERSION_CODENAME='bookworm'\` or similar"
+exit 1
+fi
 fi

 chroot_path="${ISO_PATH}/chroot"
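Since create_persistence_partition() now copies settings.ini from the repository root instead of writing it inline, a quick sanity check is to loop-mount the generated image and read the file back. This is a sketch only, assuming ISO_PATH resolves to iso/ as in the boot_iso target above, and using a throwaway mount point that is not part of the build script:

    # inspect the persistence image produced by create_persistence_partition()
    mkdir -p /tmp/wb_persistence_check   # hypothetical mount point
    sudo mount -o loop iso/staging/workbench_vfat.img /tmp/wb_persistence_check
    cat /tmp/wb_persistence_check/settings/settings.ini
    sudo umount /tmp/wb_persistence_check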

settings.ini.example (new file)

@@ -0,0 +1,6 @@
+[settings]
+url = http://127.0.0.1:8000/api/snapshot/
+token = '1234'
+# path = /path/to/save
+# device = your_device_name
+# # erase = basic
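On the live image this file is copied to /mnt/settings/settings.ini and handed to the script through the new --config flag (see the .profile hunk above); when the flag is omitted the script falls back to ./settings.ini. A local dry run, assuming the template has been copied and the Python dependencies are installed, could look like:

    # hedged example: copy the template and point the script at it
    cp settings.ini.example settings.ini
    python3 workbench-script.py --config settings.ini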


@@ -5,6 +5,7 @@ import json
 import uuid
 import hashlib
 import argparse
+import configparser

 import ntplib
 import requests
@@ -228,9 +229,9 @@ def smartctl(all_disks, disk=None):
 def get_data(all_disks):
-    lshw = 'lshw -json'
-    hwinfo = 'hwinfo --reallyall'
-    dmidecode = 'dmidecode'
+    lshw = 'sudo lshw -json'
+    hwinfo = 'sudo hwinfo --reallyall'
+    dmidecode = 'sudo dmidecode'
     data = {
         'lshw': exec_cmd(lshw),
         'disks': smartctl(all_disks),
@@ -253,17 +254,23 @@ def save_snapshot_in_disk(snapshot, path):
         datetime.now().strftime("%Y%m%d-%H_%M_%S"),
         snapshot['uuid']
     )
+    print(f"workbench: INFO: Snapshot written in path '{filename}'")
     with open(filename, "w") as f:
         f.write(json.dumps(snapshot))

+# TODO sanitize url, if url is like this, it fails
+# url = 'http://127.0.0.1:8000/api/snapshot/'
 def send_snapshot_to_devicehub(snapshot, token, url):
     headers = {
         f"Authorization": "Basic {token}",
         "Content-Type": "application/json"
     }
-    return requests.post(url, data=snapshot, header=headers)
+    try:
+        requests.post(url, data=json.dumps(snapshot), headers=headers)
+        print(f"workbench: INFO: Snapshot sent to '{url}'")
+    except:
+        print(f"workbench: ERROR: Snapshot not remotely sent. URL '{url}' is unreachable. Do you have internet? Is your server up & running?")

 @logs
 def sync_time():
@@ -271,48 +278,75 @@ def sync_time():
     ntplib.NTPClient()
     response = client.request('pool.ntp.org')

+
+def load_config(config_file="settings.ini"):
+    """
+    Tries to load configuration from a config file.
+    """
+    config = configparser.ConfigParser()
+
+    if os.path.exists(config_file):
+        # If config file exists, read from it
+        print(f"workbench: INFO: Found config file in path: '{config_file}'.")
+        config.read(config_file)
+        path = config.get('settings', 'path', fallback=os.getcwd())
+        # TODO validate that has http:// start
+        url = config.get('settings', 'url', fallback=None)
+        token = config.get('settings', 'token', fallback=None)
+        # TODO validate that the device exists?
+        device = config.get('settings', 'device', fallback=None)
+        erase = config.get('settings', 'erase', fallback=None)
+    else:
+        print(f"workbench: ERROR: Config file '{config_file}' not found. Using default values.")
+        path = os.path.join(os.getcwd())
+        url, token, device, erase = None, None, None, None
+
+    return {
+        'path': path,
+        'url': url,
+        'token': token,
+        'device': device,
+        'erase': erase
+    }
+
+
+def parse_args():
+    """
+    Parse config argument, if available
+    """
+    parser = argparse.ArgumentParser(description="Optional config loader for workbench.")
+    parser.add_argument(
+        '--config',
+        help="Path to the config file. Defaults to 'settings.ini' in the current directory.",
+        default="settings.ini"  # Fallback to 'settings.ini' by default
+    )
+    return parser.parse_args()
+
+
 def main():
-    print("START")
-    parser=argparse.ArgumentParser()
-    parser.add_argument("-p", "--path", required=True)
-    parser.add_argument("-u", "--url", required=False)
-    parser.add_argument("-t", "--token", required=False)
-    parser.add_argument("-d", "--device", required=False)
-    parser.add_argument(
-        "-e",
-        "--erase",
-        choices=["basic", "baseline", "enhanced"],
-        required=False
-    )
-    args=parser.parse_args()
-
-    if args.device and not args.erase:
-        print("error: argument --erase: expected one argument")
-        return
-
-    if args.token and not args.url:
-        print("error: argument --url: expected one argument")
-        return
-
-    if args.url and not args.token:
-        print("error: argument --token: expected one argument")
-        return
+    vline='\n___________\n\n'
+    print(f"{vline}workbench: START\n")
+
+    # Parse the command-line arguments
+    args = parse_args()
+
+    # Load the config file, either specified via --config or the default 'settings.ini'
+    config_file = args.config
+    config = load_config(config_file)

     all_disks = get_disks()
     snapshot = gen_snapshot(all_disks)

-    if args.erase and args.device:
-        snapshot['erase'] = gen_erase(all_disks, args.erase, user_disk=args.device)
-    elif args.erase:
-        snapshot['erase'] = gen_erase(all_disks, args.erase)
+    if config['erase'] and config['device']:
+        snapshot['erase'] = gen_erase(all_disks, config['erase'], user_disk=config['device'])
+    elif config['erase']:
+        snapshot['erase'] = gen_erase(all_disks, config['erase'])

-    save_snapshot_in_disk(snapshot, args.path)
+    save_snapshot_in_disk(snapshot, config['path'])

-    if args.url:
-        send_snapshot_to_devicehub(snapshot, args.token, args.url)
+    if config['url']:
+        send_snapshot_to_devicehub(snapshot, config['token'], config['url'])

-    print("END")
+    print(f"\nworkbench: END{vline}")

 if __name__ == '__main__':