#!/bin/sh

set -e
set -u
# DEBUG
set -x
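
# NOTE (assumption): `big_error` and `usage` are called further down but are not
#   defined anywhere in this script as provided; the following minimal
#   placeholders keep those error paths from failing with "command not found".
big_error() {
    echo "ERROR: ${*}" >&2
    exit 1
}

usage() {
    echo "ERROR: ./manage.py not found; this entrypoint must run from the app root" >&2
    exit 1
}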

# TODO there is a conflict between two shared vars:
#   1. from the original docker compose devicehub-teal
#   2. from the new docker compose that integrates all dpp services
wait_for_dpp_shared() {
    while true; do
        # especially ensure VERAMO_API_CRED_FILE is not empty;
        #   it takes some time for its data to arrive
        OPERATOR_TOKEN_FILE='operator-token.txt'
        if [ -f "/shared/${OPERATOR_TOKEN_FILE}" ] && \
               [ -f "/shared/create_user_operator_finished" ]; then
            sleep 5
            echo "Files ready to process."
            break
        else
            echo "Waiting for file in shared: ${OPERATOR_TOKEN_FILE}"
            sleep 5
        fi
    done
}

# 3. Generate an environment .env file.
# TODO load this via shared
gen_env_vars() {
    INIT_ORG="${INIT_ORG:-example-org}"
    INIT_USER="${INIT_USER:-user@example.org}"
    INIT_PASSWD="${INIT_PASSWD:-1234}"
    ADMIN='True'
    PREDEFINED_TOKEN="${PREDEFINED_TOKEN:-}"

    # specific dpp env vars
    if [ "${DPP:-}" = 'true' ]; then
        # fill env vars in this docker entrypoint
        wait_for_dpp_shared
        export API_DLT='http://api_connector:3010'
        export API_DLT_TOKEN="$(cat "/shared/${OPERATOR_TOKEN_FILE}")"
        export API_RESOLVER='http://id_index_api:3012'
        # TODO hardcoded
        export ID_FEDERATED='DH1'
        # propagate to .env
        dpp_env_vars="$(cat <<END
API_DLT=${API_DLT}
API_DLT_TOKEN=${API_DLT_TOKEN}
API_RESOLVER=${API_RESOLVER}
ID_FEDERATED=${ID_FEDERATED}
END
)"
        # generate config using env vars from docker
        # TODO rethink whether this is still needed now that this is django, not flask
        cat > .env <<END
${dpp_env_vars:-}
END
    fi
}
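
# For reference, with DPP=true the generated .env ends up looking like this
# (illustrative values only; the token is whatever /shared/operator-token.txt contains):
#   API_DLT=http://api_connector:3010
#   API_DLT_TOKEN=<operator token>
#   API_RESOLVER=http://id_index_api:3012
#   ID_FEDERATED=DH1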

handle_federated_id() {
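    # Required environment (observation): DEVICEHUB_HOST must be provided by the
    #   container environment (it is not set anywhere in this script), while
    #   API_RESOLVER and ID_FEDERATED are exported by gen_env_vars when DPP=true.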

    # devicehub host and id federated checker

    # a trailing slash in API_RESOLVER would produce a "//getAll" URL, which this
    #   service does not accept, so strip it before building the query
    EXPECTED_ID_FEDERATED="$(curl -s "${API_RESOLVER%/}/getAll" \
        | jq -r '.url | to_entries | .[] | select(.value == "'"${DEVICEHUB_HOST}"'") | .key' \
        | head -n 1)"
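
    # Illustration (assumed response shape, inferred from the jq filter above):
    #   a reply like {"url": {"DH1": "http://devicehub.example.org"}} together with
    #   DEVICEHUB_HOST=http://devicehub.example.org yields EXPECTED_ID_FEDERATED=DH1.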

    # if this is a new DEVICEHUB_HOST, register it
    if [ -z "${EXPECTED_ID_FEDERATED}" ]; then
        # TODO better docker compose run command
        cmd="docker compose run --entrypoint= devicehub flask dlt_insert_members ${DEVICEHUB_HOST}"
        big_error "No FEDERATED ID; maybe you should run \`${cmd}\`"
    fi

    # if DEVICEHUB_HOST is not new, check consistency:
    #   if there is already an ID in the DLT, it should match my internal ID
    if [ ! "${EXPECTED_ID_FEDERATED}" = "${ID_FEDERATED}" ]; then
        big_error "ID_FEDERATED should be ${EXPECTED_ID_FEDERATED} instead of ${ID_FEDERATED}"
    fi

    # not needed, but reserved
    # EXPECTED_DEVICEHUB_HOST="$(curl -s "${API_RESOLVER%/}/getAll" \
    #     | jq -r '.url | to_entries | .[] | select(.key == "'"${ID_FEDERATED}"'") | .value' \
    #     | head -n 1)"
    # if [ ! "${EXPECTED_DEVICEHUB_HOST}" = "${DEVICEHUB_HOST}" ]; then
    #     big_error "ERROR: DEVICEHUB_HOST should be ${EXPECTED_DEVICEHUB_HOST} instead of ${DEVICEHUB_HOST}"
    # fi
}

config_dpp_part1() {
    # 12. Add a new server to the 'api resolver'
    if [ "${ID_SERVICE:-}" ]; then
        handle_federated_id
    else
        # TODO this is a problem if it runs more than once per service,
        #   but for the docker-reset.sh workflow that is fine
        # TODO put this in already_configured
        # TODO hardcoded http proto and port
        ./manage.py dlt_insert_members "http://${DOMAIN}:8000"
    fi

    # 13. Do an rsync of the api resolver members
    ./manage.py dlt_rsync_members

    # 14. Register a new user to the DLT
    DATASET_FILE='/tmp/dataset.json'
    cat > "${DATASET_FILE}" <<END
{
  "email": "${INIT_USER}",
  "password": "${INIT_PASSWD}",
  "api_token": "${API_DLT_TOKEN}"
}
END
    ./manage.py dlt_register_user "${DATASET_FILE}"
}
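
# Note: steps 12-14 above rely on the DPP environment (API_DLT_TOKEN, API_RESOLVER,
#   ID_FEDERATED) exported by gen_env_vars, so config_dpp_part1 is only meaningful
#   when DPP=true (see config_phase below).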

config_phase() {
    # TODO review this flag file
    init_flagfile="${program_dir}/already_configured"
    if [ ! -f "${init_flagfile}" ]; then

        # non-DLT user (only for the inventory)
        ./manage.py add_institution "${INIT_ORG}"
        # TODO a single error in add_user means the user is not added at all
        ./manage.py add_user "${INIT_ORG}" "${INIT_USER}" "${INIT_PASSWD}" "${ADMIN}" "${PREDEFINED_TOKEN}"

        if [ "${DPP:-}" = 'true' ]; then
            # 12, 13, 14
            config_dpp_part1

            # clean up the other snapshots and copy the dlt/dpp snapshots
            # TODO make this better
            rm example/snapshots/*
            cp example/dpp-snapshots/*.json example/snapshots/
        fi

        # 15. Add inventory snapshots for user "${INIT_USER}".
        if [ "${DEMO:-}" = 'true' ]; then
            /usr/bin/time ./manage.py up_snapshots example/snapshots/ "${INIT_USER}"
        fi

        # keep the next command as the last operation of this if block
        touch "${init_flagfile}"
    fi
}
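
# config_phase runs only once per deployment: the already_configured flag file
#   guards it, so removing that file (e.g. as part of the docker-reset.sh workflow
#   mentioned above) makes it run again on the next start.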

check_app_is_there() {
    if [ ! -f "./manage.py" ]; then
        usage
    fi
}
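
# NOTE: check_app_is_there is defined but not called anywhere in this script;
#   main() below goes straight to gen_env_vars/deploy/runserver.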

deploy() {
    # TODO this is weird, find a better workaround
    git config --global --add safe.directory "${program_dir}"
    export COMMIT="$(git log --format="%H %ad" --date=iso -n 1)"
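    # e.g. COMMIT="<full commit sha> 2024-08-02 11:05:02 +0000" (hash plus ISO author date)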

    if [ "${DEBUG:-}" = 'true' ]; then
        ./manage.py print_settings
    else
        echo "DOMAIN: ${DOMAIN}"
    fi

    # detect if existing deployment (TODO only works with sqlite)
    if [ -f "${program_dir}/db/db.sqlite3" ]; then
        echo "INFO: detected EXISTING deployment"
        ./manage.py migrate
    else
        # move the migrate step into the docker entrypoint,
        #   inspired by https://medium.com/analytics-vidhya/django-with-docker-and-docker-compose-python-part-2-8415976470cc
        echo "INFO: detected NEW deployment"
        ./manage.py migrate
        config_phase
    fi
}

runserver() {
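    # Branch summary: DEBUG=true starts the Django dev server; otherwise the
    #   EXPERIMENTAL path was meant to start gunicorn but that line is still
    #   commented out (so it currently starts nothing), and the remaining default
    #   path falls back to the dev server as well.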
    PORT="${PORT:-8000}"
    if [ "${DEBUG:-}" = 'true' ]; then
        ./manage.py runserver "0.0.0.0:${PORT}"
    else
        # TODO
        #./manage.py collectstatic
        true
        if [ "${EXPERIMENTAL:-}" ]; then
            # TODO
            #   reloading on source code change is a debugging feature; maybe it is
            #   better to rely on DEBUG for that
            #   src https://stackoverflow.com/questions/12773763/gunicorn-autoreload-on-source-change/24893069#24893069
            #   gunicorn with 1 worker; with more than 1 worker this is not expected to work
            #gunicorn --access-logfile - --error-logfile - -b :${PORT} trustchain_idhub.wsgi:application
            true
        else
            ./manage.py runserver "0.0.0.0:${PORT}"
        fi
    fi
}

main() {
    program_dir='/opt/devicehub-django'
    cd "${program_dir}"
    gen_env_vars
    deploy
    runserver
}

main "${@}"