Optimize conditional checks and file-handling logic to improve code readability and execution efficiency

Ing 2024-11-19 12:06:06 +08:00
parent c091d0dba0
commit 04eeea5e84
42 changed files with 9331 additions and 9222 deletions
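
A recurring change in the hunks below is replacing the obsolescent -o/-a operators of test/[ with separate tests joined by || and &&. A minimal sketch of the before/after pattern (the check_download helper and its arguments are illustrative, not taken from the repository):

#!/usr/bin/env bash
# Old style (obsolescent in POSIX, ambiguous to parse with some operands):
#   if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then ...
# New style used throughout this commit: one test per bracket, joined by ||.
check_download() {
  local rc="${1}" status="${2}"
  if [ "${rc}" -ne 0 ] || [ "${status:-0}" -ne 200 ]; then
    echo "Download failed" >&2
    return 1
  fi
}

check_download 0 200 && echo "OK"
check_download 0 404 || echo "failed as expected"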

View File

@ -33,8 +33,10 @@ jobs:
git config --global user.name "github-actions[bot]" git config --global user.name "github-actions[bot]"
sudo timedatectl set-timezone "Asia/Shanghai" sudo timedatectl set-timezone "Asia/Shanghai"
sudo apt update - name: Delay
sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies run: |
echo "Delaying for 1 minutes..."
sleep 60
- name: Get Release RR - name: Get Release RR
run: | run: |
@ -51,7 +53,7 @@ jobs:
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}" [ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip") STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed" echo "Download failed"
exit 1 exit 1
fi fi
@ -68,12 +70,20 @@ jobs:
- name: Get data - name: Get data
run: | run: |
pip install -r scripts/requirements.txt sudo apt update
python scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx" sudo apt install -y locales busybox dialog gettext sed gawk jq curl
python scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx" sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
python scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx" sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies
python scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx" # Back up python3's EXTERNALLY-MANAGED marker so pip can install packages system-wide.
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U -r scripts/requirements.txt
python3 scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx"
python3 scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx"
python3 scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx"
python3 scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx"
- name: Upload to Artifacts - name: Upload to Artifacts
if: success() if: success()

View File

@ -148,20 +148,22 @@ jobs:
run: | run: |
# Tired, just let it all burn! # Tired, just let it all burn!
# yq needs sudo !!!
function writeConfigKey() { function writeConfigKey() {
[ "${2}" = "{}" ] && sudo yq eval '.'${1}' = {}' --inplace "${3}" 2>/dev/null || sudo yq eval '.'${1}' = "'"${2}"'"' --inplace "${3}" 2>/dev/null local value="${2}"
[ "${value}" = "{}" ] && sudo yq eval ".${1} = {}" --inplace "${3}" 2>/dev/null || sudo yq eval ".${1} = \"${value}\"" --inplace "${3}" 2>/dev/null
} }
function readConfigKey() { function readConfigKey() {
RESULT=$(yq eval '.'${1}' | explode(.)' "${2}" 2>/dev/null) local result=$(sudo yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${RESULT}" == "null" ] && echo "" || echo "${RESULT}" [ "${result}" = "null" ] && echo "" || echo "${result}"
} }
function mergeConfigStr() { function mergeConfigStr() {
local JF=$(mktemp) local xmlfile=$(mktemp)
echo "${2}" | yq -p ${1} -o y > "${JF}" echo "${2}" | sudo yq -p "${1}" -o y >"${xmlfile}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${JF}" 2>/dev/null sudo yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${xmlfile}" 2>/dev/null
rm -f "${JF}" rm -f "${xmlfile}"
} }
REPO="${{ github.server_url }}/${{ github.repository }}" REPO="${{ github.server_url }}/${{ github.repository }}"
@ -179,7 +181,7 @@ jobs:
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}" [ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip") STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed" echo "Download failed"
exit 1 exit 1
fi fi
@ -231,11 +233,11 @@ jobs:
echo "set modules: ${{ env.modules }}" echo "set modules: ${{ env.modules }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml" USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
writeConfigKey "modules" "{}" "${USER_CONFIG_FILE}" writeConfigKey "modules" "{}" "${USER_CONFIG_FILE}"
# L="$(for I in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do echo "modules.${I}:"; done)" L="$(for I in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do echo "modules.${I}:"; done)"
# mergeConfigStr p "${L}" "${USER_CONFIG_FILE}" mergeConfigStr p "${L}" "${USER_CONFIG_FILE}"
for M in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do # for M in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do
writeConfigKey "modules.\"${M}\"" "" "${USER_CONFIG_FILE}" # writeConfigKey "modules.\"${M}\"" "" "${USER_CONFIG_FILE}"
done # done
fi fi
sudo ./localbuild.sh build sudo ./localbuild.sh build
@ -254,23 +256,22 @@ jobs:
RR_VERSION_FILE="rr/ws/mnt/p1/RR_VERSION" RR_VERSION_FILE="rr/ws/mnt/p1/RR_VERSION"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml" USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
echo "" > README.txt {
echo "RR: " >> README.txt echo "RR: "
echo " VERSION: $(cat ${RR_VERSION_FILE} 2>/dev/null | head -1)" >> README.txt echo " VERSION: $(cat ${RR_VERSION_FILE} 2>/dev/null | head -1)"
echo " CUSTOM: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> README.txt echo " CUSTOM: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
echo
echo "" >> README.txt echo "DSM:"
echo "DSM:" >> README.txt echo " MODEL: $(readConfigKey "model" "${USER_CONFIG_FILE}")"
echo " MODEL: $(readConfigKey "model" "${USER_CONFIG_FILE}")" >> README.txt echo " VERSION: $(readConfigKey "productver" "${USER_CONFIG_FILE}")"
echo " VERSION: $(readConfigKey "productver" "${USER_CONFIG_FILE}")" >> README.txt echo " KERNEL: $(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
echo " KERNEL: $(readConfigKey "kernel" "${USER_CONFIG_FILE}")" >> README.txt echo " PATURL: $(readConfigKey "paturl" "${USER_CONFIG_FILE}")"
echo " PATURL: $(readConfigKey "paturl" "${USER_CONFIG_FILE}")" >> README.txt echo " PATSUM: $(readConfigKey "patsum" "${USER_CONFIG_FILE}")"
echo " PATSUM: $(readConfigKey "patsum" "${USER_CONFIG_FILE}")" >> README.txt echo
echo
echo "" >> README.txt echo "After the image is written to the disk, it will boot directly into DSM without the need to compile again."
echo "" >> README.txt echo "Of course, you can also modify the settings yourself."
echo "After the image is written to the disk, it will boot directly into DSM without the need to compile again." >> README.txt } >README.txt
echo "Of course, you can also modify the settings yourself." >> README.txt
if [ "${{ env.format }}" = "ova" ]; then if [ "${{ env.format }}" = "ova" ]; then
. scripts/func.sh "${{ secrets.RRORG }}" . scripts/func.sh "${{ secrets.RRORG }}"
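
The README.txt step above replaces a series of per-line ">> README.txt" appends with one brace-grouped block redirected once. A standalone sketch of the same pattern, assuming a hypothetical VERSION file:

#!/usr/bin/env bash
# One redirection covers every command inside the { ...; } group,
# instead of reopening README.txt for each echo.
VERSION_FILE="VERSION" # hypothetical input
{
  echo "RR: "
  echo " VERSION: $(head -1 "${VERSION_FILE}" 2>/dev/null)"
  echo
  echo "Generated at: $(date)"
} >README.txt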

1
.gitignore vendored
View File

@ -12,6 +12,7 @@ rr*.vmdk
tests tests
Changelog* Changelog*
sha256sum
ovftool* ovftool*
OVA* OVA*

View File

@ -1,60 +1,56 @@
#!/usr/bin/env bash #!/usr/bin/env bash
set -e set -e
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" [ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
. "${WORK_PATH}/include/functions.sh"
[ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")" [ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")"
# Sanity check # Sanity check
loaderIsConfigured || die "$(TEXT "Loader is not configured!")" loaderIsConfigured || die "$(TEXT "Loader is not configured!")"
# Clear logs for dbgutils addons # Clear logs for dbgutils addons
rm -rf "${PART1_PATH}/logs" >/dev/null 2>&1 || true rm -rf "${PART1_PATH}/logs" /sys/fs/pstore/* >/dev/null 2>&1 || true
rm -rf /sys/fs/pstore/* >/dev/null 2>&1 || true
# Check if machine has EFI # Check if machine has EFI
[ -d /sys/firmware/efi ] && EFI=1 || EFI=0 EFI=$([ -d /sys/firmware/efi ] && echo 1 || echo 0)
BUS=$(getBus "${LOADER_DISK}") BUS=$(getBus "${LOADER_DISK}")
# Print text centralized # Print text centralized
clear clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}') COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
[ -z "${COLUMNS}" ] && COLUMNS=80 COLUMNS=${COLUMNS:-80}
TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")" TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)" DATE="$(date)"
printf "\033[1;44m%*s\n" ${COLUMNS} "" printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} "" printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;31m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}" printf "\033[1;31m%*s\033[0m\n" "$(((${#TITLE} + ${COLUMNS}) / 2))" "${TITLE}"
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} "" printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;32m%*s\033[0m\n" ${COLUMNS} "${DATE}" printf "\033[1;32m%*s\033[0m\n" "${COLUMNS}" "${DATE}"
TITLE="BOOTING:" TITLE="BOOTING:"
[ ${EFI} -eq 1 ] && TITLE+=" [UEFI]" || TITLE+=" [BIOS]" TITLE+="$([ ${EFI} -eq 1 ] && echo " [UEFI]" || echo " [BIOS]")"
[ "${BUS}" = "usb" ] && TITLE+=" [${BUS^^} flashdisk]" || TITLE+=" [${BUS^^} DoM]" TITLE+="$([ "${BUS}" = "usb" ] && echo " [${BUS^^} flashdisk]" || echo " [${BUS^^} DoM]")"
printf "\033[1;33m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}" printf "\033[1;33m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}"
# Check if DSM zImage changed, patch it if necessary # Check if DSM zImage changed, patch it if necessary
ZIMAGE_HASH="$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")" ZIMAGE_HASH="$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")"
if [ -f ${PART1_PATH}/.build -o "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then
echo -e "\033[1;43m$(TEXT "DSM zImage changed")\033[0m" printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM zImage changed")"
${WORK_PATH}/zimage-patch.sh ${WORK_PATH}/zimage-patch.sh || {
if [ $? -ne 0 ]; then printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM zImage not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
echo -e "\033[1;43m$(TEXT "zImage not patched,\nPlease upgrade the bootloader version and try again.\nPatch error:\n")$(cat "${LOG_FILE}")\033[0m"
exit 1 exit 1
fi }
fi fi
# Check if DSM ramdisk changed, patch it if necessary # Check if DSM ramdisk changed, patch it if necessary
RAMDISK_HASH="$(readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}")" RAMDISK_HASH="$(readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}")"
if [ -f ${PART1_PATH}/.build -o "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then
echo -e "\033[1;43m$(TEXT "DSM Ramdisk changed")\033[0m" printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM ramdisk changed")"
${WORK_PATH}/ramdisk-patch.sh ${WORK_PATH}/ramdisk-patch.sh || {
if [ $? -ne 0 ]; then printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM ramdisk not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
echo -e "\033[1;43m$(TEXT "Ramdisk not patched,\nPlease upgrade the bootloader version and try again.\nPatch error:\n")$(cat "${LOG_FILE}")\033[0m"
exit 1 exit 1
fi }
fi fi
[ -f ${PART1_PATH}/.build ] && rm -f ${PART1_PATH}/.build [ -f ${PART1_PATH}/.build ] && rm -f ${PART1_PATH}/.build
@ -68,17 +64,21 @@ SMALLNUM="$(readConfigKey "smallnum" "${USER_CONFIG_FILE}")"
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")" KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
LKM="$(readConfigKey "lkm" "${USER_CONFIG_FILE}")" LKM="$(readConfigKey "lkm" "${USER_CONFIG_FILE}")"
DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')" DT="$(readConfigKey "platforms.${PLATFORM}.dt" "${WORK_PATH}/platforms.yml")"
CPU="$(echo $(cat /proc/cpuinfo 2>/dev/null | grep 'model name' | uniq | awk -F':' '{print $2}'))" KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
MEM="$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null) MB" KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
echo -e "$(TEXT "Model: ") \033[1;36m${MODEL}(${PLATFORM})\033[0m" DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')"
echo -e "$(TEXT "Version: ") \033[1;36m${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))\033[0m" CPU="$(awk -F': ' '/model name/ {print $2}' /proc/cpuinfo | uniq)"
echo -e "$(TEXT "Kernel: ") \033[1;36m${KERNEL}\033[0m" MEM="$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo) MB"
echo -e "$(TEXT "LKM: ") \033[1;36m${LKM}\033[0m"
echo -e "$(TEXT "DMI: ") \033[1;36m${DMI}\033[0m" printf "%s \033[1;36m%s(%s)\033[0m\n" "$(TEXT "Model: ")" "${MODEL}" "${PLATFORM}"
echo -e "$(TEXT "CPU: ") \033[1;36m${CPU}\033[0m" printf "%s \033[1;36m%s(%s%s)\033[0m\n" "$(TEXT "Version: ")" "${PRODUCTVER}" "${BUILDNUM}" "$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}")"
echo -e "$(TEXT "MEM: ") \033[1;36m${MEM}\033[0m" printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "Kernel: ")" "${KERNEL}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "LKM: ")" "${LKM}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "DMI: ")" "${DMI}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "CPU: ")" "${CPU}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "MEM: ")" "${MEM}"
if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then
HASATA=0 HASATA=0
@ -89,7 +89,11 @@ if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then
break break
fi fi
done done
[ ${HASATA} = "0" ] && echo -e "\033[1;33m*** $(TEXT "Please insert at least one sata/scsi disk for system installation, except for the bootloader disk.") ***\033[0m" [ ${HASATA} = "0" ] && printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please insert at least one sata/scsi disk for system installation (except for the bootloader disk).")"
fi
if checkBIOS_VT_d && [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please disable Intel(VT-d)/AMD(AMD-Vi) in BIOS/UEFI settings if you encounter a boot failure.")"
fi fi
VID="$(readConfigKey "vid" "${USER_CONFIG_FILE}")" VID="$(readConfigKey "vid" "${USER_CONFIG_FILE}")"
@ -110,7 +114,10 @@ CMDLINE['pid']="${PID:-"0x0001"}" # Sanity check
CMDLINE['sn']="${SN}" CMDLINE['sn']="${SN}"
CMDLINE['netif_num']="0" CMDLINE['netif_num']="0"
[ -z "${MAC1}" -a -n "${MAC2}" ] && MAC1=${MAC2} && MAC2="" # Sanity check [ -z "${MAC1}" ] && [ -n "${MAC2}" ] && {
MAC1=${MAC2}
MAC2=""
} # Sanity check
[ -n "${MAC1}" ] && CMDLINE['mac1']="${MAC1}" && CMDLINE['netif_num']="1" [ -n "${MAC1}" ] && CMDLINE['mac1']="${MAC1}" && CMDLINE['netif_num']="1"
[ -n "${MAC2}" ] && CMDLINE['mac2']="${MAC2}" && CMDLINE['netif_num']="2" [ -n "${MAC2}" ] && CMDLINE['mac2']="${MAC2}" && CMDLINE['netif_num']="2"
@ -127,9 +134,6 @@ if [ ${EFI} -eq 1 ]; then
else else
CMDLINE['noefi']="" CMDLINE['noefi']=""
fi fi
DT="$(readConfigKey "platforms.${PLATFORM}.dt" "${WORK_PATH}/platforms.yml")"
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
if [ ! "${BUS}" = "usb" ]; then if [ ! "${BUS}" = "usb" ]; then
SZ=$(blockdev --getsz ${LOADER_DISK} 2>/dev/null) # SZ=$(cat /sys/block/${LOADER_DISK/\/dev\//}/size) SZ=$(blockdev --getsz ${LOADER_DISK} 2>/dev/null) # SZ=$(cat /sys/block/${LOADER_DISK/\/dev\//}/size)
@ -144,6 +148,7 @@ if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
else else
CMDLINE["split_lock_detect"]="off" CMDLINE["split_lock_detect"]="off"
fi fi
if [ "${DT}" = "true" ]; then if [ "${DT}" = "true" ]; then
CMDLINE["syno_ttyS0"]="serial,0x3f8" CMDLINE["syno_ttyS0"]="serial,0x3f8"
CMDLINE["syno_ttyS1"]="serial,0x2f8" CMDLINE["syno_ttyS1"]="serial,0x2f8"
@ -152,10 +157,10 @@ else
CMDLINE["syno_hdd_detect"]="0" CMDLINE["syno_hdd_detect"]="0"
CMDLINE["syno_hdd_powerup_seq"]="0" CMDLINE["syno_hdd_powerup_seq"]="0"
fi fi
CMDLINE["HddHotplug"]="1" CMDLINE["HddHotplug"]="1"
CMDLINE["vender_format_version"]="2" CMDLINE["vender_format_version"]="2"
CMDLINE['skip_vender_mac_interfaces']="0,1,2,3,4,5,6,7" CMDLINE['skip_vender_mac_interfaces']="0,1,2,3,4,5,6,7"
CMDLINE['earlyprintk']="" CMDLINE['earlyprintk']=""
CMDLINE['earlycon']="uart8250,io,0x3f8,115200n8" CMDLINE['earlycon']="uart8250,io,0x3f8,115200n8"
CMDLINE['console']="ttyS0,115200n8" CMDLINE['console']="ttyS0,115200n8"
@ -190,39 +195,39 @@ fi
if echo "apollolake geminilake" | grep -wq "${PLATFORM}"; then if echo "apollolake geminilake" | grep -wq "${PLATFORM}"; then
CMDLINE["intel_iommu"]="igfx_off" CMDLINE["intel_iommu"]="igfx_off"
fi fi
if echo "purley broadwellnkv2" | grep -wq "${PLATFORM}"; then if echo "purley broadwellnkv2" | grep -wq "${PLATFORM}"; then
CMDLINE["SASmodel"]="1" CMDLINE["SASmodel"]="1"
fi fi
while IFS=': ' read KEY VALUE; do while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["network.${KEY}"]="${VALUE}" [ -n "${KEY}" ] && CMDLINE["network.${KEY}"]="${VALUE}"
done <<<$(readConfigMap "network" "${USER_CONFIG_FILE}") done <<<$(readConfigMap "network" "${USER_CONFIG_FILE}")
while IFS=': ' read KEY VALUE; do while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}" [ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "cmdline" "${USER_CONFIG_FILE}") done <<<$(readConfigMap "cmdline" "${USER_CONFIG_FILE}")
# Prepare command line # Prepare command line
CMDLINE_LINE="" CMDLINE_LINE=""
for KEY in ${!CMDLINE[@]}; do for KEY in "${!CMDLINE[@]}"; do
VALUE="${CMDLINE[${KEY}]}" VALUE="${CMDLINE[${KEY}]}"
CMDLINE_LINE+=" ${KEY}" CMDLINE_LINE+=" ${KEY}"
[ -n "${VALUE}" ] && CMDLINE_LINE+="=${VALUE}" [ -n "${VALUE}" ] && CMDLINE_LINE+="=${VALUE}"
done done
CMDLINE_LINE=$(echo "${CMDLINE_LINE}" | sed 's/^ //') # Remove leading space CMDLINE_LINE=$(echo "${CMDLINE_LINE}" | sed 's/^ //') # Remove leading space
echo -e "$(TEXT "Cmdline:\n")\033[1;36m${CMDLINE_LINE}\033[0m" printf "%s:\n \033[1;36m%s\033[0m\n" "$(TEXT "Cmdline")" "${CMDLINE_LINE}"
function _bootwait() { function _bootwait() {
BOOTWAIT="$(readConfigKey "bootwait" "${USER_CONFIG_FILE}")" BOOTWAIT="$(readConfigKey "bootwait" "${USER_CONFIG_FILE}")"
[ -z "${BOOTWAIT}" ] && BOOTWAIT=10 [ -z "${BOOTWAIT}" ] && BOOTWAIT=10
busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WB busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WB
MSG="" MSG=""
while test ${BOOTWAIT} -ge 0; do while [ ${BOOTWAIT} -ge 0 ]; do
MSG="$(printf "\033[1;33m$(TEXT "%2ds (Changing access(ssh/web) status will interrupt boot)")\033[0m" "${BOOTWAIT}")" MSG="$(printf "\033[1;33m$(TEXT "%2ds (Changing access(ssh/web) status will interrupt boot)")\033[0m" "${BOOTWAIT}")"
echo -en "\r${MSG}" printf "\r${MSG}"
busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WC busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WC
if ! diff WB WC >/dev/null 2>&1; then if ! diff WB WC >/dev/null 2>&1; then
echo -en "\r\033[1;33m$(TEXT "access(ssh/web) status has changed and booting is interrupted.")\033[0m\n" printf "\r\033[1;33m%s\033[0m\n" "$(TEXT "access(ssh/web) status has changed and booting is interrupted.")"
rm -f WB WC rm -f WB WC
return 1 return 1
fi fi
@ -230,7 +235,7 @@ function _bootwait() {
BOOTWAIT=$((BOOTWAIT - 1)) BOOTWAIT=$((BOOTWAIT - 1))
done done
rm -f WB WC rm -f WB WC
echo -en "\r$(printf "%$((${#MSG} * 2))s" " ")\n" printf "\r%$((${#MSG} * 2))s\n" " "
return 0 return 0
} }
@ -251,7 +256,7 @@ if [ "${DIRECT}" = "true" ]; then
_bootwait || exit 0 _bootwait || exit 0
echo -e "\033[1;33m$(TEXT "Reboot to boot directly in DSM")\033[0m" printf "\033[1;33m%s\033[0m\n" "$(TEXT "Reboot to boot directly in DSM")"
reboot reboot
exit 0 exit 0
else else
@ -266,11 +271,11 @@ else
grub-editenv ${USER_GRUBENVFILE} unset dsm_cmdline grub-editenv ${USER_GRUBENVFILE} unset dsm_cmdline
grub-editenv ${USER_GRUBENVFILE} unset next_entry grub-editenv ${USER_GRUBENVFILE} unset next_entry
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
echo "$(printf "$(TEXT "Detected %s network cards.")" "$(echo ${ETHX} | wc -w)")" printf "$(TEXT "Detected %s network cards.\n")" "$(echo "${ETHX}" | wc -w)"
echo -en "$(TEXT "Checking Connect.")" printf "$(TEXT "Checking Connect.")"
COUNT=0 COUNT=0
BOOTIPWAIT="$(readConfigKey "bootipwait" "${USER_CONFIG_FILE}")" BOOTIPWAIT="$(readConfigKey "bootipwait" "${USER_CONFIG_FILE}")"
[ -z "${BOOTIPWAIT}" ] && BOOTIPWAIT=10 BOOTIPWAIT=${BOOTIPWAIT:-10}
while [ ${COUNT} -lt $((${BOOTIPWAIT} + 32)) ]; do while [ ${COUNT} -lt $((${BOOTIPWAIT} + 32)) ]; do
MSG="" MSG=""
for N in ${ETHX}; do for N in ${ETHX}; do
@ -279,77 +284,79 @@ else
fi fi
done done
if [ -n "${MSG}" ]; then if [ -n "${MSG}" ]; then
echo -en "\r${MSG}$(TEXT "connected.") \n" printf "\r%s%s \n" "${MSG}" "$(TEXT "connected.")"
break break
fi fi
COUNT=$((${COUNT} + 1)) COUNT=$((COUNT + 1))
echo -n "." printf "."
sleep 1 sleep 1
done done
[ ! -f /var/run/dhcpcd/pid ] && /etc/init.d/S41dhcpcd restart >/dev/null 2>&1 || true [ ! -f /var/run/dhcpcd/pid ] && /etc/init.d/S41dhcpcd restart >/dev/null 2>&1 || true
echo "$(TEXT "Waiting IP.")" printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do for N in ${ETHX}; do
COUNT=0 COUNT=0
DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}') DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
echo -en "${N}(${DRIVER}): " printf "%s(%s): " "${N}" "${DRIVER}"
while true; do while true; do
if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "DOWN")\n" printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
break break
fi fi
if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "NOT CONNECTED")\n" printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
break break
fi fi
if [ ${COUNT} -eq ${BOOTIPWAIT} ]; then # Under normal circumstances, no errors should occur here. if [ ${COUNT} -eq ${BOOTIPWAIT} ]; then # Under normal circumstances, no errors should occur here.
echo -en "\r${N}(${DRIVER}): $(TEXT "TIMEOUT (Please check the IP on the router.)")\n" printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
break break
fi fi
COUNT=$((${COUNT} + 1)) COUNT=$((COUNT + 1))
IP="$(getIP ${N})" IP="$(getIP "${N}")"
if [ -n "${IP}" ]; then if [ -n "${IP}" ]; then
if [[ "${IP}" =~ ^169\.254\..* ]]; then if echo "${IP}" | grep -q "^169\.254\."; then
echo -en "\r${N}(${DRIVER}): $(TEXT "LINK LOCAL (No DHCP server detected.)")\n" printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
else else
echo -en "\r${N}(${DRIVER}): $(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")\n" printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")"
fi fi
break break
fi fi
echo -n "." printf "."
sleep 1 sleep 1
done done
done done
_bootwait || exit 0 _bootwait || exit 0
echo -e "\033[1;37m$(TEXT "Loading DSM kernel ...")\033[0m" printf "\033[1;37m%s\033[0m\n" "$(TEXT "Loading DSM kernel ...")"
DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")" DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")"
if [ "${DSMLOGO}" = "true" -a -c "/dev/fb0" ]; then if [ "${DSMLOGO}" = "true" ] && [ -c "/dev/fb0" ]; then
IP="$(getIP)" IP="$(getIP)"
[[ "${IP}" =~ ^169\.254\..* ]] && IP="" echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:5000" || URL="http://find.synology.com/" [ -n "${IP}" ] && URL="http://${IP}:5000" || URL="http://find.synology.com/"
python ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png" python3 ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png"
[ -f "${TMP_PATH}/qrcode_boot.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_boot.png" >/dev/null 2>/dev/null || true [ -f "${TMP_PATH}/qrcode_boot.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_boot.png" >/dev/null 2>/dev/null || true
python ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png" python3 ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true [ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi fi
# Executes DSM kernel via KEXEC # Executes DSM kernel via KEXEC
KEXECARGS="-a" KEXECARGS="-a"
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 4 ] && [ ${EFI} -eq 1 ]; then if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 4 ] && [ ${EFI} -eq 1 ]; then
echo -e "\033[1;33m$(TEXT "Warning, running kexec with --noefi param, strange things will happen!!")\033[0m" printf "\033[1;33m%s\033[0m\n" "$(TEXT "Warning, running kexec with --noefi param, strange things will happen!!")"
KEXECARGS+=" --noefi" KEXECARGS+=" --noefi"
fi fi
kexec ${KEXECARGS} -l "${MOD_ZIMAGE_FILE}" --initrd "${MOD_RDGZ_FILE}" --command-line="${CMDLINE_LINE} kexecboot" >"${LOG_FILE}" 2>&1 || dieLog kexec ${KEXECARGS} -l "${MOD_ZIMAGE_FILE}" --initrd "${MOD_RDGZ_FILE}" --command-line="${CMDLINE_LINE} kexecboot" >"${LOG_FILE}" 2>&1 || dieLog
echo -e "\033[1;37m$(TEXT "Booting ...")\033[0m" printf "\033[1;37m%s\033[0m\n" "$(TEXT "Booting ...")"
# show warning message # show warning message
for T in $(busybox w 2>/dev/null | grep -v 'TTY' | awk '{print $2}'); do for T in $(busybox w 2>/dev/null | grep -v 'TTY' | awk '{print $2}'); do
[ -w "/dev/${T}" ] && echo -e "\n\033[1;43m$(TEXT "[This interface will not be operational. Please wait a few minutes.\nFind DSM via http://find.synology.com/ or Synology Assistant and connect.]")\033[0m\n" >"/dev/${T}" 2>/dev/null || true if [ -w "/dev/${T}" ]; then
echo -e "\n\033[1;43m$(TEXT "Interface not operational. Wait a few minutes.\nFind DSM via http://find.synology.com/ or Synology Assistant.")\033[0m\n" > "/dev/${T}" 2>/dev/null || true
fi
done done
# # Unload all network interfaces # # Unload all network interfaces
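
Another theme in the boot script above is swapping echo -e / echo -en for printf with an explicit format string, so the escape sequences live in the format and the printed data is passed as plain arguments. A small sketch of the two patterns the hunks use (colored banner and in-place countdown); the message text is illustrative:

#!/usr/bin/env bash
MSG="DSM zImage changed"

# Colored one-line banner: the format holds the escapes, %s holds the data.
printf "\033[1;43m%s\033[0m\n" "${MSG}"

# In-place countdown, as _bootwait does, using \r to rewrite the same line.
for i in 3 2 1; do
  printf "\r%2ds remaining" "${i}"
  sleep 1
done
printf "\r%20s\n" " " # clear the line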

View File

@ -1,16 +1,16 @@
#!/usr/bin/env bash #!/usr/bin/env bash
read_u8() { read_u8() {
dd if=$1 bs=1 skip=$(($2)) count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+' dd if="${1}" bs=1 skip="$((${2}))" count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+'
} }
read_u32() { read_u32() {
dd if=$1 bs=1 skip=$(($2)) count=4 2>/dev/null | od -An -tu4 | grep -Eo '[0-9]+' dd if="${1}" bs=1 skip="$((${2}))" count=4 2>/dev/null | od -An -tu4 | grep -Eo '[0-9]+'
} }
set -x set -x
setup_size=$(read_u8 $1 0x1f1) setup_size=$(read_u8 "${1}" 0x1f1)
payload_offset=$(read_u32 $1 0x248) payload_offset=$(read_u32 "${1}" 0x248)
payload_length=$(read_u32 $1 0x24c) payload_length=$(read_u32 "${1}" 0x24c)
inner_pos=$((($setup_size + 1) * 512)) inner_pos=$(((setup_size + 1) * 512))
tail -c+$(($inner_pos + 1)) $1 | tail -c+$(($payload_offset + 1)) | head -c $(($payload_length)) | head -c $(($payload_length - 4)) | unlzma >$2 tail -c+$((inner_pos + 1)) "${1}" | tail -c+$((payload_offset + 1)) | head -c "${payload_length}" | head -c $((payload_length - 4)) | unlzma >"${2}"
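
The script above follows the x86 boot protocol: the byte at offset 0x1f1 is the setup-sector count, and the 32-bit fields at 0x248/0x24c give the compressed payload's offset and length, which is then piped through unlzma. A usage sketch; the script and file names are hypothetical, and it assumes an LZMA-compressed bzImage:

# Hypothetical invocation: pull the LZMA payload out of a bzImage and decompress it.
sh ./bzImage-payload-extract.sh /mnt/p2/zImage /tmp/vmlinux
ls -lh /tmp/vmlinux # the decompressed kernel should be much larger than the bzImage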

View File

@ -7,28 +7,22 @@
# objdump -h a.out | sh calc_run_size.sh # objdump -h a.out | sh calc_run_size.sh
NUM='\([0-9a-fA-F]*[ \t]*\)' NUM='\([0-9a-fA-F]*[ \t]*\)'
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/\1\4/p') OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/0x\1 0x\4/p')
if [ -z "$OUT" ]; then
if [ -z "${OUT}" ]; then
echo "Never found .bss or .brk file offset" >&2 echo "Never found .bss or .brk file offset" >&2
exit 1 exit 1
fi fi
OUT=$(echo ${OUT# }) read -r sizeA offsetA sizeB offsetB <<<$(echo ${OUT} | awk '{printf "%d %d %d %d", strtonum($1), strtonum($2), strtonum($3), strtonum($4)}')
sizeA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
sizeB=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetB=$(printf "%d" 0x${OUT%% *})
run_size=$((${offsetA} + ${sizeA} + ${sizeB})) runSize=$((offsetA + sizeA + sizeB))
# BFD linker shows the same file offset in ELF. # BFD linker shows the same file offset in ELF.
if [ "${offsetA}" -ne "${offsetB}" ]; then if [ "${offsetA}" -ne "${offsetB}" ]; then
# Gold linker shows them as consecutive. # Gold linker shows them as consecutive.
endB=$((${offsetB} + ${sizeB})) endSize=$((offsetB + sizeB))
if [ "$endB" != "$run_size" ]; then if [ "${endSize}" -ne "${runSize}" ]; then
printf "sizeA: 0x%x\n" ${sizeA} >&2 printf "sizeA: 0x%x\n" ${sizeA} >&2
printf "offsetA: 0x%x\n" ${offsetA} >&2 printf "offsetA: 0x%x\n" ${offsetA} >&2
printf "sizeB: 0x%x\n" ${sizeB} >&2 printf "sizeB: 0x%x\n" ${sizeB} >&2
@ -38,5 +32,5 @@ if [ "${offsetA}" -ne "${offsetB}" ]; then
fi fi
fi fi
printf "%d\n" ${run_size} printf "%d\n" ${runSize}
exit 0 exit 0

View File

@ -10,27 +10,24 @@
# #
# ---------------------------------------------------------------------- # ----------------------------------------------------------------------
check_vmlinux() check_vmlinux() {
{
# Use readelf to check if it's a valid ELF # Use readelf to check if it's a valid ELF
# TODO: find a better to way to check that it's really vmlinux # TODO: find a better to way to check that it's really vmlinux
# and not just an elf # and not just an elf
readelf -h $1 > /dev/null 2>&1 || return 1 readelf -h $1 >/dev/null 2>&1 || return 1
cat $1 cat $1
exit 0 exit 0
} }
try_decompress() try_decompress() {
{
# The obscure use of the "tr" filter is to work around older versions of # The obscure use of the "tr" filter is to work around older versions of
# "grep" that report the byte offset of the line instead of the pattern. # "grep" that report the byte offset of the line instead of the pattern.
# Try to find the header ($1) and decompress from here # Try to find the header ($1) and decompress from here
for pos in `tr "$1\n$2" "\n$2=" < "$img" | grep -abo "^$2"` for pos in $(tr "$1\n$2" "\n$2=" <"$img" | grep -abo "^$2"); do
do
pos=${pos%%:*} pos=${pos%%:*}
tail -c+$pos "$img" | $3 > $tmp 2> /dev/null tail -c+$pos "$img" | $3 >$tmp 2>/dev/null
check_vmlinux $tmp check_vmlinux $tmp
done done
} }
@ -38,8 +35,7 @@ try_decompress()
# Check invocation: # Check invocation:
me=${0##*/} me=${0##*/}
img=$1 img=$1
if [ $# -ne 1 -o ! -s "$img" ] if [ $# -ne 1 ] || [ ! -s "$img" ]; then
then
echo "Usage: $me <kernel-image>" >&2 echo "Usage: $me <kernel-image>" >&2
exit 2 exit 2
fi fi

View File

@ -3,23 +3,23 @@
# 1 - Platform # 1 - Platform
# 2 - Kernel Version # 2 - Kernel Version
function availableAddons() { function availableAddons() {
if [ -z "${1}" -o -z "${2}" ]; then if [ -z "${1}" ] || [ -z "${2}" ]; then
echo "" echo ""
return 1 return 1
fi fi
for D in $(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort); do while read -r D; do
[ ! -f "${D}/manifest.yml" ] && continue [ ! -f "${D}/manifest.yml" ] && continue
ADDON=$(basename ${D}) local ADDON=$(basename "${D}")
checkAddonExist "${ADDON}" "${1}" "${2}" || continue checkAddonExist "${ADDON}" "${1}" "${2}" || continue
SYSTEM=$(readConfigKey "system" "${D}/manifest.yml") local SYSTEM=$(readConfigKey "system" "${D}/manifest.yml")
[ "${SYSTEM}" = "true" ] && continue [ "${SYSTEM}" = "true" ] && continue
LOCALE="${LC_ALL%%.*}" local LOCALE="${LC_ALL%%.*}"
DESC="" local DESC=""
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.${LOCALE:-"en_US"}" "${D}/manifest.yml")" [ -z "${DESC}" ] && DESC="$(readConfigKey "description.${LOCALE:-"en_US"}" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.en_US" "${D}/manifest.yml")" [ -z "${DESC}" ] && DESC="$(readConfigKey "description.en_US" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description" "${D}/manifest.yml")" [ -z "${DESC}" ] && DESC="$(readConfigKey "description" "${D}/manifest.yml")"
echo -e "${ADDON}\t${DESC:-"unknown"}" echo -e "${ADDON}\t${DESC:-"unknown"}"
done done <<<$(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort)
} }
############################################################################### ###############################################################################
@ -29,7 +29,7 @@ function availableAddons() {
# 3 - Kernel Version # 3 - Kernel Version
# Return ERROR if not exists # Return ERROR if not exists
function checkAddonExist() { function checkAddonExist() {
if [ -z "${1}" -o -z "${2}" -o -z "${3}" ]; then if [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ]; then
return 1 # ERROR return 1 # ERROR
fi fi
# First check generic files # First check generic files
@ -80,7 +80,7 @@ function installAddon() {
fi fi
cp -f "${TMP_PATH}/${ADDON}/install.sh" "${RAMDISK_PATH}/addons/${ADDON}.sh" 2>"${LOG_FILE}" cp -f "${TMP_PATH}/${ADDON}/install.sh" "${RAMDISK_PATH}/addons/${ADDON}.sh" 2>"${LOG_FILE}"
chmod +x "${RAMDISK_PATH}/addons/${ADDON}.sh" chmod +x "${RAMDISK_PATH}/addons/${ADDON}.sh"
[ -d ${TMP_PATH}/${ADDON}/root ] && (cp -rnf "${TMP_PATH}/${ADDON}/root/"* "${RAMDISK_PATH}/" 2>"${LOG_FILE}") [ -d "${TMP_PATH}/${ADDON}/root" ] && cp -rnf "${TMP_PATH}/${ADDON}/root/"* "${RAMDISK_PATH}/" 2>"${LOG_FILE}"
rm -rf "${TMP_PATH}/${ADDON}" rm -rf "${TMP_PATH}/${ADDON}"
return 0 return 0
} }
@ -88,7 +88,7 @@ function installAddon() {
############################################################################### ###############################################################################
# Untar an addon to correct path # Untar an addon to correct path
# 1 - Addon file path # 1 - Addon file path
# Return name of addon on sucess or empty on error # Return name of addon on success or empty on error
function untarAddon() { function untarAddon() {
if [ -z "${1}" ]; then if [ -z "${1}" ]; then
echo "" echo ""
@ -96,10 +96,11 @@ function untarAddon() {
fi fi
rm -rf "${TMP_PATH}/addon" rm -rf "${TMP_PATH}/addon"
mkdir -p "${TMP_PATH}/addon" mkdir -p "${TMP_PATH}/addon"
tar -xaf "${1}" -C "${TMP_PATH}/addon" || return tar -xaf "${1}" -C "${TMP_PATH}/addon" || return 1
local ADDON=$(readConfigKey "name" "${TMP_PATH}/addon/manifest.yml") local ADDON=$(readConfigKey "name" "${TMP_PATH}/addon/manifest.yml")
[ -z "${ADDON}" ] && return [ -z "${ADDON}" ] && return 1
rm -rf "${ADDONS_PATH}/${ADDON}" rm -rf "${ADDONS_PATH}/${ADDON}"
mv -f "${TMP_PATH}/addon" "${ADDONS_PATH}/${ADDON}" mv -f "${TMP_PATH}/addon" "${ADDONS_PATH}/${ADDON}"
echo "${ADDON}" echo "${ADDON}"
return 0
} }

View File

@ -3,7 +3,7 @@
# 1 - Path of Key # 1 - Path of Key
# 2 - Path of yaml config file # 2 - Path of yaml config file
function deleteConfigKey() { function deleteConfigKey() {
yq eval 'del(.'${1}')' --inplace "${2}" 2>/dev/null yq eval "del(.${1})" --inplace "${2}" 2>/dev/null
} }
############################################################################### ###############################################################################
@ -12,7 +12,8 @@ function deleteConfigKey() {
# 2 - Value # 2 - Value
# 3 - Path of yaml config file # 3 - Path of yaml config file
function writeConfigKey() { function writeConfigKey() {
[ "${2}" = "{}" ] && yq eval '.'${1}' = {}' --inplace "${3}" 2>/dev/null || yq eval '.'${1}' = "'"${2}"'"' --inplace "${3}" 2>/dev/null local value="${2}"
[ "${value}" = "{}" ] && yq eval ".${1} = {}" --inplace "${3}" 2>/dev/null || yq eval ".${1} = \"${value}\"" --inplace "${3}" 2>/dev/null
} }
############################################################################### ###############################################################################
@ -21,19 +22,20 @@ function writeConfigKey() {
# 2 - Path of yaml config file # 2 - Path of yaml config file
# Return Value # Return Value
function readConfigKey() { function readConfigKey() {
RESULT=$(yq eval '.'${1}' | explode(.)' "${2}" 2>/dev/null) local result=$(yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${RESULT}" == "null" ] && echo "" || echo "${RESULT}" [ "${result}" = "null" ] && echo "" || echo "${result}"
} }
###############################################################################
# Write to yaml config file # Write to yaml config file
# 1 - format # 1 - format
# 2 - string # 2 - string
# 3 - Path of yaml config file # 3 - Path of yaml config file
function mergeConfigStr() { function mergeConfigStr() {
local JF=$(mktemp) local xmlfile=$(mktemp)
echo "${2}" | yq -p ${1} -o y > "${JF}" echo "${2}" | yq -p "${1}" -o y >"${xmlfile}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${JF}" 2>/dev/null yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${xmlfile}" 2>/dev/null
rm -f "${JF}" rm -f "${xmlfile}"
} }
############################################################################### ###############################################################################
@ -51,7 +53,7 @@ function initConfigKey() {
# 2 - Path of yaml config file # 2 - Path of yaml config file
# Returns map of values # Returns map of values
function readConfigMap() { function readConfigMap() {
yq eval '.'${1}' | explode(.) | to_entries | map([.key, .value] | join(": ")) | .[]' "${2}" 2>/dev/null yq eval ".${1} | explode(.) | to_entries | map([.key, .value] | join(\": \")) | .[]" "${2}" 2>/dev/null
} }
############################################################################### ###############################################################################
@ -60,7 +62,7 @@ function readConfigMap() {
# 2 - Path of yaml config file # 2 - Path of yaml config file
# Returns array/map of values # Returns array/map of values
function readConfigArray() { function readConfigArray() {
yq eval '.'${1}'[]' "${2}" 2>/dev/null yq eval ".${1}[]" "${2}" 2>/dev/null
} }
############################################################################### ###############################################################################
@ -69,7 +71,7 @@ function readConfigArray() {
# 2 - Path of yaml config file # 2 - Path of yaml config file
# Returns array of values # Returns array of values
function readConfigEntriesArray() { function readConfigEntriesArray() {
yq eval '.'${1}' | explode(.) | to_entries | map([.key])[] | .[]' "${2}" 2>/dev/null yq eval ".${1} | explode(.) | to_entries | map([.key])[] | .[]" "${2}" 2>/dev/null
} }
############################################################################### ###############################################################################
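
The helpers above wrap mikefarah's yq v4 eval syntax. A minimal usage sketch against a throwaway file, assuming yq v4 is on PATH and include/configFile.sh has been sourced; key names and values are illustrative:

# Assumes: . "${WORK_PATH}/include/configFile.sh" has been sourced and yq v4 is installed.
CONF="$(mktemp)"

writeConfigKey "modules" "{}" "${CONF}"          # create an empty map
writeConfigKey "modules.\"e1000e\"" "" "${CONF}" # add a key with an empty value
writeConfigKey "model" "SA6400" "${CONF}"

readConfigKey "model" "${CONF}"            # prints: SA6400
readConfigEntriesArray "modules" "${CONF}" # prints: e1000e

rm -f "${CONF}"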

View File

@ -5,6 +5,11 @@
# This is free software, licensed under the MIT License. # This is free software, licensed under the MIT License.
# See /LICENSE for more information. # See /LICENSE for more information.
# #
# This script is a CLI to RR.
#
# # Back up python3's EXTERNALLY-MANAGED marker so pip can install packages system-wide.
# sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
# sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
import os, click import os, click
@ -31,17 +36,13 @@ def validate_required_param(ctx, param, value):
raise click.MissingParameter(param_decls=[param.name]) raise click.MissingParameter(param_decls=[param.name])
return value return value
def __fullversion(ver): def __fullversion(ver):
out = ver
arr = ver.split('-') arr = ver.split('-')
if len(arr) > 0: a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
a = arr[0].split('.')[0] if len(arr[0].split('.')) > 0 else '0'
b = arr[0].split('.')[1] if len(arr[0].split('.')) > 1 else '0'
c = arr[0].split('.')[2] if len(arr[0].split('.')) > 2 else '0'
d = arr[1] if len(arr) > 1 else '00000' d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0' e = arr[2] if len(arr) > 2 else '0'
out = '{}.{}.{}-{}-{}'.format(a,b,c,d,e) return f'{a}.{b}.{c}-{d}-{e}'
return out
@cli.command() @cli.command()
@ -63,24 +64,24 @@ def makeqr(data, file, location, output):
FBIOPUT_VSCREENINFO = 0x4601 FBIOPUT_VSCREENINFO = 0x4601
FBIOGET_FSCREENINFO = 0x4602 FBIOGET_FSCREENINFO = 0x4602
FBDEV = "/dev/fb0" FBDEV = "/dev/fb0"
if data is not None:
qr = qrcode.QRCode(version=1, box_size=10, error_correction=qrcode.constants.ERROR_CORRECT_H, border=4,) if data:
qr = qrcode.QRCode(version=1, box_size=10, error_correction=qrcode.constants.ERROR_CORRECT_H, border=4)
qr.add_data(data) qr.add_data(data)
qr.make(fit=True) qr.make(fit=True)
img = qr.make_image(fill_color="purple", back_color="white") img = qr.make_image(fill_color="purple", back_color="white").convert("RGBA")
img = img.convert("RGBA")
pixels = img.load() pixels = img.load()
for i in range(img.size[0]): for i in range(img.size[0]):
for j in range(img.size[1]): for j in range(img.size[1]):
if pixels[i, j] == (255, 255, 255, 255): if pixels[i, j] == (255, 255, 255, 255):
pixels[i, j] = (255, 255, 255, 0) pixels[i, j] = (255, 255, 255, 0)
if os.path.exists(os.path.join(WORK_PATH, "logo.png")): logo_path = os.path.join(WORK_PATH, "logo.png")
icon = Image.open(os.path.join(WORK_PATH, "logo.png")) if os.path.exists(logo_path):
icon = icon.convert("RGBA") icon = Image.open(logo_path).convert("RGBA")
img.paste(icon.resize((int(img.size[0] / 5), int(img.size[1] / 5))), (int((img.size[0] - int(img.size[0] / 5)) / 2), int((img.size[1] - int(img.size[1] / 5)) / 2),),) img.paste(icon.resize((img.size[0] // 5, img.size[1] // 5)), ((img.size[0] - img.size[0] // 5) // 2, (img.size[1] - img.size[1] // 5) // 2))
if file is not None: elif file:
img = Image.open(file) img = Image.open(file)
# img = img.convert("RGBA") # img = img.convert("RGBA")
# pixels = img.load() # pixels = img.load()
@ -88,25 +89,22 @@ def makeqr(data, file, location, output):
# for j in range(img.size[1]): # for j in range(img.size[1]):
# if pixels[i, j] == (255, 255, 255, 255): # if pixels[i, j] == (255, 255, 255, 255):
# pixels[i, j] = (255, 255, 255, 0) # pixels[i, j] = (255, 255, 255, 0)
else:
raise click.UsageError("Either data or file must be provided.")
(xres, yres) = (1920, 1080)
with open(FBDEV, "rb") as fb: with open(FBDEV, "rb") as fb:
vi = fcntl.ioctl(fb, FBIOGET_VSCREENINFO, bytes(160)) vi = fcntl.ioctl(fb, FBIOGET_VSCREENINFO, bytes(160))
res = struct.unpack("I" * 40, vi) res = struct.unpack("I" * 40, vi)
if res[0] != 0 and res[1] != 0: xres, yres = res[0], res[1] if res[0] and res[1] else (1920, 1080)
(xres, yres) = (res[0], res[1])
xqr, yqr = (int(xres / 8), int(xres / 8))
img = img.resize((xqr, yqr))
img = img.resize((xres // 8, xres // 8))
alpha = Image.new("RGBA", (xres, yres), (0, 0, 0, 0)) alpha = Image.new("RGBA", (xres, yres), (0, 0, 0, 0))
if int(location) not in range(0, 8): loc = (img.size[0] * location, alpha.size[1] - img.size[1])
location = 0
loc = (img.size[0] * int(location), alpha.size[1] - img.size[1])
alpha.paste(img, loc) alpha.paste(img, loc)
alpha.save(output) alpha.save(output)
except: except Exception as e:
pass click.echo(f"Error: {e}")
@cli.command() @cli.command()
@ -119,16 +117,13 @@ def getmodels(platforms=None):
from requests.adapters import HTTPAdapter from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])) adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session() session = requests.Session()
session.mount("http://", adapter) session.mount("http://", adapter)
session.mount("https://", adapter) session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if platforms is not None and platforms != "": PS = platforms.lower().replace(",", " ").split() if platforms else []
PS = platforms.lower().replace(",", " ").split()
else:
PS = []
models = [] models = []
try: try:
@ -136,71 +131,66 @@ def getmodels(platforms=None):
req.encoding = "utf-8" req.encoding = "utf-8"
data = json.loads(req.text) data = json.loads(req.text)
for I in data["channel"]["item"]: for item in data["channel"]["item"]:
if not I["title"].startswith("DSM"): if not item["title"].startswith("DSM"):
continue continue
for J in I["model"]: for model in item["model"]:
arch = J["mUnique"].split("_")[1] arch = model["mUnique"].split("_")[1]
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+") name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if len(PS) > 0 and arch.lower() not in PS: if PS and arch.lower() not in PS:
continue
if any(name == B["name"] for B in models):
continue continue
if not any(m["name"] == name for m in models):
models.append({"name": name, "arch": arch}) models.append({"name": name, "arch": arch})
models = sorted(models, key=lambda k: (k["arch"], k["name"])) models.sort(key=lambda k: (k["arch"], k["name"]))
except: except Exception as e:
pass click.echo(f"Error: {e}")
models.sort(key=lambda x: (x["arch"], x["name"]))
print(json.dumps(models, indent=4)) print(json.dumps(models, indent=4))
@cli.command() @cli.command()
@click.option("-p", "--platforms", type=str, help="The platforms of Syno.") @click.option("-p", "--platforms", type=str, help="The platforms of Syno.")
def getmodelsbykb(platforms=None): def getmodelsbykb(platforms=None):
""" """
Get Syno Models. Get Syno Models.
""" """
import json, requests, urllib3 import re, json, requests, urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])) adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session() session = requests.Session()
session.mount("http://", adapter) session.mount("http://", adapter)
session.mount("https://", adapter) session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if platforms is not None and platforms != "": PS = platforms.lower().replace(",", " ").split() if platforms else []
PS = platforms.lower().replace(",", " ").split()
else:
PS = []
models = [] models = []
try: try:
import re url = "https://kb.synology.com/en-us/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
from bs4 import BeautifulSoup
url="https://kb.synology.com/en-us/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
#url = "https://kb.synology.cn/zh-cn/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have" #url = "https://kb.synology.cn/zh-cn/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
req = session.get(url, timeout=10, verify=False) req = session.get(url, timeout=10, verify=False)
req.encoding = "utf-8" req.encoding = "utf-8"
bs = BeautifulSoup(req.text, "html.parser") bs = BeautifulSoup(req.text, "html.parser")
p = re.compile(r"data: (.*?),$", re.MULTILINE | re.DOTALL) p = re.compile(r"data: (.*?),$", re.MULTILINE | re.DOTALL)
data = json.loads(p.search(bs.find("script", string=p).prettify()).group(1)) data = json.loads(p.search(bs.find("script", string=p).prettify()).group(1))
model = "(.*?)" # (.*?): all, FS6400: one model = "(.*?)" # (.*?): all, FS6400: one
p = re.compile(r"<td>{}<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td>".format(model), re.MULTILINE | re.DOTALL,) p = re.compile(r"<td>{}<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td>".format(model), re.MULTILINE | re.DOTALL)
it = p.finditer(data["preload"]["content"].replace("\n", "").replace("\t", "")) it = p.finditer(data["preload"]["content"].replace("\n", "").replace("\t", ""))
for i in it: for i in it:
d = i.groups() d = i.groups()
if len(d) == 6: if len(d) == 6:
d = model + d d = model + d
if len(PS) > 0 and d[5].lower() not in PS: if PS and d[5].lower() not in PS:
continue continue
models.append({"name": d[0].split("<br")[0], "arch": d[5].lower()}) models.append({"name": d[0].split("<br")[0], "arch": d[5].lower()})
except: except Exception as e:
pass click.echo(f"Error: {e}")
models.sort(key=lambda x: (x["arch"], x["name"])) models.sort(key=lambda x: (x["arch"], x["name"]))
print(json.dumps(models, indent=4)) print(json.dumps(models, indent=4))
@ -214,7 +204,7 @@ def getpats4mv(model, version):
from requests.adapters import HTTPAdapter from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])) adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session() session = requests.Session()
session.mount("http://", adapter) session.mount("http://", adapter)
session.mount("https://", adapter) session.mount("https://", adapter)
@ -227,59 +217,62 @@ def getpats4mv(model, version):
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn" #urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?" #urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = "&major={}".format(version.split('.')[0]) if len(version.split('.')) > 0 else "" major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = "&minor={}".format(version.split('.')[1]) if len(version.split('.')) > 1 else "" minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
req = session.get("{}&product={}{}{}".format(urlInfo, model.replace("+", "%2B"), major, minor), timeout=10, verify=False) req = session.get(f"{urlInfo}&product={model.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8" req.encoding = "utf-8"
data = json.loads(req.text) data = json.loads(req.text)
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver'] build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num'] build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano'] buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano)) V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if not V in pats: if V not in pats:
pats[V]={} pats[V] = {
pats[V]['url'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0] 'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
pats[V]['sum'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum'] 'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver=0 from_ver = min(I['build'] for I in data['info']['pubVers'])
for I in data['info']['pubVers']:
if from_ver == 0 or I['build'] < from_ver: from_ver = I['build']
for I in data['info']['productVers']: for I in data['info']['productVers']:
if not I['version'].startswith(version): continue if not I['version'].startswith(version):
if major == "" or minor == "": continue
majorTmp = "&major={}".format(I['version'].split('.')[0]) if len(I['version'].split('.')) > 0 else "" if not major or not minor:
minorTmp = "&minor={}".format(I['version'].split('.')[1]) if len(I['version'].split('.')) > 1 else "" majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
reqTmp = session.get("{}&product={}{}{}".format(urlInfo, model.replace("+", "%2B"), majorTmp, minorTmp), timeout=10, verify=False) minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
reqTmp = session.get(f"{urlInfo}&product={model.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8" reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text) dataTmp = json.loads(reqTmp.text)
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver'] build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num'] build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano'] buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano)) V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if not V in pats: if V not in pats:
pats[V]={} pats[V] = {
pats[V]['url'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0] 'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
pats[V]['sum'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum'] 'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']: for J in I['versions']:
to_ver=J['build'] to_ver = J['build']
reqSteps = session.get("{}&product={}&from_ver={}&to_ver={}".format(urlSteps, model.replace("+", "%2B"), from_ver, to_ver), timeout=10, verify=False) reqSteps = session.get(f"{urlSteps}&product={model.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200: continue if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8" reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text) dataSteps = json.loads(reqSteps.text)
for S in dataSteps['upgrade_steps']: for S in dataSteps['upgrade_steps']:
if not 'full_patch' in S or S['full_patch'] is False: continue if not S.get('full_patch') or not S['build_ver'].startswith(version):
if not 'build_ver' in S or not S['build_ver'].startswith(version): continue continue
V=__fullversion("{}-{}-{}".format(S['build_ver'], S['build_num'], S['nano'])) V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if not V in pats: if V not in pats:
pats[V] = {} pats[V] = {
pats[V]['url'] = S['files'][0]['url'].split('?')[0] 'url': S['files'][0]['url'].split('?')[0],
pats[V]['sum'] = S['files'][0]['checksum'] 'sum': S['files'][0]['checksum']
except: }
pass except Exception as e:
click.echo(f"Error: {e}")
pats = {k: pats[k] for k in sorted(pats.keys(), reverse=True)} pats = {k: pats[k] for k in sorted(pats.keys(), reverse=True)}
print(json.dumps(pats, indent=4)) print(json.dumps(pats, indent=4))
@@ -288,52 +281,50 @@ def getpats4mv(model, version):
@cli.command()
@click.option("-p", "--models", type=str, help="The models of Syno.")
def getpats(models=None):
    import re, json, requests, urllib3
    from bs4 import BeautifulSoup
    from requests.adapters import HTTPAdapter
    from requests.packages.urllib3.util.retry import Retry  # type: ignore

    adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
    session = requests.Session()
    session.mount("http://", adapter)
    session.mount("https://", adapter)
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

    MS = models.lower().replace(",", " ").split() if models else []

    pats = {}
    try:
        req = session.get('https://archive.synology.com/download/Os/DSM', timeout=10, verify=False)
        req.encoding = 'utf-8'
        bs = BeautifulSoup(req.text, 'html.parser')
        p = re.compile(r"(.*?)-(.*?)", re.MULTILINE | re.DOTALL)
        l = bs.find_all('a', string=p)
        for i in l:
            ver = i.attrs['href'].split('/')[-1]
            if not ver.startswith('7'):
                continue
            req = session.get(f'https://archive.synology.com{i.attrs["href"]}', timeout=10, verify=False)
            req.encoding = 'utf-8'
            bs = BeautifulSoup(req.text, 'html.parser')
            p = re.compile(r"DSM_(.*?)_(.*?).pat", re.MULTILINE | re.DOTALL)
            data = bs.find_all('a', string=p)
            for item in data:
                rels = p.search(item.attrs['href'])
                if rels:
                    model, _ = rels.groups()
                    model = model.replace('%2B', '+')
                    if MS and model.lower() not in MS:
                        continue
                    if model not in pats:
                        pats[model] = {}
                    pats[model][__fullversion(ver)] = item.attrs['href']
    except Exception as e:
        click.echo(f"Error: {e}")

    print(json.dumps(pats, indent=4))


if __name__ == "__main__":
    cli()
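# Example invocation (a sketch; assumes this click CLI module is saved as func.py,
# and the model list and output redirect are illustrative):
#   python3 func.py getpats -p "DS920+,DS3622xs+" > pats.json
#   python3 func.py getpats          # no -p/--models: collect every DSM 7 model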
@@ -1,13 +1,13 @@
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"

. "${WORK_PATH}/include/consts.sh"
. "${WORK_PATH}/include/configFile.sh"
. "${WORK_PATH}/include/i18n.sh"

###############################################################################
# Check loader disk
function checkBootLoader() {
  while read -r KNAME RO; do
    [ -z "${KNAME}" ] && continue
    [ "${RO}" = "0" ] && continue
    hdparm -r0 "${KNAME}" >/dev/null 2>&1 || true
@@ -51,48 +51,41 @@ function dieLog() {
}

###############################################################################
# Check if an item exists in an array
# 1 - Item
# 2.. - Array
# Return 0 if exists
function arrayExistItem() {
  local ITEM="${1}"
  shift
  for i in "$@"; do
    [ "${i}" = "${ITEM}" ] && return 0
  done
  return 1
}
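# Usage sketch (the list items are illustrative):
#   arrayExistItem "nvme" "sata" "nvme" "usb" && echo "nvme is in the list"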
###############################################################################
# Generate a number with 6 digits from 1 to 30000
function random() {
  printf "%06d" $((RANDOM % 30000 + 1))
}

###############################################################################
# Generate a hex number from 0x00 to 0xFF
function randomhex() {
  printf "%02X" $((RANDOM % 255 + 1))
}

###############################################################################
# Generate a random letter
function genRandomLetter() {
  echo {A..Z} | tr ' ' '\n' | grep -v '[IO]' | sort -R | head -1
}

###############################################################################
# Generate a random digit (0-9A-Z)
function genRandomValue() {
  echo {0..9} {A..Z} | tr ' ' '\n' | grep -v '[IO]' | sort -R | head -1
}

###############################################################################
@@ -100,11 +93,12 @@ function genRandomValue() {
# 1 - Model
# Returns serial number
function generateSerial() {
  local PREFIX MIDDLE SUFFIX SERIAL
  PREFIX="$(readConfigArray "${1}.prefix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | head -1)"
  MIDDLE="$(readConfigArray "${1}.middle" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | head -1)"
  SUFFIX="$(readConfigKey "${1}.suffix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
  SERIAL="${PREFIX:-"0000"}${MIDDLE:-"XXX"}"
  case "${SUFFIX:-"alpha"}" in
  numeric)
    SERIAL+="$(random)"
@@ -122,12 +116,13 @@ function generateSerial() {
# 2 - number
# Returns serial number
function generateMacAddress() {
  local MACPRE MACSUF NUM MACS
  MACPRE="$(readConfigArray "${1}.macpre" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
  MACSUF="$(printf '%02x%02x%02x' $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256)))"
  NUM=${2:-1}
  MACS=""
  for I in $(seq 1 ${NUM}); do
    MACS+="$(printf '%06x%06x' $((0x${MACPRE:-"001132"})) $((0x${MACSUF} + I)))"
    [ ${I} -lt ${NUM} ] && MACS+=" "
  done
  echo "${MACS}"
@@ -140,6 +135,7 @@ function generateMacAddress() {
# 2 - Serial number to test
# Returns 1 if serial number is invalid
function validateSerial() {
  local PREFIX MIDDLE SUFFIX P M S L
  PREFIX="$(readConfigArray "${1}.prefix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
  MIDDLE="$(readConfigArray "${1}.middle" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
  SUFFIX="$(readConfigKey "${1}.suffix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
@@ -176,7 +172,7 @@ function validateSerial() {
# 1 - key
# 2 - file
function _get_conf_kv() {
  grep "^${1}=" "${2}" 2>/dev/null | cut -d'=' -f2- | sed 's/^"//;s/"$//' 2>/dev/null
}

###############################################################################
@@ -187,18 +183,19 @@ function _get_conf_kv() {
function _set_conf_kv() {
  # Delete
  if [ -z "${2}" ]; then
    sed -i "/^${1}=/d" "${3}" 2>/dev/null
    return $?
  fi
  # Replace
  if grep -q "^${1}=" "${3}"; then
    sed -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
    return $?
  fi
  # Add if doesn't exist
  echo "${1}=\"${2}\"" >>"${3}"
  return $?
}
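# Usage sketch (the file path and key are illustrative):
#   _set_conf_kv "maxdisks" "24" "/tmp/synoinfo.conf"   # adds or rewrites maxdisks="24"
#   _get_conf_kv "maxdisks" "/tmp/synoinfo.conf"        # prints: 24
#   _set_conf_kv "maxdisks" "" "/tmp/synoinfo.conf"     # an empty value deletes the line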
###############################################################################
@@ -206,14 +203,14 @@ function _set_conf_kv() {
# @ - url list
function _get_fastest() {
  local speedlist=""
  if command -v ping >/dev/null 2>&1; then
    for I in "$@"; do
      speed=$(ping -c 1 -W 5 "${I}" 2>/dev/null | awk -F'[= ]' '/time=/ {for(i=1;i<=NF;i++) if ($i=="time") print $(i+1)}')
      speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
    done
  else
    for I in "$@"; do
      speed=$(curl -o /dev/null -s -w '%{time_total}' "${I}")
      speed=$(awk "BEGIN {print (${speed:-0.999} * 1000)}")
      speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
    done
@@ -222,7 +219,7 @@ function _get_fastest() {
  URL="$(echo "${fastest}" | awk '{print $1}')"
  SPD="$(echo "${fastest}" | awk '{print $2}')" # It is a float type
  echo "${URL}"
  [ $(echo "${SPD:-999}" | cut -d. -f1) -ge 999 ] && return 1 || return 0
}
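# Usage sketch (the candidate hosts are illustrative); the echoed value is the
# fastest reachable host, and the return code is 1 when every candidate fell back
# to the 999 default:
#   FASTEST="$(_get_fastest "example.com" "example.net")" || echo "no reachable mirror"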
###############################################################################
@@ -245,7 +242,7 @@ function _sort_netif() {
      ETHLISTTMPB="$(echo -e "${ETHLISTTMPB}" | grep -v "${MACX}")\n"
    done
  fi
  ETHLIST="$(echo -e "${ETHLISTTMPM}${ETHLISTTMPB}" | grep -v '^$')"
  local ETHSEQ="$(echo -e "${ETHLIST}" | awk '{print $3}' | sed 's/eth//g')"
  local ETHNUM="$(echo -e "${ETHLIST}" | wc -l)"
@@ -255,12 +252,12 @@ function _sort_netif() {
    /etc/init.d/S41dhcpcd stop >/dev/null 2>&1
    /etc/init.d/S40network stop >/dev/null 2>&1
    for i in $(seq 0 $((${ETHNUM:0} - 1))); do
      ip link set dev "eth${i}" name "tmp${i}"
    done
    I=0
    for i in ${ETHSEQ}; do
      ip link set dev "tmp${i}" name "eth${I}"
      I=$((I + 1))
    done
    /etc/init.d/S40network start >/dev/null 2>&1
    /etc/init.d/S41dhcpcd start >/dev/null 2>&1
@@ -287,9 +284,9 @@ function getBus() {
# 1 - ethN
function getIP() {
  local IP=""
  if [ -n "${1}" ] && [ -d "/sys/class/net/${1}" ]; then
    IP=$(ip route show dev "${1}" 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p')
    [ -z "${IP}" ] && IP=$(ip addr show "${1}" scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
  else
    IP=$(ip route show 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p' | head -1)
    [ -z "${IP}" ] && IP=$(ip addr show scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
@@ -309,7 +306,7 @@ function getLogo() {
    return 1
  fi
  local STATUS=$(curl -skL --connect-timeout 10 -w "%{http_code}" "https://${fastest}/api/products/getPhoto?product=${MODEL/+/%2B}&type=img_s&sort=0" -o "${PART3_PATH}/logo.png")
  if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ] || [ ! -f "${PART3_PATH}/logo.png" ]; then
    rm -f "${PART3_PATH}/logo.png"
    return 1
  fi
@@ -324,42 +321,72 @@ function getLogo() {
# 1 - key name
# 2 - key string
function checkCmdline() {
  grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep -q "^${1}=\"\?${2}\"\?"
}

###############################################################################
# set Cmdline
# 1 - key name
# 2 - key string
function setCmdline() {
  [ -z "${1}" ] && return 1
  if [ -n "${2}" ]; then
    grub-editenv "${USER_GRUBENVFILE}" set "${1}=${2}"
  else
    grub-editenv "${USER_GRUBENVFILE}" unset "${1}"
  fi
}

###############################################################################
# add Cmdline
# 1 - key name
# 2 - key string
function addCmdline() {
  local CMDLINE
  CMDLINE="$(grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2- | sed 's/^"//;s/"$//')"
  [ -n "${CMDLINE}" ] && CMDLINE="${CMDLINE} ${2}" || CMDLINE="${2}"
  setCmdline "${1}" "${CMDLINE}"
}
###############################################################################
# del Cmdline
# 1 - key name
# 2 - key string
function delCmdline() {
  local CMDLINE
  CMDLINE="$(grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2- | sed 's/^"//;s/"$//')"
  CMDLINE="$(echo "${CMDLINE}" | sed "s/[ \t]*${2}//; s/^[ \t]*//;s/[ \t]*$//")"
  setCmdline "${1}" "${CMDLINE}"
}
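# Usage sketch (the grubenv key and value below are illustrative, and
# USER_GRUBENVFILE must already point at a grubenv file):
#   addCmdline "rr_cmdline" "intel_iommu=on"
#   checkCmdline "rr_cmdline" "intel_iommu=on" && echo "flag present"
#   delCmdline "rr_cmdline" "intel_iommu=on"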
###############################################################################
# check CPU Intel(VT-d)/AMD(AMD-Vi)
function checkCPU_VT_d() {
  lsmod | grep -q msr || modprobe msr 2>/dev/null
  if grep -q "GenuineIntel" /proc/cpuinfo; then
    local VT_D_ENABLED=$(rdmsr 0x3a 2>/dev/null)
    [ "$((${VT_D_ENABLED:-0x0} & 0x5))" -eq $((0x5)) ] && return 0
  elif grep -q "AuthenticAMD" /proc/cpuinfo; then
    local IOMMU_ENABLED=$(rdmsr 0xC0010114 2>/dev/null)
    [ "$((${IOMMU_ENABLED:-0x0} & 0x1))" -eq $((0x1)) ] && return 0
  else
    return 1
  fi
}

###############################################################################
# check BIOS Intel(VT-d)/AMD(AMD-Vi)
function checkBIOS_VT_d() {
  if grep -q "GenuineIntel" /proc/cpuinfo; then
    dmesg | grep -iq "DMAR-IR.*DRHD base" && return 0
  elif grep -q "AuthenticAMD" /proc/cpuinfo; then
    # TODO: need check
    dmesg | grep -iq "AMD-Vi.*enabled" && return 0
  else
    return 1
  fi
}
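# Usage sketch (assumes rdmsr from msr-tools is available); both helpers return
# non-zero when the feature cannot be confirmed, so they can be chained:
#   checkCPU_VT_d && checkBIOS_VT_d && echo "IOMMU usable" || echo "IOMMU not detected"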
###############################################################################
# Rebooting
# 1 - mode
@@ -367,11 +394,11 @@ function rebootTo() {
  local MODES="config recovery junior bios memtest"
  if [ -z "${1}" ] || ! echo "${MODES}" | grep -qw "${1}"; then exit 1; fi
  # echo "Rebooting to ${1} mode"
  GRUBPATH="$(dirname "$(find "${PART1_PATH}/" -name grub.cfg 2>/dev/null | head -1)")"
  [ -z "${GRUBPATH}" ] && exit 1
  ENVFILE="${GRUBPATH}/grubenv"
  [ ! -f "${ENVFILE}" ] && grub-editenv "${ENVFILE}" create
  grub-editenv "${ENVFILE}" set next_entry="${1}"
  reboot
}

@@ -380,23 +407,23 @@ function rebootTo() {
# 1 netif name
# 2 enable/disable (1/0)
function connectwlanif() {
  [ -z "${1}" ] || [ ! -d "/sys/class/net/${1}" ] && return 1
  if [ "${2}" = "0" ]; then
    if [ -f "/var/run/wpa_supplicant.pid.${1}" ]; then
      kill -9 "$(cat /var/run/wpa_supplicant.pid.${1})"
      rm -f "/var/run/wpa_supplicant.pid.${1}"
    fi
  else
    local CONF=""
    [ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf.${1}" ] && CONF="${PART1_PATH}/wpa_supplicant.conf.${1}"
    [ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf" ] && CONF="${PART1_PATH}/wpa_supplicant.conf"
    [ -z "${CONF}" ] && return 2
    if [ -f "/var/run/wpa_supplicant.pid.${1}" ]; then
      kill -9 "$(cat /var/run/wpa_supplicant.pid.${1})"
      rm -f "/var/run/wpa_supplicant.pid.${1}"
    fi
    wpa_supplicant -i "${1}" -c "${CONF}" -B -P "/var/run/wpa_supplicant.pid.${1}" >/dev/null 2>&1
  fi
  return 0
}
@@ -1,19 +1,14 @@
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"

type gettext >/dev/null 2>&1 && alias TEXT='gettext "rr"' || alias TEXT='echo'
shopt -s expand_aliases

[ -d "${WORK_PATH}/lang" ] && export TEXTDOMAINDIR="${WORK_PATH}/lang"
[ -f "${PART1_PATH}/.locale" ] && export LC_ALL="$(cat "${PART1_PATH}/.locale")"

if [ -f "${PART1_PATH}/.timezone" ]; then
  TIMEZONE="$(cat "${PART1_PATH}/.timezone")"
  if [ -f "/usr/share/zoneinfo/right/${TIMEZONE}" ]; then
    ln -sf "/usr/share/zoneinfo/right/${TIMEZONE}" /etc/localtime
  fi
fi
@@ -1,3 +1,37 @@
###############################################################################
# Unpack modules from a tgz file
# 1 - Platform
# 2 - Kernel Version
function unpackModules() {
  local PLATFORM=${1}
  local KVER=${2}
  local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
  rm -rf "${TMP_PATH}/modules"
  mkdir -p "${TMP_PATH}/modules"
  if [ "${KERNEL}" = "custom" ]; then
    tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
  else
    tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
  fi
}

###############################################################################
# Package modules to a tgz file
# 1 - Platform
# 2 - Kernel Version
function packagModules() {
  local PLATFORM=${1}
  local KVER=${2}
  local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
  if [ "${KERNEL}" = "custom" ]; then
    tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
  else
    tar -zcf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
  fi
}
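# Usage sketch (the platform and kernel version are examples taken from platforms.yml,
# and the copied .ko path is illustrative):
#   unpackModules "v1000" "4.4.302"            # extract the tgz into ${TMP_PATH}/modules
#   cp -f "/tmp/igb.ko" "${TMP_PATH}/modules"  # modify the extracted module set
#   packagModules "v1000" "4.4.302"            # repack ${TMP_PATH}/modules into the tgz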
###############################################################################
# Return list of all modules available
# 1 - Platform
@@ -6,27 +40,20 @@ function getAllModules() {
  local PLATFORM=${1}
  local KVER=${2}
  if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
    return 1
  fi

  unpackModules "${PLATFORM}" "${KVER}"

  for F in $(ls ${TMP_PATH}/modules/*.ko 2>/dev/null); do
    local X=$(basename "${F}")
    local M=${X:0:-3}
    local DESC=$(modinfo "${F}" 2>/dev/null | awk -F':' '/description:/{ print $2}' | awk '{sub(/^[ ]+/,""); print}')
    [ -z "${DESC}" ] && DESC="${X}"
    echo "${M} \"${DESC}\""
  done
  rm -rf "${TMP_PATH}/modules"
}
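# Usage sketch (values are examples from platforms.yml); prints one
# `name "description"` pair per line:
#   getAllModules "v1000" "4.4.302"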
@@ -41,34 +68,26 @@ function installModules() {
  shift 2
  local MLIST="${@}"
  if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
    echo "ERROR: installModules: Platform or Kernel Version not defined" >"${LOG_FILE}"
    return 1
  fi

  unpackModules "${PLATFORM}" "${KVER}"

  local ODP="$(readConfigKey "odp" "${USER_CONFIG_FILE}")"
  for F in $(ls "${TMP_PATH}/modules/"*.ko 2>/dev/null); do
    local M=$(basename "${F}")
    [ "${ODP}" == "true" ] && [ -f "${RAMDISK_PATH}/usr/lib/modules/${M}" ] && continue
    if echo "${MLIST}" | grep -wq "${M:0:-3}"; then
      cp -f "${F}" "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
    else
      rm -f "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
    fi
  done

  mkdir -p "${RAMDISK_PATH}/usr/lib/firmware"
  local KERNEL=$(readConfigKey "kernel" "${USER_CONFIG_FILE}")
  if [ "${KERNEL}" = "custom" ]; then
    tar -zxf "${CKS_PATH}/firmware.tgz" -C "${RAMDISK_PATH}/usr/lib/firmware" 2>"${LOG_FILE}"
  else
@@ -78,7 +97,6 @@ function installModules() {
    return 1
  fi

  rm -rf "${TMP_PATH}/modules"
  return 0
}
@@ -93,25 +111,16 @@ function addToModules() {
  local KVER=${2}
  local KOFILE=${3}
  if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KOFILE}" ]; then
    echo ""
    return 1
  fi

  unpackModules "${PLATFORM}" "${KVER}"

  cp -f "${KOFILE}" "${TMP_PATH}/modules"

  packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@@ -124,25 +133,16 @@ function delToModules() {
  local KVER=${2}
  local KONAME=${3}
  if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
    echo ""
    return 1
  fi

  unpackModules "${PLATFORM}" "${KVER}"

  rm -f "${TMP_PATH}/modules/${KONAME}"

  packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@@ -153,33 +153,28 @@ function delToModules() {
function getdepends() {
  function _getdepends() {
    if [ -f "${TMP_PATH}/modules/${1}.ko" ]; then
      local depends=($(modinfo "${TMP_PATH}/modules/${1}.ko" 2>/dev/null | grep depends: | awk -F: '{print $2}' | awk '$1=$1' | sed 's/,/ /g'))
      if [ ${#depends[@]} -gt 0 ]; then
        for k in "${depends[@]}"; do
          echo "${k}"
          _getdepends "${k}"
        done
      fi
    fi
  }
  local PLATFORM=${1}
  local KVER=${2}
  local KONAME=${3}
  if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
    echo ""
    return 1
  fi

  unpackModules "${PLATFORM}" "${KVER}"

  local DPS=($(_getdepends "${KONAME}" | tr ' ' '\n' | sort -u))
  echo "${DPS[@]}"
  rm -rf "${TMP_PATH}/modules"
}
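# Usage sketch (the module name is illustrative); prints the deduplicated
# dependency closure of the given .ko on one line:
#   getdepends "v1000" "4.4.302" "igb"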
@@ -1,10 +1,10 @@
#!/usr/bin/env bash
set -e

[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

. "${WORK_PATH}/include/functions.sh"
. "${WORK_PATH}/include/addons.sh"

[ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")"
checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
@@ -12,19 +12,19 @@ checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
# Shows title
clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
COLUMNS=${COLUMNS:-80}
TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)"
printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;31m%*s\033[0m\n" "$(((${#TITLE} + ${COLUMNS}) / 2))" "${TITLE}"
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;32m%*s\033[0m\n" "${COLUMNS}" "${DATE}"

# Get first MAC address
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
# No network devices
[ "$(echo "${ETHX}" | wc -w)" -le 0 ] && die "$(TEXT "Network devices not found! Please re execute init.sh after connecting to the network!")"
# If user config file not exists, initialize it
if [ ! -f "${USER_CONFIG_FILE}" ]; then
@@ -77,13 +77,13 @@ if [ -f "${PART2_PATH}/GRUB_VER" ]; then
  [ -z "$(readConfigKey "platform" "${USER_CONFIG_FILE}")" ] &&
    writeConfigKey "platform" "${PLATFORMTMP,,}" "${USER_CONFIG_FILE}"
  [ -z "$(readConfigKey "model" "${USER_CONFIG_FILE}")" ] &&
    writeConfigKey "model" "$(echo "${MODELTMP}" | sed 's/d$/D/; s/rp$/RP/; s/rp+/RP+/')" "${USER_CONFIG_FILE}"
  [ -z "$(readConfigKey "modelid" "${USER_CONFIG_FILE}")" ] &&
    writeConfigKey "modelid" "${MODELTMP}" "${USER_CONFIG_FILE}"
fi

if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
  if arrayExistItem "sortnetif:" "$(readConfigMap "addons" "${USER_CONFIG_FILE}")"; then
    _sort_netif "$(readConfigKey "addons.sortnetif" "${USER_CONFIG_FILE}")"
  fi
  for N in ${ETHX}; do
@@ -91,10 +91,10 @@ if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
    IPR="$(readConfigKey "network.${MACR}" "${USER_CONFIG_FILE}")"
    if [ -n "${IPR}" ] && [ "1" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
      IFS='/' read -r -a IPRA <<<"${IPR}"
      ip addr flush dev "${N}"
      ip addr add "${IPRA[0]}/${IPRA[1]:-"255.255.255.0"}" dev "${N}"
      if [ -n "${IPRA[2]}" ]; then
        ip route add default via "${IPRA[2]}" dev "${N}"
      fi
      if [ -n "${IPRA[3]:-${IPRA[2]}}" ]; then
        sed -i "/nameserver ${IPRA[3]:-${IPRA[2]}}/d" /etc/resolv.conf
@@ -103,7 +103,7 @@ if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
      sleep 1
    fi
    [ "${N::4}" = "wlan" ] && connectwlanif "${N}" 1 && sleep 1
    [ "${N::3}" = "eth" ] && ethtool -s "${N}" wol g 2>/dev/null || true
    # [ "${N::3}" = "eth" ] && ethtool -K ${N} rxhash off 2>/dev/null || true
  done
fi
@@ -116,8 +116,8 @@ BUS=$(getBus "${LOADER_DISK}")
BUSLIST="usb sata sas scsi nvme mmc ide virtio vmbus xen"
if [ "${BUS}" = "usb" ]; then
  VID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep ID_VENDOR_ID | cut -d= -f2)"
  PID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep ID_MODEL_ID | cut -d= -f2)"
  TYPE="flashdisk"
elif ! echo "${BUSLIST}" | grep -wq "${BUS}"; then
  if [ "LOCALBUILD" = "${LOADER_DISK}" ]; then
@@ -129,11 +129,11 @@ elif ! echo "${BUSLIST}" | grep -wq "${BUS}"; then
fi

# Save variables to user config file
writeConfigKey "vid" "${VID}" "${USER_CONFIG_FILE}"
writeConfigKey "pid" "${PID}" "${USER_CONFIG_FILE}"

# Inform user
printf "%s \033[1;32m%s (%s %s)\033[0m\n" "$(TEXT "Loader disk:")" "${LOADER_DISK}" "${BUS^^}" "${TYPE}"

# Load keymap name
LAYOUT="$(readConfigKey "layout" "${USER_CONFIG_FILE}")"
@@ -141,23 +141,23 @@ KEYMAP="$(readConfigKey "keymap" "${USER_CONFIG_FILE}")"
# Loads a keymap if is valid
if [ -f "/usr/share/keymaps/i386/${LAYOUT}/${KEYMAP}.map.gz" ]; then
  printf "%s \033[1;32m%s/%s\033[0m\n" "$(TEXT "Loading keymap:")" "${LAYOUT}" "${KEYMAP}"
  zcat "/usr/share/keymaps/i386/${LAYOUT}/${KEYMAP}.map.gz" | loadkeys
fi

# Decide if boot automatically
BOOT=1
if ! loaderIsConfigured; then
  printf "\033[1;33m%s\033[0m\n" "$(TEXT "Loader is not configured!")"
  BOOT=0
elif grep -q "IWANTTOCHANGETHECONFIG" /proc/cmdline; then
  printf "\033[1;33m%s\033[0m\n" "$(TEXT "User requested edit settings.")"
  BOOT=0
fi

# If is to boot automatically, do it
if [ ${BOOT} -eq 1 ]; then
  "${WORK_PATH}/boot.sh" && exit 0
fi

HTTP=$(grep -i '^HTTP_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)
@@ -165,8 +165,8 @@ DUFS=$(grep -i '^DUFS_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)
TTYD=$(grep -i '^TTYD_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)

# Wait for an IP
printf "$(TEXT "Detected %s network cards.\n")" "$(echo "${ETHX}" | wc -w)"
printf "$(TEXT "Checking Connect.")"
COUNT=0
while [ ${COUNT} -lt 30 ]; do
  MSG=""
@@ -176,82 +176,82 @@ while [ ${COUNT} -lt 30 ]; do
    fi
  done
  if [ -n "${MSG}" ]; then
    printf "\r%s%s \n" "${MSG}" "$(TEXT "connected.")"
    break
  fi
  COUNT=$((COUNT + 1))
  printf "."
  sleep 1
done
[ ! -f /var/run/dhcpcd/pid ] && /etc/init.d/S41dhcpcd restart >/dev/null 2>&1 || true

printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do
  COUNT=0
  DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
  printf "%s(%s): " "${N}" "${DRIVER}"
  while true; do
    if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
      printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
      break
    fi
    if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
      printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
      break
    fi
    if [ ${COUNT} -eq 15 ]; then # Under normal circumstances, no errors should occur here.
      printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
      break
    fi
    COUNT=$((COUNT + 1))
    IP="$(getIP "${N}")"
    if [ -n "${IP}" ]; then
      if echo "${IP}" | grep -q "^169\.254\."; then
        printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
      else
        printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:%d\033[0m to configure the loader via web terminal.")" "${IP}" "${TTYD:-7681}")"
      fi
      break
    fi
    printf "."
    sleep 1
  done
done

# Inform user
printf "\n"
printf "$(TEXT "Call \033[1;32minit.sh\033[0m to re get init info\n")"
printf "$(TEXT "Call \033[1;32mmenu.sh\033[0m to configure loader\n")"
printf "\n"
printf "$(TEXT "User config is on \033[1;32m%s\033[0m\n")" "${USER_CONFIG_FILE}"
printf "$(TEXT "HTTP: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${HTTP:-7080}"
printf "$(TEXT "DUFS: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${DUFS:-7304}"
printf "$(TEXT "TTYD: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${TTYD:-7681}"
printf "\n"
if [ -f "/etc/shadow-" ]; then
  printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password has been changed\n")" "22"
else
  printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password is \033[1;31m%s\033[0m\n")" "22" "rr"
fi
printf "\n"

DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")"
if [ "${DSMLOGO}" = "true" ] && [ -c "/dev/fb0" ] && [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
  IP="$(getIP)"
  echo "${IP}" | grep -q "^169\.254\." && IP=""
  [ -n "${IP}" ] && URL="http://${IP}:${TTYD:-7681}" || URL="http://rr:${TTYD:-7681}"
  python3 "${WORK_PATH}/include/functions.py" makeqr -d "${URL}" -l "0" -o "${TMP_PATH}/qrcode_init.png"
  [ -f "${TMP_PATH}/qrcode_init.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_init.png" >/dev/null 2>/dev/null || true
  python3 "${WORK_PATH}/include/functions.py" makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
  [ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi

# Check memory
RAM=$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null)
if [ "${RAM:-0}" -le 3500 ]; then
  printf "\033[1;33m%s\033[0m\n" "$(TEXT "You have less than 4GB of RAM, if errors occur in loader creation, please increase the amount of memory.")"
fi
mkdir -p "${CKS_PATH}"
@@ -13,7 +13,7 @@ fi
##$1 from, $2 to, $3 file to path
_replace_in_file() {
  if grep -q "${1}" "${3}"; then
    "${SED_PATH}" -i "s#${1}#${2}#" "${3}" 2>/dev/null
  fi
}

@@ -22,17 +22,18 @@ _replace_in_file() {
# Args: $1 name, $2 new_val, $3 path
_set_conf_kv() {
  # Delete
  if [ -z "${2}" ]; then
    "${SED_PATH}" -i "/^${1}=/d" "${3}" 2>/dev/null
    return 0
  fi
  # Replace
  if grep -q "^${1}=" "${3}"; then
    "${SED_PATH}" -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
    return 0
  fi
  # Add if doesn't exist
  echo "${1}=\"${2}\"" >>"${3}"
  return 0
}
@@ -17,47 +17,43 @@ synoinfo: &synoinfo
  maxlanport: "8"
  netif_seq: "0 1 2 3 4 5 6 7"
  buzzeroffen: "0xffff"

productvers4: &productvers4
  "7.0":
    kver: "4.4.180"
  "7.1":
    kver: "4.4.180"
  "7.2":
    kver: "4.4.302"

productvers5: &productvers5
  "7.1":
    kpre: "7.1"
    kver: "5.10.55"
  "7.2":
    kpre: "7.2"
    kver: "5.10.55"

platforms:
  apollolake:
    dt: false
    flags: ["movbe"]
    noflags: ["x2apic"]
    synoinfo:
      <<: *synoinfo
      HddEnableDynamicPower: "no"
    productvers: *productvers4
  broadwell:
    dt: false
    synoinfo: *synoinfo
    productvers: *productvers4
  broadwellnk:
    dt: false
    synoinfo:
      <<: *synoinfo
      support_bde_internal_10g: "no"
      supportsas: "no"
    productvers: *productvers4
  broadwellnkv2:
    dt: true
    synoinfo:
@@ -66,13 +62,7 @@ platforms:
      supportsas: "no"
      supportsas_v2_r1: "no"
      support_multipath: "yes"
    productvers: *productvers4
  broadwellntbap:
    dt: false
    synoinfo:
@@ -84,43 +74,20 @@ platforms:
      support_auto_install: "no"
      support_install_only_dev: "no"
      required_system_disk_number: "0"
    productvers: *productvers4
  denverton:
    dt: false
    flags: ["movbe"]
    synoinfo: *synoinfo
    productvers: *productvers4
  geminilake:
    dt: true
    noflags: ["x2apic"]
    synoinfo: *synoinfo
    productvers: *productvers4
  purley:
    dt: true
    noflags: ["x2apic"]
    synoinfo:
      <<: *synoinfo
      supportsas: "no"
@@ -130,42 +97,18 @@ platforms:
      isolated_disk_system: "no"
      required_system_disk_number: "0"
      internal_disk_without_led_mask: "no"
    productvers: *productvers4
  r1000:
    dt: true
    synoinfo: *synoinfo
    productvers: *productvers4
  v1000:
    dt: true
    synoinfo: *synoinfo
    productvers: *productvers4
  epyc7002:
    dt: true
    synoinfo:
      <<: *synoinfo
      netif_seq_by_dts: "no"
    productvers: *productvers5
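# Reading the merged anchors back (the key mirrors the lookup in the ramdisk build
# script; the expected value comes from the productvers5 anchor above):
#   readConfigKey "platforms.epyc7002.productvers.\"7.2\".kver" "platforms.yml"   # -> 5.10.55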
@@ -1,10 +1,10 @@
#!/usr/bin/env bash

[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

. "${WORK_PATH}/include/functions.sh"
. "${WORK_PATH}/include/addons.sh"
. "${WORK_PATH}/include/modules.sh"

set -o pipefail # Get exit code from process piped

@@ -23,10 +23,7 @@ rm -f "${MOD_RDGZ_FILE}"
echo -n "."
rm -rf "${RAMDISK_PATH}" # Force clean
mkdir -p "${RAMDISK_PATH}"

(cd "${RAMDISK_PATH}" && xz -dc <"${ORI_RDGZ_FILE}" | cpio -idm) >/dev/null 2>&1 || true

# get user data
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"

@@ -51,7 +48,7 @@ HDDSORT="$(readConfigKey "hddsort" "${USER_CONFIG_FILE}")"
# Check if DSM buildnumber changed
. "${RAMDISK_PATH}/etc/VERSION"

if [ -n "${PRODUCTVER}" ] && [ -n "${BUILDNUM}" ] && [ -n "${SMALLNUM}" ] &&
  ([ ! "${PRODUCTVER}" = "${majorversion}.${minorversion}" ] || [ ! "${BUILDNUM}" = "${buildnumber}" ] || [ ! "${SMALLNUM}" = "${smallfixnumber}" ]); then
  OLDVER="${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))"
  NEWVER="${majorversion}.${minorversion}(${buildnumber}$([ ${smallfixnumber:-0} -ne 0 ] && echo "u${smallfixnumber}"))"

@@ -76,7 +73,7 @@ KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"

# Sanity check
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
  echo "ERROR: Configuration for model ${MODEL} and productversion ${PRODUCTVER} not found." >"${LOG_FILE}"
  exit 1
fi
@ -86,35 +83,35 @@ declare -A ADDONS
declare -A MODULES declare -A MODULES
# Read synoinfo and addons from config # Read synoinfo and addons from config
while IFS=': ' read KEY VALUE; do while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}" [ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}") done <<<"$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}")"
while IFS=': ' read KEY VALUE; do while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}" [ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "addons" "${USER_CONFIG_FILE}") done <<<"$(readConfigMap "addons" "${USER_CONFIG_FILE}")"
# Read modules from user config # Read modules from user config
while IFS=': ' read KEY VALUE; do while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && MODULES["${KEY}"]="${VALUE}" [ -n "${KEY}" ] && MODULES["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "modules" "${USER_CONFIG_FILE}") done <<<"$(readConfigMap "modules" "${USER_CONFIG_FILE}")"
# Patches (diff -Naru OLDFILE NEWFILE > xxx.patch) # Patches (diff -Naru OLDFILE NEWFILE > xxx.patch)
PATCHS=() PATCHS=(
PATCHS+=("ramdisk-etc-rc-*.patch") "ramdisk-etc-rc-*.patch"
PATCHS+=("ramdisk-init-script-*.patch") "ramdisk-init-script-*.patch"
PATCHS+=("ramdisk-post-init-script-*.patch") "ramdisk-post-init-script-*.patch"
PATCHS+=("ramdisk-disable-root-pwd-*.patch") "ramdisk-disable-root-pwd-*.patch"
PATCHS+=("ramdisk-disable-disabled-ports-*.patch") "ramdisk-disable-disabled-ports-*.patch"
for PE in ${PATCHS[@]}; do )
for PE in "${PATCHS[@]}"; do
RET=1 RET=1
echo "Patching with ${PE}" >"${LOG_FILE}" echo "Patching with ${PE}" >"${LOG_FILE}"
# ${PE} contains *, so double quotes cannot be added
for PF in $(ls ${WORK_PATH}/patch/${PE} 2>/dev/null); do for PF in $(ls ${WORK_PATH}/patch/${PE} 2>/dev/null); do
echo -n "." echo -n "."
echo "Patching with ${PF}" >>"${LOG_FILE}" echo "Patching with ${PF}" >>"${LOG_FILE}"
( # busybox patch and GNU patch have different processing methods and parameters.
cd "${RAMDISK_PATH}" (cd "${RAMDISK_PATH}" && busybox patch -p1 -i "${PF}") >>"${LOG_FILE}" 2>&1
busybox patch -p1 -i "${PF}" >>"${LOG_FILE}" 2>&1 # busybox patch and GNU patch have different processing methods and parameters.
)
RET=$? RET=$?
[ ${RET} -eq 0 ] && break [ ${RET} -eq 0 ] && break
done done
@ -127,7 +124,7 @@ echo -n "."
echo "Set synoinfo SN" >"${LOG_FILE}" echo "Set synoinfo SN" >"${LOG_FILE}"
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1 _set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1 _set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
for KEY in ${!SYNOINFO[@]}; do for KEY in "${!SYNOINFO[@]}"; do
echo "Set synoinfo ${KEY}" >>"${LOG_FILE}" echo "Set synoinfo ${KEY}" >>"${LOG_FILE}"
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1 _set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1 _set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
@ -141,7 +138,7 @@ rm -f "${TMP_PATH}/rp.txt"
touch "${TMP_PATH}/rp.txt" touch "${TMP_PATH}/rp.txt"
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt" echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt" echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
for KEY in ${!SYNOINFO[@]}; do for KEY in "${!SYNOINFO[@]}"; do
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt" echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt" echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
done done
@ -162,18 +159,20 @@ gzip -dc "${LKMS_PATH}/rp-${PLATFORM}-$([ -n "${KPRE}" ] && echo "${KPRE}-")${KV
echo -n "." echo -n "."
echo "Create addons.sh" >"${LOG_FILE}" echo "Create addons.sh" >"${LOG_FILE}"
mkdir -p "${RAMDISK_PATH}/addons" mkdir -p "${RAMDISK_PATH}/addons"
echo "#!/bin/sh" >"${RAMDISK_PATH}/addons/addons.sh" {
echo 'echo "addons.sh called with params ${@}"' >>"${RAMDISK_PATH}/addons/addons.sh" echo "#!/bin/sh"
echo "export LOADERLABEL=\"RR\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo 'echo "addons.sh called with params ${@}"'
echo "export LOADERRELEASE=\"${RR_RELEASE}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export LOADERLABEL=\"RR\""
echo "export LOADERVERSION=\"${RR_VERSION}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export LOADERRELEASE=\"${RR_RELEASE}\""
echo "export PLATFORM=\"${PLATFORM}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export LOADERVERSION=\"${RR_VERSION}\""
echo "export MODEL=\"${MODEL}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export PLATFORM=\"${PLATFORM}\""
echo "export PRODUCTVERL=\"${PRODUCTVERL}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export MODEL=\"${MODEL}\""
echo "export MLINK=\"${PATURL}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export PRODUCTVERL=\"${PRODUCTVERL}\""
echo "export MCHECKSUM=\"${PATSUM}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export MLINK=\"${PATURL}\""
echo "export LAYOUT=\"${LAYOUT}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export MCHECKSUM=\"${PATSUM}\""
echo "export KEYMAP=\"${KEYMAP}\"" >>"${RAMDISK_PATH}/addons/addons.sh" echo "export LAYOUT=\"${LAYOUT}\""
echo "export KEYMAP=\"${KEYMAP}\""
} >"${RAMDISK_PATH}/addons/addons.sh"
chmod +x "${RAMDISK_PATH}/addons/addons.sh" chmod +x "${RAMDISK_PATH}/addons/addons.sh"
# This order cannot be changed. # This order cannot be changed.
@ -188,7 +187,7 @@ for ADDON in "redpill" "revert" "misc" "eudev" "disks" "localrss" "notify" "wol"
done done
# User addons # User addons
for ADDON in ${!ADDONS[@]}; do for ADDON in "${!ADDONS[@]}"; do
PARAMS=${ADDONS[${ADDON}]} PARAMS=${ADDONS[${ADDON}]}
installAddon "${ADDON}" "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" || exit 1 installAddon "${ADDON}" "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" || exit 1
echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >>"${RAMDISK_PATH}/addons/addons.sh" 2>>"${LOG_FILE}" || exit 1 echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >>"${RAMDISK_PATH}/addons/addons.sh" 2>>"${LOG_FILE}" || exit 1
@ -238,25 +237,25 @@ for N in $(seq 0 7); do
done done
# issues/313 # issues/313
if [ ${PLATFORM} = "epyc7002" ]; then if [ "${PLATFORM}" = "epyc7002" ]; then
sed -i 's#/dev/console#/var/log/lrc#g' ${RAMDISK_PATH}/usr/bin/busybox sed -i 's#/dev/console#/var/log/lrc#g' "${RAMDISK_PATH}/usr/bin/busybox"
sed -i '/^echo "START/a \\nmknod -m 0666 /dev/console c 1 3' ${RAMDISK_PATH}/linuxrc.syno sed -i '/^echo "START/a \\nmknod -m 0666 /dev/console c 1 3' "${RAMDISK_PATH}/linuxrc.syno"
fi fi
if [ "${PLATFORM}" = "broadwellntbap" ]; then if [ "${PLATFORM}" = "broadwellntbap" ]; then
sed -i 's/IsUCOrXA="yes"/XIsUCOrXA="yes"/g; s/IsUCOrXA=yes/XIsUCOrXA=yes/g' ${RAMDISK_PATH}/usr/syno/share/environments.sh sed -i 's/IsUCOrXA="yes"/XIsUCOrXA="yes"/g; s/IsUCOrXA=yes/XIsUCOrXA=yes/g' "${RAMDISK_PATH}/usr/syno/share/environments.sh"
fi fi
# Call user patch scripts # Call user patch scripts
echo -n "." echo -n "."
for F in $(ls -1 ${SCRIPTS_PATH}/*.sh 2>/dev/null); do for F in $(ls -1 "${SCRIPTS_PATH}/"*.sh 2>/dev/null); do
echo "Calling ${F}" >"${LOG_FILE}" echo "Calling ${F}" >"${LOG_FILE}"
. "${F}" >>"${LOG_FILE}" 2>&1 || exit 1 . "${F}" >>"${LOG_FILE}" 2>&1 || exit 1
done done
# Reassemble ramdisk # Reassemble ramdisk
echo -n "." echo -n "."
if [ "${RD_COMPRESSED}" == "true" ]; then if [ "${RD_COMPRESSED}" = "true" ]; then
(cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root | xz -9 --format=lzma >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1 (cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root | xz -9 --format=lzma >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1
else else
(cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1 (cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1

View File

@ -1,9 +1,9 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Based on code and ideas from @jumkey # Based on code and ideas from @jumkey
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" [ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh . "${WORK_PATH}/include/functions.sh"
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")" PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")" PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")"
@ -15,60 +15,51 @@ KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre"
# Output: LE HEX with size of file in bytes (to STDOUT) # Output: LE HEX with size of file in bytes (to STDOUT)
file_size_le() { file_size_le() {
printf $( printf $(
dec_size=0 local dec_size=0
for F in "${@}"; do for F in "$@"; do dec_size=$((dec_size + $(stat -c "%s" "${F}"))); done
fsize=$(stat -c "%s" ${F}) printf "%08x\n" "${dec_size}" | sed 's/\(..\)/\1 /g' | {
dec_size=$(expr ${dec_size} + ${fsize}) read -r ch0 ch1 ch2 ch3
done for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
printf "%08x\n" ${dec_size} |
sed 's/\(..\)/\1 /g' | {
read ch0 ch1 ch2 ch3
for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do
printf '%s%03o' '\' $((0x${ch}))
done
} }
) )
} }
size_le() { size_le() {
printf $( printf $(
printf "%08x\n" "${@}" | printf "%08x\n" "${@}" | sed 's/\(..\)/\1 /g' | {
sed 's/\(..\)/\1 /g' | { read -r ch0 ch1 ch2 ch3
read ch0 ch1 ch2 ch3 for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do
printf '%s%03o' '\' $((0x${ch}))
done
} }
) )
} }
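A minimal sketch of what these two helpers emit (the file name and sizes are illustrative): each prints a 32-bit value as four little-endian bytes, which the dd calls below patch into fixed offsets of the zImage template.

  size_le 4096 | xxd                # 0010 0000 -> 0x00001000 stored little-endian
  file_size_le "vmlinux-mod" | xxd  # same encoding, using the byte size of the given file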
VMLINUX_MOD=${1} VMLINUX_MOD=${1}
ZIMAGE_MOD=${2} ZIMAGE_MOD=${2}
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
# Kernel version 4.x or 3.x (bromolow) # Kernel version 4.x or 3.x (bromolow)
#zImage_head 16494 # zImage_head 16494
#payload( # payload(
# vmlinux.bin x # vmlinux.bin x
# padding 0xf00000-x # padding 0xf00000-x
# vmlinux.bin size 4 # vmlinux.bin size 4
#) 0xf00004 # ) 0xf00004
#zImage_tail( # zImage_tail(
# unknown 72 # unknown 72
# run_size 4 # run_size 4
# unknown 30 # unknown 30
# vmlinux.bin size 4 # vmlinux.bin size 4
# unknown 114460 # unknown 114460
#) 114570 # ) 114570
#crc32 4 # crc32 4
gzip -dc "${WORK_PATH}/bzImage-template-v4.gz" >"${ZIMAGE_MOD}" || exit 1 gzip -dc "${WORK_PATH}/bzImage-template-v4.gz" >"${ZIMAGE_MOD}" || exit 1
dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=16494 seek=1 conv=notrunc || exit 1 dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=16494 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745134 seek=1 conv=notrunc || exit 1 file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745134 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745244 seek=1 conv=notrunc || exit 1 file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745244 seek=1 conv=notrunc || exit 1
RUN_SIZE=$(objdump -h ${VMLINUX_MOD} | sh "${WORK_PATH}/calc_run_size.sh") RUN_SIZE=$(objdump -h "${VMLINUX_MOD}" | sh "${WORK_PATH}/calc_run_size.sh")
size_le ${RUN_SIZE} | dd of=${ZIMAGE_MOD} bs=15745210 seek=1 conv=notrunc || exit 1 size_le "${RUN_SIZE}" | dd of="${ZIMAGE_MOD}" bs=15745210 seek=1 conv=notrunc || exit 1
size_le $(($((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}'))) ^ 0xFFFFFFFF)) | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1 size_le "$((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}') ^ 0xFFFFFFFF))" | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
else else
# Kernel version 5.x # Kernel version 5.x
gzip -dc "${WORK_PATH}/bzImage-template-v5.gz" >"${ZIMAGE_MOD}" || exit 1 gzip -dc "${WORK_PATH}/bzImage-template-v5.gz" >"${ZIMAGE_MOD}" || exit 1
@ -76,7 +67,7 @@ else
dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=14561 seek=1 conv=notrunc || exit 1 dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=14561 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34463421 seek=1 conv=notrunc || exit 1 file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34463421 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34479132 seek=1 conv=notrunc || exit 1 file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34479132 seek=1 conv=notrunc || exit 1
# RUN_SIZE=$(objdump -h ${VMLINUX_MOD} | sh "${WORK_PATH}/calc_run_size.sh") # RUN_SIZE=$(objdump -h "${VMLINUX_MOD}" | sh "${WORK_PATH}/calc_run_size.sh")
# size_le ${RUN_SIZE} | dd of=${ZIMAGE_MOD} bs=34626904 seek=1 conv=notrunc || exit 1 # size_le "${RUN_SIZE}" | dd of="${ZIMAGE_MOD}" bs=34626904 seek=1 conv=notrunc || exit 1
size_le $(($((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}'))) ^ 0xFFFFFFFF)) | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1 size_le "$((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}') ^ 0xFFFFFFFF))" | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
fi fi
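As a reading aid for the dd offsets above: with conv=notrunc, "bs=N seek=1" writes the piped data at byte offset N of the output file, so 16494 lands right after zImage_head, and 15745134 = 16494 + 0xf00000 is the vmlinux.bin size field of the v4 template. A stand-alone illustration (throwaway file name):

  printf 'AB' | dd of=/tmp/offset-demo.bin bs=16494 seek=1 conv=notrunc  # writes 'AB' at byte offset 16494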

View File

@ -1,8 +1,8 @@
#!/usr/bin/env bash #!/usr/bin/env bash
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" [ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh . "${WORK_PATH}/include/functions.sh"
set -o pipefail # Get exit code from process piped set -o pipefail # Get exit code from process piped
@ -28,20 +28,20 @@ if [ "${KERNEL}" = "custom" ]; then
else else
echo -n "." echo -n "."
# Extract vmlinux # Extract vmlinux
${WORK_PATH}/bzImage-to-vmlinux.sh "${ORI_ZIMAGE_FILE}" "${TMP_PATH}/vmlinux" >"${LOG_FILE}" 2>&1 || exit 1 "${WORK_PATH}/bzImage-to-vmlinux.sh" "${ORI_ZIMAGE_FILE}" "${TMP_PATH}/vmlinux" >"${LOG_FILE}" 2>&1 || exit 1
echo -n "." echo -n "."
# Patch boot params and ramdisk check # Patch boot params and ramdisk check
${WORK_PATH}/kpatch "${TMP_PATH}/vmlinux" "${TMP_PATH}/vmlinux-mod" >"${LOG_FILE}" 2>&1 || exit 1 "${WORK_PATH}/kpatch" "${TMP_PATH}/vmlinux" "${TMP_PATH}/vmlinux-mod" >"${LOG_FILE}" 2>&1 || exit 1
echo -n "." echo -n "."
# rebuild zImage # Rebuild zImage
${WORK_PATH}/vmlinux-to-bzImage.sh "${TMP_PATH}/vmlinux-mod" "${MOD_ZIMAGE_FILE}" >"${LOG_FILE}" 2>&1 || exit 1 "${WORK_PATH}/vmlinux-to-bzImage.sh" "${TMP_PATH}/vmlinux-mod" "${MOD_ZIMAGE_FILE}" >"${LOG_FILE}" 2>&1 || exit 1
fi fi
sync sync
echo -n "." echo -n "."
# Update HASH of new DSM zImage # Update HASH of new DSM zImage
HASH="$(sha256sum ${ORI_ZIMAGE_FILE} | awk '{print $1}')" HASH="$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')"
writeConfigKey "zimage-hash" "${HASH}" "${USER_CONFIG_FILE}" writeConfigKey "zimage-hash" "${HASH}" "${USER_CONFIG_FILE}"
echo echo

View File

@ -75,10 +75,10 @@ function set_gfxpayload {
fi fi
} }
set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable" set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait nointremap net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable"
search --set=root --label "RR3" search --set=root --label "RR3"
if [ -s /zImage-dsm -a -s /initrd-dsm ]; then if [ -s /zImage-dsm ] && [ -s /initrd-dsm ]; then
if [ "${default}" = "direct" ]; then if [ "${default}" = "direct" ]; then
set timeout="1" set timeout="1"
menuentry 'Boot DSM kernel directly' --id direct { menuentry 'Boot DSM kernel directly' --id direct {

View File

@ -6,27 +6,29 @@
# See /LICENSE for more information. # See /LICENSE for more information.
# #
PROMPT=$(sudo -nv 2>&1) if [ "$(id -u)" -ne 0 ]; then
if [ $? -ne 0 ]; then
echo "This script must be run as root" echo "This script must be run as root"
exit 1 exit 1
fi fi
function help() { function help() {
echo "Usage: $0 <command> [args]" cat <<EOF
echo "Commands:" Usage: $0 <command> [args]
echo " create [workspace] [rr.img] - Create the workspace" Commands:
echo " init - Initialize the environment" create [workspace] [rr.img] - Create the workspace
echo " config [model] [version] - Config the DSM system" init - Initialize the environment
echo " build - Build the DSM system" config [model] [version] - Config the DSM system
echo " pack [rr.img] - Pack to rr.img" build - Build the DSM system
echo " help - Show this help" pack [rr.img] - Pack to rr.img
help - Show this help
EOF
exit 1 exit 1
} }
function create() { function create() {
WORKSPACE="$(realpath ${1:-"workspace"})" local WORKSPACE RRIMGPATH LOOPX INITRD_FILE INITRD_FORMAT
RRIMGPATH="$(realpath ${2:-"rr.img"})" WORKSPACE="$(realpath "${1:-workspace}")"
RRIMGPATH="$(realpath "${2:-rr.img}")"
if [ ! -f "${RRIMGPATH}" ]; then if [ ! -f "${RRIMGPATH}" ]; then
echo "File not found: ${RRIMGPATH}" echo "File not found: ${RRIMGPATH}"
@ -34,47 +36,37 @@ function create() {
fi fi
sudo apt update sudo apt update
sudo apt install -y locales busybox dialog curl xz-utils cpio sed qemu-utils sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo pip install bs4 sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# sudo snap install yq
if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
fi
# Backup the original python3 executable.
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8 sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
YQ=$(command -v yq)
if [ -z "${YQ}" ] || ! ${YQ} --version 2>/dev/null | grep -q "v4."; then
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O "${YQ:-"/usr/bin/yq"}" && chmod +x "${YQ:-"/usr/bin/yq"}"
fi
LOOPX=$(sudo losetup -f) LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${RRIMGPATH}" sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
echo "Mounting image file" echo "Mounting image file"
rm -rf "/tmp/mnt/p1" for i in {1..3}; do
rm -rf "/tmp/mnt/p2" rm -rf "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p3" mkdir -p "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p1" sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
mkdir -p "/tmp/mnt/p2" echo "Can't mount ${LOOPX}p${i}."
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1 exit 1
) }
done
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || (
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
echo "Create WORKSPACE" echo "Create WORKSPACE"
rm -rf "${WORKSPACE}" rm -rf "${WORKSPACE}"
mkdir -p "${WORKSPACE}/mnt" mkdir -p "${WORKSPACE}/mnt" "${WORKSPACE}/tmp" "${WORKSPACE}/initrd"
mkdir -p "${WORKSPACE}/tmp" cp -rf /tmp/mnt/p{1,2,3} "${WORKSPACE}/mnt/"
mkdir -p "${WORKSPACE}/initrd"
cp -rf "/tmp/mnt/p1" "${WORKSPACE}/mnt/p1"
cp -rf "/tmp/mnt/p2" "${WORKSPACE}/mnt/p2"
cp -rf "/tmp/mnt/p3" "${WORKSPACE}/mnt/p3"
INITRD_FILE="${WORKSPACE}/mnt/p3/initrd-rr" INITRD_FILE="${WORKSPACE}/mnt/p3/initrd-rr"
INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}") INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
@ -91,49 +83,50 @@ function create() {
*) ;; *) ;;
esac esac
) 2>/dev/null ) 2>/dev/null
sudo sync
sudo umount "/tmp/mnt/p1"
sudo umount "/tmp/mnt/p2"
sudo umount "/tmp/mnt/p3"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
sudo losetup --detach ${LOOPX}
if [ ! -f "${WORKSPACE}/initrd/opt/rr/init.sh" ] || ! [ -f "${WORKSPACE}/initrd/opt/rr/menu.sh" ]; then sudo sync
for i in {1..3}; do
sudo umount "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p${i}"
done
sudo losetup --detach "${LOOPX}"
if [ ! -f "${WORKSPACE}/initrd/opt/rr/init.sh" ] || [ ! -f "${WORKSPACE}/initrd/opt/rr/menu.sh" ]; then
echo "initrd decompression failed." echo "initrd decompression failed."
exit 1 exit 1
fi fi
rm -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env rm -f "$(dirname "${BASH_SOURCE[0]}")/rr.env"
echo "export LOADER_DISK=\"LOCALBUILD\"" >>$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env cat <<EOF >"$(dirname "${BASH_SOURCE[0]}")/rr.env"
echo "export CHROOT_PATH=\"${WORKSPACE}\"" >>$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env export LOADER_DISK="LOCALBUILD"
export CHROOT_PATH="${WORKSPACE}"
EOF
echo "OK." echo "OK."
} }
function init() { function init() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first" echo "Please run init first"
exit 1 exit 1
fi fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env . "$(dirname "${BASH_SOURCE[0]}")/rr.env"
pushd "${CHROOT_PATH}/initrd/opt/rr" pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
echo "init" echo "init"
./init.sh ./init.sh
RET=$? local RET=$?
popd popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success." [ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET} exit ${RET}
} }
function config() { function config() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first" echo "Please run init first"
exit 1 exit 1
fi fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env . "$(dirname "${BASH_SOURCE[0]}")/rr.env"
RET=1 local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do while true; do
if [ -z "${1}" ]; then if [ -z "${1}" ]; then
echo "menu" echo "menu"
@ -141,26 +134,26 @@ function config() {
RET=0 RET=0
else else
echo "model" echo "model"
./menu.sh modelMenu "${1:-"SA6400"}" || break ./menu.sh modelMenu "${1:-SA6400}" || break
echo "version" echo "version"
./menu.sh productversMenu "${2:-"7.2"}" || break ./menu.sh productversMenu "${2:-7.2}" || break
RET=0 RET=0
fi fi
break break
done done
popd popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success." [ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET} exit ${RET}
} }
function build() { function build() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first" echo "Please run init first"
exit 1 exit 1
fi fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env . "$(dirname "${BASH_SOURCE[0]}")/rr.env"
RET=1 local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do while true; do
echo "build" echo "build"
./menu.sh make -1 || break ./menu.sh make -1 || break
@ -169,19 +162,20 @@ function build() {
RET=0 RET=0
break break
done done
popd popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success." [ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET} exit ${RET}
} }
function pack() { function pack() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first" echo "Please run init first"
exit 1 exit 1
fi fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env . "$(dirname "${BASH_SOURCE[0]}")/rr.env"
RRIMGPATH="$(realpath ${1:-"rr.img"})" local RRIMGPATH LOOPX
RRIMGPATH="$(realpath "${1:-rr.img}")"
if [ ! -f "${RRIMGPATH}" ]; then if [ ! -f "${RRIMGPATH}" ]; then
gzip -dc "${CHROOT_PATH}/initrd/opt/rr/grub.img.gz" >"${RRIMGPATH}" gzip -dc "${CHROOT_PATH}/initrd/opt/rr/grub.img.gz" >"${RRIMGPATH}"
fi fi
@ -191,48 +185,32 @@ function pack() {
sudo losetup -P "${LOOPX}" "${RRIMGPATH}" sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
echo "Mounting image file" echo "Mounting image file"
rm -rf "/tmp/mnt/p1" for i in {1..3}; do
rm -rf "/tmp/mnt/p2" rm -rf "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p3" mkdir -p "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p1" sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
mkdir -p "/tmp/mnt/p2" echo "Can't mount ${LOOPX}p${i}."
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1 exit 1
) }
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || ( done
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
echo "Pack image file" echo "Pack image file"
sudo cp -af "${CHROOT_PATH}/mnt/p1/.locale" "/tmp/mnt/p1" 2>/dev/null for i in {1..3}; do
sudo cp -rf "${CHROOT_PATH}/mnt/p1/"* "/tmp/mnt/p1" || ( [ ${i} -eq 1 ] && sudo cp -af "${CHROOT_PATH}/mnt/p${i}/"{.locale,.timezone} "/tmp/mnt/p${i}/" 2>/dev/null
echo -e "Can't cp ${LOOPX}p1." sudo cp -rf "${CHROOT_PATH}/mnt/p${i}/"* "/tmp/mnt/p${i}" || {
echo "Can't cp ${LOOPX}p${i}."
exit 1 exit 1
) }
sudo cp -rf "${CHROOT_PATH}/mnt/p2/"* "/tmp/mnt/p2" || ( done
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
sudo cp -rf "${CHROOT_PATH}/mnt/p3/"* "/tmp/mnt/p3" || (
echo -e "Can't cp ${LOOPX}p3."
exit 1
)
sudo sync sudo sync
sudo umount "/tmp/mnt/p1" for i in {1..3}; do
sudo umount "/tmp/mnt/p2" sudo umount "/tmp/mnt/p${i}"
sudo umount "/tmp/mnt/p3" rm -rf "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p1" done
rm -rf "/tmp/mnt/p2" sudo losetup --detach "${LOOPX}"
rm -rf "/tmp/mnt/p3"
sudo losetup --detach ${LOOPX}
echo "OK." echo "OK."
exit 0
} }
"$@" "$@"
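Taken together, the intended flow of this helper is roughly the following (the script name and image path are assumed for illustration):

  sudo ./localbuild.sh create workspace rr.img  # mount the image and unpack the initrd into ./workspace
  sudo ./localbuild.sh init                     # run init.sh inside the unpacked loader
  sudo ./localbuild.sh config SA6400 7.2        # choose model and DSM version
  sudo ./localbuild.sh build                    # build the DSM system
  sudo ./localbuild.sh pack rr.img              # write the result back into rr.img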

View File

@ -6,10 +6,11 @@
# See /LICENSE for more information. # See /LICENSE for more information.
# #
import os, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests import os, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests, urllib3
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
from openpyxl import Workbook from openpyxl import Workbook
@click.group() @click.group()
def cli(): def cli():
""" """
@ -23,31 +24,40 @@ def cli():
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.") @click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.") @click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodels(workpath, jsonpath, xlsxpath): def getmodels(workpath, jsonpath, xlsxpath):
models = {} models = {}
with open("{}/opt/rr/platforms.yml".format(workpath), "r") as f: platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
P_data = yaml.safe_load(f) P_data = yaml.safe_load(f)
P_platforms = P_data.get("platforms", []) P_platforms = P_data.get("platforms", [])
for P in P_platforms: for P in P_platforms:
productvers = {} productvers = {}
for V in P_platforms[P]["productvers"]: for V in P_platforms[P]["productvers"]:
if P_platforms[P]["productvers"][V].get("kpre", "") != "": kpre = P_platforms[P]["productvers"][V].get("kpre", "")
productvers[V] = (P_platforms[P]["productvers"][V].get("kpre", "") + "-" + P_platforms[P]["productvers"][V].get("kver", "")) kver = P_platforms[P]["productvers"][V].get("kver", "")
else: productvers[V] = f"{kpre}-{kver}" if kpre else kver
productvers[V] = P_platforms[P]["productvers"][V].get("kver", "")
models[P] = {"productvers": productvers, "models": []} models[P] = {"productvers": productvers, "models": []}
req = requests.get("https://autoupdate.synology.com/os/v2") adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8" req.encoding = "utf-8"
data = json.loads(req.text) data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for I in data["channel"]["item"]: for item in data["channel"]["item"]:
if not I["title"].startswith("DSM"): if not item["title"].startswith("DSM"):
continue continue
for J in I["model"]: for model in item["model"]:
arch = J["mUnique"].split("_")[1].lower() arch = model["mUnique"].split("_")[1].lower()
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+") name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in models.keys(): if arch not in models:
continue continue
if name in (A for B in models for A in models[B]["models"]): if name in (A for B in models for A in models[B]["models"]):
continue continue
@ -64,39 +74,45 @@ def getmodels(workpath, jsonpath, xlsxpath):
ws.append([k, str(v["productvers"]), str(v["models"])]) ws.append([k, str(v["productvers"]), str(v["models"])])
wb.save(xlsxpath) wb.save(xlsxpath)
@cli.command() @cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.") @click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.") @click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.") @click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getpats(workpath, jsonpath, xlsxpath): def getpats(workpath, jsonpath, xlsxpath):
def __fullversion(ver): def __fullversion(ver):
out = ver
arr = ver.split('-') arr = ver.split('-')
if len(arr) > 0: a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
a = arr[0].split('.')[0] if len(arr[0].split('.')) > 0 else '0'
b = arr[0].split('.')[1] if len(arr[0].split('.')) > 1 else '0'
c = arr[0].split('.')[2] if len(arr[0].split('.')) > 2 else '0'
d = arr[1] if len(arr) > 1 else '00000' d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0' e = arr[2] if len(arr) > 2 else '0'
out = '{}.{}.{}-{}-{}'.format(a,b,c,d,e) return f'{a}.{b}.{c}-{d}-{e}'
return out
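For example (values illustrative), __fullversion pads "7.2-64570" to "7.2.0-64570-0", so differently formatted build strings end up as uniform keys in pats[M].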
platforms = [] platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
models = [] with open(platforms_yml, "r") as f:
with open("{}/opt/rr/platforms.yml".format(workpath), "r") as f:
data = yaml.safe_load(f) data = yaml.safe_load(f)
platforms = data.get("platforms", []) platforms = data.get("platforms", [])
req = requests.get("https://autoupdate.synology.com/os/v2") adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8" req.encoding = "utf-8"
data = json.loads(req.text) data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for I in data["channel"]["item"]: models = []
if not I["title"].startswith("DSM"): for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue continue
for J in I["model"]: for model in item["model"]:
arch = J["mUnique"].split("_")[1].lower() arch = model["mUnique"].split("_")[1].lower()
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+") name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in platforms: if arch not in platforms:
continue continue
if name in models: if name in models:
@ -112,57 +128,73 @@ def getpats(workpath, jsonpath, xlsxpath):
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn" #urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?" #urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = "&major={}".format(version.split('.')[0]) if len(version.split('.')) > 0 else "" major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = "&minor={}".format(version.split('.')[1]) if len(version.split('.')) > 1 else "" minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
req = requests.get("{}&product={}{}{}".format(urlInfo, M.replace("+", "%2B"), major, minor)) try:
req = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8" req.encoding = "utf-8"
data = json.loads(req.text) data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver'] build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num'] build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano'] buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano)) V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if not V in pats[M]: if V not in pats[M]:
pats[M][V]={} pats[M][V] = {
pats[M][V]['url'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0] 'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
pats[M][V]['sum'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum'] 'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver=0 from_ver = min((I['build'] for I in data['info']['pubVers']), default=0)
for I in data['info']['pubVers']:
if from_ver == 0 or I['build'] < from_ver: from_ver = I['build']
for I in data['info']['productVers']: for I in data['info']['productVers']:
if not I['version'].startswith(version): continue if not I['version'].startswith(version):
if major == "" or minor == "": continue
majorTmp = "&major={}".format(I['version'].split('.')[0]) if len(I['version'].split('.')) > 0 else "" if not major or not minor:
minorTmp = "&minor={}".format(I['version'].split('.')[1]) if len(I['version'].split('.')) > 1 else "" majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
reqTmp = requests.get("{}&product={}{}{}".format(urlInfo, M.replace("+", "%2B"), majorTmp, minorTmp)) minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
try:
reqTmp = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8" reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text) dataTmp = json.loads(reqTmp.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver'] build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num'] build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano'] buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano)) V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if not V in pats[M]: if V not in pats[M]:
pats[M][V]={} pats[M][V] = {
pats[M][V]['url'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0] 'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
pats[M][V]['sum'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum'] 'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']: for J in I['versions']:
to_ver=J['build'] to_ver = J['build']
reqSteps = requests.get("{}&product={}&from_ver={}&to_ver={}".format(urlSteps, M.replace("+", "%2B"), from_ver, to_ver)) try:
if reqSteps.status_code != 200: continue reqSteps = session.get(f"{urlSteps}&product={M.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8" reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text) dataSteps = json.loads(reqSteps.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
for S in dataSteps['upgrade_steps']: for S in dataSteps['upgrade_steps']:
if not 'full_patch' in S or S['full_patch'] is False: continue if not S.get('full_patch') or not S.get('build_ver', '').startswith(version):
if not 'build_ver' in S or not S['build_ver'].startswith(version): continue continue
V=__fullversion("{}-{}-{}".format(S['build_ver'], S['build_num'], S['nano'])) V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if not V in pats[M]: if V not in pats[M]:
pats[M][V] = {} pats[M][V] = {
pats[M][V]['url'] = S['files'][0]['url'].split('?')[0] 'url': S['files'][0]['url'].split('?')[0],
pats[M][V]['sum'] = S['files'][0]['checksum'] 'sum': S['files'][0]['checksum']
}
if jsonpath: if jsonpath:
with open(jsonpath, "w") as f: with open(jsonpath, "w") as f:
@ -176,13 +208,13 @@ def getpats(workpath, jsonpath, xlsxpath):
ws.append([k1, k2, v2["url"], v2["sum"]]) ws.append([k1, k2, v2["url"], v2["sum"]])
wb.save(xlsxpath) wb.save(xlsxpath)
@cli.command() @cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.") @click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.") @click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.") @click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getaddons(workpath, jsonpath, xlsxpath): def getaddons(workpath, jsonpath, xlsxpath):
# Read the manifest.yml file AS = glob.glob(os.path.join(workpath, "mnt", "p3", "addons", "*", "manifest.yml"))
AS = glob.glob("{}/mnt/p3/addons/*/manifest.yml".format(workpath))
AS.sort() AS.sort()
addons = {} addons = {}
for A in AS: for A in AS:
@ -200,7 +232,7 @@ def getaddons(workpath, jsonpath, xlsxpath):
ws = wb.active ws = wb.active
ws.append(["Name", "system", "en_US", "zh_CN"]) ws.append(["Name", "system", "en_US", "zh_CN"])
for k1, v1 in addons.items(): for k1, v1 in addons.items():
ws.append([k1, v1.get("system", False), v1.get("description").get("en_US", ""), v1.get("description").get("zh_CN", ""),]) ws.append([k1, v1.get("system", False), v1.get("description", {}).get("en_US", ""), v1.get("description", {}).get("zh_CN", "")])
wb.save(xlsxpath) wb.save(xlsxpath)
@ -209,8 +241,7 @@ def getaddons(workpath, jsonpath, xlsxpath):
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.") @click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.") @click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodules(workpath, jsonpath, xlsxpath): def getmodules(workpath, jsonpath, xlsxpath):
# Read the module files MS = glob.glob(os.path.join(workpath, "mnt", "p3", "modules", "*.tgz"))
MS = glob.glob("{}/mnt/p3/modules/*.tgz".format(workpath))
MS.sort() MS.sort()
modules = {} modules = {}
TMP_PATH = "/tmp/modules" TMP_PATH = "/tmp/modules"
@ -219,12 +250,10 @@ def getmodules(workpath, jsonpath, xlsxpath):
for M in MS: for M in MS:
M_name = os.path.splitext(os.path.basename(M))[0] M_name = os.path.splitext(os.path.basename(M))[0]
M_modules = {} M_modules = {}
# Extract the module
os.makedirs(TMP_PATH) os.makedirs(TMP_PATH)
with tarfile.open(M, "r") as tar: with tarfile.open(M, "r") as tar:
tar.extractall(TMP_PATH) tar.extractall(TMP_PATH)
# Traverse the extracted files KS = glob.glob(os.path.join(TMP_PATH, "*.ko"))
KS = glob.glob("{}/*.ko".format(TMP_PATH))
KS.sort() KS.sort()
for K in KS: for K in KS:
K_name = os.path.splitext(os.path.basename(K))[0] K_name = os.path.splitext(os.path.basename(K))[0]

View File

@ -5,19 +5,23 @@
# This is free software, licensed under the MIT License. # This is free software, licensed under the MIT License.
# See /LICENSE for more information. # See /LICENSE for more information.
# #
# sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
[ -n "${1}" ] && export TOKEN="${1}" [ -n "${1}" ] && export TOKEN="${1}"
REPO="https://api.github.com/repos/RROrg"
# Convert po2mo # Convert po2mo
# $1 path # $1 path
function convertpo2mo() { function convertpo2mo() {
echo "Convert po2mo begin" echo "Convert po2mo begin"
local DEST_PATH="${1:-lang}" local DEST_PATH="${1:-lang}"
for P in $(ls ${DEST_PATH}/*/LC_MESSAGES/rr.po 2>/dev/null); do while read -r P; do
# Use msgfmt command to compile the .po file into a binary .mo file # Use msgfmt command to compile the .po file into a binary .mo file
echo "msgfmt ${P} to ${P/.po/.mo}" echo "msgfmt ${P} to ${P/.po/.mo}"
msgfmt ${P} -o ${P/.po/.mo} msgfmt "${P}" -o "${P/.po/.mo}"
done done <<<$(find "${DEST_PATH}" -type f -name 'rr.po')
echo "Convert po2mo end" echo "Convert po2mo end"
} }
@ -34,9 +38,9 @@ function getExtractor() {
local PAT_URL="https://global.synologydownload.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat" local PAT_URL="https://global.synologydownload.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat"
local PAT_FILE="DSM_DS3622xs+_42218.pat" local PAT_FILE="DSM_DS3622xs+_42218.pat"
local STATUS=$(curl -#L -w "%{http_code}" "${PAT_URL}" -o "${CACHE_DIR}/${PAT_FILE}") local STATUS=$(curl -#L -w "%{http_code}" "${PAT_URL}" -o "${CACHE_DIR}/${PAT_FILE}")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ]; then
echo "[E] DSM_DS3622xs%2B_42218.pat download error!" echo "[E] DSM_DS3622xs%2B_42218.pat download error!"
rm -rf ${CACHE_DIR} rm -rf "${CACHE_DIR}"
exit 1 exit 1
fi fi
@ -44,13 +48,10 @@ function getExtractor() {
tar -C "${CACHE_DIR}/ramdisk/" -xf "${CACHE_DIR}/${PAT_FILE}" rd.gz 2>&1 tar -C "${CACHE_DIR}/ramdisk/" -xf "${CACHE_DIR}/${PAT_FILE}" rd.gz 2>&1
if [ $? -ne 0 ]; then if [ $? -ne 0 ]; then
echo "[E] extractor rd.gz error!" echo "[E] extractor rd.gz error!"
rm -rf ${CACHE_DIR} rm -rf "${CACHE_DIR}"
exit 1 exit 1
fi fi
( (cd "${CACHE_DIR}/ramdisk" && xz -dc <rd.gz | cpio -idm) >/dev/null 2>&1 || true
cd "${CACHE_DIR}/ramdisk"
xz -dc <rd.gz | cpio -idm
) >/dev/null 2>&1 || true
rm -rf "${DEST_PATH}" rm -rf "${DEST_PATH}"
mkdir -p "${DEST_PATH}" mkdir -p "${DEST_PATH}"
@ -62,7 +63,7 @@ function getExtractor() {
cp -f "${CACHE_DIR}/ramdisk/usr/syno/bin/scemd" "${DEST_PATH}/syno_extract_system_patch" cp -f "${CACHE_DIR}/ramdisk/usr/syno/bin/scemd" "${DEST_PATH}/syno_extract_system_patch"
# Clean up # Clean up
rm -rf ${CACHE_DIR} rm -rf "${CACHE_DIR}"
echo "Getting syno extractor end" echo "Getting syno extractor end"
} }
@ -75,18 +76,19 @@ function getBuildroot() {
local CACHE_DIR="/tmp/buildroot" local CACHE_DIR="/tmp/buildroot"
local CACHE_FILE="/tmp/buildroot.zip" local CACHE_FILE="/tmp/buildroot.zip"
rm -f "${CACHE_FILE}" rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases" | jq -r ".[].tag_name" | sort -rV | head -1) TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-buildroot/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases/latest" | jq -r ".tag_name") TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-buildroot/releases/latest" | jq -r ".tag_name")
fi fi
while read ID NAME; do while read -r ID NAME; do
if [ "${NAME}" = "buildroot-${TAG}.zip" ]; then if [ "${NAME}" = "buildroot-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-buildroot/releases/assets/${ID}" -o "${CACHE_FILE}") STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-buildroot/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}" echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1 [ ${STATUS:-0} -ne 200 ] && exit 1
fi fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"') done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
# Unzip Buildroot # Unzip Buildroot
rm -rf "${CACHE_DIR}" rm -rf "${CACHE_DIR}"
mkdir -p "${CACHE_DIR}" mkdir -p "${CACHE_DIR}"
@ -107,18 +109,19 @@ function getCKs() {
local DEST_PATH="${1:-cks}" local DEST_PATH="${1:-cks}"
local CACHE_FILE="/tmp/rr-cks.zip" local CACHE_FILE="/tmp/rr-cks.zip"
rm -f "${CACHE_FILE}" rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases" | jq -r ".[].tag_name" | sort -rV | head -1) TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-cks/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases/latest" | jq -r ".tag_name") TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-cks/releases/latest" | jq -r ".tag_name")
fi fi
while read ID NAME; do while read -r ID NAME; do
if [ "${NAME}" = "rr-cks-${TAG}.zip" ]; then if [ "${NAME}" = "rr-cks-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-cks/releases/assets/${ID}" -o "${CACHE_FILE}") STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-cks/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}" echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1 [ ${STATUS:-0} -ne 200 ] && exit 1
fi fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"') done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1 [ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip CKs # Unzip CKs
rm -rf "${DEST_PATH}" rm -rf "${DEST_PATH}"
@ -136,18 +139,19 @@ function getLKMs() {
local DEST_PATH="${1:-lkms}" local DEST_PATH="${1:-lkms}"
local CACHE_FILE="/tmp/rp-lkms.zip" local CACHE_FILE="/tmp/rp-lkms.zip"
rm -f "${CACHE_FILE}" rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases" | jq -r ".[].tag_name" | sort -rV | head -1) TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-lkms/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases/latest" | jq -r ".tag_name") TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-lkms/releases/latest" | jq -r ".tag_name")
fi fi
while read ID NAME; do while read -r ID NAME; do
if [ "${NAME}" = "rp-lkms-${TAG}.zip" ]; then if [ "${NAME}" = "rp-lkms-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-lkms/releases/assets/${ID}" -o "${CACHE_FILE}") STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-lkms/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}" echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1 [ ${STATUS:-0} -ne 200 ] && exit 1
fi fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"') done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1 [ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip LKMs # Unzip LKMs
rm -rf "${DEST_PATH}" rm -rf "${DEST_PATH}"
@ -157,7 +161,7 @@ function getLKMs() {
echo "Getting LKMs end" echo "Getting LKMs end"
} }
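For reference, the "while read -r ID NAME" loops in these download helpers consume the jq output one release asset per line, e.g. (asset id and file name illustrative):

  123456789 rp-lkms-24.11.1.zip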
# Get latest addons and install its # Get latest addons and install them
# $1 path # $1 path
# $2 (true|false[d]) include prerelease # $2 (true|false[d]) include prerelease
function getAddons() { function getAddons() {
@ -165,18 +169,19 @@ function getAddons() {
local DEST_PATH="${1:-addons}" local DEST_PATH="${1:-addons}"
local CACHE_DIR="/tmp/addons" local CACHE_DIR="/tmp/addons"
local CACHE_FILE="/tmp/addons.zip" local CACHE_FILE="/tmp/addons.zip"
local TAG
if [ "${2}" = "true" ]; then if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases" | jq -r ".[].tag_name" | sort -rV | head -1) TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-addons/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases/latest" | jq -r ".tag_name") TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-addons/releases/latest" | jq -r ".tag_name")
fi fi
while read ID NAME; do while read -r ID NAME; do
if [ "${NAME}" = "addons-${TAG}.zip" ]; then if [ "${NAME}" = "addons-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-addons/releases/assets/${ID}" -o "${CACHE_FILE}") STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-addons/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}" echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1 [ ${STATUS:-0} -ne 200 ] && exit 1
fi fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"') done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1 [ ! -f "${CACHE_FILE}" ] && exit 1
rm -rf "${DEST_PATH}" rm -rf "${DEST_PATH}"
mkdir -p "${DEST_PATH}" mkdir -p "${DEST_PATH}"
@ -185,8 +190,8 @@ function getAddons() {
mkdir -p "${CACHE_DIR}" mkdir -p "${CACHE_DIR}"
unzip "${CACHE_FILE}" -d "${CACHE_DIR}" unzip "${CACHE_FILE}" -d "${CACHE_DIR}"
echo "Installing addons to ${DEST_PATH}" echo "Installing addons to ${DEST_PATH}"
[ -f /tmp/addons/VERSION ] && cp -f /tmp/addons/VERSION ${DEST_PATH}/ [ -f "/tmp/addons/VERSION" ] && cp -f "/tmp/addons/VERSION" "${DEST_PATH}/"
for PKG in $(ls ${CACHE_DIR}/*.addon 2>/dev/null); do for PKG in "${CACHE_DIR}"/*.addon; do
ADDON=$(basename "${PKG}" .addon) ADDON=$(basename "${PKG}" .addon)
mkdir -p "${DEST_PATH}/${ADDON}" mkdir -p "${DEST_PATH}/${ADDON}"
echo "Extracting ${PKG} to ${DEST_PATH}/${ADDON}" echo "Extracting ${PKG} to ${DEST_PATH}/${ADDON}"
@ -205,18 +210,19 @@ function getModules() {
local DEST_PATH="${1:-addons}" local DEST_PATH="${1:-addons}"
local CACHE_FILE="/tmp/modules.zip" local CACHE_FILE="/tmp/modules.zip"
rm -f "${CACHE_FILE}" rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases" | jq -r ".[].tag_name" | sort -rV | head -1) TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-modules/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases/latest" | jq -r ".tag_name") TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-modules/releases/latest" | jq -r ".tag_name")
fi fi
while read ID NAME; do while read -r ID NAME; do
if [ "${NAME}" = "modules-${TAG}.zip" ]; then if [ "${NAME}" = "modules-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-modules/releases/assets/${ID}" -o "${CACHE_FILE}") STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-modules/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}" echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1 [ ${STATUS:-0} -ne 200 ] && exit 1
fi fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"') done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1 [ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip Modules # Unzip Modules
rm -rf "${DEST_PATH}" rm -rf "${DEST_PATH}"
@ -231,20 +237,20 @@ function getModules() {
# $2 plugin path # $2 plugin path
# $3 output file # $3 output file
function repackInitrd() { function repackInitrd() {
INITRD_FILE="${1}" local INITRD_FILE="${1}"
PLUGIN_PATH="${2}" local PLUGIN_PATH="${2}"
OUTPUT_PATH="${3:-${INITRD_FILE}}" local OUTPUT_PATH="${3:-${INITRD_FILE}}"
[ -z "${INITRD_FILE}" -o ! -f "${INITRD_FILE}" ] && exit 1 [ -z "${INITRD_FILE}" ] || [ ! -f "${INITRD_FILE}" ] && exit 1
[ -z "${PLUGIN_PATH}" -o ! -d "${PLUGIN_PATH}" ] && exit 1 [ -z "${PLUGIN_PATH}" ] || [ ! -d "${PLUGIN_PATH}" ] && exit 1
INITRD_FILE="$(readlink -f "${INITRD_FILE}")" INITRD_FILE="$(readlink -f "${INITRD_FILE}")"
PLUGIN_PATH="$(readlink -f "${PLUGIN_PATH}")" PLUGIN_PATH="$(readlink -f "${PLUGIN_PATH}")"
OUTPUT_PATH="$(readlink -f "${OUTPUT_PATH}")" OUTPUT_PATH="$(readlink -f "${OUTPUT_PATH}")"
RDXZ_PATH="rdxz_tmp" local RDXZ_PATH="rdxz_tmp"
mkdir -p "${RDXZ_PATH}" mkdir -p "${RDXZ_PATH}"
INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}") local INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
( (
cd "${RDXZ_PATH}" cd "${RDXZ_PATH}"
case "${INITRD_FORMAT}" in case "${INITRD_FORMAT}" in
@ -281,17 +287,17 @@ function repackInitrd() {
# $2 change size in MB, e.g.: +50M -50M # $2 change size in MB, e.g.: +50M -50M
# $3 output file # $3 output file
function resizeImg() { function resizeImg() {
INPUT_FILE="${1}" local INPUT_FILE="${1}"
CHANGE_SIZE="${2}" local CHANGE_SIZE="${2}"
OUTPUT_FILE="${3:-${INPUT_FILE}}" local OUTPUT_FILE="${3:-${INPUT_FILE}}"
[ -z "${INPUT_FILE}" -o ! -f "${INPUT_FILE}" ] && exit 1 [ -z "${INPUT_FILE}" ] || [ ! -f "${INPUT_FILE}" ] && exit 1
[ -z "${CHANGE_SIZE}" ] && exit 1 [ -z "${CHANGE_SIZE}" ] && exit 1
INPUT_FILE="$(readlink -f "${INPUT_FILE}")" INPUT_FILE="$(readlink -f "${INPUT_FILE}")"
OUTPUT_FILE="$(readlink -f "${OUTPUT_FILE}")" OUTPUT_FILE="$(readlink -f "${OUTPUT_FILE}")"
SIZE=$(($(du -sm "${INPUT_FILE}" 2>/dev/null | awk '{print $1}')$(echo "${CHANGE_SIZE}" | sed 's/M//g; s/b//g'))) local SIZE=$(($(du -sm "${INPUT_FILE}" 2>/dev/null | awk '{print $1}')$(echo "${CHANGE_SIZE}" | sed 's/M//g; s/b//g')))
[ "${SIZE:-0}" -lt 0 ] && exit 1 [ "${SIZE:-0}" -lt 0 ] && exit 1
if [ ! "${INPUT_FILE}" = "${OUTPUT_FILE}" ]; then if [ ! "${INPUT_FILE}" = "${OUTPUT_FILE}" ]; then
@ -299,8 +305,8 @@ function resizeImg() {
fi fi
sudo truncate -s ${SIZE}M "${OUTPUT_FILE}" sudo truncate -s ${SIZE}M "${OUTPUT_FILE}"
echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}" echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}" >/dev/null 2>&1
LOOPX=$(sudo losetup -f) local LOOPX=$(sudo losetup -f)
sudo losetup -P ${LOOPX} "${OUTPUT_FILE}" sudo losetup -P ${LOOPX} "${OUTPUT_FILE}"
sudo e2fsck -fp $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1) sudo e2fsck -fp $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
sudo resize2fs $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1) sudo resize2fs $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
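A usage sketch for the helper above (file names and size delta illustrative):

  resizeImg rr.img +50M rr-expanded.img  # grow a copy of the loader image by 50 MB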
@@ -311,12 +317,12 @@ function resizeImg() {
# $1 bootloader file # $1 bootloader file
# $2 ova file # $2 ova file
function convertova() { function convertova() {
BLIMAGE=${1} local BLIMAGE=${1}
OVAPATH=${2} local OVAPATH=${2}
BLIMAGE="$(readlink -f "${BLIMAGE}")" BLIMAGE="$(readlink -f "${BLIMAGE}")"
OVAPATH="$(readlink -f "${OVAPATH}")" OVAPATH="$(readlink -f "${OVAPATH}")"
VMNAME="$(basename "${OVAPATH}" .ova)" local VMNAME="$(basename "${OVAPATH}" .ova)"
# Download and install ovftool if it doesn't exist # Download and install ovftool if it doesn't exist
if [ ! -x ovftool/ovftool ]; then if [ ! -x ovftool/ovftool ]; then
@@ -345,9 +351,9 @@ function convertova() {
# Create VM configuration # Create VM configuration
cat <<_EOF_ >"OVA_${VMNAME}/${VMNAME}.vmx" cat <<_EOF_ >"OVA_${VMNAME}/${VMNAME}.vmx"
.encoding = "GBK" .encoding = "UTF-8"
config.version = "8" config.version = "8"
virtualHW.version = "21" virtualHW.version = "17"
displayName = "${VMNAME}" displayName = "${VMNAME}"
annotation = "https://github.com/RROrg/rr" annotation = "https://github.com/RROrg/rr"
guestOS = "ubuntu-64" guestOS = "ubuntu-64"

@@ -2,4 +2,8 @@ bs4
click click
kmodule kmodule
requests requests
requests-toolbelt
urllib3
openpyxl openpyxl
qrcode[pil]
beautifulsoup4

@@ -6,35 +6,40 @@
# See /LICENSE for more information. # See /LICENSE for more information.
# #
# sudo apt update # sudo apt update
# sudo apt install -y locales busybox dialog curl xz-utils cpio sed # sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# # sudo snap install yq
# if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
# sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
# fi
#
# # Backup the original python3 executable.
# sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
# sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
#
# sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8 # sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
# #
# export TOKEN="${1}" # export TOKEN="${1}"
# #
set -e if [ "$(id -u)" -ne 0 ]; then
PROMPT=$(sudo -nv 2>&1)
if [ $? -ne 0 ]; then
echo "This script must be run as root" echo "This script must be run as root"
exit 1 exit 1
fi fi
PRE="true" . scripts/func.sh "${TOKEN}"
. scripts/func.sh
echo "Get extractor" echo "Get extractor"
getCKs "files/p3/cks" "${PRE}" getCKs "files/mnt/p3/cks" "true"
getLKMs "files/p3/lkms" "${PRE}" getLKMs "files/mnt/p3/lkms" "true"
getAddons "files/p3/addons" "${PRE}" getAddons "files/mnt/p3/addons" "true"
getModules "files/p3/modules" "${PRE}" getModules "files/mnt/p3/modules" "true"
getBuildroot "files/p3" "${PRE}" getBuildroot "files/mnt/p3" "true"
getExtractor "files/p3/extractor" getExtractor "files/mnt/p3/extractor"
echo "Repack initrd" echo "Repack initrd"
convertpo2mo "files/initrd/opt/rr/lang" convertpo2mo "files/initrd/opt/rr/lang"
repackInitrd "files/p3/initrd-rr" "files/initrd" repackInitrd "files/mnt/p3/initrd-rr" "files/initrd"
if [ -n "${1}" ]; then if [ -n "${1}" ]; then
export LOADER_DISK="LOCALBUILD" export LOADER_DISK="LOCALBUILD"
@@ -43,81 +48,63 @@ if [ -n "${1}" ]; then
cd "${CHROOT_PATH}/initrd/opt/rr" cd "${CHROOT_PATH}/initrd/opt/rr"
./init.sh ./init.sh
./menu.sh modelMenu "${1}" ./menu.sh modelMenu "${1}"
./menu.sh productversMenu "7.2" ./menu.sh productversMenu "${2:-7.2}"
./menu.sh make -1 ./menu.sh make -1
./menu.sh cleanCache -1 ./menu.sh cleanCache -1
) )
fi fi
IMAGE_FILE="rr.img" IMAGE_FILE="rr.img"
gzip -dc "files/grub.img.gz" >"${IMAGE_FILE}" gzip -dc "files/initrd/opt/rr/grub.img.gz" >"${IMAGE_FILE}"
fdisk -l "${IMAGE_FILE}" fdisk -l "${IMAGE_FILE}"
LOOPX=$(sudo losetup -f) LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${IMAGE_FILE}" sudo losetup -P "${LOOPX}" "${IMAGE_FILE}"
echo "Mounting image file" for i in {1..3}; do
rm -rf "/tmp/mnt/p1" [ ! -d "files/mnt/p${i}" ] && continue
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
mkdir -p "/tmp/mnt/p1"
mkdir -p "/tmp/mnt/p2"
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1
)
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || (
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
echo "Copying files" rm -rf "/tmp/mnt/p${i}"
sudo cp -af "files/mnt/p1/.locale" "/tmp/mnt/p1" 2>/dev/null mkdir -p "/tmp/mnt/p${i}"
sudo cp -rf "files/mnt/p1/"* "/tmp/mnt/p1" || (
echo -e "Can't cp ${LOOPX}p1."
exit 1
)
sudo cp -rf "files/mnt/p2/"* "/tmp/mnt/p2" || (
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
sudo cp -rf "files/mnt/p3/"* "/tmp/mnt/p3" || (
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
sudo sync echo "Mounting ${LOOPX}p${i}"
sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
echo "Can't mount ${LOOPX}p${i}."
break
}
echo "Copying files to ${LOOPX}p${i}"
[ ${i} -eq 1 ] && sudo cp -af "files/mnt/p${i}/"{.locale,.timezone} "/tmp/mnt/p${i}/" 2>/dev/null || true
sudo cp -rf "files/mnt/p${i}/"* "/tmp/mnt/p${i}" || true
sudo sync
echo "Unmounting ${LOOPX}p${i}"
sudo umount "/tmp/mnt/p${i}" || {
echo "Can't umount ${LOOPX}p${i}."
break
}
rm -rf "/tmp/mnt/p${i}"
done
sudo losetup --detach "${LOOPX}"
resizeImg "${IMAGE_FILE}" "+2560M"
# convertova "${IMAGE_FILE}" "${IMAGE_FILE/.img/.ova}"
# update.zip # update.zip
sha256sum update-list.yml update-check.sh >sha256sum sha256sum update-list.yml update-check.sh >sha256sum
zip -9j update.zip update-list.yml update-check.sh zip -9j "update.zip" update-list.yml update-check.sh
while read F; do while read -r F; do
if [ -d "/tmp/${F}" ]; then if [ -d "${F}" ]; then
FTGZ="$(basename "/tmp/${F}").tgz" FTGZ="$(basename "${F}").tgz"
tar -czf "${FTGZ}" -C "/tmp/${F}" . tar -zcf "${FTGZ}" -C "${F}" .
sha256sum "${FTGZ}" >>sha256sum sha256sum "${FTGZ}" >>sha256sum
zip -9j update.zip "${FTGZ}" zip -9j "update.zip" "${FTGZ}"
sudo rm -f "${FTGZ}" rm -f "${FTGZ}"
else else
(cd $(dirname "/tmp/${F}") && sha256sum $(basename "/tmp/${F}")) >>sha256sum (cd $(dirname "${F}") && sha256sum $(basename "${F}")) >>sha256sum
zip -9j update.zip "/tmp/${F}" zip -9j "update.zip" "${F}"
fi fi
done <<<$(yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.yml) done <<<$(yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.yml)
zip -9j update.zip sha256sum zip -9j "update.zip" sha256sum
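The while-loop above packages every key of the replace map in update-list.yml: a directory becomes a per-directory .tgz, a single file is added to update.zip as-is, and both end up listed in sha256sum. The real update-list.yml is not part of this diff, so the sketch below uses illustrative entries to show what the yq query yields:

# Sketch only: the keys of ".replace" are the paths the loop packages.
cat >update-list.sample.yml <<'_EOF_'
replace:
  files/initrd/opt/rr: "/opt/rr"       # illustrative directory entry -> packed as rr.tgz
  files/mnt/p1/zImage-dsm: "/zImage"   # illustrative file entry -> zipped directly
_EOF_
# Prints one key per line, exactly what the while-loop reads:
yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.sample.yml
#   files/initrd/opt/rr
#   files/mnt/p1/zImage-dsm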
echo "Unmount image file"
sudo umount "/tmp/files/p1"
sudo umount "/tmp/files/p2"
sudo umount "/tmp/files/p3"
sudo losetup --detach ${LOOPX}
if [ -n "${1}" ]; then
echo "Packing image file"
sudo mv "${IMAGE_FILE}" "rr-${1}.img"
fi