Optimize conditional checks and file-handling logic to improve code readability and execution efficiency

Ing 2024-11-19 12:06:06 +08:00
parent c091d0dba0
commit 04eeea5e84
42 changed files with 9331 additions and 9222 deletions

View File

@ -33,8 +33,10 @@ jobs:
git config --global user.name "github-actions[bot]"
sudo timedatectl set-timezone "Asia/Shanghai"
sudo apt update
sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies
- name: Delay
run: |
echo "Delaying for 1 minutes..."
sleep 60
- name: Get Release RR
run: |
@ -51,11 +53,11 @@ jobs:
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then
if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed"
exit 1
fi
unzip rr-${TAG}.img.zip -d "rr"
export TERM=xterm
@ -68,12 +70,20 @@ jobs:
- name: Get data
run: |
pip install -r scripts/requirements.txt
python scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx"
python scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx"
python scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx"
sudo apt update
sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies
python scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx"
# Backup the original python3 executable.
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U -r scripts/requirements.txt
python3 scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx"
python3 scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx"
python3 scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx"
python3 scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx"
- name: Upload to Artifacts
if: success()

View File

@ -33,7 +33,7 @@ jobs:
issuetitle = ${{ toJSON(github.event.issue.title) }};
issuebody = ${{ toJSON(github.event.issue.body) }};
iscustom = 'false'
warinfo = 'false'
@ -148,20 +148,22 @@ jobs:
run: |
# Tired of this, just let it all burn!
# yq need sudo !!!
function writeConfigKey() {
[ "${2}" = "{}" ] && sudo yq eval '.'${1}' = {}' --inplace "${3}" 2>/dev/null || sudo yq eval '.'${1}' = "'"${2}"'"' --inplace "${3}" 2>/dev/null
local value="${2}"
[ "${value}" = "{}" ] && sudo yq eval ".${1} = {}" --inplace "${3}" 2>/dev/null || sudo yq eval ".${1} = \"${value}\"" --inplace "${3}" 2>/dev/null
}
function readConfigKey() {
RESULT=$(yq eval '.'${1}' | explode(.)' "${2}" 2>/dev/null)
[ "${RESULT}" == "null" ] && echo "" || echo "${RESULT}"
local result=$(sudo yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${result}" = "null" ] && echo "" || echo "${result}"
}
function mergeConfigStr() {
local JF=$(mktemp)
echo "${2}" | yq -p ${1} -o y > "${JF}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${JF}" 2>/dev/null
rm -f "${JF}"
local xmlfile=$(mktemp)
echo "${2}" | sudo yq -p "${1}" -o y >"${xmlfile}"
sudo yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${xmlfile}" 2>/dev/null
rm -f "${xmlfile}"
}
REPO="${{ github.server_url }}/${{ github.repository }}"
@ -179,11 +181,11 @@ jobs:
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then
if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed"
exit 1
fi
unzip rr-${TAG}.img.zip -d "rr"
export TERM=xterm
@ -197,7 +199,7 @@ jobs:
# sudo cp -rf files/initrd/opt/rr/* rr/ws/initrd/opt/rr/
# sudo sed -i "s/set -e/set -ex/" rr/ws/initrd/opt/rr/init.sh
# sudo sed -i '/^alias/i\set -x' rr/ws/initrd/opt/rr/menu.sh
[ -n "${{ env.language }}" ] && echo "${{ env.language }}.UTF-8" | sudo tee rr/ws/mnt/p1/.locale
sudo ./localbuild.sh init
@ -231,11 +233,11 @@ jobs:
echo "set modules: ${{ env.modules }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
writeConfigKey "modules" "{}" "${USER_CONFIG_FILE}"
# L="$(for I in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do echo "modules.${I}:"; done)"
# mergeConfigStr p "${L}" "${USER_CONFIG_FILE}"
for M in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do
writeConfigKey "modules.\"${M}\"" "" "${USER_CONFIG_FILE}"
done
L="$(for I in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do echo "modules.${I}:"; done)"
mergeConfigStr p "${L}" "${USER_CONFIG_FILE}"
# for M in $(echo "${{ env.modules }}" | sed 's/,/ /g'); do
# writeConfigKey "modules.\"${M}\"" "" "${USER_CONFIG_FILE}"
# done
fi
sudo ./localbuild.sh build
@ -254,23 +256,22 @@ jobs:
RR_VERSION_FILE="rr/ws/mnt/p1/RR_VERSION"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
echo "" > README.txt
echo "RR: " >> README.txt
echo " VERSION: $(cat ${RR_VERSION_FILE} 2>/dev/null | head -1)" >> README.txt
echo " CUSTOM: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" >> README.txt
echo "" >> README.txt
echo "DSM:" >> README.txt
echo " MODEL: $(readConfigKey "model" "${USER_CONFIG_FILE}")" >> README.txt
echo " VERSION: $(readConfigKey "productver" "${USER_CONFIG_FILE}")" >> README.txt
echo " KERNEL: $(readConfigKey "kernel" "${USER_CONFIG_FILE}")" >> README.txt
echo " PATURL: $(readConfigKey "paturl" "${USER_CONFIG_FILE}")" >> README.txt
echo " PATSUM: $(readConfigKey "patsum" "${USER_CONFIG_FILE}")" >> README.txt
echo "" >> README.txt
echo "" >> README.txt
echo "After the image is written to the disk, it will boot directly into DSM without the need to compile again." >> README.txt
echo "Of course, you can also modify the settings yourself." >> README.txt
{
echo "RR: "
echo " VERSION: $(cat ${RR_VERSION_FILE} 2>/dev/null | head -1)"
echo " CUSTOM: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
echo
echo "DSM:"
echo " MODEL: $(readConfigKey "model" "${USER_CONFIG_FILE}")"
echo " VERSION: $(readConfigKey "productver" "${USER_CONFIG_FILE}")"
echo " KERNEL: $(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
echo " PATURL: $(readConfigKey "paturl" "${USER_CONFIG_FILE}")"
echo " PATSUM: $(readConfigKey "patsum" "${USER_CONFIG_FILE}")"
echo
echo
echo "After the image is written to the disk, it will boot directly into DSM without the need to compile again."
echo "Of course, you can also modify the settings yourself."
} >README.txt
if [ "${{ env.format }}" = "ova" ]; then
. scripts/func.sh "${{ secrets.RRORG }}"

.gitignore vendored
View File

@ -12,6 +12,7 @@ rr*.vmdk
tests
Changelog*
sha256sum
ovftool*
OVA*

View File

@ -1,60 +1,56 @@
#!/usr/bin/env bash
set -e
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "${WORK_PATH}/include/functions.sh"
[ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")"
# Sanity check
loaderIsConfigured || die "$(TEXT "Loader is not configured!")"
# Clear logs for dbgutils addons
rm -rf "${PART1_PATH}/logs" >/dev/null 2>&1 || true
rm -rf /sys/fs/pstore/* >/dev/null 2>&1 || true
rm -rf "${PART1_PATH}/logs" /sys/fs/pstore/* >/dev/null 2>&1 || true
# Check if machine has EFI
[ -d /sys/firmware/efi ] && EFI=1 || EFI=0
EFI=$([ -d /sys/firmware/efi ] && echo 1 || echo 0)
BUS=$(getBus "${LOADER_DISK}")
# Print text centralized
clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
[ -z "${COLUMNS}" ] && COLUMNS=80
COLUMNS=${COLUMNS:-80}
TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)"
printf "\033[1;44m%*s\n" ${COLUMNS} ""
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} ""
printf "\033[1;31m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}"
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} ""
printf "\033[1;32m%*s\033[0m\n" ${COLUMNS} "${DATE}"
printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;31m%*s\033[0m\n" "$(((${#TITLE} + ${COLUMNS}) / 2))" "${TITLE}"
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;32m%*s\033[0m\n" "${COLUMNS}" "${DATE}"
TITLE="BOOTING:"
[ ${EFI} -eq 1 ] && TITLE+=" [UEFI]" || TITLE+=" [BIOS]"
[ "${BUS}" = "usb" ] && TITLE+=" [${BUS^^} flashdisk]" || TITLE+=" [${BUS^^} DoM]"
TITLE+="$([ ${EFI} -eq 1 ] && echo " [UEFI]" || echo " [BIOS]")"
TITLE+="$([ "${BUS}" = "usb" ] && echo " [${BUS^^} flashdisk]" || echo " [${BUS^^} DoM]")"
printf "\033[1;33m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}"
# Check if DSM zImage changed, patch it if necessary
ZIMAGE_HASH="$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")"
if [ -f ${PART1_PATH}/.build -o "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then
echo -e "\033[1;43m$(TEXT "DSM zImage changed")\033[0m"
${WORK_PATH}/zimage-patch.sh
if [ $? -ne 0 ]; then
echo -e "\033[1;43m$(TEXT "zImage not patched,\nPlease upgrade the bootloader version and try again.\nPatch error:\n")$(cat "${LOG_FILE}")\033[0m"
if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then
printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM zImage changed")"
${WORK_PATH}/zimage-patch.sh || {
printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM zImage not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
exit 1
fi
}
fi
# Check if DSM ramdisk changed, patch it if necessary
RAMDISK_HASH="$(readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}")"
if [ -f ${PART1_PATH}/.build -o "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then
echo -e "\033[1;43m$(TEXT "DSM Ramdisk changed")\033[0m"
${WORK_PATH}/ramdisk-patch.sh
if [ $? -ne 0 ]; then
echo -e "\033[1;43m$(TEXT "Ramdisk not patched,\nPlease upgrade the bootloader version and try again.\nPatch error:\n")$(cat "${LOG_FILE}")\033[0m"
if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then
printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM ramdisk changed")"
${WORK_PATH}/ramdisk-patch.sh || {
printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM ramdisk not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
exit 1
fi
}
fi
[ -f ${PART1_PATH}/.build ] && rm -f ${PART1_PATH}/.build
@ -68,17 +64,21 @@ SMALLNUM="$(readConfigKey "smallnum" "${USER_CONFIG_FILE}")"
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
LKM="$(readConfigKey "lkm" "${USER_CONFIG_FILE}")"
DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')"
CPU="$(echo $(cat /proc/cpuinfo 2>/dev/null | grep 'model name' | uniq | awk -F':' '{print $2}'))"
MEM="$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null) MB"
DT="$(readConfigKey "platforms.${PLATFORM}.dt" "${WORK_PATH}/platforms.yml")"
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
echo -e "$(TEXT "Model: ") \033[1;36m${MODEL}(${PLATFORM})\033[0m"
echo -e "$(TEXT "Version: ") \033[1;36m${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))\033[0m"
echo -e "$(TEXT "Kernel: ") \033[1;36m${KERNEL}\033[0m"
echo -e "$(TEXT "LKM: ") \033[1;36m${LKM}\033[0m"
echo -e "$(TEXT "DMI: ") \033[1;36m${DMI}\033[0m"
echo -e "$(TEXT "CPU: ") \033[1;36m${CPU}\033[0m"
echo -e "$(TEXT "MEM: ") \033[1;36m${MEM}\033[0m"
DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')"
CPU="$(awk -F': ' '/model name/ {print $2}' /proc/cpuinfo | uniq)"
MEM="$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo) MB"
printf "%s \033[1;36m%s(%s)\033[0m\n" "$(TEXT "Model: ")" "${MODEL}" "${PLATFORM}"
printf "%s \033[1;36m%s(%s%s)\033[0m\n" "$(TEXT "Version: ")" "${PRODUCTVER}" "${BUILDNUM}" "$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}")"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "Kernel: ")" "${KERNEL}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "LKM: ")" "${LKM}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "DMI: ")" "${DMI}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "CPU: ")" "${CPU}"
printf "%s \033[1;36m%s\033[0m\n" "$(TEXT "MEM: ")" "${MEM}"
if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then
HASATA=0
@ -89,7 +89,11 @@ if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then
break
fi
done
[ ${HASATA} = "0" ] && echo -e "\033[1;33m*** $(TEXT "Please insert at least one sata/scsi disk for system installation, except for the bootloader disk.") ***\033[0m"
[ ${HASATA} = "0" ] && printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please insert at least one sata/scsi disk for system installation (except for the bootloader disk).")"
fi
if checkBIOS_VT_d && [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please disable Intel(VT-d)/AMD(AMD-Vi) in BIOS/UEFI settings if you encounter a boot failure.")"
fi
VID="$(readConfigKey "vid" "${USER_CONFIG_FILE}")"
@ -110,7 +114,10 @@ CMDLINE['pid']="${PID:-"0x0001"}" # Sanity check
CMDLINE['sn']="${SN}"
CMDLINE['netif_num']="0"
[ -z "${MAC1}" -a -n "${MAC2}" ] && MAC1=${MAC2} && MAC2="" # Sanity check
[ -z "${MAC1}" ] && [ -n "${MAC2}" ] && {
MAC1=${MAC2}
MAC2=""
} # Sanity check
[ -n "${MAC1}" ] && CMDLINE['mac1']="${MAC1}" && CMDLINE['netif_num']="1"
[ -n "${MAC2}" ] && CMDLINE['mac2']="${MAC2}" && CMDLINE['netif_num']="2"
@ -127,9 +134,6 @@ if [ ${EFI} -eq 1 ]; then
else
CMDLINE['noefi']=""
fi
DT="$(readConfigKey "platforms.${PLATFORM}.dt" "${WORK_PATH}/platforms.yml")"
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
if [ ! "${BUS}" = "usb" ]; then
SZ=$(blockdev --getsz ${LOADER_DISK} 2>/dev/null) # SZ=$(cat /sys/block/${LOADER_DISK/\/dev\//}/size)
@ -144,6 +148,7 @@ if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
else
CMDLINE["split_lock_detect"]="off"
fi
if [ "${DT}" = "true" ]; then
CMDLINE["syno_ttyS0"]="serial,0x3f8"
CMDLINE["syno_ttyS1"]="serial,0x2f8"
@ -152,10 +157,10 @@ else
CMDLINE["syno_hdd_detect"]="0"
CMDLINE["syno_hdd_powerup_seq"]="0"
fi
CMDLINE["HddHotplug"]="1"
CMDLINE["vender_format_version"]="2"
CMDLINE['skip_vender_mac_interfaces']="0,1,2,3,4,5,6,7"
CMDLINE['earlyprintk']=""
CMDLINE['earlycon']="uart8250,io,0x3f8,115200n8"
CMDLINE['console']="ttyS0,115200n8"
@ -190,39 +195,39 @@ fi
if echo "apollolake geminilake" | grep -wq "${PLATFORM}"; then
CMDLINE["intel_iommu"]="igfx_off"
fi
if echo "purley broadwellnkv2" | grep -wq "${PLATFORM}"; then
CMDLINE["SASmodel"]="1"
fi
while IFS=': ' read KEY VALUE; do
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["network.${KEY}"]="${VALUE}"
done <<<$(readConfigMap "network" "${USER_CONFIG_FILE}")
while IFS=': ' read KEY VALUE; do
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "cmdline" "${USER_CONFIG_FILE}")
# Prepare command line
CMDLINE_LINE=""
for KEY in ${!CMDLINE[@]}; do
for KEY in "${!CMDLINE[@]}"; do
VALUE="${CMDLINE[${KEY}]}"
CMDLINE_LINE+=" ${KEY}"
[ -n "${VALUE}" ] && CMDLINE_LINE+="=${VALUE}"
done
CMDLINE_LINE=$(echo "${CMDLINE_LINE}" | sed 's/^ //') # Remove leading space
echo -e "$(TEXT "Cmdline:\n")\033[1;36m${CMDLINE_LINE}\033[0m"
printf "%s:\n \033[1;36m%s\033[0m\n" "$(TEXT "Cmdline")" "${CMDLINE_LINE}"
function _bootwait() {
BOOTWAIT="$(readConfigKey "bootwait" "${USER_CONFIG_FILE}")"
[ -z "${BOOTWAIT}" ] && BOOTWAIT=10
busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WB
MSG=""
while test ${BOOTWAIT} -ge 0; do
while [ ${BOOTWAIT} -ge 0 ]; do
MSG="$(printf "\033[1;33m$(TEXT "%2ds (Changing access(ssh/web) status will interrupt boot)")\033[0m" "${BOOTWAIT}")"
echo -en "\r${MSG}"
printf "\r${MSG}"
busybox w 2>/dev/null | awk '{print $1" "$2" "$4" "$5" "$6}' >WC
if ! diff WB WC >/dev/null 2>&1; then
echo -en "\r\033[1;33m$(TEXT "access(ssh/web) status has changed and booting is interrupted.")\033[0m\n"
printf "\r\033[1;33m%s\033[0m\n" "$(TEXT "access(ssh/web) status has changed and booting is interrupted.")"
rm -f WB WC
return 1
fi
@ -230,7 +235,7 @@ function _bootwait() {
BOOTWAIT=$((BOOTWAIT - 1))
done
rm -f WB WC
echo -en "\r$(printf "%$((${#MSG} * 2))s" " ")\n"
printf "\r%$((${#MSG} * 2))s\n" " "
return 0
}
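# Note added for clarity (not in the original): _bootwait counts down BOOTWAIT seconds (default 10) and
# snapshots "busybox w" before and during the loop; if the logged-in ssh/web sessions change between
# snapshots it returns 1 so the caller can abort the automatic boot, otherwise it returns 0.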
@ -251,7 +256,7 @@ if [ "${DIRECT}" = "true" ]; then
_bootwait || exit 0
echo -e "\033[1;33m$(TEXT "Reboot to boot directly in DSM")\033[0m"
printf "\033[1;33m%s\033[0m\n" "$(TEXT "Reboot to boot directly in DSM")"
reboot
exit 0
else
@ -266,11 +271,11 @@ else
grub-editenv ${USER_GRUBENVFILE} unset dsm_cmdline
grub-editenv ${USER_GRUBENVFILE} unset next_entry
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
echo "$(printf "$(TEXT "Detected %s network cards.")" "$(echo ${ETHX} | wc -w)")"
echo -en "$(TEXT "Checking Connect.")"
printf "$(TEXT "Detected %s network cards.\n")" "$(echo "${ETHX}" | wc -w)"
printf "$(TEXT "Checking Connect.")"
COUNT=0
BOOTIPWAIT="$(readConfigKey "bootipwait" "${USER_CONFIG_FILE}")"
[ -z "${BOOTIPWAIT}" ] && BOOTIPWAIT=10
BOOTIPWAIT=${BOOTIPWAIT:-10}
while [ ${COUNT} -lt $((${BOOTIPWAIT} + 32)) ]; do
MSG=""
for N in ${ETHX}; do
@ -279,77 +284,79 @@ else
fi
done
if [ -n "${MSG}" ]; then
echo -en "\r${MSG}$(TEXT "connected.") \n"
printf "\r%s%s \n" "${MSG}" "$(TEXT "connected.")"
break
fi
COUNT=$((${COUNT} + 1))
echo -n "."
COUNT=$((COUNT + 1))
printf "."
sleep 1
done
[ ! -f /var/run/dhcpcd/pid ] && /etc/init.d/S41dhcpcd restart >/dev/null 2>&1 || true
echo "$(TEXT "Waiting IP.")"
printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do
COUNT=0
DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
echo -en "${N}(${DRIVER}): "
printf "%s(%s): " "${N}" "${DRIVER}"
while true; do
if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "DOWN")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
break
fi
if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "NOT CONNECTED")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
break
fi
if [ ${COUNT} -eq ${BOOTIPWAIT} ]; then # Under normal circumstances, no errors should occur here.
echo -en "\r${N}(${DRIVER}): $(TEXT "TIMEOUT (Please check the IP on the router.)")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
break
fi
COUNT=$((${COUNT} + 1))
IP="$(getIP ${N})"
COUNT=$((COUNT + 1))
IP="$(getIP "${N}")"
if [ -n "${IP}" ]; then
if [[ "${IP}" =~ ^169\.254\..* ]]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "LINK LOCAL (No DHCP server detected.)")\n"
if echo "${IP}" | grep -q "^169\.254\."; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
else
echo -en "\r${N}(${DRIVER}): $(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")"
fi
break
fi
echo -n "."
printf "."
sleep 1
done
done
_bootwait || exit 0
echo -e "\033[1;37m$(TEXT "Loading DSM kernel ...")\033[0m"
printf "\033[1;37m%s\033[0m\n" "$(TEXT "Loading DSM kernel ...")"
DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")"
if [ "${DSMLOGO}" = "true" -a -c "/dev/fb0" ]; then
if [ "${DSMLOGO}" = "true" ] && [ -c "/dev/fb0" ]; then
IP="$(getIP)"
[[ "${IP}" =~ ^169\.254\..* ]] && IP=""
echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:5000" || URL="http://find.synology.com/"
python ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png"
python3 ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png"
[ -f "${TMP_PATH}/qrcode_boot.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_boot.png" >/dev/null 2>/dev/null || true
python ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
python3 ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi
# Executes DSM kernel via KEXEC
KEXECARGS="-a"
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 4 ] && [ ${EFI} -eq 1 ]; then
echo -e "\033[1;33m$(TEXT "Warning, running kexec with --noefi param, strange things will happen!!")\033[0m"
printf "\033[1;33m%s\033[0m\n" "$(TEXT "Warning, running kexec with --noefi param, strange things will happen!!")"
KEXECARGS+=" --noefi"
fi
kexec ${KEXECARGS} -l "${MOD_ZIMAGE_FILE}" --initrd "${MOD_RDGZ_FILE}" --command-line="${CMDLINE_LINE} kexecboot" >"${LOG_FILE}" 2>&1 || dieLog
echo -e "\033[1;37m$(TEXT "Booting ...")\033[0m"
printf "\033[1;37m%s\033[0m\n" "$(TEXT "Booting ...")"
# show warning message
for T in $(busybox w 2>/dev/null | grep -v 'TTY' | awk '{print $2}'); do
[ -w "/dev/${T}" ] && echo -e "\n\033[1;43m$(TEXT "[This interface will not be operational. Please wait a few minutes.\nFind DSM via http://find.synology.com/ or Synology Assistant and connect.]")\033[0m\n" >"/dev/${T}" 2>/dev/null || true
if [ -w "/dev/${T}" ]; then
echo -e "\n\033[1;43m$(TEXT "Interface not operational. Wait a few minutes.\nFind DSM via http://find.synology.com/ or Synology Assistant.")\033[0m\n" > "/dev/${T}" 2>/dev/null || true
fi
done
# # Unload all network interfaces

View File

@ -1,16 +1,16 @@
#!/usr/bin/env bash
read_u8() {
dd if=$1 bs=1 skip=$(($2)) count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+'
dd if="${1}" bs=1 skip="$((${2}))" count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+'
}
read_u32() {
dd if=$1 bs=1 skip=$(($2)) count=4 2>/dev/null | od -An -tu4 | grep -Eo '[0-9]+'
dd if="${1}" bs=1 skip="$((${2}))" count=4 2>/dev/null | od -An -tu4 | grep -Eo '[0-9]+'
}
set -x
setup_size=$(read_u8 $1 0x1f1)
payload_offset=$(read_u32 $1 0x248)
payload_length=$(read_u32 $1 0x24c)
inner_pos=$((($setup_size + 1) * 512))
setup_size=$(read_u8 "${1}" 0x1f1)
payload_offset=$(read_u32 "${1}" 0x248)
payload_length=$(read_u32 "${1}" 0x24c)
inner_pos=$(((setup_size + 1) * 512))
tail -c+$(($inner_pos + 1)) $1 | tail -c+$(($payload_offset + 1)) | head -c $(($payload_length)) | head -c $(($payload_length - 4)) | unlzma >$2
tail -c+$((inner_pos + 1)) "${1}" | tail -c+$((payload_offset + 1)) | head -c "${payload_length}" | head -c $((payload_length - 4)) | unlzma >"${2}"
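# Note added for clarity (not in the original): read_u8/read_u32 read the bzImage header fields; the
# pipeline above skips the (setup_size + 1) * 512 bytes of real-mode setup code, seeks to payload_offset,
# keeps payload_length minus the 4-byte trailing size word, and unlzma's the result into "${2}".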

View File

@ -7,28 +7,22 @@
# objdump -h a.out | sh calc_run_size.sh
NUM='\([0-9a-fA-F]*[ \t]*\)'
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/\1\4/p')
if [ -z "$OUT" ]; then
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/0x\1 0x\4/p')
if [ -z "${OUT}" ]; then
echo "Never found .bss or .brk file offset" >&2
exit 1
fi
OUT=$(echo ${OUT# })
sizeA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetA=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
sizeB=$(printf "%d" 0x${OUT%% *})
OUT=${OUT#* }
offsetB=$(printf "%d" 0x${OUT%% *})
read -r sizeA offsetA sizeB offsetB <<<$(echo ${OUT} | awk '{printf "%d %d %d %d", strtonum($1), strtonum($2), strtonum($3), strtonum($4)}')
run_size=$((${offsetA} + ${sizeA} + ${sizeB}))
runSize=$((offsetA + sizeA + sizeB))
# BFD linker shows the same file offset in ELF.
if [ "${offsetA}" -ne "${offsetB}" ]; then
# Gold linker shows them as consecutive.
endB=$((${offsetB} + ${sizeB}))
if [ "$endB" != "$run_size" ]; then
endSize=$((offsetB + sizeB))
if [ "${endSize}" -ne "${runSize}" ]; then
printf "sizeA: 0x%x\n" ${sizeA} >&2
printf "offsetA: 0x%x\n" ${offsetA} >&2
printf "sizeB: 0x%x\n" ${sizeB} >&2
@ -38,5 +32,5 @@ if [ "${offsetA}" -ne "${offsetB}" ]; then
fi
fi
printf "%d\n" ${run_size}
printf "%d\n" ${runSize}
exit 0

View File

@ -10,38 +10,34 @@
#
# ----------------------------------------------------------------------
check_vmlinux()
{
# Use readelf to check if it's a valid ELF
# TODO: find a better way to check
# and not just an elf
readelf -h $1 > /dev/null 2>&1 || return 1
check_vmlinux() {
# Use readelf to check if it's a valid ELF
# TODO: find a better way to check
# and not just an elf
readelf -h $1 >/dev/null 2>&1 || return 1
cat $1
exit 0
cat $1
exit 0
}
try_decompress()
{
# The obscure use of the "tr" filter is to work around older versions of
# "grep" that report the byte offset of the line instead of the pattern.
try_decompress() {
# The obscure use of the "tr" filter is to work around older versions of
# "grep" that report the byte offset of the line instead of the pattern.
# Try to find the header ($1) and decompress from here
for pos in `tr "$1\n$2" "\n$2=" < "$img" | grep -abo "^$2"`
do
pos=${pos%%:*}
tail -c+$pos "$img" | $3 > $tmp 2> /dev/null
check_vmlinux $tmp
done
# Try to find the header ($1) and decompress from here
for pos in $(tr "$1\n$2" "\n$2=" <"$img" | grep -abo "^$2"); do
pos=${pos%%:*}
tail -c+$pos "$img" | $3 >$tmp 2>/dev/null
check_vmlinux $tmp
done
}
# Check invocation:
me=${0##*/}
img=$1
if [ $# -ne 1 -o ! -s "$img" ]
then
echo "Usage: $me <kernel-image>" >&2
exit 2
if [ $# -ne 1 ] || [ ! -s "$img" ]; then
echo "Usage: $me <kernel-image>" >&2
exit 2
fi
# Prepare temp files:

View File

@ -3,23 +3,23 @@
# 1 - Platform
# 2 - Kernel Version
function availableAddons() {
if [ -z "${1}" -o -z "${2}" ]; then
if [ -z "${1}" ] || [ -z "${2}" ]; then
echo ""
return 1
fi
for D in $(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort); do
while read -r D; do
[ ! -f "${D}/manifest.yml" ] && continue
ADDON=$(basename ${D})
local ADDON=$(basename "${D}")
checkAddonExist "${ADDON}" "${1}" "${2}" || continue
SYSTEM=$(readConfigKey "system" "${D}/manifest.yml")
local SYSTEM=$(readConfigKey "system" "${D}/manifest.yml")
[ "${SYSTEM}" = "true" ] && continue
LOCALE="${LC_ALL%%.*}"
DESC=""
local LOCALE="${LC_ALL%%.*}"
local DESC=""
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.${LOCALE:-"en_US"}" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.en_US" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description" "${D}/manifest.yml")"
echo -e "${ADDON}\t${DESC:-"unknown"}"
done
done <<<$(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort)
}
###############################################################################
@ -29,7 +29,7 @@ function availableAddons() {
# 3 - Kernel Version
# Return ERROR if not exists
function checkAddonExist() {
if [ -z "${1}" -o -z "${2}" -o -z "${3}" ]; then
if [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ]; then
return 1 # ERROR
fi
# First check generic files
@ -80,7 +80,7 @@ function installAddon() {
fi
cp -f "${TMP_PATH}/${ADDON}/install.sh" "${RAMDISK_PATH}/addons/${ADDON}.sh" 2>"${LOG_FILE}"
chmod +x "${RAMDISK_PATH}/addons/${ADDON}.sh"
[ -d ${TMP_PATH}/${ADDON}/root ] && (cp -rnf "${TMP_PATH}/${ADDON}/root/"* "${RAMDISK_PATH}/" 2>"${LOG_FILE}")
[ -d "${TMP_PATH}/${ADDON}/root" ] && cp -rnf "${TMP_PATH}/${ADDON}/root/"* "${RAMDISK_PATH}/" 2>"${LOG_FILE}"
rm -rf "${TMP_PATH}/${ADDON}"
return 0
}
@ -88,7 +88,7 @@ function installAddon() {
###############################################################################
# Untar an addon to correct path
# 1 - Addon file path
# Return name of addon on sucess or empty on error
# Return name of addon on success or empty on error
function untarAddon() {
if [ -z "${1}" ]; then
echo ""
@ -96,10 +96,11 @@ function untarAddon() {
fi
rm -rf "${TMP_PATH}/addon"
mkdir -p "${TMP_PATH}/addon"
tar -xaf "${1}" -C "${TMP_PATH}/addon" || return
tar -xaf "${1}" -C "${TMP_PATH}/addon" || return 1
local ADDON=$(readConfigKey "name" "${TMP_PATH}/addon/manifest.yml")
[ -z "${ADDON}" ] && return
[ -z "${ADDON}" ] && return 1
rm -rf "${ADDONS_PATH}/${ADDON}"
mv -f "${TMP_PATH}/addon" "${ADDONS_PATH}/${ADDON}"
echo "${ADDON}"
return 0
}

View File

@ -3,7 +3,7 @@
# 1 - Path of Key
# 2 - Path of yaml config file
function deleteConfigKey() {
yq eval 'del(.'${1}')' --inplace "${2}" 2>/dev/null
yq eval "del(.${1})" --inplace "${2}" 2>/dev/null
}
###############################################################################
@ -12,7 +12,8 @@ function deleteConfigKey() {
# 2 - Value
# 3 - Path of yaml config file
function writeConfigKey() {
[ "${2}" = "{}" ] && yq eval '.'${1}' = {}' --inplace "${3}" 2>/dev/null || yq eval '.'${1}' = "'"${2}"'"' --inplace "${3}" 2>/dev/null
local value="${2}"
[ "${value}" = "{}" ] && yq eval ".${1} = {}" --inplace "${3}" 2>/dev/null || yq eval ".${1} = \"${value}\"" --inplace "${3}" 2>/dev/null
}
###############################################################################
@ -21,19 +22,20 @@ function writeConfigKey() {
# 2 - Path of yaml config file
# Return Value
function readConfigKey() {
RESULT=$(yq eval '.'${1}' | explode(.)' "${2}" 2>/dev/null)
[ "${RESULT}" == "null" ] && echo "" || echo "${RESULT}"
local result=$(yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${result}" = "null" ] && echo "" || echo "${result}"
}
###############################################################################
# Write to yaml config file
# 1 - format
# 2 - string
# 3 - Path of yaml config file
function mergeConfigStr() {
local JF=$(mktemp)
echo "${2}" | yq -p ${1} -o y > "${JF}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${JF}" 2>/dev/null
rm -f "${JF}"
local xmlfile=$(mktemp)
echo "${2}" | yq -p "${1}" -o y >"${xmlfile}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${3}" "${xmlfile}" 2>/dev/null
rm -f "${xmlfile}"
}
###############################################################################
@ -51,7 +53,7 @@ function initConfigKey() {
# 2 - Path of yaml config file
# Returns map of values
function readConfigMap() {
yq eval '.'${1}' | explode(.) | to_entries | map([.key, .value] | join(": ")) | .[]' "${2}" 2>/dev/null
yq eval ".${1} | explode(.) | to_entries | map([.key, .value] | join(\": \")) | .[]" "${2}" 2>/dev/null
}
###############################################################################
@ -60,7 +62,7 @@ function readConfigMap() {
# 2 - Path of yaml config file
# Returns array/map of values
function readConfigArray() {
yq eval '.'${1}'[]' "${2}" 2>/dev/null
yq eval ".${1}[]" "${2}" 2>/dev/null
}
###############################################################################
@ -69,7 +71,7 @@ function readConfigArray() {
# 2 - Path of yaml config file
# Returns array of values
function readConfigEntriesArray() {
yq eval '.'${1}' | explode(.) | to_entries | map([.key])[] | .[]' "${2}" 2>/dev/null
yq eval ".${1} | explode(.) | to_entries | map([.key])[] | .[]" "${2}" 2>/dev/null
}
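# Illustrative usage of the yq helpers above (example added here, not part of the original file;
# the key names are hypothetical and ${USER_CONFIG_FILE} is the path used elsewhere in this repo):
#   writeConfigKey "addons" "{}" "${USER_CONFIG_FILE}"            # create an empty map
#   writeConfigKey "addons.\"acpid\"" "" "${USER_CONFIG_FILE}"    # add an entry with an empty value
#   readConfigKey "model" "${USER_CONFIG_FILE}"                   # prints the value, or "" if the key is null
#   readConfigEntriesArray "addons" "${USER_CONFIG_FILE}"         # prints one key name per line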
###############################################################################

View File

@ -5,6 +5,11 @@
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# This script is a CLI to RR.
#
# # Backup the original python3 executable.
# sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
# sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
import os, click
@ -31,17 +36,13 @@ def validate_required_param(ctx, param, value):
raise click.MissingParameter(param_decls=[param.name])
return value
def __fullversion(ver):
out = ver
arr = ver.split('-')
if len(arr) > 0:
a = arr[0].split('.')[0] if len(arr[0].split('.')) > 0 else '0'
b = arr[0].split('.')[1] if len(arr[0].split('.')) > 1 else '0'
c = arr[0].split('.')[2] if len(arr[0].split('.')) > 2 else '0'
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
out = '{}.{}.{}-{}-{}'.format(a,b,c,d,e)
return out
a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
return f'{a}.{b}.{c}-{d}-{e}'
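# Illustrative examples (added): __fullversion("7.2-64570") returns "7.2.0-64570-0",
# while an already fully qualified "7.2.1-69057-5" is returned unchanged.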
@cli.command()
@ -63,24 +64,24 @@ def makeqr(data, file, location, output):
FBIOPUT_VSCREENINFO = 0x4601
FBIOGET_FSCREENINFO = 0x4602
FBDEV = "/dev/fb0"
if data is not None:
qr = qrcode.QRCode(version=1, box_size=10, error_correction=qrcode.constants.ERROR_CORRECT_H, border=4,)
if data:
qr = qrcode.QRCode(version=1, box_size=10, error_correction=qrcode.constants.ERROR_CORRECT_H, border=4)
qr.add_data(data)
qr.make(fit=True)
img = qr.make_image(fill_color="purple", back_color="white")
img = img.convert("RGBA")
img = qr.make_image(fill_color="purple", back_color="white").convert("RGBA")
pixels = img.load()
for i in range(img.size[0]):
for j in range(img.size[1]):
if pixels[i, j] == (255, 255, 255, 255):
pixels[i, j] = (255, 255, 255, 0)
if os.path.exists(os.path.join(WORK_PATH, "logo.png")):
icon = Image.open(os.path.join(WORK_PATH, "logo.png"))
icon = icon.convert("RGBA")
img.paste(icon.resize((int(img.size[0] / 5), int(img.size[1] / 5))), (int((img.size[0] - int(img.size[0] / 5)) / 2), int((img.size[1] - int(img.size[1] / 5)) / 2),),)
logo_path = os.path.join(WORK_PATH, "logo.png")
if os.path.exists(logo_path):
icon = Image.open(logo_path).convert("RGBA")
img.paste(icon.resize((img.size[0] // 5, img.size[1] // 5)), ((img.size[0] - img.size[0] // 5) // 2, (img.size[1] - img.size[1] // 5) // 2))
if file is not None:
elif file:
img = Image.open(file)
# img = img.convert("RGBA")
# pixels = img.load()
@ -88,25 +89,22 @@ def makeqr(data, file, location, output):
# for j in range(img.size[1]):
# if pixels[i, j] == (255, 255, 255, 255):
# pixels[i, j] = (255, 255, 255, 0)
else:
raise click.UsageError("Either data or file must be provided.")
(xres, yres) = (1920, 1080)
with open(FBDEV, "rb") as fb:
vi = fcntl.ioctl(fb, FBIOGET_VSCREENINFO, bytes(160))
res = struct.unpack("I" * 40, vi)
if res[0] != 0 and res[1] != 0:
(xres, yres) = (res[0], res[1])
xqr, yqr = (int(xres / 8), int(xres / 8))
img = img.resize((xqr, yqr))
xres, yres = (res[0], res[1]) if res[0] and res[1] else (1920, 1080)
img = img.resize((xres // 8, xres // 8))
alpha = Image.new("RGBA", (xres, yres), (0, 0, 0, 0))
if int(location) not in range(0, 8):
location = 0
loc = (img.size[0] * int(location), alpha.size[1] - img.size[1])
loc = (img.size[0] * location, alpha.size[1] - img.size[1])
alpha.paste(img, loc)
alpha.save(output)
except:
pass
except Exception as e:
click.echo(f"Error: {e}")
@cli.command()
@ -119,16 +117,13 @@ def getmodels(platforms=None):
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]))
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if platforms is not None and platforms != "":
PS = platforms.lower().replace(",", " ").split()
else:
PS = []
PS = platforms.lower().replace(",", " ").split() if platforms else []
models = []
try:
@ -136,71 +131,66 @@ def getmodels(platforms=None):
req.encoding = "utf-8"
data = json.loads(req.text)
for I in data["channel"]["item"]:
if not I["title"].startswith("DSM"):
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
for J in I["model"]:
arch = J["mUnique"].split("_")[1]
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if len(PS) > 0 and arch.lower() not in PS:
for model in item["model"]:
arch = model["mUnique"].split("_")[1]
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if PS and arch.lower() not in PS:
continue
if any(name == B["name"] for B in models):
continue
models.append({"name": name, "arch": arch})
if not any(m["name"] == name for m in models):
models.append({"name": name, "arch": arch})
models = sorted(models, key=lambda k: (k["arch"], k["name"]))
models.sort(key=lambda k: (k["arch"], k["name"]))
except:
pass
except Exception as e:
click.echo(f"Error: {e}")
models.sort(key=lambda x: (x["arch"], x["name"]))
print(json.dumps(models, indent=4))
@cli.command()
@click.option("-p", "--platforms", type=str, help="The platforms of Syno.")
def getmodelsbykb(platforms=None):
"""
Get Syno Models.
"""
import json, requests, urllib3
import re, json, requests, urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]))
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if platforms is not None and platforms != "":
PS = platforms.lower().replace(",", " ").split()
else:
PS = []
PS = platforms.lower().replace(",", " ").split() if platforms else []
models = []
try:
import re
from bs4 import BeautifulSoup
url="https://kb.synology.com/en-us/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
url = "https://kb.synology.com/en-us/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
#url = "https://kb.synology.cn/zh-cn/DSM/tutorial/What_kind_of_CPU_does_my_NAS_have"
req = session.get(url, timeout=10, verify=False)
req.encoding = "utf-8"
bs = BeautifulSoup(req.text, "html.parser")
p = re.compile(r"data: (.*?),$", re.MULTILINE | re.DOTALL)
data = json.loads(p.search(bs.find("script", string=p).prettify()).group(1))
model = "(.*?)" # (.*?): all, FS6400: one
p = re.compile(r"<td>{}<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td>".format(model), re.MULTILINE | re.DOTALL,)
p = re.compile(r"<td>{}<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td><td>(.*?)<\/td>".format(model), re.MULTILINE | re.DOTALL)
it = p.finditer(data["preload"]["content"].replace("\n", "").replace("\t", ""))
for i in it:
d = i.groups()
if len(d) == 6:
d = model + d
if len(PS) > 0 and d[5].lower() not in PS:
if PS and d[5].lower() not in PS:
continue
models.append({"name": d[0].split("<br")[0], "arch": d[5].lower()})
except:
pass
except Exception as e:
click.echo(f"Error: {e}")
models.sort(key=lambda x: (x["arch"], x["name"]))
print(json.dumps(models, indent=4))
@ -214,7 +204,7 @@ def getpats4mv(model, version):
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]))
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
@ -227,59 +217,62 @@ def getpats4mv(model, version):
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = "&major={}".format(version.split('.')[0]) if len(version.split('.')) > 0 else ""
minor = "&minor={}".format(version.split('.')[1]) if len(version.split('.')) > 1 else ""
req = session.get("{}&product={}{}{}".format(urlInfo, model.replace("+", "%2B"), major, minor), timeout=10, verify=False)
major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
req = session.get(f"{urlInfo}&product={model.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano))
if not V in pats:
pats[V]={}
pats[V]['url'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0]
pats[V]['sum'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats:
pats[V] = {
'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver=0
for I in data['info']['pubVers']:
if from_ver == 0 or I['build'] < from_ver: from_ver = I['build']
from_ver = min(I['build'] for I in data['info']['pubVers'])
for I in data['info']['productVers']:
if not I['version'].startswith(version): continue
if major == "" or minor == "":
majorTmp = "&major={}".format(I['version'].split('.')[0]) if len(I['version'].split('.')) > 0 else ""
minorTmp = "&minor={}".format(I['version'].split('.')[1]) if len(I['version'].split('.')) > 1 else ""
reqTmp = session.get("{}&product={}{}{}".format(urlInfo, model.replace("+", "%2B"), majorTmp, minorTmp), timeout=10, verify=False)
if not I['version'].startswith(version):
continue
if not major or not minor:
majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
reqTmp = session.get(f"{urlInfo}&product={model.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text)
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano))
if not V in pats:
pats[V]={}
pats[V]['url'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0]
pats[V]['sum'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats:
pats[V] = {
'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']:
to_ver=J['build']
reqSteps = session.get("{}&product={}&from_ver={}&to_ver={}".format(urlSteps, model.replace("+", "%2B"), from_ver, to_ver), timeout=10, verify=False)
if reqSteps.status_code != 200: continue
to_ver = J['build']
reqSteps = session.get(f"{urlSteps}&product={model.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text)
for S in dataSteps['upgrade_steps']:
if not 'full_patch' in S or S['full_patch'] is False: continue
if not 'build_ver' in S or not S['build_ver'].startswith(version): continue
V=__fullversion("{}-{}-{}".format(S['build_ver'], S['build_num'], S['nano']))
if not V in pats:
pats[V] = {}
pats[V]['url'] = S['files'][0]['url'].split('?')[0]
pats[V]['sum'] = S['files'][0]['checksum']
except:
pass
if not S.get('full_patch') or not S['build_ver'].startswith(version):
continue
V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if V not in pats:
pats[V] = {
'url': S['files'][0]['url'].split('?')[0],
'sum': S['files'][0]['checksum']
}
except Exception as e:
click.echo(f"Error: {e}")
pats = {k: pats[k] for k in sorted(pats.keys(), reverse=True)}
print(json.dumps(pats, indent=4))
@ -288,52 +281,50 @@ def getpats4mv(model, version):
@cli.command()
@click.option("-p", "--models", type=str, help="The models of Syno.")
def getpats(models=None):
import json, requests, urllib3, re
import re, json, requests, urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504]))
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
if models is not None and models != "":
MS = models.lower().replace(",", " ").split()
else:
MS = []
MS = models.lower().replace(",", " ").split() if models else []
pats = {}
try:
req = session.get('https://archive.synology.com/download/Os/DSM', timeout=10, verify=False)
req.encoding = 'utf-8'
bs=BeautifulSoup(req.text, 'html.parser')
bs = BeautifulSoup(req.text, 'html.parser')
p = re.compile(r"(.*?)-(.*?)", re.MULTILINE | re.DOTALL)
l = bs.find_all('a', string=p)
for i in l:
ver = i.attrs['href'].split('/')[-1]
if not ver.startswith('7'): continue
req = session.get('https://archive.synology.com{}'.format(i.attrs['href']), timeout=10, verify=False)
if not ver.startswith('7'):
continue
req = session.get(f'https://archive.synology.com{i.attrs["href"]}', timeout=10, verify=False)
req.encoding = 'utf-8'
bs=BeautifulSoup(req.text, 'html.parser')
p = re.compile(r"^(.*?)_(.*?)_(.*?).pat$", re.MULTILINE | re.DOTALL)
bs = BeautifulSoup(req.text, 'html.parser')
p = re.compile(r"DSM_(.*?)_(.*?).pat", re.MULTILINE | re.DOTALL)
data = bs.find_all('a', string=p)
for item in data:
p = re.compile(r"DSM_(.*?)_(.*?).pat", re.MULTILINE | re.DOTALL)
rels = p.search(item.attrs['href'])
if rels != None:
info = p.search(item.attrs['href']).groups()
model = info[0].replace('%2B', '+')
if len(MS) > 0 and model.lower() not in MS:
if rels:
model, _ = rels.groups()
model = model.replace('%2B', '+')
if MS and model.lower() not in MS:
continue
if model not in pats.keys():
pats[model]={}
if model not in pats:
pats[model] = {}
pats[model][__fullversion(ver)] = item.attrs['href']
except:
pass
except Exception as e:
click.echo(f"Error: {e}")
print(json.dumps(pats, indent=4))
if __name__ == "__main__":
cli()

View File

@ -1,13 +1,13 @@
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/consts.sh
. ${WORK_PATH}/include/configFile.sh
. ${WORK_PATH}/include/i18n.sh
. "${WORK_PATH}/include/consts.sh"
. "${WORK_PATH}/include/configFile.sh"
. "${WORK_PATH}/include/i18n.sh"
###############################################################################
# Check loader disk
function checkBootLoader() {
while read KNAME RO; do
while read -r KNAME RO; do
[ -z "${KNAME}" ] && continue
[ "${RO}" = "0" ] && continue
hdparm -r0 "${KNAME}" >/dev/null 2>&1 || true
@ -51,48 +51,41 @@ function dieLog() {
}
###############################################################################
# Check if a item exists into array
# Check if an item exists in an array
# 1 - Item
# 2.. - Array
# Return 0 if exists
function arrayExistItem() {
EXISTS=1
ITEM="${1}"
local ITEM="${1}"
shift
for i in "$@"; do
[ "${i}" = "${ITEM}" ] || continue
EXISTS=0
break
[ "${i}" = "${ITEM}" ] && return 0
done
return ${EXISTS}
return 1
}
###############################################################################
# Generate a number with 6 digits from 1 to 30000
function random() {
printf "%06d" $((${RANDOM} % 30000 + 1))
printf "%06d" $((RANDOM % 30000 + 1))
}
###############################################################################
# Generate a hexa number from 0x00 to 0xFF
# Generate a hex number from 0x00 to 0xFF
function randomhex() {
printf "&02X" "$((${RANDOM} % 255 + 1))"
printf "%02X" $((RANDOM % 255 + 1))
}
###############################################################################
# Generate a random letter
function genRandomLetter() {
for i in A B C D E F G H J K L M N P Q R S T V W X Y Z; do
echo ${i}
done | sort -R | tail -1
echo {A..Z} | tr ' ' '\n' | grep -v '[IO]' | sort -R | head -1
}
###############################################################################
# Generate a random digit (0-9A-Z)
function genRandomValue() {
for i in 0 1 2 3 4 5 6 7 8 9 A B C D E F G H J K L M N P Q R S T V W X Y Z; do
echo ${i}
done | sort -R | tail -1
echo {0..9} {A..Z} | tr ' ' '\n' | grep -v '[IO]' | sort -R | head -1
}
###############################################################################
@ -100,11 +93,12 @@ function genRandomValue() {
# 1 - Model
# Returns serial number
function generateSerial() {
PREFIX="$(readConfigArray "${1}.prefix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | tail -1)"
MIDDLE="$(readConfigArray "${1}.middle" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | tail -1)"
local PREFIX MIDDLE SUFFIX SERIAL
PREFIX="$(readConfigArray "${1}.prefix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | head -1)"
MIDDLE="$(readConfigArray "${1}.middle" "${WORK_PATH}/serialnumber.yml" 2>/dev/null | sort -R | head -1)"
SUFFIX="$(readConfigKey "${1}.suffix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
local SERIAL="${PREFIX:-"0000"}${MIDDLE:-"XXX"}"
SERIAL="${PREFIX:-"0000"}${MIDDLE:-"XXX"}"
case "${SUFFIX:-"alpha"}" in
numeric)
SERIAL+="$(random)"
@ -122,12 +116,13 @@ function generateSerial() {
# 2 - number
# Returns serial number
function generateMacAddress() {
local MACPRE MACSUF NUM MACS
MACPRE="$(readConfigArray "${1}.macpre" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
MACSUF="$(printf '%02x%02x%02x' $((${RANDOM} % 256)) $((${RANDOM} % 256)) $((${RANDOM} % 256)))"
MACSUF="$(printf '%02x%02x%02x' $((RANDOM % 256)) $((RANDOM % 256)) $((RANDOM % 256)))"
NUM=${2:-1}
local MACS=""
MACS=""
for I in $(seq 1 ${NUM}); do
MACS+="$(printf '%06x%06x' $((0x${MACPRE:-"001132"})) $(($((0x${MACSUF})) + ${I})))"
MACS+="$(printf '%06x%06x' $((0x${MACPRE:-"001132"})) $((0x${MACSUF} + I)))"
[ ${I} -lt ${NUM} ] && MACS+=" "
done
echo "${MACS}"
@ -140,6 +135,7 @@ function generateMacAddress() {
# 2 - Serial number to test
# Returns 1 if serial number is invalid
function validateSerial() {
local PREFIX MIDDLE SUFFIX P M S L
PREFIX="$(readConfigArray "${1}.prefix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
MIDDLE="$(readConfigArray "${1}.middle" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
SUFFIX="$(readConfigKey "${1}.suffix" "${WORK_PATH}/serialnumber.yml" 2>/dev/null)"
@ -176,7 +172,7 @@ function validateSerial() {
# 1 - key
# 2 - file
function _get_conf_kv() {
grep "${1}" "${2}" 2>/dev/null | sed "s|^${1}=\"\(.*\)\"$|\1|g"
grep "^${1}=" "${2}" 2>/dev/null | cut -d'=' -f2- | sed 's/^"//;s/"$//' 2>/dev/null
}
###############################################################################
@ -187,18 +183,19 @@ function _get_conf_kv() {
function _set_conf_kv() {
# Delete
if [ -z "${2}" ]; then
sed -i "${3}" -e "s/^${1}=.*$//" 2>/dev/null
sed -i "/^${1}=/d" "${3}" 2>/dev/null
return $?
fi
# Replace
if grep -q "^${1}=" "${3}"; then
sed -i "${3}" -e "s\"^${1}=.*\"${1}=\\\"${2}\\\"\"" 2>/dev/null
sed -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
return $?
fi
# Add if doesn't exist
echo "${1}=\"${2}\"" >>"${3}"
return $?
}
###############################################################################
@ -206,14 +203,14 @@ function _set_conf_kv() {
# @ - url list
function _get_fastest() {
local speedlist=""
if ! command -v ping >/dev/null 2>&1; then
for I in $@; do
speed=$(ping -c 1 -W 5 ${I} 2>/dev/null | awk -F'[= ]' '/time=/ {for(i=1;i<=NF;i++) if ($i=="time") print $(i+1)}')
if command -v ping >/dev/null 2>&1; then
for I in "$@"; do
speed=$(ping -c 1 -W 5 "${I}" 2>/dev/null | awk -F'[= ]' '/time=/ {for(i=1;i<=NF;i++) if ($i=="time") print $(i+1)}')
speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
done
else
for I in $@; do
speed=$(curl -o /dev/null -s -w '%{time_total}' ${I})
for I in "$@"; do
speed=$(curl -o /dev/null -s -w '%{time_total}' "${I}")
speed=$(awk "BEGIN {print (${speed:-0.999} * 1000)}")
speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
done
@ -222,7 +219,7 @@ function _get_fastest() {
URL="$(echo "${fastest}" | awk '{print $1}')"
SPD="$(echo "${fastest}" | awk '{print $2}')" # It is a float type
echo "${URL}"
[ $(echo ${SPD:-999} | cut -d. -f1) -ge 999 ] && return 1 || return 0
[ $(echo "${SPD:-999}" | cut -d. -f1) -ge 999 ] && return 1 || return 0
}
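# Illustrative usage (added; the hostnames are hypothetical):
#   FASTEST="$(_get_fastest www.synology.com www.synology.cn)" || echo "no mirror responded"
# The fastest URL is printed; the return code is non-zero when every probe fell back to the 999 default.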
###############################################################################
@ -245,7 +242,7 @@ function _sort_netif() {
ETHLISTTMPB="$(echo -e "${ETHLISTTMPB}" | grep -v "${MACX}")\n"
done
fi
local ETHLIST="$(echo -e "${ETHLISTTMPM}${ETHLISTTMPB}" | grep -v '^$')"
ETHLIST="$(echo -e "${ETHLISTTMPM}${ETHLISTTMPB}" | grep -v '^$')"
local ETHSEQ="$(echo -e "${ETHLIST}" | awk '{print $3}' | sed 's/eth//g')"
local ETHNUM="$(echo -e "${ETHLIST}" | wc -l)"
@ -255,12 +252,12 @@ function _sort_netif() {
/etc/init.d/S41dhcpcd stop >/dev/null 2>&1
/etc/init.d/S40network stop >/dev/null 2>&1
for i in $(seq 0 $((${ETHNUM:0} - 1))); do
ip link set dev eth${i} name tmp${i}
ip link set dev "eth${i}" name "tmp${i}"
done
I=0
for i in ${ETHSEQ}; do
ip link set dev tmp${i} name eth${I}
I=$((${I} + 1))
ip link set dev "tmp${i}" name "eth${I}"
I=$((I + 1))
done
/etc/init.d/S40network start >/dev/null 2>&1
/etc/init.d/S41dhcpcd start >/dev/null 2>&1
@ -287,9 +284,9 @@ function getBus() {
# 1 - ethN
function getIP() {
local IP=""
if [ -n "${1}" -a -d "/sys/class/net/${1}" ]; then
IP=$(ip route show dev ${1} 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p')
[ -z "${IP}" ] && IP=$(ip addr show ${1} scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
if [ -n "${1}" ] && [ -d "/sys/class/net/${1}" ]; then
IP=$(ip route show dev "${1}" 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p')
[ -z "${IP}" ] && IP=$(ip addr show "${1}" scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
else
IP=$(ip route show 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p' | head -1)
[ -z "${IP}" ] && IP=$(ip addr show scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
@ -309,7 +306,7 @@ function getLogo() {
return 1
fi
local STATUS=$(curl -skL --connect-timeout 10 -w "%{http_code}" "https://${fastest}/api/products/getPhoto?product=${MODEL/+/%2B}&type=img_s&sort=0" -o "${PART3_PATH}/logo.png")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 -o ! -f "${PART3_PATH}/logo.png" ]; then
if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ] || [ ! -f "${PART3_PATH}/logo.png" ]; then
rm -f "${PART3_PATH}/logo.png"
return 1
fi
@ -324,42 +321,72 @@ function getLogo() {
# 1 - key name
# 2 - key string
function checkCmdline() {
return $(grub-editenv ${USER_GRUBENVFILE} list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2- | grep -q "${2}")
grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep -q "^${1}=\"\?${2}\"\?"
}
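# Illustrative usage (added; "rr_cmdline" is a hypothetical key name here):
#   checkCmdline "rr_cmdline" "nomodeset" && echo "nomodeset already present"
#   addCmdline "rr_cmdline" "nomodeset"   # append a token; delCmdline removes it again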
###############################################################################
# get logo of model
# set Cmdline
# 1 - key name
# 2 - key string
function setCmdline() {
[ -z "${1}" ] && return 1
if [ -n "${2}" ]; then
grub-editenv ${USER_GRUBENVFILE} set "${1}=${2}"
grub-editenv "${USER_GRUBENVFILE}" set "${1}=${2}"
else
grub-editenv ${USER_GRUBENVFILE} unset "${1}"
grub-editenv "${USER_GRUBENVFILE}" unset "${1}"
fi
}
###############################################################################
# get logo of model
# check Cmdline
# add Cmdline
# 1 - key name
# 2 - key string
function addCmdline() {
local CMDLINE="$(grub-editenv ${USER_GRUBENVFILE} list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2-)"
local CMDLINE
CMDLINE="$(grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2- | sed 's/^"//;s/"$//')"
[ -n "${CMDLINE}" ] && CMDLINE="${CMDLINE} ${2}" || CMDLINE="${2}"
setCmdline "${1}" "${CMDLINE}"
}
###############################################################################
# get logo of model
# 1 - model
# del Cmdline
# 1 - key name
# 2 - key string
function delCmdline() {
local CMDLINE="$(grub-editenv ${USER_GRUBENVFILE} list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2-)"
CMDLINE="$(echo "${CMDLINE}" | sed "s/ *${2}//; s/^[[:space:]]*//;s/[[:space:]]*$//")"
local CMDLINE
CMDLINE="$(grub-editenv "${USER_GRUBENVFILE}" list 2>/dev/null | grep "^${1}=" | cut -d'=' -f2- | sed 's/^"//;s/"$//')"
CMDLINE="$(echo "${CMDLINE}" | sed "s/[ \t]*${2}//; s/^[ \t]*//;s/[ \t]*$//")"
setCmdline "${1}" "${CMDLINE}"
}
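# Round-trip sketch for the helpers above (illustrative; key/value are assumed examples):
#   addCmdline "rr_cmdline" "pcie_aspm=off"     # append to the stored value
#   checkCmdline "rr_cmdline" "pcie_aspm=off"   # now returns 0
#   delCmdline "rr_cmdline" "pcie_aspm=off"     # strip it again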
###############################################################################
# check CPU Intel(VT-d)/AMD(AMD-Vi)
function checkCPU_VT_d() {
lsmod | grep -q msr || modprobe msr 2>/dev/null
if grep -q "GenuineIntel" /proc/cpuinfo; then
local VT_D_ENABLED=$(rdmsr 0x3a 2>/dev/null)
[ "$((${VT_D_ENABLED:-0x0} & 0x5))" -eq $((0x5)) ] && return 0
elif grep -q "AuthenticAMD" /proc/cpuinfo; then
local IOMMU_ENABLED=$(rdmsr 0xC0010114 2>/dev/null)
[ "$((${IOMMU_ENABLED:-0x0} & 0x1))" -eq $((0x1)) ] && return 0
else
return 1
fi
}
###############################################################################
# check BIOS Intel(VT-d)/AMD(AMD-Vi)
function checkBIOS_VT_d() {
if grep -q "GenuineIntel" /proc/cpuinfo; then
dmesg | grep -iq "DMAR-IR.*DRHD base" && return 0
elif grep -q "AuthenticAMD" /proc/cpuinfo; then
# TODO: needs verification
dmesg | grep -iq "AMD-Vi.*enabled" && return 0
else
return 1
fi
}
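# Combined sketch (illustrative): both probes above need to succeed before assuming
# VT-d/AMD-Vi passthrough is usable (CPU MSR bits set and the firmware exposing it).
#   checkCPU_VT_d && checkBIOS_VT_d && echo "IOMMU looks enabled"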
###############################################################################
# Rebooting
# 1 - mode
@ -367,11 +394,11 @@ function rebootTo() {
local MODES="config recovery junior bios memtest"
if [ -z "${1}" ] || ! echo "${MODES}" | grep -qw "${1}"; then exit 1; fi
# echo "Rebooting to ${1} mode"
GRUBPATH="$(dirname $(find ${PART1_PATH}/ -name grub.cfg 2>/dev/null | head -1))"
GRUBPATH="$(dirname "$(find "${PART1_PATH}/" -name grub.cfg 2>/dev/null | head -1)")"
[ -z "${GRUBPATH}" ] && exit 1
ENVFILE="${GRUBPATH}/grubenv"
[ ! -f "${ENVFILE}" ] && grub-editenv ${ENVFILE} create
grub-editenv ${ENVFILE} set next_entry="${1}"
[ ! -f "${ENVFILE}" ] && grub-editenv "${ENVFILE}" create
grub-editenv "${ENVFILE}" set next_entry="${1}"
reboot
}
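# Usage sketch (illustrative): queue the "recovery" grub entry for the next boot; the
# argument must be one of the modes listed in MODES above.
#   rebootTo "recovery"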
@ -380,23 +407,23 @@ function rebootTo() {
# 1 netif name
# 2 enable/disable (1/0)
function connectwlanif() {
[ -z "${1}" -o ! -d "/sys/class/net/${1}" ] && return 1
[ -z "${1}" ] || [ ! -d "/sys/class/net/${1}" ] && return 1
if [ "${2}" = "0" ]; then
if [ -f "/var/run/wpa_supplicant.pid.${1}" ]; then
kill -9 $(cat /var/run/wpa_supplicant.pid.${1})
rm -f /var/run/wpa_supplicant.pid.${1}
kill -9 "$(cat /var/run/wpa_supplicant.pid.${1})"
rm -f "/var/run/wpa_supplicant.pid.${1}"
fi
else
local CONF=""
[ -z "${CONF}" -a -f "${PART1_PATH}/wpa_supplicant.conf.${1}" ] && CONF="${PART1_PATH}/wpa_supplicant.conf.${1}"
[ -z "${CONF}" -a -f "${PART1_PATH}/wpa_supplicant.conf" ] && CONF="${PART1_PATH}/wpa_supplicant.conf"
[ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf.${1}" ] && CONF="${PART1_PATH}/wpa_supplicant.conf.${1}"
[ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf" ] && CONF="${PART1_PATH}/wpa_supplicant.conf"
[ -z "${CONF}" ] && return 2
if [ -f "/var/run/wpa_supplicant.pid.${1}" ]; then
kill -9 $(cat /var/run/wpa_supplicant.pid.${1})
rm -f /var/run/wpa_supplicant.pid.${1}
kill -9 "$(cat /var/run/wpa_supplicant.pid.${1})"
rm -f "/var/run/wpa_supplicant.pid.${1}"
fi
wpa_supplicant -i ${1} -c "${CONF}" -B -P "/var/run/wpa_supplicant.pid.${1}" >/dev/null 2>&1
wpa_supplicant -i "${1}" -c "${CONF}" -B -P "/var/run/wpa_supplicant.pid.${1}" >/dev/null 2>&1
fi
return 0
}
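# Usage sketch (illustrative; "wlan0" is an assumed interface name):
#   connectwlanif "wlan0" 1   # start wpa_supplicant using the conf found on PART1
#   connectwlanif "wlan0" 0   # kill that wpa_supplicant instance again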

View File

@ -1,19 +1,14 @@
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
type gettext >/dev/null 2>&1 && alias TEXT='gettext "rr"' || alias TEXT='echo'
shopt -s expand_aliases
[ -d "${WORK_PATH}/lang" ] && export TEXTDOMAINDIR="${WORK_PATH}/lang"
[ -f "${PART1_PATH}/.locale" ] && export LC_ALL="$(cat "${PART1_PATH}/.locale")"
if type gettext >/dev/null 2>&1; then
alias TEXT='gettext "rr"'
shopt -s expand_aliases
else
alias TEXT='echo'
shopt -s expand_aliases
fi
if [ -d "${WORK_PATH}/lang" ]; then
export TEXTDOMAINDIR="${WORK_PATH}/lang"
fi
if [ -f "${PART1_PATH}/.locale" ]; then
export LC_ALL="$(cat ${PART1_PATH}/.locale)"
fi
if [ -f "${PART1_PATH}/.timezone" ]; then
TIMEZONE="$(cat ${PART1_PATH}/.timezone)"
ln -sf "/usr/share/zoneinfo/right/${TIMEZONE}" /etc/localtime
TIMEZONE="$(cat "${PART1_PATH}/.timezone")"
if [ -f "/usr/share/zoneinfo/right/${TIMEZONE}" ]; then
ln -sf "/usr/share/zoneinfo/right/${TIMEZONE}" /etc/localtime
fi
fi

View File

@ -1,3 +1,37 @@
###############################################################################
# Unpack modules from a tgz file
# 1 - Platform
# 2 - Kernel Version
function unpackModules() {
local PLATFORM=${1}
local KVER=${2}
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
}
###############################################################################
# Package modules into a tgz file
# 1 - Platform
# 2 - Kernel Version
function packagModules() {
local PLATFORM=${1}
local KVER=${2}
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
else
tar -zcf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
fi
}
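# Round-trip sketch (illustrative; platform/kver follow platforms.yml, e.g. epyc7002
# uses "7.2-5.10.55" as "kpre-kver"):
#   unpackModules "epyc7002" "7.2-5.10.55"   # extract into ${TMP_PATH}/modules
#   # ... add or remove .ko files in ${TMP_PATH}/modules ...
#   packagModules "epyc7002" "7.2-5.10.55"   # re-pack the same tgz in place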
###############################################################################
# Return list of all modules available
# 1 - Platform
@ -6,27 +40,20 @@ function getAllModules() {
local PLATFORM=${1}
local KVER=${2}
if [ -z "${PLATFORM}" -o -z "${KVER}" ]; then
echo ""
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
return 1
fi
# Unzip modules for temporary folder
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
# Get list of all modules
unpackModules "${PLATFORM}" "${KVER}"
for F in $(ls ${TMP_PATH}/modules/*.ko 2>/dev/null); do
local X=$(basename ${F})
local X=$(basename "${F}")
local M=${X:0:-3}
local DESC=$(modinfo ${F} 2>/dev/null | awk -F':' '/description:/{ print $2}' | awk '{sub(/^[ ]+/,""); print}')
local DESC=$(modinfo "${F}" 2>/dev/null | awk -F':' '/description:/{ print $2}' | awk '{sub(/^[ ]+/,""); print}')
[ -z "${DESC}" ] && DESC="${X}"
echo "${M} \"${DESC}\""
done
rm -rf "${TMP_PATH}/modules"
}
@ -41,34 +68,26 @@ function installModules() {
shift 2
local MLIST="${@}"
if [ -z "${PLATFORM}" -o -z "${KVER}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
echo "ERROR: installModules: Platform or Kernel Version not defined" >"${LOG_FILE}"
return 1
fi
# Unzip modules for temporary folder
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" 2>"${LOG_FILE}"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" 2>"${LOG_FILE}"
fi
if [ $? -ne 0 ]; then
return 1
fi
unpackModules "${PLATFORM}" "${KVER}"
local ODP="$(readConfigKey "odp" "${USER_CONFIG_FILE}")"
for F in $(ls "${TMP_PATH}/modules/"*.ko 2>/dev/null); do
local M=$(basename ${F})
[ "${ODP}" = "true" -a -f "${RAMDISK_PATH}/usr/lib/modules/${M}" ] && continue
local M=$(basename "${F}")
[ "${ODP}" == "true" ] && [ -f "${RAMDISK_PATH}/usr/lib/modules/${M}" ] && continue
if echo "${MLIST}" | grep -wq "${M:0:-3}"; then
cp -f "${F}" "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
else
rm -f "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
fi
done
mkdir -p "${RAMDISK_PATH}/usr/lib/firmware"
local KERNEL=$(readConfigKey "kernel" "${USER_CONFIG_FILE}")
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/firmware.tgz" -C "${RAMDISK_PATH}/usr/lib/firmware" 2>"${LOG_FILE}"
else
@ -78,7 +97,6 @@ function installModules() {
return 1
fi
# Clean
rm -rf "${TMP_PATH}/modules"
return 0
}
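# Usage sketch (illustrative; the module names are assumed examples):
#   installModules "apollolake" "4.4.302" "igb e1000e r8152" || echo "install failed"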
@ -93,25 +111,16 @@ function addToModules() {
local KVER=${2}
local KOFILE=${3}
if [ -z "${PLATFORM}" -o -z "${KVER}" -o -z "${KOFILE}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KOFILE}" ]; then
echo ""
return 1
fi
# Unzip modules for temporary folder
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
cp -f ${KOFILE} ${TMP_PATH}/modules
if [ "${KERNEL}" = "custom" ]; then
tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
else
tar -zcf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
fi
unpackModules "${PLATFORM}" "${KVER}"
cp -f "${KOFILE}" "${TMP_PATH}/modules"
packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@ -124,25 +133,16 @@ function delToModules() {
local KVER=${2}
local KONAME=${3}
if [ -z "${PLATFORM}" -o -z "${KVER}" -o -z "${KONAME}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
echo ""
return 1
fi
# Unzip modules for temporary folder
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
rm -f ${TMP_PATH}/modules/${KONAME}
if [ "${KERNEL}" = "true" ]; then
tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
else
tar -zcf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
fi
unpackModules "${PLATFORM}" "${KVER}"
rm -f "${TMP_PATH}/modules/${KONAME}"
packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@ -153,33 +153,28 @@ function delToModules() {
function getdepends() {
function _getdepends() {
if [ -f "${TMP_PATH}/modules/${1}.ko" ]; then
depends=($(modinfo "${TMP_PATH}/modules/${1}.ko" 2>/dev/null | grep depends: | awk -F: '{print $2}' | awk '$1=$1' | sed 's/,/ /g'))
local depends=($(modinfo "${TMP_PATH}/modules/${1}.ko" 2>/dev/null | grep depends: | awk -F: '{print $2}' | awk '$1=$1' | sed 's/,/ /g'))
if [ ${#depends[@]} -gt 0 ]; then
for k in ${depends[@]}; do
for k in "${depends[@]}"; do
echo "${k}"
_getdepends "${k}"
done
fi
fi
}
local PLATFORM=${1}
local KVER=${2}
local KONAME=${3}
if [ -z "${PLATFORM}" -o -z "${KVER}" -o -z "${KONAME}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
echo ""
return 1
fi
# Unzip modules for temporary folder
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
local DPS=($(_getdepends ${KONAME} | tr ' ' '\n' | sort -u))
echo ${DPS[@]}
unpackModules "${PLATFORM}" "${KVER}"
local DPS=($(_getdepends "${KONAME}" | tr ' ' '\n' | sort -u))
echo "${DPS[@]}"
rm -rf "${TMP_PATH}/modules"
}
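# Usage sketch (illustrative; arguments are assumed examples): prints the unique,
# recursively resolved "depends:" entries of one packaged .ko on a single line.
#   getdepends "apollolake" "4.4.302" "igb"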

View File

@ -1,10 +1,10 @@
#!/usr/bin/env bash
set -e
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
. ${WORK_PATH}/include/addons.sh
. "${WORK_PATH}/include/functions.sh"
. "${WORK_PATH}/include/addons.sh"
[ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")"
checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
@ -12,19 +12,19 @@ checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
# Shows title
clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
[ -z "${COLUMNS}" ] && COLUMNS=80
COLUMNS=${COLUMNS:-80}
TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)"
printf "\033[1;44m%*s\n" ${COLUMNS} ""
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} ""
printf "\033[1;31m%*s\033[0m\n" $(((${#TITLE} + ${COLUMNS}) / 2)) "${TITLE}"
printf "\033[1;44m%*s\033[A\n" ${COLUMNS} ""
printf "\033[1;32m%*s\033[0m\n" ${COLUMNS} "${DATE}"
printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;31m%*s\033[0m\n" "$(((${#TITLE} + ${COLUMNS}) / 2))" "${TITLE}"
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;32m%*s\033[0m\n" "${COLUMNS}" "${DATE}"
# Get first MAC address
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
# No network devices
[ $(echo ${ETHX} | wc -w) -le 0 ] && die "$(TEXT "Network devices not found! Please re execute init.sh after connecting to the network!")"
[ "$(echo "${ETHX}" | wc -w)" -le 0 ] && die "$(TEXT "Network devices not found! Please re execute init.sh after connecting to the network!")"
# If user config file not exists, initialize it
if [ ! -f "${USER_CONFIG_FILE}" ]; then
@ -77,13 +77,13 @@ if [ -f "${PART2_PATH}/GRUB_VER" ]; then
[ -z "$(readConfigKey "platform" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "platform" "${PLATFORMTMP,,}" "${USER_CONFIG_FILE}"
[ -z "$(readConfigKey "model" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "model" "$(echo ${MODELTMP} | sed 's/d$/D/; s/rp$/RP/; s/rp+/RP+/')" "${USER_CONFIG_FILE}"
writeConfigKey "model" "$(echo "${MODELTMP}" | sed 's/d$/D/; s/rp$/RP/; s/rp+/RP+/')" "${USER_CONFIG_FILE}"
[ -z "$(readConfigKey "modelid" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "modelid" "${MODELTMP}" "${USER_CONFIG_FILE}"
fi
if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
if arrayExistItem "sortnetif:" $(readConfigMap "addons" "${USER_CONFIG_FILE}"); then
if arrayExistItem "sortnetif:" "$(readConfigMap "addons" "${USER_CONFIG_FILE}")"; then
_sort_netif "$(readConfigKey "addons.sortnetif" "${USER_CONFIG_FILE}")"
fi
for N in ${ETHX}; do
@ -91,10 +91,10 @@ if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
IPR="$(readConfigKey "network.${MACR}" "${USER_CONFIG_FILE}")"
if [ -n "${IPR}" ] && [ "1" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
IFS='/' read -r -a IPRA <<<"${IPR}"
ip addr flush dev ${N}
ip addr add ${IPRA[0]}/${IPRA[1]:-"255.255.255.0"} dev ${N}
ip addr flush dev "${N}"
ip addr add "${IPRA[0]}/${IPRA[1]:-"255.255.255.0"}" dev "${N}"
if [ -n "${IPRA[2]}" ]; then
ip route add default via ${IPRA[2]} dev ${N}
ip route add default via "${IPRA[2]}" dev "${N}"
fi
if [ -n "${IPRA[3]:-${IPRA[2]}}" ]; then
sed -i "/nameserver ${IPRA[3]:-${IPRA[2]}}/d" /etc/resolv.conf
@ -103,7 +103,7 @@ if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
sleep 1
fi
[ "${N::4}" = "wlan" ] && connectwlanif "${N}" 1 && sleep 1
[ "${N::3}" = "eth" ] && ethtool -s ${N} wol g 2>/dev/null || true
[ "${N::3}" = "eth" ] && ethtool -s "${N}" wol g 2>/dev/null || true
# [ "${N::3}" = "eth" ] && ethtool -K ${N} rxhash off 2>/dev/null || true
done
fi
@ -116,8 +116,8 @@ BUS=$(getBus "${LOADER_DISK}")
BUSLIST="usb sata sas scsi nvme mmc ide virtio vmbus xen"
if [ "${BUS}" = "usb" ]; then
VID="0x$(udevadm info --query property --name ${LOADER_DISK} 2>/dev/null | grep ID_VENDOR_ID | cut -d= -f2)"
PID="0x$(udevadm info --query property --name ${LOADER_DISK} 2>/dev/null | grep ID_MODEL_ID | cut -d= -f2)"
VID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep ID_VENDOR_ID | cut -d= -f2)"
PID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep ID_MODEL_ID | cut -d= -f2)"
TYPE="flashdisk"
elif ! echo "${BUSLIST}" | grep -wq "${BUS}"; then
if [ "LOCALBUILD" = "${LOADER_DISK}" ]; then
@ -129,11 +129,11 @@ elif ! echo "${BUSLIST}" | grep -wq "${BUS}"; then
fi
# Save variables to user config file
writeConfigKey "vid" ${VID} "${USER_CONFIG_FILE}"
writeConfigKey "pid" ${PID} "${USER_CONFIG_FILE}"
writeConfigKey "vid" "${VID}" "${USER_CONFIG_FILE}"
writeConfigKey "pid" "${PID}" "${USER_CONFIG_FILE}"
# Inform user
echo -e "$(TEXT "Loader disk:") \033[1;32m${LOADER_DISK}\033[0m (\033[1;32m${BUS^^} ${TYPE}\033[0m)"
printf "%s \033[1;32m%s (%s %s)\033[0m\n" "$(TEXT "Loader disk:")" "${LOADER_DISK}" "${BUS^^}" "${TYPE}"
# Load keymap name
LAYOUT="$(readConfigKey "layout" "${USER_CONFIG_FILE}")"
@ -141,23 +141,23 @@ KEYMAP="$(readConfigKey "keymap" "${USER_CONFIG_FILE}")"
# Loads a keymap if is valid
if [ -f "/usr/share/keymaps/i386/${LAYOUT}/${KEYMAP}.map.gz" ]; then
echo -e "$(TEXT "Loading keymap") \033[1;32m${LAYOUT}/${KEYMAP}\033[0m"
printf "%s \033[1;32m%s/%s\033[0m\n" "$(TEXT "Loading keymap:")" "${LAYOUT}" "${KEYMAP}"
zcat "/usr/share/keymaps/i386/${LAYOUT}/${KEYMAP}.map.gz" | loadkeys
fi
# Decide if boot automatically
BOOT=1
if ! loaderIsConfigured; then
echo -e "\033[1;33m$(TEXT "Loader is not configured!")\033[0m"
printf "\033[1;33m%s\033[0m\n" "$(TEXT "Loader is not configured!")"
BOOT=0
elif grep -q "IWANTTOCHANGETHECONFIG" /proc/cmdline; then
echo -e "\033[1;33m$(TEXT "User requested edit settings.")\033[0m"
printf "\033[1;33m%s\033[0m\n" "$(TEXT "User requested edit settings.")"
BOOT=0
fi
# If is to boot automatically, do it
if [ ${BOOT} -eq 1 ]; then
${WORK_PATH}/boot.sh && exit 0
"${WORK_PATH}/boot.sh" && exit 0
fi
HTTP=$(grep -i '^HTTP_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)
@ -165,8 +165,8 @@ DUFS=$(grep -i '^DUFS_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)
TTYD=$(grep -i '^TTYD_PORT=' /etc/rrorg.conf 2>/dev/null | cut -d'=' -f2)
# Wait for an IP
echo "$(printf "$(TEXT "Detected %s network cards.")" "$(echo ${ETHX} | wc -w)")"
echo -en "$(TEXT "Checking Connect.")"
printf "$(TEXT "Detected %s network cards.\n")" "$(echo "${ETHX}" | wc -w)"
printf "$(TEXT "Checking Connect.")"
COUNT=0
while [ ${COUNT} -lt 30 ]; do
MSG=""
@ -176,82 +176,82 @@ while [ ${COUNT} -lt 30 ]; do
fi
done
if [ -n "${MSG}" ]; then
echo -en "\r${MSG}$(TEXT "connected.") \n"
printf "\r%s%s \n" "${MSG}" "$(TEXT "connected.")"
break
fi
COUNT=$((${COUNT} + 1))
echo -n "."
COUNT=$((COUNT + 1))
printf "."
sleep 1
done
[ ! -f /var/run/dhcpcd/pid ] && /etc/init.d/S41dhcpcd restart >/dev/null 2>&1 || true
echo "$(TEXT "Waiting IP.")"
printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do
COUNT=0
DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
echo -en "${N}(${DRIVER}): "
printf "%s(%s): " "${N}" "${DRIVER}"
while true; do
if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "DOWN")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
break
fi
if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "NOT CONNECTED")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
break
fi
if [ ${COUNT} -eq 15 ]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "TIMEOUT (Please check the IP on the router.)")\n"
if [ ${COUNT} -eq 15 ]; then # Under normal circumstances, no errors should occur here.
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
break
fi
COUNT=$((${COUNT} + 1))
IP="$(getIP ${N})"
COUNT=$((COUNT + 1))
IP="$(getIP "${N}")"
if [ -n "${IP}" ]; then
if [[ "${IP}" =~ ^169\.254\..* ]]; then
echo -en "\r${N}(${DRIVER}): $(TEXT "LINK LOCAL (No DHCP server detected.)")\n"
if echo "${IP}" | grep -q "^169\.254\."; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
else
echo -en "\r${N}(${DRIVER}): $(printf "$(TEXT "Access \033[1;34mhttp://%s:%d\033[0m to configure the loader via web terminal.")" "${IP}" "${TTYD:-7681}")\n"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:%d\033[0m to configure the loader via web terminal.")" "${IP}" "${TTYD:-7681}")"
fi
break
fi
echo -n "."
printf "."
sleep 1
done
done
# Inform user
echo
echo -e "$(TEXT "Call \033[1;32minit.sh\033[0m to re get init info")"
echo -e "$(TEXT "Call \033[1;32mmenu.sh\033[0m to configure loader")"
echo
echo -e "$(printf "$(TEXT "User config is on \033[1;32m%s\033[0m")" "${USER_CONFIG_FILE}")"
echo -e "$(printf "$(TEXT "HTTP: \033[1;34mhttp://%s:%d\033[0m")" "rr" "${HTTP:-7080}")"
echo -e "$(printf "$(TEXT "DUFS: \033[1;34mhttp://%s:%d\033[0m")" "rr" "${DUFS:-7304}")"
echo -e "$(printf "$(TEXT "TTYD: \033[1;34mhttp://%s:%d\033[0m")" "rr" "${TTYD:-7681}")"
echo
printf "\n"
printf "$(TEXT "Call \033[1;32minit.sh\033[0m to re get init info\n")"
printf "$(TEXT "Call \033[1;32mmenu.sh\033[0m to configure loader\n")"
printf "\n"
printf "$(TEXT "User config is on \033[1;32m%s\033[0m\n")" "${USER_CONFIG_FILE}"
printf "$(TEXT "HTTP: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${HTTP:-7080}"
printf "$(TEXT "DUFS: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${DUFS:-7304}"
printf "$(TEXT "TTYD: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${TTYD:-7681}"
printf "\n"
if [ -f "/etc/shadow-" ]; then
echo -e "$(printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password has been changed")" "22")"
printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password has been changed\n")" "22"
else
echo -e "$(printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password is \033[1;31m%s\033[0m")" "22" "rr")"
printf "$(TEXT "SSH port is \033[1;31m%d\033[0m, The \033[1;31mroot\033[0m password is \033[1;31m%s\033[0m\n")" "22" "rr"
fi
echo
printf "\n"
DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")"
if [ "${DSMLOGO}" = "true" -a -c "/dev/fb0" -a ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
if [ "${DSMLOGO}" = "true" ] && [ -c "/dev/fb0" ] && [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
IP="$(getIP)"
[[ "${IP}" =~ ^169\.254\..* ]] && IP=""
echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:${TTYD:-7681}" || URL="http://rr:${TTYD:-7681}"
python ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "0" -o "${TMP_PATH}/qrcode_init.png"
python3 "${WORK_PATH}/include/functions.py" makeqr -d "${URL}" -l "0" -o "${TMP_PATH}/qrcode_init.png"
[ -f "${TMP_PATH}/qrcode_init.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_init.png" >/dev/null 2>/dev/null || true
python ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
python3 "${WORK_PATH}/include/functions.py" makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi
# Check memory
RAM=$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null)
if [ ${RAM:-0} -le 3500 ]; then
echo -e "\033[1;33m$(TEXT "You have less than 4GB of RAM, if errors occur in loader creation, please increase the amount of memory.")\033[0m\n"
if [ "${RAM:-0}" -le 3500 ]; then
printf "\033[1;33m%s\033[0m\n" "$(TEXT "You have less than 4GB of RAM, if errors occur in loader creation, please increase the amount of memory.")"
fi
mkdir -p "${CKS_PATH}"

View File

@ -13,7 +13,7 @@ fi
## $1 from, $2 to, $3 path to file
_replace_in_file() {
if grep -q "${1}" "${3}"; then
"${SED_PATH}" -i "${3}" -e "s#${1}#${2}#"
"${SED_PATH}" -i "s#${1}#${2}#" "${3}" 2>/dev/null
fi
}
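# Example sketch (illustrative; patterns and path are assumed): only rewrites the file
# when the "from" pattern is actually present.
#   _replace_in_file "root=/dev/ram" "root=/dev/md0" "/tmp/grub.cfg"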
@ -22,17 +22,18 @@ _replace_in_file() {
# Args: $1 name, $2 new_val, $3 path
_set_conf_kv() {
# Delete
if [ -z "$2" ]; then
"${SED_PATH}" -i "${3}" -e "s/^${1}=.*$//"
if [ -z "${2}" ]; then
"${SED_PATH}" -i "/^${1}=/d" "${3}" 2>/dev/null
return 0
fi
# Replace
if grep -q "^${1}=" "${3}"; then
"${SED_PATH}" -i "${3}" -e "s\"^${1}=.*\"${1}=\\\"${2}\\\"\""
"${SED_PATH}" -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
return 0
fi
# Add if doesn't exist
echo "${1}=\"${2}\"" >>"${3}"
return 0
}
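# Behaviour sketch (illustrative; key/value/path are assumed examples):
#   _set_conf_kv "maxdisks" "24" "/tmp/synoinfo.conf"   # update or append maxdisks="24"
#   _set_conf_kv "maxdisks" ""   "/tmp/synoinfo.conf"   # delete the maxdisks= line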

View File

@ -17,47 +17,43 @@ synoinfo: &synoinfo
maxlanport: "8"
netif_seq: "0 1 2 3 4 5 6 7"
buzzeroffen: "0xffff"
productvers4: &productvers4
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers5: &productvers5
"7.1":
kpre: "7.1"
kver: "5.10.55"
"7.2":
kpre: "7.2"
kver: "5.10.55"
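# The anchors above are pulled in by alias below; e.g. `productvers: *productvers4`
# expands to the same "7.0"/"7.1"/"7.2" -> kver mapping as spelling it out in place.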
platforms:
apollolake:
dt: false
flags:
- "movbe"
noflags:
- "x2apic"
flags: ["movbe"]
noflags: ["x2apic"]
synoinfo:
<<: *synoinfo
HddEnableDynamicPower: "no"
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers: *productvers4
broadwell:
dt: false
synoinfo:
<<: *synoinfo
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
synoinfo: *synoinfo
productvers: *productvers4
broadwellnk:
dt: false
synoinfo:
<<: *synoinfo
support_bde_internal_10g: "no"
supportsas: "no"
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers: *productvers4
broadwellnkv2:
dt: true
synoinfo:
@ -66,13 +62,7 @@ platforms:
supportsas: "no"
supportsas_v2_r1: "no"
support_multipath: "yes"
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers: *productvers4
broadwellntbap:
dt: false
synoinfo:
@ -84,43 +74,20 @@ platforms:
support_auto_install: "no"
support_install_only_dev: "no"
required_system_disk_number: "0"
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers: *productvers4
denverton:
dt: false
flags:
- "movbe"
synoinfo:
<<: *synoinfo
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
flags: ["movbe"]
synoinfo: *synoinfo
productvers: *productvers4
geminilake:
dt: true
noflags:
- "x2apic"
synoinfo:
<<: *synoinfo
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
noflags: ["x2apic"]
synoinfo: *synoinfo
productvers: *productvers4
purley:
dt: true
noflags:
- "x2apic"
noflags: ["x2apic"]
synoinfo:
<<: *synoinfo
supportsas: "no"
@ -130,42 +97,18 @@ platforms:
isolated_disk_system: "no"
required_system_disk_number: "0"
internal_disk_without_led_mask: "no"
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
productvers: *productvers4
r1000:
dt: true
synoinfo:
<<: *synoinfo
productvers:
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
synoinfo: *synoinfo
productvers: *productvers4
v1000:
dt: true
synoinfo:
<<: *synoinfo
productvers:
"7.0":
kver: "4.4.180"
"7.1":
kver: "4.4.180"
"7.2":
kver: "4.4.302"
synoinfo: *synoinfo
productvers: *productvers4
epyc7002:
dt: true
synoinfo:
<<: *synoinfo
netif_seq_by_dts: "no"
productvers:
"7.1":
kpre: "7.1"
kver: "5.10.55"
"7.2":
kpre: "7.2"
kver: "5.10.55"
productvers: *productvers5

View File

@ -1,10 +1,10 @@
#!/usr/bin/env bash
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
. ${WORK_PATH}/include/addons.sh
. ${WORK_PATH}/include/modules.sh
. "${WORK_PATH}/include/functions.sh"
. "${WORK_PATH}/include/addons.sh"
. "${WORK_PATH}/include/modules.sh"
set -o pipefail # Get exit code from process piped
@ -23,10 +23,7 @@ rm -f "${MOD_RDGZ_FILE}"
echo -n "."
rm -rf "${RAMDISK_PATH}" # Force clean
mkdir -p "${RAMDISK_PATH}"
(
cd "${RAMDISK_PATH}"
xz -dc <"${ORI_RDGZ_FILE}" | cpio -idm
) >/dev/null 2>&1
(cd "${RAMDISK_PATH}" && xz -dc <"${ORI_RDGZ_FILE}" | cpio -idm) >/dev/null 2>&1 || true
# get user data
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
@ -51,7 +48,7 @@ HDDSORT="$(readConfigKey "hddsort" "${USER_CONFIG_FILE}")"
# Check if DSM buildnumber changed
. "${RAMDISK_PATH}/etc/VERSION"
if [ -n "${PRODUCTVER}" -a -n "${BUILDNUM}" -a -n "${SMALLNUM}" ] &&
if [ -n "${PRODUCTVER}" ] && [ -n "${BUILDNUM}" ] && [ -n "${SMALLNUM}" ] &&
([ ! "${PRODUCTVER}" = "${majorversion}.${minorversion}" ] || [ ! "${BUILDNUM}" = "${buildnumber}" ] || [ ! "${SMALLNUM}" = "${smallfixnumber}" ]); then
OLDVER="${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))"
NEWVER="${majorversion}.${minorversion}(${buildnumber}$([ ${smallfixnumber:-0} -ne 0 ] && echo "u${smallfixnumber}"))"
@ -76,7 +73,7 @@ KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
# Sanity check
if [ -z "${PLATFORM}" -o -z "${KVER}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
echo "ERROR: Configuration for model ${MODEL} and productversion ${PRODUCTVER} not found." >"${LOG_FILE}"
exit 1
fi
@ -86,35 +83,35 @@ declare -A ADDONS
declare -A MODULES
# Read synoinfo and addons from config
while IFS=': ' read KEY VALUE; do
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}")
while IFS=': ' read KEY VALUE; do
done <<<"$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}")"
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "addons" "${USER_CONFIG_FILE}")
done <<<"$(readConfigMap "addons" "${USER_CONFIG_FILE}")"
# Read modules from user config
while IFS=': ' read KEY VALUE; do
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && MODULES["${KEY}"]="${VALUE}"
done <<<$(readConfigMap "modules" "${USER_CONFIG_FILE}")
done <<<"$(readConfigMap "modules" "${USER_CONFIG_FILE}")"
# Patches (diff -Naru OLDFILE NEWFILE > xxx.patch)
PATCHS=()
PATCHS+=("ramdisk-etc-rc-*.patch")
PATCHS+=("ramdisk-init-script-*.patch")
PATCHS+=("ramdisk-post-init-script-*.patch")
PATCHS+=("ramdisk-disable-root-pwd-*.patch")
PATCHS+=("ramdisk-disable-disabled-ports-*.patch")
for PE in ${PATCHS[@]}; do
PATCHS=(
"ramdisk-etc-rc-*.patch"
"ramdisk-init-script-*.patch"
"ramdisk-post-init-script-*.patch"
"ramdisk-disable-root-pwd-*.patch"
"ramdisk-disable-disabled-ports-*.patch"
)
for PE in "${PATCHS[@]}"; do
RET=1
echo "Patching with ${PE}" >"${LOG_FILE}"
# ${PE} contains *, so double quotes cannot be added
for PF in $(ls ${WORK_PATH}/patch/${PE} 2>/dev/null); do
echo -n "."
echo "Patching with ${PF}" >>"${LOG_FILE}"
(
cd "${RAMDISK_PATH}"
busybox patch -p1 -i "${PF}" >>"${LOG_FILE}" 2>&1 # busybox patch and gun patch have different processing methods and parameters.
)
# busybox patch and GNU patch have different processing methods and parameters.
(cd "${RAMDISK_PATH}" && busybox patch -p1 -i "${PF}") >>"${LOG_FILE}" 2>&1
RET=$?
[ ${RET} -eq 0 ] && break
done
@ -127,7 +124,7 @@ echo -n "."
echo "Set synoinfo SN" >"${LOG_FILE}"
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
for KEY in ${!SYNOINFO[@]}; do
for KEY in "${!SYNOINFO[@]}"; do
echo "Set synoinfo ${KEY}" >>"${LOG_FILE}"
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
@ -141,7 +138,7 @@ rm -f "${TMP_PATH}/rp.txt"
touch "${TMP_PATH}/rp.txt"
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
for KEY in ${!SYNOINFO[@]}; do
for KEY in "${!SYNOINFO[@]}"; do
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc.defaults/synoinfo.conf'" >>"${TMP_PATH}/rp.txt"
done
@ -162,18 +159,20 @@ gzip -dc "${LKMS_PATH}/rp-${PLATFORM}-$([ -n "${KPRE}" ] && echo "${KPRE}-")${KV
echo -n "."
echo "Create addons.sh" >"${LOG_FILE}"
mkdir -p "${RAMDISK_PATH}/addons"
echo "#!/bin/sh" >"${RAMDISK_PATH}/addons/addons.sh"
echo 'echo "addons.sh called with params ${@}"' >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export LOADERLABEL=\"RR\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export LOADERRELEASE=\"${RR_RELEASE}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export LOADERVERSION=\"${RR_VERSION}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export PLATFORM=\"${PLATFORM}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export MODEL=\"${MODEL}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export PRODUCTVERL=\"${PRODUCTVERL}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export MLINK=\"${PATURL}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export MCHECKSUM=\"${PATSUM}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export LAYOUT=\"${LAYOUT}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
echo "export KEYMAP=\"${KEYMAP}\"" >>"${RAMDISK_PATH}/addons/addons.sh"
{
echo "#!/bin/sh"
echo 'echo "addons.sh called with params ${@}"'
echo "export LOADERLABEL=\"RR\""
echo "export LOADERRELEASE=\"${RR_RELEASE}\""
echo "export LOADERVERSION=\"${RR_VERSION}\""
echo "export PLATFORM=\"${PLATFORM}\""
echo "export MODEL=\"${MODEL}\""
echo "export PRODUCTVERL=\"${PRODUCTVERL}\""
echo "export MLINK=\"${PATURL}\""
echo "export MCHECKSUM=\"${PATSUM}\""
echo "export LAYOUT=\"${LAYOUT}\""
echo "export KEYMAP=\"${KEYMAP}\""
} >"${RAMDISK_PATH}/addons/addons.sh"
chmod +x "${RAMDISK_PATH}/addons/addons.sh"
# This order cannot be changed.
@ -188,7 +187,7 @@ for ADDON in "redpill" "revert" "misc" "eudev" "disks" "localrss" "notify" "wol"
done
# User addons
for ADDON in ${!ADDONS[@]}; do
for ADDON in "${!ADDONS[@]}"; do
PARAMS=${ADDONS[${ADDON}]}
installAddon "${ADDON}" "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" || exit 1
echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >>"${RAMDISK_PATH}/addons/addons.sh" 2>>"${LOG_FILE}" || exit 1
@ -238,25 +237,25 @@ for N in $(seq 0 7); do
done
# issues/313
if [ ${PLATFORM} = "epyc7002" ]; then
sed -i 's#/dev/console#/var/log/lrc#g' ${RAMDISK_PATH}/usr/bin/busybox
sed -i '/^echo "START/a \\nmknod -m 0666 /dev/console c 1 3' ${RAMDISK_PATH}/linuxrc.syno
if [ "${PLATFORM}" = "epyc7002" ]; then
sed -i 's#/dev/console#/var/log/lrc#g' "${RAMDISK_PATH}/usr/bin/busybox"
sed -i '/^echo "START/a \\nmknod -m 0666 /dev/console c 1 3' "${RAMDISK_PATH}/linuxrc.syno"
fi
if [ "${PLATFORM}" = "broadwellntbap" ]; then
sed -i 's/IsUCOrXA="yes"/XIsUCOrXA="yes"/g; s/IsUCOrXA=yes/XIsUCOrXA=yes/g' ${RAMDISK_PATH}/usr/syno/share/environments.sh
sed -i 's/IsUCOrXA="yes"/XIsUCOrXA="yes"/g; s/IsUCOrXA=yes/XIsUCOrXA=yes/g' "${RAMDISK_PATH}/usr/syno/share/environments.sh"
fi
# Call user patch scripts
echo -n "."
for F in $(ls -1 ${SCRIPTS_PATH}/*.sh 2>/dev/null); do
for F in $(ls -1 "${SCRIPTS_PATH}/"*.sh 2>/dev/null); do
echo "Calling ${F}" >"${LOG_FILE}"
. "${F}" >>"${LOG_FILE}" 2>&1 || exit 1
done
# Reassembly ramdisk
echo -n "."
if [ "${RD_COMPRESSED}" == "true" ]; then
if [ "${RD_COMPRESSED}" = "true" ]; then
(cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root | xz -9 --format=lzma >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1
else
(cd "${RAMDISK_PATH}" && find . 2>/dev/null | cpio -o -H newc -R root:root >"${MOD_RDGZ_FILE}") >"${LOG_FILE}" 2>&1 || exit 1

View File

@ -1,9 +1,9 @@
#!/usr/bin/env bash
# Based on code and ideas from @jumkey
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
. "${WORK_PATH}/include/functions.sh"
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")"
@ -15,60 +15,51 @@ KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre"
# Output: LE HEX with size of file in bytes (to STDOUT)
file_size_le() {
printf $(
dec_size=0
for F in "${@}"; do
fsize=$(stat -c "%s" ${F})
dec_size=$(expr ${dec_size} + ${fsize})
done
printf "%08x\n" ${dec_size} |
sed 's/\(..\)/\1 /g' | {
read ch0 ch1 ch2 ch3
for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do
printf '%s%03o' '\' $((0x${ch}))
done
local dec_size=0
for F in "$@"; do dec_size=$((dec_size + $(stat -c "%s" "${F}"))); done
printf "%08x\n" "${dec_size}" | sed 's/\(..\)/\1 /g' | {
read -r ch0 ch1 ch2 ch3
for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
}
)
}
size_le() {
printf $(
printf "%08x\n" "${@}" |
sed 's/\(..\)/\1 /g' | {
read ch0 ch1 ch2 ch3
for ch in ${ch3} ${ch2} ${ch1} ${ch0}; do
printf '%s%03o' '\' $((0x${ch}))
done
printf "%08x\n" "${@}" | sed 's/\(..\)/\1 /g' | {
read -r ch0 ch1 ch2 ch3
for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
}
)
}
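# Encoding sketch (illustrative): size_le emits the 32-bit little-endian bytes of its
# argument as octal escapes for the outer printf, e.g. size_le 305419896 (0x12345678)
# builds "\170\126\064\022" and therefore writes the bytes 78 56 34 12.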
VMLINUX_MOD=${1}
ZIMAGE_MOD=${2}
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
# Kernel version 4.x or 3.x (bromolow)
#zImage_head 16494
#payload(
# vmlinux.bin x
# padding 0xf00000-x
# vmlinux.bin size 4
#) 0xf00004
#zImage_tail(
# unknown 72
# run_size 4
# unknown 30
# vmlinux.bin size 4
# unknown 114460
#) 114570
#crc32 4
# zImage_head 16494
# payload(
# vmlinux.bin x
# padding 0xf00000-x
# vmlinux.bin size 4
# ) 0xf00004
# zImage_tail(
# unknown 72
# run_size 4
# unknown 30
# vmlinux.bin size 4
# unknown 114460
# ) 114570
# crc32 4
gzip -dc "${WORK_PATH}/bzImage-template-v4.gz" >"${ZIMAGE_MOD}" || exit 1
dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=16494 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745134 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=15745244 seek=1 conv=notrunc || exit 1
RUN_SIZE=$(objdump -h ${VMLINUX_MOD} | sh "${WORK_PATH}/calc_run_size.sh")
size_le ${RUN_SIZE} | dd of=${ZIMAGE_MOD} bs=15745210 seek=1 conv=notrunc || exit 1
size_le $(($((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}'))) ^ 0xFFFFFFFF)) | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
RUN_SIZE=$(objdump -h "${VMLINUX_MOD}" | sh "${WORK_PATH}/calc_run_size.sh")
size_le "${RUN_SIZE}" | dd of="${ZIMAGE_MOD}" bs=15745210 seek=1 conv=notrunc || exit 1
size_le "$((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}') ^ 0xFFFFFFFF))" | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
else
# Kernel version 5.x
gzip -dc "${WORK_PATH}/bzImage-template-v5.gz" >"${ZIMAGE_MOD}" || exit 1
@ -76,7 +67,7 @@ else
dd if="${VMLINUX_MOD}" of="${ZIMAGE_MOD}" bs=14561 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34463421 seek=1 conv=notrunc || exit 1
file_size_le "${VMLINUX_MOD}" | dd of="${ZIMAGE_MOD}" bs=34479132 seek=1 conv=notrunc || exit 1
# RUN_SIZE=$(objdump -h ${VMLINUX_MOD} | sh "${WORK_PATH}/calc_run_size.sh")
# size_le ${RUN_SIZE} | dd of=${ZIMAGE_MOD} bs=34626904 seek=1 conv=notrunc || exit 1
size_le $(($((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}'))) ^ 0xFFFFFFFF)) | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
# RUN_SIZE=$(objdump -h "${VMLINUX_MOD}" | sh "${WORK_PATH}/calc_run_size.sh")
# size_le "${RUN_SIZE}" | dd of="${ZIMAGE_MOD}" bs=34626904 seek=1 conv=notrunc || exit 1
size_le "$((16#$(crc32 "${ZIMAGE_MOD}" | awk '{print $1}') ^ 0xFFFFFFFF))" | dd of="${ZIMAGE_MOD}" conv=notrunc oflag=append || exit 1
fi
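# CRC note (illustrative): crc32 prints the checksum in hex, and the expression above
# appends its bitwise complement, e.g. a reported crc of 1a2b3c4d becomes
# $((16#1a2b3c4d ^ 0xFFFFFFFF)) = 0xE5D4C3B2 before being written via size_le.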

View File

@ -1,8 +1,8 @@
#!/usr/bin/env bash
[ -z "${WORK_PATH}" -o ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. ${WORK_PATH}/include/functions.sh
. "${WORK_PATH}/include/functions.sh"
set -o pipefail # Get exit code from process piped
@ -28,20 +28,20 @@ if [ "${KERNEL}" = "custom" ]; then
else
echo -n "."
# Extract vmlinux
${WORK_PATH}/bzImage-to-vmlinux.sh "${ORI_ZIMAGE_FILE}" "${TMP_PATH}/vmlinux" >"${LOG_FILE}" 2>&1 || exit 1
"${WORK_PATH}/bzImage-to-vmlinux.sh" "${ORI_ZIMAGE_FILE}" "${TMP_PATH}/vmlinux" >"${LOG_FILE}" 2>&1 || exit 1
echo -n "."
# Patch boot params and ramdisk check
${WORK_PATH}/kpatch "${TMP_PATH}/vmlinux" "${TMP_PATH}/vmlinux-mod" >"${LOG_FILE}" 2>&1 || exit 1
"${WORK_PATH}/kpatch" "${TMP_PATH}/vmlinux" "${TMP_PATH}/vmlinux-mod" >"${LOG_FILE}" 2>&1 || exit 1
echo -n "."
# rebuild zImage
${WORK_PATH}/vmlinux-to-bzImage.sh "${TMP_PATH}/vmlinux-mod" "${MOD_ZIMAGE_FILE}" >"${LOG_FILE}" 2>&1 || exit 1
# Rebuild zImage
"${WORK_PATH}/vmlinux-to-bzImage.sh" "${TMP_PATH}/vmlinux-mod" "${MOD_ZIMAGE_FILE}" >"${LOG_FILE}" 2>&1 || exit 1
fi
sync
echo -n "."
# Update HASH of new DSM zImage
HASH="$(sha256sum ${ORI_ZIMAGE_FILE} | awk '{print $1}')"
HASH="$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')"
writeConfigKey "zimage-hash" "${HASH}" "${USER_CONFIG_FILE}"
echo

View File

@ -75,10 +75,10 @@ function set_gfxpayload {
fi
}
set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable"
set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait nointremap net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable"
search --set=root --label "RR3"
if [ -s /zImage-dsm -a -s /initrd-dsm ]; then
if [ -s /zImage-dsm ] && [ -s /initrd-dsm ]; then
if [ "${default}" = "direct" ]; then
set timeout="1"
menuentry 'Boot DSM kernel directly' --id direct {

View File

@ -6,27 +6,29 @@
# See /LICENSE for more information.
#
PROMPT=$(sudo -nv 2>&1)
if [ $? -ne 0 ]; then
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run as root"
exit 1
fi
function help() {
echo "Usage: $0 <command> [args]"
echo "Commands:"
echo " create [workspace] [rr.img] - Create the workspace"
echo " init - Initialize the environment"
echo " config [model] [version] - Config the DSM system"
echo " build - Build the DSM system"
echo " pack [rr.img] - Pack to rr.img"
echo " help - Show this help"
cat <<EOF
Usage: $0 <command> [args]
Commands:
create [workspace] [rr.img] - Create the workspace
init - Initialize the environment
config [model] [version] - Config the DSM system
build - Build the DSM system
pack [rr.img] - Pack to rr.img
help - Show this help
EOF
exit 1
}
function create() {
WORKSPACE="$(realpath ${1:-"workspace"})"
RRIMGPATH="$(realpath ${2:-"rr.img"})"
local WORKSPACE RRIMGPATH LOOPX INITRD_FILE INITRD_FORMAT
WORKSPACE="$(realpath "${1:-workspace}")"
RRIMGPATH="$(realpath "${2:-rr.img}")"
if [ ! -f "${RRIMGPATH}" ]; then
echo "File not found: ${RRIMGPATH}"
@ -34,47 +36,37 @@ function create() {
fi
sudo apt update
sudo apt install -y locales busybox dialog curl xz-utils cpio sed qemu-utils
sudo pip install bs4
sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# sudo snap install yq
if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
fi
# Move the PEP 668 EXTERNALLY-MANAGED marker aside so pip can install packages system-wide.
PYLIB="$(python3 -c 'import sysconfig; print(sysconfig.get_path("stdlib"))')"
sudo mv -f "${PYLIB}/EXTERNALLY-MANAGED" "${PYLIB}/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
YQ=$(command -v yq)
if [ -z "${YQ}" ] || ! ${YQ} --version 2>/dev/null | grep -q "v4."; then
wget https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -O "${YQ:-"/usr/bin/yq"}" && chmod +x "${YQ:-"/usr/bin/yq"}"
fi
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
echo "Mounting image file"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
mkdir -p "/tmp/mnt/p1"
mkdir -p "/tmp/mnt/p2"
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1
)
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || (
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
for i in {1..3}; do
rm -rf "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p${i}"
sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
echo "Can't mount ${LOOPX}p${i}."
exit 1
}
done
echo "Create WORKSPACE"
rm -rf "${WORKSPACE}"
mkdir -p "${WORKSPACE}/mnt"
mkdir -p "${WORKSPACE}/tmp"
mkdir -p "${WORKSPACE}/initrd"
cp -rf "/tmp/mnt/p1" "${WORKSPACE}/mnt/p1"
cp -rf "/tmp/mnt/p2" "${WORKSPACE}/mnt/p2"
cp -rf "/tmp/mnt/p3" "${WORKSPACE}/mnt/p3"
mkdir -p "${WORKSPACE}/mnt" "${WORKSPACE}/tmp" "${WORKSPACE}/initrd"
cp -rf /tmp/mnt/p{1,2,3} "${WORKSPACE}/mnt/"
INITRD_FILE="${WORKSPACE}/mnt/p3/initrd-rr"
INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
@ -91,49 +83,50 @@ function create() {
*) ;;
esac
) 2>/dev/null
sudo sync
sudo umount "/tmp/mnt/p1"
sudo umount "/tmp/mnt/p2"
sudo umount "/tmp/mnt/p3"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
sudo losetup --detach ${LOOPX}
if [ ! -f "${WORKSPACE}/initrd/opt/rr/init.sh" ] || ! [ -f "${WORKSPACE}/initrd/opt/rr/menu.sh" ]; then
sudo sync
for i in {1..3}; do
sudo umount "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p${i}"
done
sudo losetup --detach "${LOOPX}"
if [ ! -f "${WORKSPACE}/initrd/opt/rr/init.sh" ] || [ ! -f "${WORKSPACE}/initrd/opt/rr/menu.sh" ]; then
echo "initrd decompression failed."
exit 1
fi
rm -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
echo "export LOADER_DISK=\"LOCALBUILD\"" >>$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
echo "export CHROOT_PATH=\"${WORKSPACE}\"" >>$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
rm -f "$(dirname "${BASH_SOURCE[0]}")/rr.env"
cat <<EOF >"$(dirname "${BASH_SOURCE[0]}")/rr.env"
export LOADER_DISK="LOCALBUILD"
export CHROOT_PATH="${WORKSPACE}"
EOF
echo "OK."
}
function init() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then
if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first"
exit 1
fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
pushd "${CHROOT_PATH}/initrd/opt/rr"
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
echo "init"
./init.sh
RET=$?
popd
local RET=$?
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET}
exit ${RET}
}
function config() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then
if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first"
exit 1
fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr"
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do
if [ -z "${1}" ]; then
echo "menu"
@ -141,26 +134,26 @@ function config() {
RET=0
else
echo "model"
./menu.sh modelMenu "${1:-"SA6400"}" || break
./menu.sh modelMenu "${1:-SA6400}" || break
echo "version"
./menu.sh productversMenu "${2:-"7.2"}" || break
./menu.sh productversMenu "${2:-7.2}" || break
RET=0
fi
break
done
popd
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET}
exit ${RET}
}
function build() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then
if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first"
exit 1
fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr"
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do
echo "build"
./menu.sh make -1 || break
@ -169,19 +162,20 @@ function build() {
RET=0
break
done
popd
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
return ${RET}
exit ${RET}
}
function pack() {
if [ ! -f $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env ]; then
if [ ! -f "$(dirname "${BASH_SOURCE[0]}")/rr.env" ]; then
echo "Please run init first"
exit 1
fi
. $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/rr.env
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
RRIMGPATH="$(realpath ${1:-"rr.img"})"
local RRIMGPATH LOOPX
RRIMGPATH="$(realpath "${1:-rr.img}")"
if [ ! -f "${RRIMGPATH}" ]; then
gzip -dc "${CHROOT_PATH}/initrd/opt/rr/grub.img.gz" >"${RRIMGPATH}"
fi
@ -191,48 +185,32 @@ function pack() {
sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
echo "Mounting image file"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
mkdir -p "/tmp/mnt/p1"
mkdir -p "/tmp/mnt/p2"
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1
)
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || (
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
for i in {1..3}; do
rm -rf "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p${i}"
sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
echo "Can't mount ${LOOPX}p${i}."
exit 1
}
done
echo "Pack image file"
sudo cp -af "${CHROOT_PATH}/mnt/p1/.locale" "/tmp/mnt/p1" 2>/dev/null
sudo cp -rf "${CHROOT_PATH}/mnt/p1/"* "/tmp/mnt/p1" || (
echo -e "Can't cp ${LOOPX}p1."
exit 1
)
sudo cp -rf "${CHROOT_PATH}/mnt/p2/"* "/tmp/mnt/p2" || (
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
sudo cp -rf "${CHROOT_PATH}/mnt/p3/"* "/tmp/mnt/p3" || (
echo -e "Can't cp ${LOOPX}p3."
exit 1
)
for i in {1..3}; do
[ ${i} -eq 1 ] && sudo cp -af "${CHROOT_PATH}/mnt/p${i}/"{.locale,.timezone} "/tmp/mnt/p${i}/" 2>/dev/null
sudo cp -rf "${CHROOT_PATH}/mnt/p${i}/"* "/tmp/mnt/p${i}" || {
echo "Can't cp ${LOOPX}p${i}."
exit 1
}
done
sudo sync
sudo umount "/tmp/mnt/p1"
sudo umount "/tmp/mnt/p2"
sudo umount "/tmp/mnt/p3"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
sudo losetup --detach ${LOOPX}
for i in {1..3}; do
sudo umount "/tmp/mnt/p${i}"
rm -rf "/tmp/mnt/p${i}"
done
sudo losetup --detach "${LOOPX}"
echo "OK."
exit 0
}
$@

View File

@ -6,10 +6,11 @@
# See /LICENSE for more information.
#
import os, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests
import os, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests, urllib3
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
from openpyxl import Workbook
@click.group()
def cli():
"""
@ -23,31 +24,40 @@ def cli():
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodels(workpath, jsonpath, xlsxpath):
models = {}
with open("{}/opt/rr/platforms.yml".format(workpath), "r") as f:
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
P_data = yaml.safe_load(f)
P_platforms = P_data.get("platforms", [])
for P in P_platforms:
productvers = {}
for V in P_platforms[P]["productvers"]:
if P_platforms[P]["productvers"][V].get("kpre", "") != "":
productvers[V] = (P_platforms[P]["productvers"][V].get("kpre", "") + "-" + P_platforms[P]["productvers"][V].get("kver", ""))
else:
productvers[V] = P_platforms[P]["productvers"][V].get("kver", "")
kpre = P_platforms[P]["productvers"][V].get("kpre", "")
kver = P_platforms[P]["productvers"][V].get("kver", "")
productvers[V] = f"{kpre}-{kver}" if kpre else kver
models[P] = {"productvers": productvers, "models": []}
req = requests.get("https://autoupdate.synology.com/os/v2")
req.encoding = "utf-8"
data = json.loads(req.text)
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for I in data["channel"]["item"]:
if not I["title"].startswith("DSM"):
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
for J in I["model"]:
arch = J["mUnique"].split("_")[1].lower()
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in models.keys():
for model in item["model"]:
arch = model["mUnique"].split("_")[1].lower()
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in models:
continue
if name in (A for B in models for A in models[B]["models"]):
continue
@ -64,45 +74,51 @@ def getmodels(workpath, jsonpath, xlsxpath):
ws.append([k, str(v["productvers"]), str(v["models"])])
wb.save(xlsxpath)
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getpats(workpath, jsonpath, xlsxpath):
def __fullversion(ver):
out = ver
arr = ver.split('-')
if len(arr) > 0:
a = arr[0].split('.')[0] if len(arr[0].split('.')) > 0 else '0'
b = arr[0].split('.')[1] if len(arr[0].split('.')) > 1 else '0'
c = arr[0].split('.')[2] if len(arr[0].split('.')) > 2 else '0'
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
out = '{}.{}.{}-{}-{}'.format(a,b,c,d,e)
return out
a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
return f'{a}.{b}.{c}-{d}-{e}'
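# e.g. (illustrative) __fullversion("7.2.1-69057") -> "7.2.1-69057-0"
#      and __fullversion("7.2-64570-1") -> "7.2.0-64570-1"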
platforms = []
models = []
with open("{}/opt/rr/platforms.yml".format(workpath), "r") as f:
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
data = yaml.safe_load(f)
platforms = data.get("platforms", [])
req = requests.get("https://autoupdate.synology.com/os/v2")
req.encoding = "utf-8"
data = json.loads(req.text)
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for I in data["channel"]["item"]:
if not I["title"].startswith("DSM"):
models = []
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
for J in I["model"]:
arch = J["mUnique"].split("_")[1].lower()
name = J["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
for model in item["model"]:
arch = model["mUnique"].split("_")[1].lower()
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in platforms:
continue
if name in models:
continue
models.append(name)
pats = {}
for M in models:
pats[M] = {}
@ -112,57 +128,73 @@ def getpats(workpath, jsonpath, xlsxpath):
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = "&major={}".format(version.split('.')[0]) if len(version.split('.')) > 0 else ""
minor = "&minor={}".format(version.split('.')[1]) if len(version.split('.')) > 1 else ""
req = requests.get("{}&product={}{}{}".format(urlInfo, M.replace("+", "%2B"), major, minor))
req.encoding = "utf-8"
data = json.loads(req.text)
major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
try:
req = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano))
if not V in pats[M]:
pats[M][V]={}
pats[M][V]['url'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0]
pats[M][V]['sum'] = data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver=0
for I in data['info']['pubVers']:
if from_ver == 0 or I['build'] < from_ver: from_ver = I['build']
from_ver = min(I['build'] for I in data['info']['pubVers'])
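# from_ver is the oldest published build; upgrade steps are queried below from it to each target build (to_ver).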
for I in data['info']['productVers']:
if not I['version'].startswith(version): continue
if major == "" or minor == "":
majorTmp = "&major={}".format(I['version'].split('.')[0]) if len(I['version'].split('.')) > 0 else ""
minorTmp = "&minor={}".format(I['version'].split('.')[1]) if len(I['version'].split('.')) > 1 else ""
reqTmp = requests.get("{}&product={}{}{}".format(urlInfo, M.replace("+", "%2B"), majorTmp, minorTmp))
reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text)
if not I['version'].startswith(version):
continue
if not major or not minor:
majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
try:
reqTmp = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V=__fullversion("{}-{}-{}".format(build_ver, build_num, buildnano))
if not V in pats[M]:
pats[M][V]={}
pats[M][V]['url'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0]
pats[M][V]['sum'] = dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']:
to_ver=J['build']
reqSteps = requests.get("{}&product={}&from_ver={}&to_ver={}".format(urlSteps, M.replace("+", "%2B"), from_ver, to_ver))
if reqSteps.status_code != 200: continue
reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text)
to_ver = J['build']
try:
reqSteps = session.get(f"{urlSteps}&product={M.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
for S in dataSteps['upgrade_steps']:
if not 'full_patch' in S or S['full_patch'] is False: continue
if not 'build_ver' in S or not S['build_ver'].startswith(version): continue
V=__fullversion("{}-{}-{}".format(S['build_ver'], S['build_num'], S['nano']))
if not V in pats[M]:
pats[M][V] = {}
pats[M][V]['url'] = S['files'][0]['url'].split('?')[0]
pats[M][V]['sum'] = S['files'][0]['checksum']
if not S.get('full_patch') or not S['build_ver'].startswith(version):
continue
V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if V not in pats[M]:
pats[M][V] = {
'url': S['files'][0]['url'].split('?')[0],
'sum': S['files'][0]['checksum']
}
if jsonpath:
with open(jsonpath, "w") as f:
@ -176,13 +208,13 @@ def getpats(workpath, jsonpath, xlsxpath):
ws.append([k1, k2, v2["url"], v2["sum"]])
wb.save(xlsxpath)
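# For illustration only: an assumed shape of the generated docs/pats.json, inferred from the code above
# (placeholders, not actual values):
# {
#   "<model>": {
#     "<major.minor.micro-buildnum-nano>": {"url": "<pat download url>", "sum": "<checksum>"}
#   }
# }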
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getaddons(workpath, jsonpath, xlsxpath):
# Read the manifest.yml file
AS = glob.glob("{}/mnt/p3/addons/*/manifest.yml".format(workpath))
AS = glob.glob(os.path.join(workpath, "mnt", "p3", "addons", "*", "manifest.yml"))
AS.sort()
addons = {}
for A in AS:
@ -200,7 +232,7 @@ def getaddons(workpath, jsonpath, xlsxpath):
ws = wb.active
ws.append(["Name", "system", "en_US", "zh_CN"])
for k1, v1 in addons.items():
ws.append([k1, v1.get("system", False), v1.get("description").get("en_US", ""), v1.get("description").get("zh_CN", ""),])
ws.append([k1, v1.get("system", False), v1.get("description").get("en_US", ""), v1.get("description").get("zh_CN", "")])
wb.save(xlsxpath)
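# For illustration only: an assumed shape of each docs/addons.json entry, inferred from the columns above
# (placeholders, not actual values):
# "<addon name>": {"system": false, "description": {"en_US": "...", "zh_CN": "..."}}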
@ -209,8 +241,7 @@ def getaddons(workpath, jsonpath, xlsxpath):
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodules(workpath, jsonpath, xlsxpath):
# Read the module files
MS = glob.glob("{}/mnt/p3/modules/*.tgz".format(workpath))
MS = glob.glob(os.path.join(workpath, "mnt", "p3", "modules", "*.tgz"))
MS.sort()
modules = {}
TMP_PATH = "/tmp/modules"
@ -219,12 +250,10 @@ def getmodules(workpath, jsonpath, xlsxpath):
for M in MS:
M_name = os.path.splitext(os.path.basename(M))[0]
M_modules = {}
# Extract the module
os.makedirs(TMP_PATH)
with tarfile.open(M, "r") as tar:
tar.extractall(TMP_PATH)
# Traverse the extracted files
KS = glob.glob("{}/*.ko".format(TMP_PATH))
KS = glob.glob(os.path.join(TMP_PATH, "*.ko"))
KS.sort()
for K in KS:
K_name = os.path.splitext(os.path.basename(K))[0]

View File

@ -5,19 +5,23 @@
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
[ -n "${1}" ] && export TOKEN="${1}"
REPO="https://api.github.com/repos/RROrg"
# Convert po2mo
# $1 path
function convertpo2mo() {
echo "Convert po2mo begin"
local DEST_PATH="${1:-lang}"
for P in $(ls ${DEST_PATH}/*/LC_MESSAGES/rr.po 2>/dev/null); do
while read -r P; do
# Use the msgfmt command to compile the .po file into a binary .mo file
echo "msgfmt ${P} to ${P/.po/.mo}"
msgfmt ${P} -o ${P/.po/.mo}
done
msgfmt "${P}" -o "${P/.po/.mo}"
done <<<$(find "${DEST_PATH}" -type f -name 'rr.po')
echo "Convert po2mo end"
}
@ -34,9 +38,9 @@ function getExtractor() {
local PAT_URL="https://global.synologydownload.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat"
local PAT_FILE="DSM_DS3622xs+_42218.pat"
local STATUS=$(curl -#L -w "%{http_code}" "${PAT_URL}" -o "${CACHE_DIR}/${PAT_FILE}")
if [ $? -ne 0 -o ${STATUS:-0} -ne 200 ]; then
if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ]; then
echo "[E] DSM_DS3622xs%2B_42218.pat download error!"
rm -rf ${CACHE_DIR}
rm -rf "${CACHE_DIR}"
exit 1
fi
@ -44,13 +48,10 @@ function getExtractor() {
tar -C "${CACHE_DIR}/ramdisk/" -xf "${CACHE_DIR}/${PAT_FILE}" rd.gz 2>&1
if [ $? -ne 0 ]; then
echo "[E] extractor rd.gz error!"
rm -rf ${CACHE_DIR}
rm -rf "${CACHE_DIR}"
exit 1
fi
(
cd "${CACHE_DIR}/ramdisk"
xz -dc <rd.gz | cpio -idm
) >/dev/null 2>&1 || true
(cd "${CACHE_DIR}/ramdisk" && xz -dc <rd.gz | cpio -idm) >/dev/null 2>&1 || true
rm -rf "${DEST_PATH}"
mkdir -p "${DEST_PATH}"
@ -62,7 +63,7 @@ function getExtractor() {
cp -f "${CACHE_DIR}/ramdisk/usr/syno/bin/scemd" "${DEST_PATH}/syno_extract_system_patch"
# Clean up
rm -rf ${CACHE_DIR}
rm -rf "${CACHE_DIR}"
echo "Getting syno extractor end"
}
@ -75,18 +76,19 @@ function getBuildroot() {
local CACHE_DIR="/tmp/buildroot"
local CACHE_FILE="/tmp/buildroot.zip"
rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-buildroot/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases/latest" | jq -r ".tag_name")
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-buildroot/releases/latest" | jq -r ".tag_name")
fi
while read ID NAME; do
while read -r ID NAME; do
if [ "${NAME}" = "buildroot-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-buildroot/releases/assets/${ID}" -o "${CACHE_FILE}")
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-buildroot/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
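# For illustration: the jq filter above emits one "<asset_id> <asset_name>" pair per line, e.g.
#   123456789 buildroot-${TAG}.zip   (hypothetical asset id)
# Only the asset whose name matches "buildroot-${TAG}.zip" is downloaded.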
# Unzip Buildroot
rm -rf "${CACHE_DIR}"
mkdir -p "${CACHE_DIR}"
@ -107,18 +109,19 @@ function getCKs() {
local DEST_PATH="${1:-cks}"
local CACHE_FILE="/tmp/rr-cks.zip"
rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-cks/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases/latest" | jq -r ".tag_name")
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-cks/releases/latest" | jq -r ".tag_name")
fi
while read ID NAME; do
while read -r ID NAME; do
if [ "${NAME}" = "rr-cks-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-cks/releases/assets/${ID}" -o "${CACHE_FILE}")
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-cks/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip CKs
rm -rf "${DEST_PATH}"
@ -136,18 +139,19 @@ function getLKMs() {
local DEST_PATH="${1:-lkms}"
local CACHE_FILE="/tmp/rp-lkms.zip"
rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-lkms/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases/latest" | jq -r ".tag_name")
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-lkms/releases/latest" | jq -r ".tag_name")
fi
while read ID NAME; do
while read -r ID NAME; do
if [ "${NAME}" = "rp-lkms-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-lkms/releases/assets/${ID}" -o "${CACHE_FILE}")
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-lkms/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip LKMs
rm -rf "${DEST_PATH}"
@ -157,7 +161,7 @@ function getLKMs() {
echo "Getting LKMs end"
}
# Get latest addons and install its
# Get latest addons and install them
# $1 path
# $2 (true|false[d]) include prerelease
function getAddons() {
@ -165,18 +169,19 @@ function getAddons() {
local DEST_PATH="${1:-addons}"
local CACHE_DIR="/tmp/addons"
local CACHE_FILE="/tmp/addons.zip"
local TAG
if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-addons/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases/latest" | jq -r ".tag_name")
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-addons/releases/latest" | jq -r ".tag_name")
fi
while read ID NAME; do
while read -r ID NAME; do
if [ "${NAME}" = "addons-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-addons/releases/assets/${ID}" -o "${CACHE_FILE}")
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-addons/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
rm -rf "${DEST_PATH}"
mkdir -p "${DEST_PATH}"
@ -185,8 +190,8 @@ function getAddons() {
mkdir -p "${CACHE_DIR}"
unzip "${CACHE_FILE}" -d "${CACHE_DIR}"
echo "Installing addons to ${DEST_PATH}"
[ -f /tmp/addons/VERSION ] && cp -f /tmp/addons/VERSION ${DEST_PATH}/
for PKG in $(ls ${CACHE_DIR}/*.addon 2>/dev/null); do
[ -f "/tmp/addons/VERSION" ] && cp -f "/tmp/addons/VERSION" "${DEST_PATH}/"
for PKG in "${CACHE_DIR}"/*.addon; do
ADDON=$(basename "${PKG}" .addon)
mkdir -p "${DEST_PATH}/${ADDON}"
echo "Extracting ${PKG} to ${DEST_PATH}/${ADDON}"
@ -205,18 +210,19 @@ function getModules() {
local DEST_PATH="${1:-addons}"
local CACHE_FILE="/tmp/modules.zip"
rm -f "${CACHE_FILE}"
local TAG
if [ "${2}" = "true" ]; then
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-modules/releases" | jq -r ".[].tag_name" | sort -rV | head -1)
else
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases/latest" | jq -r ".tag_name")
TAG=$(curl -skL -H "Authorization: token ${TOKEN}" "${REPO}/rr-modules/releases/latest" | jq -r ".tag_name")
fi
while read ID NAME; do
while read -r ID NAME; do
if [ "${NAME}" = "modules-${TAG}.zip" ]; then
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "https://api.github.com/repos/RROrg/rr-modules/releases/assets/${ID}" -o "${CACHE_FILE}")
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-modules/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "https://api.github.com/repos/RROrg/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip Modules
rm -rf "${DEST_PATH}"
@ -231,20 +237,20 @@ function getModules() {
# $2 plugin path
# $3 output file
function repackInitrd() {
INITRD_FILE="${1}"
PLUGIN_PATH="${2}"
OUTPUT_PATH="${3:-${INITRD_FILE}}"
local INITRD_FILE="${1}"
local PLUGIN_PATH="${2}"
local OUTPUT_PATH="${3:-${INITRD_FILE}}"
[ -z "${INITRD_FILE}" -o ! -f "${INITRD_FILE}" ] && exit 1
[ -z "${PLUGIN_PATH}" -o ! -d "${PLUGIN_PATH}" ] && exit 1
[ -z "${INITRD_FILE}" ] || [ ! -f "${INITRD_FILE}" ] && exit 1
[ -z "${PLUGIN_PATH}" ] || [ ! -d "${PLUGIN_PATH}" ] && exit 1
INITRD_FILE="$(readlink -f "${INITRD_FILE}")"
PLUGIN_PATH="$(readlink -f "${PLUGIN_PATH}")"
OUTPUT_PATH="$(readlink -f "${OUTPUT_PATH}")"
RDXZ_PATH="rdxz_tmp"
local RDXZ_PATH="rdxz_tmp"
mkdir -p "${RDXZ_PATH}"
INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
local INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
(
cd "${RDXZ_PATH}"
case "${INITRD_FORMAT}" in
@ -281,17 +287,17 @@ function repackInitrd() {
# $2 change size in MB, e.g. +50M or -50M
# $3 output file
function resizeImg() {
INPUT_FILE="${1}"
CHANGE_SIZE="${2}"
OUTPUT_FILE="${3:-${INPUT_FILE}}"
local INPUT_FILE="${1}"
local CHANGE_SIZE="${2}"
local OUTPUT_FILE="${3:-${INPUT_FILE}}"
[ -z "${INPUT_FILE}" -o ! -f "${INPUT_FILE}" ] && exit 1
[ -z "${INPUT_FILE}" ] || [ ! -f "${INPUT_FILE}" ] && exit 1
[ -z "${CHANGE_SIZE}" ] && exit 1
INPUT_FILE="$(readlink -f "${INPUT_FILE}")"
OUTPUT_FILE="$(readlink -f "${OUTPUT_FILE}")"
SIZE=$(($(du -sm "${INPUT_FILE}" 2>/dev/null | awk '{print $1}')$(echo "${CHANGE_SIZE}" | sed 's/M//g; s/b//g')))
local SIZE=$(($(du -sm "${INPUT_FILE}" 2>/dev/null | awk '{print $1}')$(echo "${CHANGE_SIZE}" | sed 's/M//g; s/b//g')))
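# Worked example (assumed values): a 2560M image with CHANGE_SIZE="+50M" yields SIZE=$((2560+50))=2610,
# so the image is truncated to 2610M below; a negative result aborts via the check that follows.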
[ "${SIZE:-0}" -lt 0 ] && exit 1
if [ ! "${INPUT_FILE}" = "${OUTPUT_FILE}" ]; then
@ -299,8 +305,8 @@ function resizeImg() {
fi
sudo truncate -s ${SIZE}M "${OUTPUT_FILE}"
echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}"
LOOPX=$(sudo losetup -f)
echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}" >/dev/null 2>&1
local LOOPX=$(sudo losetup -f)
sudo losetup -P ${LOOPX} "${OUTPUT_FILE}"
sudo e2fsck -fp $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
sudo resize2fs $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
@ -311,12 +317,12 @@ function resizeImg() {
# $1 bootloader file
# $2 ova file
function convertova() {
BLIMAGE=${1}
OVAPATH=${2}
local BLIMAGE=${1}
local OVAPATH=${2}
BLIMAGE="$(readlink -f "${BLIMAGE}")"
OVAPATH="$(readlink -f "${OVAPATH}")"
VMNAME="$(basename "${OVAPATH}" .ova)"
local VMNAME="$(basename "${OVAPATH}" .ova)"
# Download and install ovftool if it doesn't exist
if [ ! -x ovftool/ovftool ]; then
@ -345,9 +351,9 @@ function convertova() {
# Create VM configuration
cat <<_EOF_ >"OVA_${VMNAME}/${VMNAME}.vmx"
.encoding = "GBK"
.encoding = "UTF-8"
config.version = "8"
virtualHW.version = "21"
virtualHW.version = "17"
displayName = "${VMNAME}"
annotation = "https://github.com/RROrg/rr"
guestOS = "ubuntu-64"

View File

@ -2,4 +2,8 @@ bs4
click
kmodule
requests
openpyxl
requests-toolbelt
urllib3
openpyxl
qrcode[pil]
beautifulsoup4

View File

@ -6,35 +6,40 @@
# See /LICENSE for more information.
#
# sudo apt update
# sudo apt install -y locales busybox dialog curl xz-utils cpio sed
# sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# # sudo snap install yq
# if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
# sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
# fi
#
# # Backup the original python3 executable.
# sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
# sudo pip3 install -U click requests requests-toolbelt urllib3 qrcode[pil] beautifulsoup4
#
# sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
#
# export TOKEN="${1}"
#
set -e
PROMPT=$(sudo -nv 2>&1)
if [ $? -ne 0 ]; then
if [ "$(id -u)" -ne 0 ]; then
echo "This script must be run as root"
exit 1
fi
PRE="true"
. scripts/func.sh
. scripts/func.sh "${TOKEN}"
echo "Get extractor"
getCKs "files/p3/cks" "${PRE}"
getLKMs "files/p3/lkms" "${PRE}"
getAddons "files/p3/addons" "${PRE}"
getModules "files/p3/modules" "${PRE}"
getBuildroot "files/p3" "${PRE}"
getExtractor "files/p3/extractor"
getCKs "files/mnt/p3/cks" "true"
getLKMs "files/mnt/p3/lkms" "true"
getAddons "files/mnt/p3/addons" "true"
getModules "files/mnt/p3/modules" "true"
getBuildroot "files/mnt/p3" "true"
getExtractor "files/mnt/p3/extractor"
echo "Repack initrd"
convertpo2mo "files/initrd/opt/rr/lang"
repackInitrd "files/p3/initrd-rr" "files/initrd"
repackInitrd "files/mnt/p3/initrd-rr" "files/initrd"
if [ -n "${1}" ]; then
export LOADER_DISK="LOCALBUILD"
@ -43,81 +48,63 @@ if [ -n "${1}" ]; then
cd "${CHROOT_PATH}/initrd/opt/rr"
./init.sh
./menu.sh modelMenu "${1}"
./menu.sh productversMenu "7.2"
./menu.sh productversMenu "${2:-7.2}"
./menu.sh make -1
./menu.sh cleanCache -1
)
fi
IMAGE_FILE="rr.img"
gzip -dc "files/grub.img.gz" >"${IMAGE_FILE}"
gzip -dc "files/initrd/opt/rr/grub.img.gz" >"${IMAGE_FILE}"
fdisk -l "${IMAGE_FILE}"
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${IMAGE_FILE}"
echo "Mounting image file"
rm -rf "/tmp/mnt/p1"
rm -rf "/tmp/mnt/p2"
rm -rf "/tmp/mnt/p3"
mkdir -p "/tmp/mnt/p1"
mkdir -p "/tmp/mnt/p2"
mkdir -p "/tmp/mnt/p3"
sudo mount ${LOOPX}p1 "/tmp/mnt/p1" || (
echo -e "Can't mount ${LOOPX}p1."
exit 1
)
sudo mount ${LOOPX}p2 "/tmp/mnt/p2" || (
echo -e "Can't mount ${LOOPX}p2."
exit 1
)
sudo mount ${LOOPX}p3 "/tmp/mnt/p3" || (
echo -e "Can't mount ${LOOPX}p3."
exit 1
)
for i in {1..3}; do
[ ! -d "files/mnt/p${i}" ] && continue
rm -rf "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p${i}"
echo "Copying files"
sudo cp -af "files/mnt/p1/.locale" "/tmp/mnt/p1" 2>/dev/null
sudo cp -rf "files/mnt/p1/"* "/tmp/mnt/p1" || (
echo -e "Can't cp ${LOOPX}p1."
exit 1
)
sudo cp -rf "files/mnt/p2/"* "/tmp/mnt/p2" || (
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
sudo cp -rf "files/mnt/p3/"* "/tmp/mnt/p3" || (
echo -e "Can't cp ${LOOPX}p2."
exit 1
)
echo "Mounting ${LOOPX}p${i}"
sudo mount "${LOOPX}p${i}" "/tmp/mnt/p${i}" || {
echo "Can't mount ${LOOPX}p${i}."
break
}
echo "Copying files to ${LOOPX}p${i}"
[ ${i} -eq 1 ] && sudo cp -af "files/mnt/p${i}/"{.locale,.timezone} "/tmp/mnt/p${i}/" 2>/dev/null || true
sudo cp -rf "files/mnt/p${i}/"* "/tmp/mnt/p${i}" || true
sudo sync
sudo sync
echo "Unmounting ${LOOPX}p${i}"
sudo umount "/tmp/mnt/p${i}" || {
echo "Can't umount ${LOOPX}p${i}."
break
}
rm -rf "/tmp/mnt/p${i}"
done
sudo losetup --detach "${LOOPX}"
resizeImg "${IMAGE_FILE}" "+2560M"
# convertova "${IMAGE_FILE}" "${IMAGE_FILE/.img/.ova}"
# update.zip
sha256sum update-list.yml update-check.sh >sha256sum
zip -9j update.zip update-list.yml update-check.sh
while read F; do
if [ -d "/tmp/${F}" ]; then
FTGZ="$(basename "/tmp/${F}").tgz"
tar -czf "${FTGZ}" -C "/tmp/${F}" .
zip -9j "update.zip" update-list.yml update-check.sh
while read -r F; do
if [ -d "${F}" ]; then
FTGZ="$(basename "${F}").tgz"
tar -zcf "${FTGZ}" -C "${F}" .
sha256sum "${FTGZ}" >>sha256sum
zip -9j update.zip "${FTGZ}"
sudo rm -f "${FTGZ}"
zip -9j "update.zip" "${FTGZ}"
rm -f "${FTGZ}"
else
(cd $(dirname "/tmp/${F}") && sha256sum $(basename "/tmp/${F}")) >>sha256sum
zip -9j update.zip "/tmp/${F}"
(cd $(dirname "${F}") && sha256sum $(basename "${F}")) >>sha256sum
zip -9j "update.zip" "${F}"
fi
done <<<$(yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.yml)
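# For illustration only (hypothetical update-list.yml layout, not taken from this repository):
#   replace:
#     "/mnt/p1/EFI": "..."
#     "/mnt/p3/addons": "..."
# The yq filter above emits the keys of the "replace" map one per line; directories are packed as
# <name>.tgz and single files are added to update.zip as-is.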
zip -9j update.zip sha256sum
echo "Unmount image file"
sudo umount "/tmp/files/p1"
sudo umount "/tmp/files/p2"
sudo umount "/tmp/files/p3"
sudo losetup --detach ${LOOPX}
if [ -n "${1}" ]; then
echo "Packing image file"
sudo mv "${IMAGE_FILE}" "rr-${1}.img"
fi
zip -9j "update.zip" sha256sum