Compare commits


No commits in common. "main" and "25.1.0" have entirely different histories.
main ... 25.1.0

74 changed files with 11269 additions and 18135 deletions

.gitattributes vendored (6 changed lines)
View File

@@ -1,5 +1 @@
* text=auto eol=lf
*.png binary
*.jpg binary
*.gif binary
*.ico binary
*.sh eol=lf

View File

@@ -1,107 +1,109 @@
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
name: Data
on:
release:
types:
- created
workflow_dispatch:
inputs:
push:
description: "push"
default: false
type: boolean
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@main
with:
ref: main
- name: Init Env
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
sudo timedatectl set-timezone "Asia/Shanghai"
- name: Delay
run: |
echo "Delaying for 1 minutes..."
sleep 60
- name: Get Release RR
run: |
REPO="${{ github.server_url }}/${{ github.repository }}"
PRERELEASE="true"
TAG=""
if [ "${PRERELEASE}" = "true" ]; then
TAG="$(curl -skL --connect-timeout 10 "${REPO}/tags" | grep "/refs/tags/.*\.zip" | sed -E 's/.*\/refs\/tags\/(.*)\.zip.*$/\1/' | sort -rV | head -1)"
else
TAG="$(curl -skL --connect-timeout 10 -w "%{url_effective}" -o /dev/null "${REPO}/releases/latest" | awk -F'/' '{print $NF}')"
fi
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed"
exit 1
fi
unzip rr-${TAG}.img.zip -d "rr"
export TERM=xterm
sudo ./localbuild.sh create rr/ws rr/rr.img
if [ $? -ne 0 ]; then
echo "create failed"
exit 1
fi
- name: Get data
run: |
sudo apt update
sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies
# Move the EXTERNALLY-MANAGED marker aside so pip3 can install packages system-wide.
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U -r scripts/requirements.txt
python3 scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx"
python3 scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx"
python3 scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx"
python3 scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx"
- name: Upload to Artifacts
if: success()
uses: actions/upload-artifact@v4
with:
name: docs
path: |
docs/*.json
docs/*.xlsx
retention-days: 5
- name: Check and Push
if: success() && (inputs.push == true || github.event.action == 'created')
run: |
echo "Git push ..."
# git checkout main
git pull
status=$(git status -s | grep -E "docs" | awk '{printf " %s", $2}')
if [ -n "${status}" ]; then
git add ${status}
git commit -m "update $(date +%Y-%m-%d" "%H:%M:%S)"
git push -f
fi
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
name: Data
on:
release:
types:
- created
workflow_dispatch:
inputs:
push:
description: "push"
default: false
type: boolean
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@main
with:
ref: main
- name: Init Env
run: |
git config --global user.email "github-actions[bot]@users.noreply.github.com"
git config --global user.name "github-actions[bot]"
sudo timedatectl set-timezone "Asia/Shanghai"
- name: Delay
run: |
echo "Delaying for 1 minutes..."
sleep 60
- name: Get Release RR
run: |
REPO="${{ github.server_url }}/${{ github.repository }}"
PRERELEASE="true"
TAG=""
if [ "${PRERELEASE}" = "true" ]; then
TAG="$(curl -skL --connect-timeout 10 "${REPO}/tags" | grep "/refs/tags/.*\.zip" | sed -E 's/.*\/refs\/tags\/(.*)\.zip.*$/\1/' | sort -rV | head -1)"
else
LATESTURL="$(curl -skL --connect-timeout 10 -w %{url_effective} -o /dev/null "${REPO}/releases/latest")"
TAG="${LATESTURL##*/}"
fi
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip
STATUS=$(curl -kL --connect-timeout 10 -w "%{http_code}" "${REPO}/releases/download/${TAG}/rr-${TAG}.img.zip" -o "rr-${TAG}.img.zip")
if [ $? -ne 0 ] || [ ${STATUS:-0} -ne 200 ]; then
echo "Download failed"
exit 1
fi
unzip rr-${TAG}.img.zip -d "rr"
export TERM=xterm
sudo ./localbuild.sh create rr/ws rr/rr.img
if [ $? -ne 0 ]; then
echo "create failed"
exit 1
fi
- name: Get data
run: |
sudo apt update
sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
sudo apt install -y build-essential libtool pkgconf libzstd-dev liblzma-dev libssl-dev # kmodule dependencies
# Move the EXTERNALLY-MANAGED marker aside so pip3 can install packages system-wide.
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U -r scripts/requirements.txt
python3 scripts/func.py getmodels -w "rr/ws/initrd" -j "docs/models.json" -x "docs/models.xlsx"
python3 scripts/func.py getaddons -w "rr/ws" -j "docs/addons.json" -x "docs/addons.xlsx"
python3 scripts/func.py getmodules -w "rr/ws" -j "docs/modules.json" -x "docs/modules.xlsx"
python3 scripts/func.py getpats -w "rr/ws/initrd" -j "docs/pats.json" -x "docs/pats.xlsx"
- name: Upload to Artifacts
if: success()
uses: actions/upload-artifact@v4
with:
name: docs
path: |
docs/*.json
docs/*.xlsx
retention-days: 5
- name: Check and Push
if: success() && (inputs.push == true || github.event.action == 'created')
run: |
echo "Git push ..."
# git checkout main
git pull
status=$(git status -s | grep -E "docs" | awk '{printf " %s", $2}')
if [ -n "${status}" ]; then
git add ${status}
git commit -m "update $(date +%Y-%m-%d" "%H:%M:%S)"
git push -f
fi
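Editor's note: both sides of this diff resolve the release tag the same way; they differ only in whether the tag is cut out of the redirect URL with awk or with a shell suffix expansion. A minimal sketch of the two lookups used above (assuming REPO points at a reachable GitHub repository):

# Prerelease path: list /tags, keep the source-archive links, take the highest version.
TAG="$(curl -skL "${REPO}/tags" | grep "/refs/tags/.*\.zip" | sed -E 's/.*\/refs\/tags\/(.*)\.zip.*$/\1/' | sort -rV | head -1)"
# Stable path: /releases/latest redirects to /releases/tag/<TAG>; -w '%{url_effective}' prints the final URL.
LATESTURL="$(curl -skL -w '%{url_effective}' -o /dev/null "${REPO}/releases/latest")"
TAG="${LATESTURL##*/}"                     # keep only the part after the last '/'
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"   # strip a leading 'v' if present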

View File

@@ -29,9 +29,7 @@ jobs:
# -*- coding: utf-8 -*-
import json, subprocess
def set_output(name, value):
subprocess.call(f'echo "{name}<<EOF" >> $GITHUB_ENV', shell=True)
subprocess.call(f'echo "{value}" >> $GITHUB_ENV', shell=True)
subprocess.call(f'echo "EOF" >> $GITHUB_ENV', shell=True)
subprocess.call(["echo '{}={}' >> $GITHUB_ENV".format(name, value)], shell=True)
issuetitle = ${{ toJSON(github.event.issue.title) }}
issuebody = ${{ toJSON(github.event.issue.body) }}
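Editor's note: the two set_output variants above differ only in how they write to $GITHUB_ENV; the heredoc delimiter form (the main side) is the syntax GitHub Actions expects for values that may contain newlines, which issue bodies often do. A minimal shell equivalent of that variant, with an illustrative name and value:

{
  echo "name<<EOF"
  printf '%s\n' "${value}"
  echo "EOF"
} >>"${GITHUB_ENV}"
# The one-line form, echo "name=value" >>"${GITHUB_ENV}", is only safe for single-line values.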
@@ -40,12 +38,7 @@ jobs:
warinfo = 'false'
format = ''
size = ''
template = ''
language= ''
sn = ''
macs = ''
tips = ''
model = ''
version = ''
kernel = ''
@@ -56,12 +49,7 @@ jobs:
jsonbody = json.loads(issuebody)
iscustom = 'true'
format = jsonbody.get('format', '')
size = jsonbody.get('size', '')
template = jsonbody.get('template', '')
language = jsonbody.get('language', '')
sn = jsonbody.get('sn', '')
macs = jsonbody.get('macs', '')
tips = jsonbody.get('tips', '')
model = jsonbody.get('model', '')
version = jsonbody.get('version', '')
kernel = jsonbody.get('kernel', '')
@@ -73,17 +61,12 @@ jobs:
if iscustom == 'false':
if issuebody.find('DMI') < 0 and issuebody.find('CPU') < 0 and issuebody.find('NIC') < 0:
warinfo = 'true'
set_output("iscustom", iscustom)
set_output("warinfo", warinfo)
set_output("format", format)
set_output("size", size)
set_output("template", template)
set_output("language", language)
set_output("sn", sn)
set_output("macs", macs)
set_output("tips", tips)
set_output("model", model)
set_output("version", version)
set_output("kernel", kernel)
@@ -176,23 +159,20 @@ jobs:
}
function readConfigKey() {
local result
result=$(sudo yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
local result=$(sudo yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${result}" = "null" ] && echo "" || echo "${result}"
}
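Editor's note: readConfigKey resolves YAML anchors with explode(.) and maps yq's literal "null" result to an empty string. A short usage sketch against a hypothetical user-config.yml containing "model: SA6400":

readConfigKey "model" "user-config.yml"      # prints: SA6400
readConfigKey "missingkey" "user-config.yml" # prints an empty line (key not present)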
function mergeConfigModules() {
# Error: bad file '-': cannot index array with '8139cp' (strconv.ParseInt: parsing "8139cp": invalid syntax)
# When the first key is a pure number, yq will not process it as a string by default. The current solution is to insert a placeholder key.
local MS ML XF
MS="RRORG\n${1// /\\n}"
ML="$(echo -en "${MS}" | awk '{print "modules."$1":"}')"
XF=$(mktemp 2>/dev/null)
XF=${XF:-/tmp/tmp.XXXXXXXXXX}
echo -en "${ML}" | sudo yq -p p -o y >"${XF}"
deleteConfigKey "modules.\"RRORG\"" "${XF}"
sudo yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${2}" "${XF}" 2>/dev/null
rm -f "${XF}"
local MS="RRORG\n${1// /\\n}"
local L="$(echo -en "${MS}" | awk '{print "modules."$1":"}')"
local xmlfile=$(mktemp)
echo -en "${L}" | sudo yq -p p -o y >"${xmlfile}"
deleteConfigKey "modules.\"RRORG\"" "${xmlfile}"
sudo yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${2}" "${xmlfile}" 2>/dev/null
rm -f "${xmlfile}"
}
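Editor's note: both variants of mergeConfigModules insert and later delete the throw-away RRORG key to dodge the yq error quoted in the comment (a first module name such as 8139cp makes yq try to parse the path segment as an array index). A sketch of an alternative that simply quotes the key, shown here with example module names:

# Quoted path segments are always treated as string map keys:
yq eval -n '.modules."8139cp" = "" | .modules.igb = ""'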
REPO="${{ github.server_url }}/${{ github.repository }}"
@@ -204,7 +184,8 @@ jobs:
if [ "${PRERELEASE}" = "true" ]; then
TAG="$(curl -skL --connect-timeout 10 "${REPO}/tags" | grep "/refs/tags/.*\.zip" | sed -E 's/.*\/refs\/tags\/(.*)\.zip.*$/\1/' | sort -rV | head -1)"
else
TAG="$(curl -skL --connect-timeout 10 -w "%{url_effective}" -o /dev/null "${REPO}/releases/latest" | awk -F'/' '{print $NF}')"
LATESTURL="$(curl -skL --connect-timeout 10 -w %{url_effective} -o /dev/null "${REPO}/releases/latest")"
TAG="${LATESTURL##*/}"
fi
[ "${TAG:0:1}" = "v" ] && TAG="${TAG:1}"
rm -f rr-${TAG}.img.zip
@@ -228,8 +209,6 @@ jobs:
# sudo sed -i "s/set -e/set -ex/" rr/ws/initrd/opt/rr/init.sh
# sudo sed -i '/^alias/i\set -x' rr/ws/initrd/opt/rr/menu.sh
sudo sed -i 's/"global.synologydownload.com" "global.download.synology.com"/"global.download.synology.com" "global.synologydownload.com"/g' rr/ws/initrd/opt/rr/menu.sh
[ -n "${{ env.language }}" ] && echo "${{ env.language }}.UTF-8" | sudo tee rr/ws/mnt/p1/.locale
sudo ./localbuild.sh init
@@ -250,39 +229,12 @@ jobs:
exit 1
fi
if [ -n "${{ env.sn }}" ]; then
echo "set sn: ${{ env.sn }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
writeConfigKey "sn" "${{ env.sn }}" "${USER_CONFIG_FILE}"
fi
if [ -n "${{ env.macs }}" ]; then
echo "set macs: ${{ env.macs }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
MACS=($(echo "${{ env.macs }}" | sed 's/[:-]//g' | sed 's/.*/\U&/' | sed 's/[;,]/ /g'))
writeConfigKey "mac1" "${MACS[0]}" "${USER_CONFIG_FILE}"
writeConfigKey "mac2" "${MACS[1]}" "${USER_CONFIG_FILE}"
fi
if [ -n "${{ env.tips }}" ]; then
echo "set tips: ${{ env.tips }}"
echo -e "${{ env.tips }}" | sudo tee rr/ws/mnt/p3/AddTips
fi
if [ -n "${{ env.addons }}" ]; then
echo "set addons: ${{ env.addons }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
writeConfigKey "addons" "{}" "${USER_CONFIG_FILE}"
IFS=',' read -ra ADDON_ARR <<< "${{ env.addons }}"
for A in "${ADDON_ARR[@]}"; do
if echo "${A}" | grep -qE '^[^:]+:[^:]+$'; then
KEY="$(echo "${A}" | cut -d':' -f1 | xargs)"
VAL="$(echo "${A}" | cut -d':' -f2 | xargs)"
else
KEY="${A}"
VAL=""
fi
writeConfigKey "addons.\"${KEY}\"" "${VAL}" "${USER_CONFIG_FILE}"
for A in $(echo "${{ env.addons }}" | sed 's/,/ /g'); do
writeConfigKey "addons.\"${A}\"" "" "${USER_CONFIG_FILE}"
done
fi
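Editor's note: the main-side loop above accepts an optional per-addon parameter in name:value form, while the 25.1.0 side only takes bare names. A simplified, runnable equivalent of the main-side parsing, with purely illustrative input values:

IFS=',' read -ra ADDON_ARR <<<"acpid,reboottoloader:5"
for A in "${ADDON_ARR[@]}"; do
  KEY="${A%%:*}"; VAL=""
  [ "${A}" != "${KEY}" ] && VAL="${A#*:}"          # text after the first ':' becomes the value
  printf 'addons."%s" = "%s"\n' "${KEY}" "${VAL}"  # acpid -> "", reboottoloader -> "5"
done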
@@ -302,44 +254,19 @@ jobs:
exit 1
fi
if [ "true" = "${{ env.template }}" ]; then
echo "set template: ${{ env.template }}"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
writeConfigKey "sn" "" "${USER_CONFIG_FILE}"
writeConfigKey "mac1" "" "${USER_CONFIG_FILE}"
writeConfigKey "mac2" "" "${USER_CONFIG_FILE}"
fi
sudo ./localbuild.sh pack rr/rr.img
if [ $? -ne 0 ]; then
echo "pack failed"
exit 1
fi
case "${{ env.size }}" in
2GB)
echo "2GB"
;;
4GB)
echo "4GB"
sudo ./localbuild.sh resize rr/rr.img +2048M
;;
8GB)
echo "8GB"
sudo ./localbuild.sh resize rr/rr.img +6144M
;;
*)
echo "unknown size"
;;
esac
ls rr -al
RR_VERSION_FILE="rr/ws/mnt/p1/RR_VERSION"
USER_CONFIG_FILE="rr/ws/mnt/p1/user-config.yml"
{
echo "RR: "
echo " VERSION: $(cat "${RR_VERSION_FILE}" 2>/dev/null | head -1)"
echo " VERSION: $(cat ${RR_VERSION_FILE} 2>/dev/null | head -1)"
echo " CUSTOM: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
echo
echo "DSM:"
@@ -354,52 +281,34 @@ jobs:
echo "Of course, you can also modify the settings yourself."
} >README.txt
case "${{ env.format }}" in
ova)
echo "OVA"
. scripts/func.sh "${{ secrets.RRORG }}"
convertova "rr/rr.img" "rr/rr.ova"
(cd rr && sha256sum rr.ova >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.ova.zip" -j rr/rr.ova ${USER_CONFIG_FILE} sha256sum README.txt
;;
vmx)
echo "VMX"
. scripts/func.sh "${{ secrets.RRORG }}"
convertvmx "rr/rr.img" "rr.vmx" # rr.vmx is a directory
(cd rr.vmx && sha256sum * >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vmx.zip" -r rr.vmx ${USER_CONFIG_FILE} sha256sum README.txt
;;
vmdk)
echo "VMDK"
qemu-img convert rr/rr.img -O vmdk -o 'adapter_type=lsilogic,subformat=streamOptimized,compat6' rr/rr.vmdk
(cd rr && sha256sum rr.vmdk >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vmdk.zip" -j rr/rr.vmdk ${USER_CONFIG_FILE} sha256sum README.txt
;;
flat)
echo "FLAT"
qemu-img convert rr/rr.img -O vmdk -o 'adapter_type=lsilogic,subformat=monolithicFlat,compat6' rr/rr.vmdk
(cd rr && sha256sum rr*.vmdk >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.flat.zip" -j rr/rr*.vmdk ${USER_CONFIG_FILE} sha256sum README.txt
;;
vhd)
echo "VHD"
. scripts/func.sh "${{ secrets.RRORG }}"
qemu-img convert rr/rr.img -O vpc rr/rr.vhd
createvmc "rr/rr.vhd" "rr/rr.vmc"
(cd rr && sha256sum rr.vhd >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vhd.zip" -j rr/rr.vmc rr/rr.vhd ${USER_CONFIG_FILE} sha256sum README.txt
;;
vhdx)
echo "VHDX"
qemu-img convert rr/rr.img -O vhdx -o subformat=dynamic rr/rr.vhdx
(cd rr && sha256sum rr.vhdx >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vhdx.zip" -j rr/rr.vhdx ${USER_CONFIG_FILE} sha256sum README.txt
;;
*)
echo "IMG"
(cd rr && sha256sum rr.img >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.img.zip" -j rr/rr.img ${USER_CONFIG_FILE} sha256sum README.txt
esac
if [ "${{ env.format }}" = "ova" ]; then
. scripts/func.sh "${{ secrets.RRORG }}"
convertova "rr/rr.img" "rr/rr.ova"
(cd rr; sha256sum rr.ova >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.ova.zip" -j rr/rr.ova ${USER_CONFIG_FILE} sha256sum README.txt
elif [ "${{ env.format }}" = "vmx" ]; then
. scripts/func.sh "${{ secrets.RRORG }}"
convertvmx "rr/rr.img" "rr.vmx" # rr.vmx is a directory
(cd rr.vmx; sha256sum * >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vmx.zip" -r rr.vmx ${USER_CONFIG_FILE} sha256sum README.txt
elif [ "${{ env.format }}" = "vmdk" ]; then
qemu-img convert rr/rr.img -O vmdk -o 'adapter_type=lsilogic,subformat=streamOptimized,compat6' rr/rr.vmdk
(cd rr; sha256sum rr.vmdk >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vmdk.zip" -j rr/rr.vmdk ${USER_CONFIG_FILE} sha256sum README.txt
elif [ "${{ env.format }}" = "vhd" ]; then
. scripts/func.sh "${{ secrets.RRORG }}"
qemu-img convert rr/rr.img -O vpc rr/rr.vhd
createvmc "rr/rr.vhd" "rr/rr.vmc"
(cd rr; sha256sum rr.vhd >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vhd.zip" -j rr/rr.vmc rr/rr.vhd ${USER_CONFIG_FILE} sha256sum README.txt
elif [ "${{ env.format }}" = "vhdx" ]; then
qemu-img convert rr/rr.img -O vhdx -o subformat=dynamic rr/rr.vhdx
(cd rr; sha256sum rr.vhdx >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.vhdx.zip" -j rr/rr.vhdx ${USER_CONFIG_FILE} sha256sum README.txt
else
(cd rr; sha256sum rr.img >../sha256sum)
zip -9 "rr-${MODEL}-${TAG}-${{ github.run_id }}.img.zip" -j rr/rr.img ${USER_CONFIG_FILE} sha256sum README.txt
fi
echo "TAG=${TAG}" >> $GITHUB_ENV
@@ -422,11 +331,19 @@ jobs:
update-mode: replace
body: |
Hi @${{ github.event.issue.user.login }}.
RR-${{ env.model }}-${{ env.TAG }} build succeeded; please download the attachment from the link below (attachments are kept for only 5 days).
RR-${{ env.model }} build succeeded; please download the attachment from the link below (attachments are kept for only 5 days).
> ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
----
emoji: hooray
- name: Close Issues
if: env.iscustom == 'true' && success()
uses: actions-cool/issues-helper@v3
with:
actions: 'close-issue'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
- name: Update Comment Fail
if: env.iscustom == 'true' && failure()
uses: actions-cool/issues-helper@v3
@@ -437,15 +354,7 @@ jobs:
update-mode: replace
body: |
Hi @${{ github.event.issue.user.login }}.
RR-${{ env.model }}-${{ env.TAG }} build failed; please try again.
RR-${{ env.model }} build failed; please try again.
> ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
----
emoji: confused
- name: Close Issues
if: env.iscustom == 'true'
uses: actions-cool/issues-helper@v3
with:
actions: 'close-issue'
token: ${{ secrets.GITHUB_TOKEN }}
issue-number: ${{ github.event.issue.number }}
emoji: confused

.gitignore vendored (1 changed line)
View File

@@ -10,7 +10,6 @@ rr*.vmdk
**.po~
**.mo
downloads.md
tests
Changelog*
sha256sum

View File

@@ -2,6 +2,7 @@
<h1>RR: <small>redpills preinstallation and recovery environment</small></h1>
[![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https://github.com/rrorg/rr&edge_flat=true)](https://github.com/rrorg/rr)
[![GitHub Release](https://img.shields.io/github/v/release/rrorg/rr?logo=github&style=flat-square)](https://github.com/rrorg/rr/releases/latest)
[![GitHub Downloads (all assets, all releases)](https://img.shields.io/github/downloads/rrorg/rr/total?logo=github&style=flat-square)](https://github.com/rrorg/rr/releases)
[![GitHub Issues or Pull Requests by label](https://img.shields.io/github/issues-closed-raw/rrorg/rr/custom?logo=github&style=flat-square&label=custom)](https://rrorg.github.io/rr/)
@@ -18,7 +19,6 @@
### 2: Documentation & FAQ
- [RRManager](https://github.com/T-REX-XP/RRManager)
- [rr-tools](https://github.com/RROrg/rr-tools)
- [blog](https://rrorg.cn)
- [docs](https://rrorg.github.io/rr-docs)
- [📣](https://github.com/orgs/RROrg/discussions)
@@ -27,13 +27,13 @@
- During the compilation process, you need to connect to the Internet to obtain model and version information and download the corresponding ROM.
If you cannot connect to the Internet, please build a pre-compiled bootloader through [RR-CUSTOM](https://rrorg.github.io/rr/).
- Models: [models](https://github.com/RROrg/rr/raw/main/docs/models.xlsx)
- PATs: [pats](https://github.com/RROrg/rr/raw/main/docs/pats.xlsx)
- Addons: [addons](https://github.com/RROrg/rr/raw/main/docs/addons.xlsx)
- Modules: [modules](https://github.com/RROrg/rr/raw/main/docs/modules.xlsx)
> Models: [models](https://github.com/RROrg/rr/raw/main/docs/models.xlsx)
> PATs: [pats](https://github.com/RROrg/rr/raw/main/docs/pats.xlsx)
> Addons: [addons](https://github.com/RROrg/rr/raw/main/docs/addons.xlsx)
> Modules: [modules](https://github.com/RROrg/rr/raw/main/docs/modules.xlsx)
### 4: GPU:
### 4: GPU:
- vGPU:
- [蔚然小站](https://blog.kkk.rs/)

View File

@@ -1 +1 @@
25.9.1
25.1.0

File diff suppressed because it is too large

Binary file not shown.

View File

@@ -1,76 +0,0 @@
<!DOCTYPE html>
<html lang="zh-Hans-CN" data-color-mode="auto" data-light-theme="light" data-dark-theme="dark"
data-a11y-animated-images="system">
<head>
<meta charset="utf-8" />
<link rel="dns-prefetch" href="https://github.githubassets.com" />
<link rel="dns-prefetch" href="https://avatars.githubusercontent.com" />
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com" />
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/" />
<link rel="preconnect" href="https://github.githubassets.com" crossorigin />
<link rel="preconnect" href="https://avatars.githubusercontent.com" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/light-0eace2597ca3.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/dark-a167e256da9c.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/primer-711f412bb361.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/global-78704364aaba.css" />
<style>
select,
.input-style {
width: 100%;
}
.textarea-style {
width: 100%;
}
.loading-message {
text-align: center;
}
</style>
<script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script>
<script>
async function fetchChangelog() {
let page = 1;
const changelogContainer = document.getElementById('changelog');
const loadingMessage = document.createElement('p');
loadingMessage.textContent = 'Loading...';
loadingMessage.className = 'loading-message';
changelogContainer.appendChild(loadingMessage);
while (true) {
const response = await fetch(`https://api.github.com/repos/RROrg/rr/releases?page=${page}&per_page=100`);
const releases = await response.json();
if ((!response.ok) || (releases.length === 0)) {
const errorMessage = document.createElement('p');
errorMessage.textContent = releases.message;
errorMessage.className = 'error-message';
changelogContainer.appendChild(errorMessage);
break;
}
releases.forEach(release => {
const releaseElement = document.createElement('div');
releaseElement.innerHTML = `<h2><a href="${release.html_url}" target="_blank">${release.name}</a></h2>${marked.parse(release.body)}`;
changelogContainer.appendChild(releaseElement);
});
page++;
}
changelogContainer.removeChild(loadingMessage);
}
document.addEventListener('DOMContentLoaded', fetchChangelog);
</script>
</head>
<body>
<div id="changelog"></div>
</body>
</html>

View File

@@ -1,522 +1,406 @@
<!DOCTYPE html>
<html lang="zh-Hans-CN" data-color-mode="auto" data-light-theme="light" data-dark-theme="dark"
data-a11y-animated-images="system">
<head>
<meta charset="utf-8" />
<link rel="dns-prefetch" href="https://github.githubassets.com" />
<link rel="dns-prefetch" href="https://avatars.githubusercontent.com" />
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com" />
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/" />
<link rel="preconnect" href="https://github.githubassets.com" crossorigin />
<link rel="preconnect" href="https://avatars.githubusercontent.com" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/light-0eace2597ca3.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/dark-a167e256da9c.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/primer-711f412bb361.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/global-78704364aaba.css" />
<style>
select,
.input-style {
width: 100%;
}
.textarea-style {
width: 100%;
}
#logo {
text-align: center;
margin: 20px auto !important;
}
#labels {
text-align: center;
margin: 20px auto !important;
height: auto !important;
}
#products {
text-align: center;
margin: 20px auto !important;
height: auto !important;
}
</style>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script src="https://polyfill.io/v3/polyfill.min.js"></script>
<!-- <script src="https://cdn.jsdelivr.net/npm/marked/marked.min.js"></script> -->
<script type="application/javascript">
var repo = "RROrg/rr"
var debug = getUrlParam("debug");
var _models = {};
var _pats = {};
var _addons = {};
var _modules = {};
function getUrlParam(paraName) {
var url = document.location.toString();
var arrObj = url.split("?");
if (arrObj.length > 1) {
var arrPara = arrObj[1].split("&");
var arr;
for (var i = 0; i < arrPara.length; i++) {
arr = arrPara[i].split("=");
if (arr != null && arr[0] == paraName) {
return arr[1];
}
}
}
return "";
}
function httpGetAsync(theUrl, callback) {
let xmlHttpReq = new XMLHttpRequest();
xmlHttpReq.onreadystatechange = function () {
if (xmlHttpReq.readyState == 4 && xmlHttpReq.status == 200)
callback(xmlHttpReq.responseText);
};
xmlHttpReq.open("GET", theUrl, true); // true for asynchronous
xmlHttpReq.send(null);
}
window.onload = function () {
init();
}
function init() {
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/models.json", function (result) {
_models = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/pats.json", function (result) {
_pats = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/addons.json", function (result) {
_addons = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/modules.json", function (result) {
_modules = JSON.parse(result);
$("#model").on("change", changeModel);
$("#version").on("change", changeVersion);
$("#language").on("change", changeVersion);
$("#kernel").on("change", chanageKernel);
$('#addons').val("acpid,mountloader,powersched,reboottoloader,trivial,vmtools");
if (debug) {
$('#sn_item').show();
$('#macs_item').show();
$('#tips_item').show();
} else {
$('#sn_item').hide();
$('#macs_item').hide();
$('#tips_item').hide();
}
setModels();
});
});
});
});
}
function setModels() {
var models = [];
for (var P in _models) {
models = models.concat(_models[P]["models"]);
}
$("#model")[0].options.length = 0;
for (var i = 0; i < models.length; i++) {
var model = models[i];
$("#model").append(`<option value="${model}">${model}</option>`);
}
changeModel();
}
function changeModel() {
model = $("#model").val();
var versions = [];
if (model in _pats) {
versions = Object.keys(_pats[model]);
versions.sort((a, b) => b.localeCompare(a));
}
$("#version")[0].options.length = 0;
for (var i = 0; i < versions.length; i++) {
var version = versions[i];
$("#version").append(`<option value="${version}">${version}</option>`);
}
changeproductsimage();
changeVersion();
}
function changeproductsimage() {
//var model = $("#model").val().replace("#","").replace("+","plus");
//$('#products').html(`<img src="https://www.synology.cn/img/products/detail/${model}/heading.png" width="20%">`);
var model = $("#model").val().replace("#", "").replace("+", "%2B");
$('#products').html(`<img src="https://www.synology.com/api/products/getPhoto?product=${model}&type=img_s&sort=0" width="20%">`);
}
function changeVersion() {
createAddonsBtn();
createModulesBtn();
chanageKernel();
}
function createAddonsBtn() {
var language = $("#language").val();
var extstr = $('#addons').val().split(",");
var idx = 1;
var html = `<div class="form-group-body" id="addons_btns">`;
for (var ext in _addons) {
var dispar = "";
if (_addons[ext]["system"] == true) {
dispar = "disabled";
}
var par = "";
if (extstr.includes(ext)) {
par = "btn-danger";
}
html += `<button type="button" class="btn btn-primary btn-sm mt-2 ${par}" id="btn_${ext}" ${par} ${dispar} onclick="return onclickextAddon('${ext}')" autofocus="" title="${_addons[ext]["description"][language]}">${ext}</button>&nbsp;`;
//if (idx++ % 10 == 0) html += "<br />";
}
html += `</div>`;
$('#addons_btns').html(html);
}
function createModulesBtn() {
var model = $("#model").val();
var version = $("#version").val().split(".").slice(0, 2).join(".");
var platform = "";
for (var P in _models) {
if (_models[P]["models"].includes(model)) {
platform = P + "-" + _models[P]["productvers"][version];
break;
}
}
var extstr = [];
var idx = 1;
var html = `<div class="form-group-body" id="modules_btns">`;
for (var ext in _modules[platform]) {
extstr.push(ext);
var par = "btn-danger";
html += `<button type="button" class="btn btn-primary btn-sm mt-2 ${par}" id="btn_${ext}" ${par} onclick="return onclickextModule('${ext}')" autofocus="" title="${_modules[platform][ext]["description"]}">${ext}</button>&nbsp;`;
//if (idx++ % 10 == 0) html += "<br />";
}
html += `</div>`;
$('#modules_btns').html(html);
if (extstr.length > 0) {
$('#modules').val(extstr.join(","));
}
}
function chanageKernel() {
var model = $("#model").val();
var version = $("#version").val().split(".").slice(0, 2).join(".");
var kernel = $("#kernel").val();
if (model == "SA6400" && version == "7.2") {
$("#kernel_item").show();
if (kernel == "custom") {
$('#modules_item').hide();
} else {
$('#modules_item').show();
}
} else {
$('#modules_item').show();
$("#kernel_item").hide();
}
}
function onclickextAddon(ext) {
var btnobj = $("#btn_" + ext);
var extstr = $('#addons').val().split(",");
if (btnobj.hasClass("btn-danger")) {
extstr.map((val, i) => {
if (val.split(':')[0] === ext) {
extstr.splice(i, 1);
}
});
btnobj.removeClass("btn-danger");
} else {
btnobj.addClass("btn-danger");
var param = window.prompt("Please enter parameters (optional):", "");
if (param !== null && param.trim() !== "") {
extstr.push(ext + ":" + param.trim());
} else {
extstr.push(ext);
}
}
extstr.map((val, i) => {
if (val === "") {
extstr.splice(i, 1);
}
});
$('#addons').val(extstr.join(","));
}
function onclickextModule(ext) {
var btnobj = $("#btn_" + ext);
var extstr = $('#modules').val().split(",");
if (btnobj.hasClass("btn-danger")) {
extstr.map((val, i) => {
if (val === ext) {
extstr.splice(i, 1);
}
});
btnobj.removeClass("btn-danger");
} else {
btnobj.addClass("btn-danger");
extstr.push(ext);
}
extstr.map((val, i) => {
if (val === "") {
extstr.splice(i, 1);
}
});
$('#modules').val(extstr.join(","));
}
function createIssues() {
var form = document.getElementById("inputs");
let formData = new FormData(form);
var title = "custom";
var body = {};
var _parameters = ["title", "format", "size", "template", "language", "model", "version", "kernel", "addons", "modules"];
if (debug) {
_parameters.push("sn", "macs", "tips");
}
for (var key in _parameters) {
var name = _parameters[key];
if ($("#" + name).is(":hidden")) { continue; }
if (name == "title") {
if ($("#" + name).val()) {
title += " " + $("#" + name).val();
} else {
title += " " + new Date().toISOString().substr(0, 10);
}
continue;
}
if ($("#" + name).val()) {
body[name] = $("#" + name).val();
}
}
body = JSON.stringify(body).replace(/\+/g, "%2b");
window.location.href = `https://github.com/${repo}/issues/new?title=${title}&body=${body}`;
}
</script>
<title>RR-CUSTOM</title>
<link rel="icon" href="https://avatars.githubusercontent.com/u/151816514?s=200&v=4">
</head>
<body class="logged-in env-production page-responsive" style="word-wrap: break-word">
<div class="application-main left-0 text-left p-3 mx-auto container-xl px-3 px-md-4 px-lg-5 mt-4">
<pre id="logo" style="
display: block;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace !important;
font-size: 12px !important;
line-height: 12px !important;
margin: 15px 10px;
color: red;
">
██████╗ ██████╗ ██████╗██╗ ██╗███████╗████████╗ ██████╗ ███╗ ███╗
██╔══██╗██╔══██╗ ██╔════╝██║ ██║██╔════╝╚══██╔══╝██╔═══██╗████╗ ████║
██████╔╝██████╔╝█████╗██║ ██║ ██║███████╗ ██║ ██║ ██║██╔████╔██║
██╔══██╗██╔══██╗╚════╝██║ ██║ ██║╚════██║ ██║ ██║ ██║██║╚██╔╝██║
██║ ██║██║ ██║ ╚██████╗╚██████╔╝███████║ ██║ ╚██████╔╝██║ ╚═╝ ██║
╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝
</pre>
<div class="shields" id="labels">
<a id="titleA" href="https://github.com/RROrg/rr">
<img alt="GitHub Release"
src="https://img.shields.io/github/v/release/rrorg/rr?include_prereleases&style=flat-square&label=current">
</a>
<a id="titleA" href="https://github.com/RROrg/rr">
<img alt="GitHub Issues"
src="https://img.shields.io/github/issues-closed-raw/rrorg/rr/custom?style=flat-square&label=custom">
</a>
<a id="titleB" href="./changelogs.html">
<img alt="GitHub Release" src="https://img.shields.io/badge/Changelogs-8A2BE2&style=flat">
</a>
</div>
<div class="image" id="products">
<img src="https://www.synology.cn/img/products/detail/SA6400/heading.png" width="20%">
</div>
<div class="form-group mt-1 mb-2" id="title_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Title: (Please do not delete the "custom " in the title of
the issue creation page.)</label>
</div>
<div class="form-group-body">
<input class="form-control input-contrast input-sm" type="text" id="title" name="inputs[title]" value="" />
</div>
</div>
<div class="form-group mt-1 mb-2" id="format_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Format:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="format" name="inputs[format]" value="">
<option selected="selected" value="img">img</option>
<option value="ova">ova</option>
<option value="vmx">vmx</option>
<option value="vmdk">vmdk</option>
<option value="flat">flat</option>
<option value="vhd">vhd</option>
<option value="vhdx">vhdx</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="size_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Size:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="size" name="inputs[size]" value="">
<option value="2GB">2GB</option>
<option selected="selected" value="4GB">4GB</option>
<option value="8GB">8GB</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="template_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Template:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="template" name="inputs[template]"
value="">
<option value="true">true</option>
<option selected="selected" value="false">false</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="language_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Language:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="language" name="inputs[language]"
value="">
<option value="ar_SA">ar_SA</option>
<option value="de_DE">de_DE</option>
<option selected="selected" value="en_US">en_US</option>
<option value="es_ES">es_ES</option>
<option value="fr_FR">fr_FR</option>
<option value="ja_JP">ja_JP</option>
<option value="ko_KR">ko_KR</option>
<option value="ru_RU">ru_RU</option>
<option value="th_TH">th_TH</option>
<option value="tr_TR">tr_TR</option>
<option value="uk_UA">uk_UA</option>
<option value="vi_VN">vi_VN</option>
<option value="zh_CN">zh_CN</option>
<option value="zh_HK">zh_HK</option>
<option value="zh_TW">zh_TW</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="sn_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">SN:</label>
</div>
<div class="form-group-body">
<input class="form-control input-contrast input-sm" type="text" id="sn" name="inputs[sn]" value=""></input>
</div>
</div>
<div class="form-group mt-1 mb-2" id="macs_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">MACs: (Please separate multiple with ','.)</label>
</div>
<div class="form-group-body">
<input class="form-control input-contrast input-sm" type="text" id="macs" name="inputs[macs]" value=""></input>
</div>
</div>
<div class="form-group mt-1 mb-2" id="tips_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Tips:</label>
</div>
<div class="form-group-body">
<input class="form-control input-contrast input-sm" type="text" id="tips" name="inputs[tips]" value=""></input>
</div>
</div>
<div class="form-group mt-1 mb-2" id="model_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Model:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="model" name="inputs[model]" value="">
<option selected="selected" value="SA6400">SA6400</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="version_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Version:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/pats.xlsx"> Details</a>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="version" name="inputs[version]" value="">
<option selected="selected" value="7.2">7.2</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="kernel_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Kernel: (only "custom" supports Hyper-V, Xen.)</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="kernel" name="inputs[kernel]" value="">
<option selected="selected" value="official">official</option>
<option value="custom">custom</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="addons_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Addons:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/addons.xlsx"> Details</a>
</div>
<div class="form-group-body">
<textarea class="textarea-style input-contrast input-sm" type="text" id="addons" name="inputs[addons]"
disabled></textarea>
<div class="form-group-body" id="addons_btns">
</div>
</div>
</div>
<div class="form-group mt-1 mb-2" id="modules_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Modules:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/modules.xlsx"> Details</a>
</div>
<div class="form-group-body">
<textarea class="textarea-style input-contrast input-sm" type="text" id="modules" name="inputs[modules]"
disabled></textarea>
<div class="form-group-body" id="modules_btns">
</div>
</div>
</div>
<div data-replace-remote-form-target="" class="workflow-dispatch">
<form id="inputs">
<button type="button" class="btn State--merged" onclick="return createIssues()" autofocus="">
Create
</button>
</form>
</div>
</div>
<footer class="footer width-full container-xl mt-3 text-center color-fg-muted">
<a aria-label="RROrg" title="GitHub" class="footer-octicon mr-2" href="https://github.com/RROrg">
<img class="avatar rounded-2 avatar-user" src="https://avatars.githubusercontent.com/u/151816514?s=88&amp;v=4"
width="40" height="40" alt="@RROrg" />
</a>
<a aria-label="RROrg" title="GitHub" class="footer-octicon mr-2" href="https://github.com/wjz304">
<img class="avatar rounded-2 avatar-user" src="https://avatars.githubusercontent.com/u/5615843?s=88&amp;v=4"
width="40" height="40" alt="@wjz304" />
</a>
<span> © 2022 RROrg, Ing, Inc. </span>
</footer>
</body>
</html>
<!DOCTYPE html>
<html lang="zh-Hans-CN" data-color-mode="auto" data-light-theme="light" data-dark-theme="dark"
data-a11y-animated-images="system">
<head>
<meta charset="utf-8" />
<link rel="dns-prefetch" href="https://github.githubassets.com" />
<link rel="dns-prefetch" href="https://avatars.githubusercontent.com" />
<link rel="dns-prefetch" href="https://github-cloud.s3.amazonaws.com" />
<link rel="dns-prefetch" href="https://user-images.githubusercontent.com/" />
<link rel="preconnect" href="https://github.githubassets.com" crossorigin />
<link rel="preconnect" href="https://avatars.githubusercontent.com" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/light-0eace2597ca3.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/dark-a167e256da9c.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/primer-711f412bb361.css" />
<link crossorigin="anonymous" media="all" rel="stylesheet"
href="https://github.githubassets.com/assets/global-78704364aaba.css" />
<style>
select,
.input-style {
width: 100%;
}
.textarea-style {
width: 100%;
}
</style>
<script src="https://code.jquery.com/jquery-3.3.1.min.js"></script>
<script src="https://polyfill.io/v3/polyfill.min.js"></script>
<!-- <script src="https://fastly.jsdelivr.net/npm/marked/marked.min.js"></script> -->
<script type="application/javascript">
var repo = "RROrg/rr"
var _models = {};
var _pats = {};
var _addons = {};
var _modules = {};
function httpGetAsync(theUrl, callback) {
let xmlHttpReq = new XMLHttpRequest();
xmlHttpReq.onreadystatechange = function () {
if (xmlHttpReq.readyState == 4 && xmlHttpReq.status == 200)
callback(xmlHttpReq.responseText);
};
xmlHttpReq.open("GET", theUrl, true); // true for asynchronous
xmlHttpReq.send(null);
}
window.onload = function () {
init();
}
function init() {
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/models.json", function (result) {
_models = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/pats.json", function (result) {
_pats = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/addons.json", function (result) {
_addons = JSON.parse(result);
httpGetAsync("https://raw.githubusercontent.com/RROrg/rr/main/docs/modules.json", function (result) {
_modules = JSON.parse(result);
$("#model").on("change", changeModel);
$("#version").on("change", changeVersion);
$("#language").on("change", changeVersion);
$("#kernel").on("change", chanageKernel);
$('#addons').val("acpid,mountloader,powersched,reboottoloader,trivial");
setModels();
});
});
});
});
}
function setModels() {
var models = [];
for (var P in _models) {
models = models.concat(_models[P]["models"]);
}
$("#model")[0].options.length = 0;
for (var i = 0; i < models.length; i++) {
var model = models[i];
$("#model").append(`<option value="${model}">${model}</option>`);
}
changeModel();
}
function changeModel() {
model = $("#model").val();
var versions = [];
if (model in _pats) {
versions = Object.keys(_pats[model]);
versions.sort((a, b) => b.localeCompare(a));
}
$("#version")[0].options.length = 0;
for (var i = 0; i < versions.length; i++) {
var version = versions[i];
$("#version").append(`<option value="${version}">${version}</option>`);
}
changeproductsimage();
changeVersion();
}
function changeproductsimage() {
//var model = $("#model").val().replace("#","").replace("+","plus");
//$('#products').html(`<img src="https://www.synology.cn/img/products/detail/${model}/heading.png" width="20%">`);
var model = $("#model").val().replace("#", "").replace("+", "%2B");
$('#products').html(`<img src="https://www.synology.com/api/products/getPhoto?product=${model}&type=img_s&sort=0" width="20%">`);
}
function changeVersion() {
createAddonsBtn();
createModulesBtn();
chanageKernel();
}
function createAddonsBtn() {
var language = $("#language").val();
var extstr = $('#addons').val().split(",");
var idx = 1;
var html = `<div class="form-group-body" id="addons_btns">`;
for (var ext in _addons) {
var dispar = "";
if (_addons[ext]["system"] == true) {
dispar = "disabled";
}
var par = "";
if (extstr.includes(ext)) {
par = "btn-danger";
}
html += `<button type="button" class="btn btn-primary btn-sm mt-2 ${par}" id="btn_${ext}" ${par} ${dispar} onclick="return onclickext('addons', '${ext}')" autofocus="" title="${_addons[ext]["description"][language]}">${ext}</button>&nbsp;`;
//if (idx++ % 10 == 0) html += "<br />";
}
html += `</div>`;
$('#addons_btns').html(html);
}
function createModulesBtn() {
var model = $("#model").val();
var version = $("#version").val().split(".").slice(0, 2).join(".");
var platform = "";
for (var P in _models) {
if (_models[P]["models"].includes(model)) {
platform = P + "-" + _models[P]["productvers"][version];
break;
}
}
var extstr = [];
var idx = 1;
var html = `<div class="form-group-body" id="modules_btns">`;
for (var ext in _modules[platform]) {
extstr.push(ext);
var par = "btn-danger";
html += `<button type="button" class="btn btn-primary btn-sm mt-2 ${par}" id="btn_${ext}" ${par} onclick="return onclickext('modules', '${ext}')" autofocus="" title="${_modules[platform][ext]["description"]}">${ext}</button>&nbsp;`;
//if (idx++ % 10 == 0) html += "<br />";
}
html += `</div>`;
$('#modules_btns').html(html);
if (extstr.length > 0) {
$('#modules').val(extstr.join(","));
}
}
function chanageKernel() {
var model = $("#model").val();
var version = $("#version").val().split(".").slice(0, 2).join(".");
var kernel = $("#kernel").val();
if (model == "SA6400" && version == "7.2") {
$("#kernel_item").show();
if (kernel == "custom") {
$('#modules_item').hide();
} else {
$('#modules_item').show();
}
} else {
$('#modules_item').show();
$("#kernel_item").hide();
}
}
function onclickext(type, ext) {
var btnobj = $("#btn_" + ext);
var extstr = $('#' + type).val().split(",");
if (btnobj.hasClass("btn-danger")) {
extstr.map((val, i) => {
if (val === ext) {
extstr.splice(i, 1);
}
});
btnobj.removeClass("btn-danger");
} else {
btnobj.addClass("btn-danger");
extstr.push(ext);
}
extstr.map((val, i) => {
if (val === "") {
extstr.splice(i, 1);
}
});
$('#' + type).val(extstr.join(","));
}
function createIssues() {
var form = document.getElementById("inputs");
let formData = new FormData(form);
var title = "custom";
var body = {};
var _parameters = ["title", "format", "language", "model", "version", "kernel", "addons", "modules"];
for (var key in _parameters) {
var name = _parameters[key];
if ($("#" + name).is(":hidden")) { continue; }
if (name == "title") {
if ($("#" + name).val()) {
title += " " + $("#" + name).val();
} else {
title += " " + new Date().toISOString().substr(0, 10);
}
continue;
}
if ($("#" + name).val()) {
body[name] = $("#" + name).val();
}
}
body = JSON.stringify(body).replace(/\+/g, "%2b");
window.location.href = `https://github.com/${repo}/issues/new?title=${title}&body=${body}`;
}
</script>
<title>RR-CUSTOM</title>
<link rel="icon" href="https://avatars.githubusercontent.com/u/151816514?s=200&v=4">
</head>
<body class="logged-in env-production page-responsive" style="word-wrap: break-word">
<div class="application-main left-0 text-left p-3 mx-auto container-xl px-3 px-md-4 px-lg-5 mt-4">
<pre id="logo" style="
display: block;
font-family: ui-monospace, SFMono-Regular, SF Mono, Menlo, Consolas,
Liberation Mono, monospace !important;
font-size: 12px !important;
line-height: 12px !important;
margin: 15px 10px;
color: red;
">
██████╗ ██████╗ ██████╗██╗ ██╗███████╗████████╗ ██████╗ ███╗ ███╗
██╔══██╗██╔══██╗ ██╔════╝██║ ██║██╔════╝╚══██╔══╝██╔═══██╗████╗ ████║
██████╔╝██████╔╝█████╗██║ ██║ ██║███████╗ ██║ ██║ ██║██╔████╔██║
██╔══██╗██╔══██╗╚════╝██║ ██║ ██║╚════██║ ██║ ██║ ██║██║╚██╔╝██║
██║ ██║██║ ██║ ╚██████╗╚██████╔╝███████║ ██║ ╚██████╔╝██║ ╚═╝ ██║
╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝
</pre>
<div class="image" id="products" style="height: 50px; margin-left: 200px;">
<img src="https://www.synology.cn/img/products/detail/SA6400/heading.png" width="20%">
</div>
<div class="flex-auto min-width-0 width-fit mr-3">
<div class="d-flex flex-wrap flex-items-center wb-break-word f3 text-normal">
<a id="titleA" href="https://github.com/RROrg/rr">
<img alt="GitHub Release"
src="https://img.shields.io/github/v/release/rrorg/rr?include_prereleases&style=flat-square&label=current">
<img alt="GitHub Issues"
src="https://img.shields.io/github/issues-closed-raw/rrorg/rr/custom?style=flat-square&label=custom">
</a>
</div>
<div class="btn-link tabnav-tab preview-tab js-preview-tab flex-1 flex-md-auto width-full">
<marquee id="content" direction="up" height="36" scrollamount="1" onMouseOut="this.start()"
onMouseOver="this.stop()">
</marquee>
</div>
</div>
<div class="form-group mt-1 mb-2" id="title_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Title: (Please do not delete the "custom " in the title of
the issue creation page.)</label>
</div>
<div class="form-group-body">
<input class="form-control input-contrast input-sm" type="text" id="title" name="inputs[title]"
value="" />
</div>
</div>
<div class="form-group mt-1 mb-2" id="format_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Format:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="format" name="inputs[format]"
value="">
<option selected="selected" value="img">img</option>
<option value="ova">ova</option>
<option value="vmx">vmx</option>
<option value="vmdk">vmdk</option>
<option value="vhd">vhd</option>
<option value="vhdx">vhdx</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="language_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Language:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="language" name="inputs[language]"
value="">
<option value="ar_SA">ar_SA</option>
<option value="de_DE">de_DE</option>
<option selected="selected" value="en_US">en_US</option>
<option value="es_ES">es_ES</option>
<option value="fr_FR">fr_FR</option>
<option value="ja_JP">ja_JP</option>
<option value="ko_KR">ko_KR</option>
<option value="ru_RU">ru_RU</option>
<option value="th_TH">th_TH</option>
<option value="tr_TR">tr_TR</option>
<option value="uk_UA">uk_UA</option>
<option value="vi_VN">vi_VN</option>
<option value="zh_CN">zh_CN</option>
<option value="zh_HK">zh_HK</option>
<option value="zh_TW">zh_TW</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="model_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Model:</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="model" name="inputs[model]"
value="">
<option selected="selected" value="SA6400">SA6400</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="version_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Version:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/pats.xlsx"> Details</a>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="version" name="inputs[version]"
value="">
<option selected="selected" value="7.2">7.2</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="kernel_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Kernel: (only "custom" supports Hyper-V, Xen.)</label>
</div>
<div class="form-group-body">
<select class="form-select form-control select-sm input-contrast" id="kernel" name="inputs[kernel]"
value="">
<option selected="selected" value="official">official</option>
<option value="custom">custom</option>
</select>
</div>
</div>
<div class="form-group mt-1 mb-2" id="addons_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Addons:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/addons.xlsx"> Details</a>
</div>
<div class="form-group-body">
<textarea class="textarea-style input-contrast input-sm" type="text" id="addons" name="inputs[addons]"
disabled></textarea>
<div class="form-group-body" id="addons_btns">
</div>
</div>
</div>
<div class="form-group mt-1 mb-2" id="modules_item">
<div class="form-group-header">
<label class="color-fg-default text-mono f6">Modules:</label>
<a href="https://github.com/RROrg/rr/raw/main/docs/modules.xlsx"> Details</a>
</div>
<div class="form-group-body">
<textarea class="textarea-style input-contrast input-sm" type="text" id="modules" name="inputs[modules]"
disabled></textarea>
<div class="form-group-body" id="modules_btns">
</div>
</div>
</div>
<div data-replace-remote-form-target="" class="workflow-dispatch">
<form id="inputs">
<button type="button" class="btn State--merged" onclick="return createIssues()" autofocus="">
Create
</button>
</form>
</div>
</div>
<footer class="footer width-full container-xl mt-3 text-center color-fg-muted">
<a aria-label="RROrg" title="GitHub" class="footer-octicon mr-2" href="https://github.com/RROrg">
<img class="avatar rounded-2 avatar-user"
src="https://avatars.githubusercontent.com/u/151816514?s=88&amp;v=4" width="40" height="40"
alt="@RROrg" />
</a>
<a aria-label="RROrg" title="GitHub" class="footer-octicon mr-2" href="https://github.com/wjz304">
<img class="avatar rounded-2 avatar-user" src="https://avatars.githubusercontent.com/u/5615843?s=88&amp;v=4"
width="40" height="40" alt="@wjz304" />
</a>
<span> © 2022 RROrg, Ing, Inc. </span>
</footer>
</body>
</html>

View File

@@ -119,8 +119,8 @@
"7.2": "4.4.302"
},
"models": [
"HD6500",
"FS6400"
"FS6400",
"HD6500"
]
},
"r1000": {
@@ -167,36 +167,5 @@
"models": [
"SA6400"
]
},
"geminilakenk": {
"productvers": {
"7.1": "7.1-5.10.55",
"7.2": "7.2-5.10.55"
},
"models": [
"DS225+",
"DS425+"
]
},
"r1000nk": {
"productvers": {
"7.1": "7.1-5.10.55",
"7.2": "7.2-5.10.55"
},
"models": [
"DS725+"
]
},
"v1000nk": {
"productvers": {
"7.1": "7.1-5.10.55",
"7.2": "7.2-5.10.55"
},
"models": [
"DS1525+",
"DS1825+",
"DS925+",
"RS2825RP+"
]
}
}

Binary file not shown.

File diff suppressed because it is too large

Binary file not shown.

File diff suppressed because it is too large

Binary file not shown.

View File

@@ -1,12 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# shellcheck disable=SC2034
set -e
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
@@ -28,7 +20,7 @@ BUS=$(getBus "${LOADER_DISK}")
clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
COLUMNS=${COLUMNS:-80}
WTITLE="$(printf "$(TEXT "Welcome to %s")" "${RR_TITLE}${RR_RELEASE:+(${RR_RELEASE})}")"
WTITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)"
printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
@@ -41,30 +33,9 @@ BTITLE+="$([ ${EFI} -eq 1 ] && echo " [UEFI]" || echo " [BIOS]")"
BTITLE+="$([ "${BUS}" = "usb" ] && echo " [${BUS^^} flashdisk]" || echo " [${BUS^^} DoM]")"
printf "\033[1;33m%*s\033[0m\n" $(((${#BTITLE} + ${COLUMNS}) / 2)) "${BTITLE}"
if [ -f "${PART1_PATH}/.upgraded" ]; then
MODEL="$(readConfigKey "model" "${USER_CONFIG_FILE}")"
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
if [ -n "${MODEL}" ] && [ -n "${PLATFORM}" ]; then
printf "\033[1;43m%s\033[0m\n" "$(TEXT "Reconfigure after upgrade ...")"
PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")"
PATURL="$(readConfigKey "paturl" "${USER_CONFIG_FILE}")"
PATSUM="$(readConfigKey "patsum" "${USER_CONFIG_FILE}")"
./menu.sh modelMenu "${MODEL}" "${PLATFORM}" || {
echo -e "$(TEXT "Reconfiguration failed!")"
exit 1
}
if [ -n "${PRODUCTVER}" ] && [ -n "${PATURL}" ]; then
./menu.sh productversMenu "${PRODUCTVER}" "${PATURL}" "${PATSUM}" || {
echo -e "$(TEXT "Reconfiguration failed!")"
exit 1
}
fi
fi
rm -f "${PART1_PATH}/.upgraded"
fi
# Check if DSM zImage changed, patch it if necessary
ZIMAGE_HASH="$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")"
if [ -f "${PART1_PATH}/.build" ] || [ "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then
if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_ZIMAGE_FILE}" | awk '{print $1}')" != "${ZIMAGE_HASH}" ]; then
printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM zImage changed")"
${WORK_PATH}/zimage-patch.sh || {
printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM zImage not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
@@ -74,14 +45,14 @@ fi
# Check if DSM ramdisk changed, patch it if necessary
RAMDISK_HASH="$(readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}")"
if [ -f "${PART1_PATH}/.build" ] || [ "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then
if [ -f ${PART1_PATH}/.build ] || [ "$(sha256sum "${ORI_RDGZ_FILE}" | awk '{print $1}')" != "${RAMDISK_HASH}" ]; then
printf "\033[1;43m%s\033[0m\n" "$(TEXT "DSM ramdisk changed")"
${WORK_PATH}/ramdisk-patch.sh || {
printf "\033[1;43m%s\n%s\n%s:\n%s\033[0m\n" "$(TEXT "DSM ramdisk not patched")" "$(TEXT "Please upgrade the bootloader version and try again.")" "$(TEXT "Error")" "$(cat "${LOG_FILE}")"
exit 1
}
fi
[ -f "${PART1_PATH}/.build" ] && rm -f "${PART1_PATH}/.build"
[ -f ${PART1_PATH}/.build ] && rm -f ${PART1_PATH}/.build
# Load necessary variables
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
@@ -90,12 +61,13 @@ MODELID="$(readConfigKey "modelid" "${USER_CONFIG_FILE}")"
PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")"
BUILDNUM="$(readConfigKey "buildnum" "${USER_CONFIG_FILE}")"
SMALLNUM="$(readConfigKey "smallnum" "${USER_CONFIG_FILE}")"
DT="$(readConfigKey "dt" "${USER_CONFIG_FILE}")"
KVER="$(readConfigKey "kver" "${USER_CONFIG_FILE}")"
KPRE="$(readConfigKey "kpre" "${USER_CONFIG_FILE}")"
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
LKM="$(readConfigKey "lkm" "${USER_CONFIG_FILE}")"
DT="$(readConfigKey "platforms.${PLATFORM}.dt" "${WORK_PATH}/platforms.yml")"
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
MEV="$(virt-what 2>/dev/null | head -1)"
DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')"
CPU="$(awk -F': ' '/model name/ {print $2}' /proc/cpuinfo | uniq)"
@ -122,7 +94,7 @@ if ! readConfigMap "addons" "${USER_CONFIG_FILE}" | grep -q nvmesystem; then
[ ${HASATA} = "0" ] && printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please insert at least one sata/scsi disk for system installation (except for the bootloader disk).")"
fi
if checkBIOS_VT_d && [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
if checkBIOS_VT_d && [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
printf "\033[1;33m*** %s ***\033[0m\n" "$(TEXT "Notice: Please disable Intel(VT-d)/AMD(AMD-Vi) in BIOS/UEFI settings if you encounter a boot failure.")"
fi
@ -132,7 +104,6 @@ SN="$(readConfigKey "sn" "${USER_CONFIG_FILE}")"
MAC1="$(readConfigKey "mac1" "${USER_CONFIG_FILE}")"
MAC2="$(readConfigKey "mac2" "${USER_CONFIG_FILE}")"
KERNELPANIC="$(readConfigKey "kernelpanic" "${USER_CONFIG_FILE}")"
HDDSORT="$(readConfigKey "hddsort" "${USER_CONFIG_FILE}")"
USBASINTERNAL="$(readConfigKey "usbasinternal" "${USER_CONFIG_FILE}")"
EMMCBOOT="$(readConfigKey "emmcboot" "${USER_CONFIG_FILE}")"
MODBLACKLIST="$(readConfigKey "modblacklist" "${USER_CONFIG_FILE}")"
@ -143,33 +114,16 @@ declare -A CMDLINE
CMDLINE['syno_hw_version']="${MODELID:-${MODEL}}"
CMDLINE['vid']="${VID:-"0x46f4"}" # Sanity check
CMDLINE['pid']="${PID:-"0x0001"}" # Sanity check
if [ -z "${SN}" ]; then
SN="$(generateSerial "${MODEL}")"
writeConfigKey "sn" "${SN}" "${USER_CONFIG_FILE}"
fi
CMDLINE['sn']="${SN}"
if [ -z "${MAC1}" ]; then
if [ -n "${MAC2}" ]; then
MAC1=${MAC2}
MAC2=""
writeConfigKey "mac1" "${MAC1}" "${USER_CONFIG_FILE}"
writeConfigKey "mac2" "${MAC2}" "${USER_CONFIG_FILE}"
else
NETIF_NUM=2
MACS="$(generateMacAddress "${MODEL}" ${NETIF_NUM})"
for I in $(seq 1 ${NETIF_NUM}); do
eval MAC${I}="$(echo ${MACS} | cut -d' ' -f${I})"
writeConfigKey "mac${I}" "$(echo ${MACS} | cut -d' ' -f${I})" "${USER_CONFIG_FILE}"
done
fi
fi
CMDLINE['netif_num']="0"
[ -z "${MAC1}" ] && [ -n "${MAC2}" ] && {
MAC1=${MAC2}
MAC2=""
} # Sanity check
[ -n "${MAC1}" ] && CMDLINE['mac1']="${MAC1}" && CMDLINE['netif_num']="1"
[ -n "${MAC2}" ] && CMDLINE['mac2']="${MAC2}" && CMDLINE['netif_num']="2"
CMDLINE['skip_vender_mac_interfaces']="$(seq -s, 0 $((${CMDLINE['netif_num']:-1} - 1)))"
# set fixed cmdline
if grep -q "force_junior" /proc/cmdline; then
CMDLINE['force_junior']=""
@ -183,10 +137,10 @@ if [ ${EFI} -eq 1 ]; then
else
CMDLINE['noefi']=""
fi
if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ]; then
if [ ! "${BUS}" = "usb" ]; then
SZ=$(blockdev --getsz "${LOADER_DISK}" 2>/dev/null) # SZ=$(cat /sys/block/${LOADER_DISK/\/dev\//}/size)
SS=$(blockdev --getss "${LOADER_DISK}" 2>/dev/null) # SS=$(cat /sys/block/${LOADER_DISK/\/dev\//}/queue/hw_sector_size)
SZ=$(blockdev --getsz ${LOADER_DISK} 2>/dev/null) # SZ=$(cat /sys/block/${LOADER_DISK/\/dev\//}/size)
SS=$(blockdev --getss ${LOADER_DISK} 2>/dev/null) # SS=$(cat /sys/block/${LOADER_DISK/\/dev\//}/queue/hw_sector_size)
SIZE=$((${SZ:-0} * ${SS:-0} / 1024 / 1024 + 10))
# Read SATADoM type
SATADOM="$(readConfigKey "satadom" "${USER_CONFIG_FILE}")"
@ -196,8 +150,6 @@ if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
CMDLINE["elevator"]="elevator"
else
CMDLINE["split_lock_detect"]="off"
# CMDLINE['module.sig_enforce']="0"
# CMDLINE['loadpin.enforce']="0"
fi
if [ "${DT}" = "true" ]; then
@ -211,6 +163,7 @@ fi
CMDLINE["HddHotplug"]="1"
CMDLINE["vender_format_version"]="2"
CMDLINE['skip_vender_mac_interfaces']="0,1,2,3,4,5,6,7"
CMDLINE['earlyprintk']=""
CMDLINE['earlycon']="uart8250,io,0x3f8,115200n8"
CMDLINE['console']="ttyS0,115200n8"
@ -229,29 +182,19 @@ CMDLINE['pcie_aspm']="off"
# CMDLINE['amd_pstate']="disable"
# CMDLINE['nox2apic']="" # check platform
# CMDLINE['nomodeset']=""
CMDLINE['nowatchdog']=""
CMDLINE['modprobe.blacklist']="${MODBLACKLIST}"
CMDLINE['mev']="${MEV:-physical}"
if [ "${MEV:-physical}" = "vmware" ]; then
CMDLINE['tsc']="reliable"
CMDLINE['pmtmr']="0x0"
fi
if [ "${HDDSORT}" = "true" ]; then
CMDLINE['hddsort']=""
fi
if [ "${USBASINTERNAL}" = "true" ]; then
CMDLINE['usbasinternal']=""
fi
if echo "apollolake geminilake purley geminilakenk" | grep -wq "${PLATFORM}"; then
if echo "apollolake geminilake purley" | grep -wq "${PLATFORM}"; then
CMDLINE["nox2apic"]=""
fi
# # Save command line to grubenv RR_CMDLINE= ... nox2apic
# if echo "apollolake geminilake purley geminilakenk" | grep -wq "${PLATFORM}"; then
# if echo "apollolake geminilake purley" | grep -wq "${PLATFORM}"; then
# if grep -Eq "^flags.*x2apic.*" /proc/cpuinfo; then
# checkCmdline "rr_cmdline" "nox2apic" || addCmdline "rr_cmdline" "nox2apic"
# fi
@ -265,7 +208,7 @@ fi
# CMDLINE['modprobe.blacklist']+="sdhci,sdhci_pci,sdhci_acpi"
# fi
# fi
if [ "${DT}" = "true" ] && ! echo "purley broadwellnkv2 epyc7002 geminilakenk r1000nk v1000nk" | grep -wq "${PLATFORM}"; then
if [ "${DT}" = "true" ] && ! echo "epyc7002 purley broadwellnkv2" | grep -wq "${PLATFORM}"; then
if ! echo "${CMDLINE['modprobe.blacklist']}" | grep -q "mpt3sas"; then
[ ! "${CMDLINE['modprobe.blacklist']}" = "" ] && CMDLINE['modprobe.blacklist']+=","
CMDLINE['modprobe.blacklist']+="mpt3sas"
@ -277,7 +220,7 @@ fi
# CMDLINE['kvm.ignore_msrs']="1"
# CMDLINE['kvm.report_ignored_msrs']="0"
if echo "apollolake geminilake geminilakenk" | grep -wq "${PLATFORM}"; then
if echo "apollolake geminilake" | grep -wq "${PLATFORM}"; then
CMDLINE["intel_iommu"]="igfx_off"
fi
@ -285,21 +228,13 @@ if echo "purley broadwellnkv2" | grep -wq "${PLATFORM}"; then
CMDLINE["SASmodel"]="1"
fi
SSID="$(cat "${PART1_PATH}/wpa_supplicant.conf" 2>/dev/null | grep 'ssid=' | cut -d'=' -f2 | sed 's/^"//; s/"$//' | xxd -p | tr -d '\n')"
PSK="$(cat "${PART1_PATH}/wpa_supplicant.conf" 2>/dev/null | grep 'psk=' | cut -d'=' -f2 | sed 's/^"//; s/"$//' | xxd -p | tr -d '\n')"
if [ -n "${SSID}" ] && [ -n "${PSK}" ]; then
CMDLINE["wpa.ssid"]="${SSID}"
CMDLINE["wpa.psk"]="${PSK}"
fi
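# Illustration (hypothetical SSID): the hex strings above are plain xxd output, e.g.
#   echo -n "MyWiFi" | xxd -p   # -> 4d7957694669
# which keeps wpa.ssid/wpa.psk free of quoting issues on the kernel cmdline.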
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["network.${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "network" "${USER_CONFIG_FILE}")"
done <<<$(readConfigMap "network" "${USER_CONFIG_FILE}")
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && CMDLINE["${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "cmdline" "${USER_CONFIG_FILE}")"
done <<<$(readConfigMap "cmdline" "${USER_CONFIG_FILE}")
# Prepare command line
CMDLINE_LINE=""
@ -308,7 +243,7 @@ for KEY in "${!CMDLINE[@]}"; do
CMDLINE_LINE+=" ${KEY}"
[ -n "${VALUE}" ] && CMDLINE_LINE+="=${VALUE}"
done
CMDLINE_LINE="$(echo "${CMDLINE_LINE}" | sed 's/^ //')" # Remove leading space
CMDLINE_LINE=$(echo "${CMDLINE_LINE}" | sed 's/^ //') # Remove leading space
printf "%s:\n\033[1;36m%s\033[0m\n" "$(TEXT "Cmdline")" "${CMDLINE_LINE}"
# Check if user wants to modify at this stage
@ -329,7 +264,7 @@ function _bootwait() {
rm -f WB WC
return 1
fi
if ! ps -p "${PPID}" -o cmd | grep -q "menu.sh" && [ -f "${TMP_PATH}/menu.lock" ]; then
if false && [ -f "${WORK_PATH}/menu.lock" ]; then
printf "\r%$((${#MSG} * 2))s\n" " "
printf "\r\033[1;33m%s\033[0m\n" "$(TEXT "Menu opened and booting is interrupted.")"
rm -f WB WC
@ -343,22 +278,20 @@ function _bootwait() {
DIRECT="$(readConfigKey "directboot" "${USER_CONFIG_FILE}")"
if [ "${DIRECT}" = "true" ] || [ "${MEV:-physical}" = "parallels" ]; then
# grubenv file limit is 1024 bytes.
grub-editenv "${USER_RSYSENVFILE}" create
grub-editenv "${USER_RSYSENVFILE}" set rr_version="${WTITLE}"
grub-editenv "${USER_RSYSENVFILE}" set rr_booting="${BTITLE}"
grub-editenv "${USER_RSYSENVFILE}" set dsm_model="${MODEL}(${PLATFORM})"
grub-editenv "${USER_RSYSENVFILE}" set dsm_version="${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))"
grub-editenv "${USER_RSYSENVFILE}" set dsm_kernel="${KERNEL}"
grub-editenv "${USER_RSYSENVFILE}" set dsm_lkm="${LKM}"
grub-editenv "${USER_RSYSENVFILE}" set sys_mev="${MEV:-physical}"
grub-editenv "${USER_RSYSENVFILE}" set sys_dmi="${DMI}"
grub-editenv "${USER_RSYSENVFILE}" set sys_cpu="${CPU}"
grub-editenv "${USER_RSYSENVFILE}" set sys_mem="${MEM}"
grub-editenv ${USER_GRUBENVFILE} set rr_version="${WTITLE}"
grub-editenv ${USER_GRUBENVFILE} set rr_booting="${BTITLE}"
grub-editenv ${USER_GRUBENVFILE} set dsm_model="${MODEL}(${PLATFORM})"
grub-editenv ${USER_GRUBENVFILE} set dsm_version="${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))"
grub-editenv ${USER_GRUBENVFILE} set dsm_kernel="${KERNEL}"
grub-editenv ${USER_GRUBENVFILE} set dsm_lkm="${LKM}"
grub-editenv ${USER_GRUBENVFILE} set sys_mev="${MEV:-physical}"
grub-editenv ${USER_GRUBENVFILE} set sys_dmi="${DMI}"
grub-editenv ${USER_GRUBENVFILE} set sys_cpu="${CPU}"
grub-editenv ${USER_GRUBENVFILE} set sys_mem="${MEM}"
CMDLINE_DIRECT=$(echo "${CMDLINE_LINE}" | sed 's/>/\\\\>/g') # Escape special chars
grub-editenv "${USER_GRUBENVFILE}" set dsm_cmdline="${CMDLINE_DIRECT}"
grub-editenv "${USER_GRUBENVFILE}" set next_entry="direct"
CMDLINE_DIRECT=$(echo ${CMDLINE_LINE} | sed 's/>/\\\\>/g') # Escape special chars
grub-editenv ${USER_GRUBENVFILE} set dsm_cmdline="${CMDLINE_DIRECT}"
grub-editenv ${USER_GRUBENVFILE} set next_entry="direct"
_bootwait || exit 0
@ -366,10 +299,19 @@ if [ "${DIRECT}" = "true" ] || [ "${MEV:-physical}" = "parallels" ]; then
reboot
exit 0
else
rm -f "${USER_RSYSENVFILE}" 2>/dev/null || true
grub-editenv "${USER_GRUBENVFILE}" unset dsm_cmdline
grub-editenv "${USER_GRUBENVFILE}" unset next_entry
ETHX="$(find /sys/class/net/ -mindepth 1 -maxdepth 1 ! -name lo -exec basename {} \; | sort)"
grub-editenv ${USER_GRUBENVFILE} unset rr_version
grub-editenv ${USER_GRUBENVFILE} unset rr_booting
grub-editenv ${USER_GRUBENVFILE} unset dsm_model
grub-editenv ${USER_GRUBENVFILE} unset dsm_version
grub-editenv ${USER_GRUBENVFILE} unset dsm_kernel
grub-editenv ${USER_GRUBENVFILE} unset dsm_lkm
grub-editenv ${USER_GRUBENVFILE} unset sys_mev
grub-editenv ${USER_GRUBENVFILE} unset sys_dmi
grub-editenv ${USER_GRUBENVFILE} unset sys_cpu
grub-editenv ${USER_GRUBENVFILE} unset sys_mem
grub-editenv ${USER_GRUBENVFILE} unset dsm_cmdline
grub-editenv ${USER_GRUBENVFILE} unset next_entry
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
printf "$(TEXT "Detected %s network cards.\n")" "$(echo "${ETHX}" | wc -w)"
printf "$(TEXT "Checking Connect.")"
COUNT=0
@ -378,7 +320,7 @@ else
while [ ${COUNT} -lt $((${BOOTIPWAIT} + 32)) ]; do
MSG=""
for N in ${ETHX}; do
if [ "1" = "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
if [ "1" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
MSG+="${N} "
fi
done
@ -396,33 +338,32 @@ else
printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do
COUNT=0
DRIVER="$(basename "$(realpath "/sys/class/net/${N}/device/driver" 2>/dev/null)" 2>/dev/null)"
MAC="$(cat "/sys/class/net/${N}/address" 2>/dev/null)"
printf "%s(%s): " "${N}" "${MAC}@${DRIVER}"
DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
printf "%s(%s): " "${N}" "${DRIVER}"
while true; do
if false && [ ! "${N::3}" = "eth" ]; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "IGNORE (Does not support non-wired network card.)")"
if [ ! "${N::3}" = "eth" ]; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "IGNORE (Does not support non-wired network card.)")"
break
fi
if [ -z "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "DOWN")"
if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
break
fi
if [ "0" = "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "NOT CONNECTED")"
if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
break
fi
if [ ${COUNT} -eq ${BOOTIPWAIT} ]; then # Under normal circumstances, no errors should occur here.
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
break
fi
COUNT=$((COUNT + 1))
IP="$(getIP "${N}")"
if [ -n "${IP}" ]; then
if echo "${IP}" | grep -q "^169\.254\."; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
else
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:5000\033[0m to connect the DSM via web.")" "${IP}")"
fi
break
fi
@ -433,7 +374,6 @@ else
_bootwait || exit 0
[ -n "$(cat "${ADD_TIPS_FILE}" 2>/dev/null)" ] && printf "$(TEXT "%s\n")" "$(cat "${ADD_TIPS_FILE}" 2>/dev/null)"
printf "\033[1;37m%s\033[0m\n" "$(TEXT "Loading DSM kernel ...")"
DSMLOGO="$(readConfigKey "dsmlogo" "${USER_CONFIG_FILE}")"
@ -441,16 +381,16 @@ else
IP="$(getIP)"
echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:5000" || URL="http://find.synology.com/"
python3 "${WORK_PATH}/include/functions.py" "makeqr" -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png"
[ -f "${TMP_PATH}/qrcode_boot.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_boot.png" >/dev/null 2>&1 || true
python3 ${WORK_PATH}/include/functions.py makeqr -d "${URL}" -l "6" -o "${TMP_PATH}/qrcode_boot.png"
[ -f "${TMP_PATH}/qrcode_boot.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_boot.png" >/dev/null 2>/dev/null || true
python3 "${WORK_PATH}/include/functions.py" "makeqr" -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>&1 || true
python3 ${WORK_PATH}/include/functions.py makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi
# Executes DSM kernel via KEXEC
KEXECARGS="-a"
if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 4 ] && [ ${EFI} -eq 1 ]; then
if [ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 4 ] && [ ${EFI} -eq 1 ]; then
printf "\033[1;33m%s\033[0m\n" "$(TEXT "Warning, running kexec with --noefi param, strange things will happen!!")"
KEXECARGS+=" --noefi"
fi
@ -464,18 +404,13 @@ else
fi
done
# Disconnect wireless
lsmod | grep -q iwlwifi && for F in /sys/class/net/wlan*; do
[ ! -e "${F}" ] && continue
connectwlanif "$(basename "${F}")" 0 2>/dev/null
done
# Unload all network drivers
# for F in $(realpath /sys/class/net/*/device/driver); do [ ! -e "${F}" ] && continue; rmmod -f "$(basename ${F})" 2>/dev/null || true; done
# # Unload all network interfaces
# for D in $(realpath /sys/class/net/*/device/driver); do rmmod -f "$(basename ${D})" 2>/dev/null || true; done
# Unload all graphics drivers
# for D in $(lsmod | grep -E '^(nouveau|amdgpu|radeon|i915)' | awk '{print $1}'); do rmmod -f "${D}" 2>/dev/null || true; done
# for I in $(find /sys/devices -name uevent -exec bash -c 'cat {} 2>/dev/null | grep -Eq "PCI_CLASS=0?30[0|1|2]00" && dirname {}' \;); do
# [ -e ${I}/reset ] && cat "${I}/vendor" >/dev/null | grep -iq 0x10de && echo 1 >${I}/reset || true # Proc open nvidia driver when booting
# [ -e ${I}/reset ] && cat ${I}/vendor >/dev/null | grep -iq 0x10de && echo 1 >${I}/reset || true # Proc open nvidia driver when booting
# done
# Reboot

View File

@ -1,10 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
read_u8() {
dd if="${1}" bs=1 skip="$((${2}))" count=1 2>/dev/null | od -An -tu1 | grep -Eo '[0-9]+'

View File

@ -0,0 +1,36 @@
#!/usr/bin/env bash
#
# Calculate the amount of space needed to run the kernel, including room for
# the .bss and .brk sections.
#
# Usage:
# objdump -h a.out | sh calc_run_size.sh
NUM='\([0-9a-fA-F]*[ \t]*\)'
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/0x\1 0x\4/p')
if [ -z "${OUT}" ]; then
echo "Never found .bss or .brk file offset" >&2
exit 1
fi
read -r sizeA offsetA sizeB offsetB <<<$(echo ${OUT} | awk '{printf "%d %d %d %d", strtonum($1), strtonum($2), strtonum($3), strtonum($4)}')
runSize=$((offsetA + sizeA + sizeB))
# BFD linker shows the same file offset in ELF.
if [ "${offsetA}" -ne "${offsetB}" ]; then
# Gold linker shows them as consecutive.
endSize=$((offsetB + sizeB))
if [ "${endSize}" -ne "${runSize}" ]; then
printf "sizeA: 0x%x\n" ${sizeA} >&2
printf "offsetA: 0x%x\n" ${offsetA} >&2
printf "sizeB: 0x%x\n" ${sizeB} >&2
printf "offsetB: 0x%x\n" ${offsetB} >&2
echo ".bss and .brk are non-contiguous" >&2
exit 1
fi
fi
printf "%d\n" ${runSize}
exit 0
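A quick sanity check is to feed the script a fabricated objdump -h excerpt (all values below are made up): with .bss of size 0x10000 and .brk of size 0x4000 both at file offset 0x200000, it prints offsetA + sizeA + sizeB.
printf '  25 .bss          00010000  ffffffff82000000  0000000002000000  00200000  2**12\n  26 .brk          00004000  ffffffff82010000  0000000002010000  00200000  2**12\n' | sh calc_run_size.sh
# -> 2179072 (0x214000)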

Binary file not shown.

View File

@ -1,13 +1,3 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# shellcheck disable=SC2115,SC2155
###############################################################################
# Return list of available addons
# 1 - Platform
@ -28,26 +18,8 @@ function availableAddons() {
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.${LOCALE:-"en_US"}" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description.en_US" "${D}/manifest.yml")"
[ -z "${DESC}" ] && DESC="$(readConfigKey "description" "${D}/manifest.yml")"
DESC="$(echo "${DESC}" | tr -d '\n\r\t\\' | sed "s/\"/'/g")"
echo "${ADDON} \"${DESC:-"unknown"}\""
done <<<"$(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort)"
}
###############################################################################
# Read Addon Key
# 1 - Addon
# 2 - key
function readAddonKey() {
if [ -z "${1}" ] || [ -z "${2}" ]; then
echo ""
return 1
fi
if [ ! -f "${ADDONS_PATH}/${1}/manifest.yml" ]; then
echo ""
return 1
fi
readConfigKey "${2}" "${ADDONS_PATH}/${1}/manifest.yml"
echo -e "${ADDON}\t${DESC:-"unknown"}"
done <<<$(find "${ADDONS_PATH}" -maxdepth 1 -type d 2>/dev/null | sort)
}
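For the variant of this file that still ships readAddonKey, a typical call looks like this (addon and key are illustrative; any key present in the addon's manifest.yml works):
DESC="$(readAddonKey "acpid" "description")"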
###############################################################################

View File

@ -1,11 +1,3 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
###############################################################################
# Delete a key in config file
# 1 - Path of Key
@ -30,8 +22,7 @@ function writeConfigKey() {
# 2 - Path of yaml config file
# Return Value
function readConfigKey() {
local result
result=$(yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
local result=$(yq eval ".${1} | explode(.)" "${2}" 2>/dev/null)
[ "${result}" = "null" ] && echo "" || echo "${result}"
}
@ -42,15 +33,13 @@ function readConfigKey() {
function mergeConfigModules() {
# Error: bad file '-': cannot index array with '8139cp' (strconv.ParseInt: parsing "8139cp": invalid syntax)
# When the first key is a pure number, yq will not process it as a string by default. The current solution is to insert a placeholder key.
local MS ML XF
MS="RRORG\n${1// /\\n}"
ML="$(echo -en "${MS}" | awk '{print "modules."$1":"}')"
XF=$(mktemp 2>/dev/null)
XF=${XF:-/tmp/tmp.XXXXXXXXXX}
echo -en "${ML}" | yq -p p -o y >"${XF}"
deleteConfigKey "modules.\"RRORG\"" "${XF}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${2}" "${XF}" 2>/dev/null
rm -f "${XF}"
local MS="RRORG\n${1// /\\n}"
local L="$(echo -en "${MS}" | awk '{print "modules."$1":"}')"
local xmlfile=$(mktemp)
echo -en "${L}" | yq -p p -o y >"${xmlfile}"
deleteConfigKey "modules.\"RRORG\"" "${xmlfile}"
yq eval-all --inplace '. as $item ireduce ({}; . * $item)' --inplace "${2}" "${xmlfile}" 2>/dev/null
rm -f "${xmlfile}"
}
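A usage sketch (module names are illustrative): the first argument is a space-separated module list, the second the YAML file to merge into; the RRORG placeholder key only exists while yq runs and is deleted again before the merge.
mergeConfigModules "8139cp igb r8152" "${USER_CONFIG_FILE}"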
###############################################################################

View File

@ -1,14 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# shellcheck disable=SC2034
RR_VERSION="25.9.1"
RR_VERSION="25.1.0"
RR_RELEASE=""
RR_TITLE="RR v${RR_VERSION}"
@ -25,7 +15,6 @@ LOG_FILE="${TMP_PATH}/log.txt"
USER_GRUB_CONFIG="${PART1_PATH}/boot/grub/grub.cfg"
USER_GRUBENVFILE="${PART1_PATH}/boot/grub/grubenv"
USER_RSYSENVFILE="${PART1_PATH}/boot/grub/rsysenv"
USER_CONFIG_FILE="${PART1_PATH}/user-config.yml"
USER_LOCALE_FILE="${PART1_PATH}/.locale"
@ -35,10 +24,8 @@ ORI_RDGZ_FILE="${PART2_PATH}/rd.gz"
RR_BZIMAGE_FILE="${PART3_PATH}/bzImage-rr"
RR_RAMDISK_FILE="${PART3_PATH}/initrd-rr"
RR_RAMUSER_FILE="${PART3_PATH}/initrd-rru"
MC_RAMDISK_FILE="${PART3_PATH}/microcode.img"
MOD_ZIMAGE_FILE="${PART3_PATH}/zImage-dsm"
MOD_RDGZ_FILE="${PART3_PATH}/initrd-dsm"
ADD_TIPS_FILE="${PART3_PATH}/AddTips"
CKS_PATH="${PART3_PATH}/cks"
LKMS_PATH="${PART3_PATH}/lkms"

View File

@ -113,7 +113,7 @@ def getmodels(platforms=None):
"""
Get Syno Models.
"""
import re, json, requests, urllib3
import json, requests, urllib3
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
@ -127,28 +127,25 @@ def getmodels(platforms=None):
models = []
try:
url = "http://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
#url = "https://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
req = session.get(url, timeout=10, verify=False)
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8"
p = re.compile(r"<mUnique>(.*?)</mUnique>.*?<mLink>(.*?)</mLink>", re.MULTILINE | re.DOTALL)
data = p.findall(req.text)
for item in data:
if not "DSM" in item[1]:
data = json.loads(req.text)
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
arch = item[0].split("_")[1]
name = item[1].split("/")[-1].split("_")[1].replace("%2B", "+")
if PS and arch.lower() not in PS:
continue
if not any(m["name"] == name for m in models):
models.append({"name": name, "arch": arch})
for model in item["model"]:
arch = model["mUnique"].split("_")[1]
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if PS and arch.lower() not in PS:
continue
if not any(m["name"] == name for m in models):
models.append({"name": name, "arch": arch})
models.sort(key=lambda k: (k["arch"], k["name"]))
except Exception as e:
# click.echo(f"Error: {e}")
pass
click.echo(f"Error: {e}")
print(json.dumps(models, indent=4))
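Either variant of the function prints the same flat structure, a list of model/arch pairs sorted by arch and then name, e.g. (one illustrative entry):
[
    {
        "name": "DS918+",
        "arch": "apollolake"
    }
]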
@ -193,8 +190,7 @@ def getmodelsbykb(platforms=None):
continue
models.append({"name": d[0].split("<br")[0], "arch": d[5].lower()})
except Exception as e:
# click.echo(f"Error: {e}")
pass
click.echo(f"Error: {e}")
models.sort(key=lambda x: (x["arch"], x["name"]))
print(json.dumps(models, indent=4))
@ -234,7 +230,7 @@ def getpats4mv(model, version):
if V not in pats:
pats[V] = {
'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0].get('checksum', '0' * 32)
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver = min(I['build'] for I in data['info']['pubVers'])
@ -256,7 +252,7 @@ def getpats4mv(model, version):
if V not in pats:
pats[V] = {
'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0].get('checksum', '0' * 32)
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']:
@ -271,16 +267,12 @@ def getpats4mv(model, version):
continue
V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if V not in pats:
reqPat = session.head(S['files'][0]['url'].split('?')[0], timeout=10, verify=False)
if reqPat.status_code == 403:
continue
pats[V] = {
'url': S['files'][0]['url'].split('?')[0],
'sum': S['files'][0].get('checksum', '0' * 32)
'sum': S['files'][0]['checksum']
}
except Exception as e:
# click.echo(f"Error: {e}")
pass
click.echo(f"Error: {e}")
pats = {k: pats[k] for k in sorted(pats.keys(), reverse=True)}
print(json.dumps(pats, indent=4))
@ -329,8 +321,7 @@ def getpats(models=None):
pats[model] = {}
pats[model][__fullversion(ver)] = item.attrs['href']
except Exception as e:
# click.echo(f"Error: {e}")
pass
click.echo(f"Error: {e}")
print(json.dumps(pats, indent=4))

View File

@ -1,11 +1,3 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
. "${WORK_PATH}/include/consts.sh"
@ -19,14 +11,14 @@ function checkBootLoader() {
[ -z "${KNAME}" ] && continue
[ "${RO}" = "0" ] && continue
hdparm -r0 "${KNAME}" >/dev/null 2>&1 || true
done <<<"$(lsblk -pno KNAME,RO 2>/dev/null)"
done <<<$(lsblk -pno KNAME,RO 2>/dev/null)
[ ! -w "${PART1_PATH}" ] && return 1
[ ! -w "${PART2_PATH}" ] && return 1
[ ! -w "${PART3_PATH}" ] && return 1
type awk >/dev/null 2>&1 || return 1
type cut >/dev/null 2>&1 || return 1
type sed >/dev/null 2>&1 || return 1
type tar >/dev/null 2>&1 || return 1
command -v awk >/dev/null 2>&1 || return 1
command -v cut >/dev/null 2>&1 || return 1
command -v sed >/dev/null 2>&1 || return 1
command -v tar >/dev/null 2>&1 || return 1
return 0
}
@ -34,17 +26,17 @@ function checkBootLoader() {
# Check if loader is fully configured
# Returns 1 if not
function loaderIsConfigured() {
SN="$(readConfigKey "sn" "${USER_CONFIG_FILE}")"
[ -z "${SN}" ] && return 1
[ ! -f "${MOD_ZIMAGE_FILE}" ] && return 1
[ ! -f "${MOD_RDGZ_FILE}" ] && return 1
[ -z "$(readConfigKey "zimage-hash" "${USER_CONFIG_FILE}")" ] && return 1
[ -z "$(readConfigKey "ramdisk-hash" "${USER_CONFIG_FILE}")" ] && return 1
return 0 # OK
}
###############################################################################
# Just show error message and dies
function die() {
echo -e "\033[1;41m${*}\033[0m"
echo -e "\033[1;41m$@\033[0m"
exit 1
}
@ -84,6 +76,7 @@ function randomhex() {
printf "%02X" $((RANDOM % 255 + 1))
}
###############################################################################
# Generate a random alphanumeric character (0-9A-Z)
function genRandomDigit() {
@ -183,34 +176,32 @@ function validateSerial() {
###############################################################################
# Get values in .conf K=V file
# 1 - file
# 2 - key
# 1 - key
# 2 - file
function _get_conf_kv() {
grep "^${2}=" "${1}" 2>/dev/null | cut -d'=' -f2- | sed 's/^"//;s/"$//' 2>/dev/null
return $?
grep "^${1}=" "${2}" 2>/dev/null | cut -d'=' -f2- | sed 's/^"//;s/"$//' 2>/dev/null
}
###############################################################################
# Replace/remove/add values in .conf K=V file
# 1 - file
# 2 - key
# 3 - value
# 1 - name
# 2 - new_val
# 3 - path
function _set_conf_kv() {
# Delete
if [ -z "${3}" ]; then
sed -i "/^${2}=/d" "${1}" 2>/dev/null
if [ -z "${2}" ]; then
sed -i "/^${1}=/d" "${3}" 2>/dev/null
return $?
fi
# Replace
if grep -q "^${2}=" "${1}" 2>/dev/null; then
sed -i "s#^${2}=.*#${2}=\"${3}\"#" "${1}" 2>/dev/null
if grep -q "^${1}=" "${3}"; then
sed -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
return $?
fi
# Add if doesn't exist
mkdir -p "$(dirname "${1}" 2>/dev/null)" 2>/dev/null
echo "${2}=\"${3}\"" >>"${1}" 2>/dev/null
echo "${1}=\"${2}\"" >>"${3}"
return $?
}
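A usage sketch for the (name, value, path) ordering documented in the second variant above (key and path are illustrative):
_set_conf_kv "maxdisks" "24" "/tmpRoot/etc.defaults/synoinfo.conf"   # add or replace maxdisks="24"
_set_conf_kv "maxdisks" ""   "/tmpRoot/etc.defaults/synoinfo.conf"   # an empty value deletes the line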
@ -219,50 +210,48 @@ function _set_conf_kv() {
# @ - url list
function _get_fastest() {
local speedlist=""
if type ping >/dev/null 2>&1; then
if command -v ping >/dev/null 2>&1; then
for I in "$@"; do
speed=$(LC_ALL=C ping -c 1 -W 5 "${I}" 2>/dev/null | awk -F'[= ]' '/time=/ {for(i=1;i<=NF;i++) if ($i=="time") print $(i+1)}')
speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
done
else
for I in "$@"; do
speed=$(curl -skL -m 10 --connect-timeout 10 -w '%{time_total}' "${I}" -o /dev/null)
speed=$(curl -o /dev/null -s -w '%{time_total}' "${I}")
speed=$(awk "BEGIN {print (${speed:-0.999} * 1000)}")
speedlist+="${I} ${speed:-999}\n" # Assign default value 999 if speed is empty
done
fi
local fastest
fastest="$(echo -e "${speedlist}" | tr -s '\n' | awk '$2 != "999"' | sort -k2n | head -1)"
local fastest="$(echo -e "${speedlist}" | tr -s '\n' | awk '$2 != "999"' | sort -k2n | head -1)"
URL="$(echo "${fastest}" | awk '{print $1}')"
SPD="$(echo "${fastest}" | awk '{print $2}')" # It is a float type
echo "${URL:-${1}}"
[ "$(echo "${SPD:-999}" | cut -d'.' -f1)" -ge 999 ] && return 1 || return 0
[ $(echo "${SPD:-999}" | cut -d. -f1) -ge 999 ] && return 1 || return 0
}
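Usage sketch (hosts are illustrative): the function echoes the quickest reachable host, falling back to the first argument, and its exit status tells whether anything answered at all.
FASTEST="$(_get_fastest "mirror1.example.com" "mirror2.example.com")" || echo "no mirror reachable"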
###############################################################################
# sort netif name
# @1 -mac1,mac2,mac3...
function _sort_netif() {
ETHLIST=""
for F in /sys/class/net/eth*; do
[ ! -e "${F}" ] && continue
ETH="$(basename "${F}")"
MAC="$(cat "/sys/class/net/${ETH}/address" 2>/dev/null | sed 's/://g; s/.*/\L&/')"
BUS="$(ethtool -i "${ETH}" 2>/dev/null | grep bus-info | cut -d' ' -f2)"
ETHLIST="${ETHLIST}${BUS} ${MAC} ${ETH}\n"
local ETHLIST=""
local ETHX="$(ls /sys/class/net/ 2>/dev/null | grep eth)" # real network cards list
for N in ${ETHX}; do
local MAC="$(cat /sys/class/net/${N}/address 2>/dev/null | sed 's/://g; s/.*/\L&/')"
local BUS="$(ethtool -i ${N} 2>/dev/null | grep bus-info | cut -d' ' -f2)"
ETHLIST="${ETHLIST}${BUS} ${MAC} ${N}\n"
done
ETHLISTTMPM=""
ETHLISTTMPB="$(echo -e "${ETHLIST}" | sort)"
local ETHLISTTMPM=""
local ETHLISTTMPB="$(echo -e "${ETHLIST}" | sort)"
if [ -n "${1}" ]; then
MACS="$(echo "${1}" | sed 's/://g; s/,/ /g; s/.*/\L&/')"
local MACS="$(echo "${1}" | sed 's/://g; s/,/ /g; s/.*/\L&/')"
for MACX in ${MACS}; do
ETHLISTTMPM="${ETHLISTTMPM}$(echo -e "${ETHLISTTMPB}" | grep "${MACX}")\n"
ETHLISTTMPB="$(echo -e "${ETHLISTTMPB}" | grep -v "${MACX}")\n"
done
fi
ETHLIST="$(echo -e "${ETHLISTTMPM}${ETHLISTTMPB}" | grep -v '^$')"
ETHSEQ="$(echo -e "${ETHLIST}" | awk '{print $3}' | sed 's/eth//g')"
ETHNUM="$(echo -e "${ETHLIST}" | wc -l)"
local ETHSEQ="$(echo -e "${ETHLIST}" | awk '{print $3}' | sed 's/eth//g')"
local ETHNUM="$(echo -e "${ETHLIST}" | wc -l)"
# echo "${ETHSEQ}"
# sort
@ -303,11 +292,11 @@ function getBus() {
function getIP() {
local IP=""
if [ -n "${1}" ] && [ -d "/sys/class/net/${1}" ]; then
IP=$(ip addr show "${1}" scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -d'/' -f1 | head -1)
[ -z "${IP}" ] && IP=$(ip route show dev "${1}" 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p' | head -1)
IP=$(ip route show dev "${1}" 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p')
[ -z "${IP}" ] && IP=$(ip addr show "${1}" scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
else
IP=$(ip addr show scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -d'/' -f1 | head -1)
[ -z "${IP}" ] && IP=$(ip route show 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p' | head -1)
IP=$(ip route show 2>/dev/null | sed -n 's/.* via .* src \(.*\) metric .*/\1/p' | head -1)
[ -z "${IP}" ] && IP=$(ip addr show scope global 2>/dev/null | grep -E "inet .* eth" | awk '{print $2}' | cut -f1 -d'/' | head -1)
fi
echo "${IP}"
return 0
@ -318,14 +307,13 @@ function getIP() {
# 1 - model
function getLogo() {
local MODEL="${1}"
local fastest
local STATUS
rm -f "${PART3_PATH}/logo.png"
local fastest="$(_get_fastest "www.synology.com" "www.synology.cn")"
# [ $? -ne 0 ] && return 1
fastest="$(_get_fastest "www.synology.com" "www.synology.cn")"
STATUS=$(curl -skL --connect-timeout 10 -w "%{http_code}" "https://${fastest}/api/products/getPhoto?product=${MODEL/+/%2B}&type=img_s&sort=0" -o "${PART3_PATH}/logo.png")
local STATUS=$(curl -skL --connect-timeout 10 -w "%{http_code}" "https://${fastest}/api/products/getPhoto?product=${MODEL/+/%2B}&type=img_s&sort=0" -o "${PART3_PATH}/logo.png")
if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ] || [ ! -f "${PART3_PATH}/logo.png" ]; then
rm -f "${PART3_PATH}/logo.png"
return 1
fi
convert -rotate 180 "${PART3_PATH}/logo.png" "${PART3_PATH}/logo.png" 2>/dev/null
@ -382,10 +370,10 @@ function delCmdline() {
function checkCPU_VT_d() {
lsmod | grep -q msr || modprobe msr 2>/dev/null
if grep -q "GenuineIntel" /proc/cpuinfo 2>/dev/null; then
VT_D_ENABLED=$(rdmsr 0x3a 2>/dev/null)
local VT_D_ENABLED=$(rdmsr 0x3a 2>/dev/null)
[ "$((${VT_D_ENABLED:-0x0} & 0x5))" -eq $((0x5)) ] && return 0
elif grep -q "AuthenticAMD" /proc/cpuinfo 2>/dev/null; then
IOMMU_ENABLED=$(rdmsr 0xC0010114 2>/dev/null)
local IOMMU_ENABLED=$(rdmsr 0xC0010114 2>/dev/null)
[ "$((${IOMMU_ENABLED:-0x0} & 0x1))" -eq $((0x1)) ] && return 0
else
return 1
@ -409,7 +397,7 @@ function checkBIOS_VT_d() {
# Rebooting
# 1 - mode
function rebootTo() {
local MODES="config recovery junior uefi memtest"
local MODES="config recovery junior bios memtest"
if [ -z "${1}" ] || ! echo "${MODES}" | grep -wq "${1}"; then exit 1; fi
# echo "Rebooting to ${1} mode"
GRUBPATH="$(dirname "$(find "${PART1_PATH}/" -name grub.cfg 2>/dev/null | head -1)")"
@ -432,11 +420,16 @@ function connectwlanif() {
rm -f "/var/run/wpa_supplicant.pid.${1}"
fi
else
local CONF
CONF="$([ -f "${PART1_PATH}/wpa_supplicant.conf" ] && echo "${PART1_PATH}/wpa_supplicant.conf" || echo "")"
local CONF=""
[ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf.${1}" ] && CONF="${PART1_PATH}/wpa_supplicant.conf.${1}"
[ -z "${CONF}" ] && [ -f "${PART1_PATH}/wpa_supplicant.conf" ] && CONF="${PART1_PATH}/wpa_supplicant.conf"
[ -z "${CONF}" ] && return 2
[ -f "/var/run/wpa_supplicant.pid.${1}" ] && return 0
wpa_supplicant -i "${1}" -c "${CONF}" -qq -B -P "/var/run/wpa_supplicant.pid.${1}" >/dev/null 2>&1
if [ -f "/var/run/wpa_supplicant.pid.${1}" ]; then
kill -9 "$(cat /var/run/wpa_supplicant.pid.${1})"
rm -f "/var/run/wpa_supplicant.pid.${1}"
fi
wpa_supplicant -i "${1}" -c "${CONF}" -B -P "/var/run/wpa_supplicant.pid.${1}" >/dev/null 2>&1
fi
return 0
}
@ -445,7 +438,7 @@ function connectwlanif() {
# Find and mount the DSM root filesystem
function findDSMRoot() {
local DSMROOTS=""
[ -z "${DSMROOTS}" ] && DSMROOTS="$(mdadm --detail --scan 2>/dev/null | grep -v "INACTIVE-ARRAY" | grep -E "name=SynologyNAS:0|name=DiskStation:0|name=SynologyNVR:0|name=BeeStation:0" | awk '{print $2}' | uniq)"
[ -z "${DSMROOTS}" ] && DSMROOTS="$(mdadm --detail --scan 2>/dev/null | grep -E "name=SynologyNAS:0|name=DiskStation:0|name=SynologyNVR:0|name=BeeStation:0" | awk '{print $2}' | uniq)"
[ -z "${DSMROOTS}" ] && DSMROOTS="$(lsblk -pno KNAME,PARTN,FSTYPE,FSVER,LABEL | grep -E "sd[a-z]{1,2}1" | grep -w "linux_raid_member" | grep "0.9" | awk '{print $1}')"
echo "${DSMROOTS}"
return 0
@ -458,12 +451,7 @@ function fixDSMRootPart() {
if mdadm --detail "${1}" 2>/dev/null | grep -i "State" | grep -iEq "active|FAILED|Not Started"; then
mdadm --stop "${1}" >/dev/null 2>&1
mdadm --assemble --scan >/dev/null 2>&1
T="$(blkid -o value -s TYPE "${1}" 2>/dev/null | sed 's/linux_raid_member/ext4/')"
if [ "${T}" = "btrfs" ]; then
btrfs check --readonly "${1}" >/dev/null 2>&1
else
fsck "${1}" >/dev/null 2>&1
fi
fsck "${1}" >/dev/null 2>&1
fi
}
@ -489,18 +477,3 @@ function copyDSMFiles() {
return 1
fi
}
###############################################################################
# Send a webhook notification
# 1 - webhook url
# 2 - message (optional)
function sendWebhook() {
local URL="${1}"
local MSGT="Notification from ${RR_TITLE}${RR_RELEASE:+(${RR_RELEASE})}"
local MSGC="${2:-"test at $(date +'%Y-%m-%d %H:%M:%S')"}"
[ -z "${URL}" ] && return 1
curl -skL -X POST -H "Content-Type: application/json" -d "{\"title\":\"${MSGT}\", \"text\":\"${MSGC}\"}" "${URL}" >/dev/null 2>&1
return $?
}
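Usage sketch (URL is illustrative); the second argument is optional and defaults to a timestamped test message:
sendWebhook "https://chat.example.com/hooks/rr" "RR is up at $(getIP)"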

View File

@ -1,18 +1,10 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")/../" >/dev/null 2>&1 && pwd)"
type gettext >/dev/null 2>&1 && alias TEXT='gettext "rr"' || alias TEXT='echo'
shopt -s expand_aliases
[ -d "${WORK_PATH}/lang" ] && export TEXTDOMAINDIR="${WORK_PATH}/lang"
[ -f "${PART1_PATH}/.locale" ] && LC_ALL="$(cat "${PART1_PATH}/.locale")" && export LC_ALL="${LC_ALL}"
[ -f "${PART1_PATH}/.locale" ] && export LC_ALL="$(cat "${PART1_PATH}/.locale")"
if [ -f "${PART1_PATH}/.timezone" ]; then
TIMEZONE="$(cat "${PART1_PATH}/.timezone")"

View File

@ -1,27 +1,18 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
###############################################################################
# Unpack modules from a tgz file
# 1 - Platform
# 2 - Kernel Version
function unpackModules() {
local PLATFORM=${1}
local PKVER=${2}
local KERNEL
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
local KVER=${2}
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
rm -rf "${TMP_PATH}/modules"
mkdir -p "${TMP_PATH}/modules"
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${PKVER}.tgz" -C "${TMP_PATH}/modules"
tar -zxf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
else
tar -zxf "${MODULES_PATH}/${PLATFORM}-${PKVER}.tgz" -C "${TMP_PATH}/modules"
tar -zxf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules"
fi
}
@ -31,14 +22,13 @@ function unpackModules() {
# 2 - Kernel Version
function packagModules() {
local PLATFORM=${1}
local PKVER=${2}
local KERNEL
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
local KVER=${2}
local KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${PKVER}.tgz" -C "${TMP_PATH}/modules" .
tar -zcf "${CKS_PATH}/modules-${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
else
tar -zcf "${MODULES_PATH}/${PLATFORM}-${PKVER}.tgz" -C "${TMP_PATH}/modules" .
tar -zcf "${MODULES_PATH}/${PLATFORM}-${KVER}.tgz" -C "${TMP_PATH}/modules" .
fi
}
@ -48,21 +38,20 @@ function packagModules() {
# 2 - Kernel Version
function getAllModules() {
local PLATFORM=${1}
local PKVER=${2}
local KVER=${2}
if [ -z "${PLATFORM}" ] || [ -z "${PKVER}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
return 1
fi
unpackModules "${PLATFORM}" "${PKVER}"
unpackModules "${PLATFORM}" "${KVER}"
for F in ${TMP_PATH}/modules/*.ko; do
[ ! -e "${F}" ] && continue
local N DESC
N="$(basename "${F}" .ko)"
DESC="$(modinfo -F description "${F}" 2>/dev/null)"
DESC="$(echo "${DESC}" | tr -d '\n\r\t\\' | sed "s/\"/'/g")"
echo "${N} \"${DESC:-${N}}\""
for F in $(ls ${TMP_PATH}/modules/*.ko 2>/dev/null); do
local X=$(basename "${F}")
local M=${X:0:-3}
local DESC=$(modinfo "${F}" 2>/dev/null | awk -F':' '/description:/{ print $2}' | awk '{sub(/^[ ]+/,""); print}')
[ -z "${DESC}" ] && DESC="${X}"
echo "${M} \"${DESC}\""
done
rm -rf "${TMP_PATH}/modules"
@ -75,24 +64,22 @@ function getAllModules() {
# 3 - Module list
function installModules() {
local PLATFORM=${1}
local PKVER=${2}
local KVER=${2}
shift 2
local MLIST="${@}"
if [ -z "${PLATFORM}" ] || [ -z "${PKVER}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
echo "ERROR: installModules: Platform or Kernel Version not defined" >"${LOG_FILE}"
return 1
fi
local MLIST ODP KERNEL
shift 2
MLIST="${*}"
unpackModules "${PLATFORM}" "${PKVER}"
unpackModules "${PLATFORM}" "${KVER}"
ODP="$(readConfigKey "odp" "${USER_CONFIG_FILE}")"
for F in ${TMP_PATH}/modules/*.ko; do
[ ! -e "${F}" ] && continue
M=$(basename "${F}")
local ODP="$(readConfigKey "odp" "${USER_CONFIG_FILE}")"
for F in $(ls "${TMP_PATH}/modules/"*.ko 2>/dev/null); do
local M=$(basename "${F}")
[ "${ODP}" = "true" ] && [ -f "${RAMDISK_PATH}/usr/lib/modules/${M}" ] && continue
if echo "${MLIST}" | grep -wq "$(basename "${M}" .ko)"; then
if echo "${MLIST}" | grep -wq "${M:0:-3}"; then
cp -f "${F}" "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
else
rm -f "${RAMDISK_PATH}/usr/lib/modules/${M}" 2>"${LOG_FILE}"
@ -100,7 +87,7 @@ function installModules() {
done
mkdir -p "${RAMDISK_PATH}/usr/lib/firmware"
KERNEL=$(readConfigKey "kernel" "${USER_CONFIG_FILE}")
local KERNEL=$(readConfigKey "kernel" "${USER_CONFIG_FILE}")
if [ "${KERNEL}" = "custom" ]; then
tar -zxf "${CKS_PATH}/firmware.tgz" -C "${RAMDISK_PATH}/usr/lib/firmware" 2>"${LOG_FILE}"
else
@ -121,19 +108,19 @@ function installModules() {
# 3 - ko file
function addToModules() {
local PLATFORM=${1}
local PKVER=${2}
local KVER=${2}
local KOFILE=${3}
if [ -z "${PLATFORM}" ] || [ -z "${PKVER}" ] || [ -z "${KOFILE}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KOFILE}" ]; then
echo ""
return 1
fi
unpackModules "${PLATFORM}" "${PKVER}"
unpackModules "${PLATFORM}" "${KVER}"
cp -f "${KOFILE}" "${TMP_PATH}/modules"
packagModules "${PLATFORM}" "${PKVER}"
packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@ -143,19 +130,19 @@ function addToModules() {
# 3 - ko name
function delToModules() {
local PLATFORM=${1}
local PKVER=${2}
local KVER=${2}
local KONAME=${3}
if [ -z "${PLATFORM}" ] || [ -z "${PKVER}" ] || [ -z "${KONAME}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
echo ""
return 1
fi
unpackModules "${PLATFORM}" "${PKVER}"
unpackModules "${PLATFORM}" "${KVER}"
rm -f "${TMP_PATH}/modules/${KONAME}"
packagModules "${PLATFORM}" "${PKVER}"
packagModules "${PLATFORM}" "${KVER}"
}
###############################################################################
@ -166,10 +153,9 @@ function delToModules() {
function getdepends() {
function _getdepends() {
if [ -f "${TMP_PATH}/modules/${1}.ko" ]; then
local depends
depends="$(modinfo -F depends "${TMP_PATH}/modules/${1}.ko" 2>/dev/null | sed 's/,/\n/g')"
if [ "$(echo "${depends}" | wc -w)" -gt 0 ]; then
for k in ${depends}; do
local depends=($(modinfo "${TMP_PATH}/modules/${1}.ko" 2>/dev/null | grep depends: | awk -F: '{print $2}' | awk '$1=$1' | sed 's/,/ /g'))
if [ ${#depends[@]} -gt 0 ]; then
for k in "${depends[@]}"; do
echo "${k}"
_getdepends "${k}"
done
@ -178,17 +164,17 @@ function getdepends() {
}
local PLATFORM=${1}
local PKVER=${2}
local KVER=${2}
local KONAME=${3}
if [ -z "${PLATFORM}" ] || [ -z "${PKVER}" ] || [ -z "${KONAME}" ]; then
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ] || [ -z "${KONAME}" ]; then
echo ""
return 1
fi
unpackModules "${PLATFORM}" "${PKVER}"
unpackModules "${PLATFORM}" "${KVER}"
_getdepends "${KONAME}" | sort -u
echo "${KONAME}"
local DPS=($(_getdepends "${KONAME}" | tr ' ' '\n' | sort -u))
echo "${DPS[@]}"
rm -rf "${TMP_PATH}/modules"
}
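Usage sketch (platform, kernel version and module name are illustrative): the call below would print r8152 together with whatever dependency chain modinfo reports for it in the packaged module set.
getdepends "apollolake" "4.4.302" "r8152"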

View File

@ -1,10 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
set -e
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
@ -12,15 +6,6 @@ set -e
. "${WORK_PATH}/include/functions.sh"
. "${WORK_PATH}/include/addons.sh"
if type vmware-toolbox-cmd >/dev/null 2>&1; then
if [ "Disable" = "$(vmware-toolbox-cmd timesync status 2>/dev/null)" ]; then
vmware-toolbox-cmd timesync enable >/dev/null 2>&1 || true
fi
if [ "Enabled" = "$(vmware-toolbox-cmd timesync status 2>/dev/null)" ]; then
vmware-toolbox-cmd timesync disable >/dev/null 2>&1 || true
fi
fi
[ -z "${LOADER_DISK}" ] && die "$(TEXT "Loader is not init!")"
checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
@ -28,7 +13,7 @@ checkBootLoader || die "$(TEXT "The loader is corrupted, please rewrite it!")"
clear
COLUMNS=$(ttysize 2>/dev/null | awk '{print $1}')
COLUMNS=${COLUMNS:-80}
TITLE="$(printf "$(TEXT "Welcome to %s")" "${RR_TITLE}${RR_RELEASE:+(${RR_RELEASE})}")"
TITLE="$(printf "$(TEXT "Welcome to %s")" "$([ -z "${RR_RELEASE}" ] && echo "${RR_TITLE}" || echo "${RR_TITLE}(${RR_RELEASE})")")"
DATE="$(date)"
printf "\033[1;44m%*s\n" "${COLUMNS}" ""
printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
@ -37,7 +22,7 @@ printf "\033[1;44m%*s\033[A\n" "${COLUMNS}" ""
printf "\033[1;32m%*s\033[0m\n" "${COLUMNS}" "${DATE}"
# Get first MAC address
ETHX="$(find /sys/class/net/ -mindepth 1 -maxdepth 1 ! -name lo -exec basename {} \; | sort)"
ETHX=$(ls /sys/class/net/ 2>/dev/null | grep -v lo) || true
# No network devices
[ "$(echo "${ETHX}" | wc -w)" -le 0 ] && die "$(TEXT "Network devices not found! Please re execute init.sh after connecting to the network!")"
@ -67,9 +52,6 @@ initConfigKey "modelid" "" "${USER_CONFIG_FILE}"
initConfigKey "productver" "" "${USER_CONFIG_FILE}"
initConfigKey "buildnum" "" "${USER_CONFIG_FILE}"
initConfigKey "smallnum" "" "${USER_CONFIG_FILE}"
initConfigKey "dt" "" "${USER_CONFIG_FILE}"
initConfigKey "kver" "" "${USER_CONFIG_FILE}"
initConfigKey "kpre" "" "${USER_CONFIG_FILE}"
initConfigKey "paturl" "" "${USER_CONFIG_FILE}"
initConfigKey "patsum" "" "${USER_CONFIG_FILE}"
initConfigKey "sn" "" "${USER_CONFIG_FILE}"
@ -85,7 +67,6 @@ initConfigKey "addons" "{}" "${USER_CONFIG_FILE}"
if [ -z "$(readConfigMap "addons" "${USER_CONFIG_FILE}")" ]; then
initConfigKey "addons.acpid" "" "${USER_CONFIG_FILE}"
initConfigKey "addons.trivial" "" "${USER_CONFIG_FILE}"
initConfigKey "addons.vmtools" "" "${USER_CONFIG_FILE}"
initConfigKey "addons.mountloader" "" "${USER_CONFIG_FILE}"
initConfigKey "addons.powersched" "" "${USER_CONFIG_FILE}"
initConfigKey "addons.reboottoloader" "" "${USER_CONFIG_FILE}"
@ -93,22 +74,31 @@ fi
initConfigKey "modules" "{}" "${USER_CONFIG_FILE}"
initConfigKey "modblacklist" "evbug,cdc_ether" "${USER_CONFIG_FILE}"
# for update
if [ -f "${PART2_PATH}/GRUB_VER" ]; then
PLATFORMTMP="$(_get_conf_kv "PLATFORM" "${PART2_PATH}/GRUB_VER")"
MODELTMP="$(_get_conf_kv "MODEL" "${PART2_PATH}/GRUB_VER")"
[ -z "$(readConfigKey "platform" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "platform" "${PLATFORMTMP,,}" "${USER_CONFIG_FILE}"
[ -z "$(readConfigKey "model" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "model" "$(echo "${MODELTMP}" | sed 's/d$/D/; s/rp$/RP/; s/rp+/RP+/')" "${USER_CONFIG_FILE}"
[ -z "$(readConfigKey "modelid" "${USER_CONFIG_FILE}")" ] &&
writeConfigKey "modelid" "${MODELTMP}" "${USER_CONFIG_FILE}"
fi
if [ ! "LOCALBUILD" = "${LOADER_DISK}" ]; then
if arrayExistItem "sortnetif:" "$(readConfigMap "addons" "${USER_CONFIG_FILE}")"; then
_sort_netif "$(readConfigKey "addons.sortnetif" "${USER_CONFIG_FILE}")"
fi
for N in ${ETHX}; do
MACR="$(cat "/sys/class/net/${N}/address" 2>/dev/null | sed 's/://g')"
MACR="$(cat /sys/class/net/${N}/address 2>/dev/null | sed 's/://g')"
IPR="$(readConfigKey "network.${MACR}" "${USER_CONFIG_FILE}")"
if [ -n "${IPR}" ]; then
if [ ! "1" = "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
ip link set "${N}" up 2>/dev/null || true
fi
if [ -n "${IPR}" ] && [ "1" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
IFS='/' read -r -a IPRA <<<"${IPR}"
ip addr flush dev "${N}" 2>/dev/null || true
ip addr add "${IPRA[0]}/${IPRA[1]:-"255.255.255.0"}" dev "${N}" 2>/dev/null || true
ip addr flush dev "${N}"
ip addr add "${IPRA[0]}/${IPRA[1]:-"255.255.255.0"}" dev "${N}"
if [ -n "${IPRA[2]}" ]; then
ip route add default via "${IPRA[2]}" dev "${N}" 2>/dev/null || true
ip route add default via "${IPRA[2]}" dev "${N}"
fi
if [ -n "${IPRA[3]:-${IPRA[2]}}" ]; then
sed -i "/nameserver ${IPRA[3]:-${IPRA[2]}}/d" /etc/resolv.conf
@ -132,14 +122,13 @@ BUSLIST="usb sata sas scsi nvme mmc ide virtio vmbus xen"
if [ "${BUS}" = "usb" ]; then
VID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep "ID_VENDOR_ID" | cut -d= -f2)"
PID="0x$(udevadm info --query property --name "${LOADER_DISK}" 2>/dev/null | grep "ID_MODEL_ID" | cut -d= -f2)"
[ "${VID}" = "0x" ] || [ "${PID}" = "0x" ] && die "$(TEXT "The loader disk does not support the current USB Portable Hard Disk.")"
TYPE="flashdisk"
elif ! echo "${BUSLIST}" | grep -wq "${BUS}"; then
if [ "LOCALBUILD" = "${LOADER_DISK}" ]; then
echo "LOCALBUILD MODE"
TYPE="PC"
else
die "$(printf "$(TEXT "The loader disk does not support the current %s, only %s DoM is supported.")" "${BUS}" "${BUSLIST// /\/}")"
die "$(printf "$(TEXT "The boot disk does not support the current %s, only %s DoM is supported.")" "${BUS}" "${BUSLIST// /\/}")"
fi
fi
@ -186,7 +175,7 @@ COUNT=0
while [ ${COUNT} -lt 30 ]; do
MSG=""
for N in ${ETHX}; do
if [ "1" = "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
if [ "1" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
MSG+="${N} "
fi
done
@ -204,29 +193,28 @@ done
printf "$(TEXT "Waiting IP.\n")"
for N in ${ETHX}; do
COUNT=0
DRIVER="$(basename "$(realpath "/sys/class/net/${N}/device/driver" 2>/dev/null)" 2>/dev/null)"
MAC="$(cat "/sys/class/net/${N}/address" 2>/dev/null)"
printf "%s(%s): " "${N}" "${MAC}@${DRIVER}"
DRIVER=$(ls -ld /sys/class/net/${N}/device/driver 2>/dev/null | awk -F '/' '{print $NF}')
printf "%s(%s): " "${N}" "${DRIVER}"
while true; do
if [ -z "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "DOWN")"
if [ -z "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "DOWN")"
break
fi
if [ "0" = "$(cat "/sys/class/net/${N}/carrier" 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "NOT CONNECTED")"
if [ "0" = "$(cat /sys/class/net/${N}/carrier 2>/dev/null)" ]; then
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "NOT CONNECTED")"
break
fi
if [ ${COUNT} -eq 15 ]; then # Under normal circumstances, no errors should occur here.
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "TIMEOUT (Please check the IP on the router.)")"
break
fi
COUNT=$((COUNT + 1))
IP="$(getIP "${N}")"
if [ -n "${IP}" ]; then
if echo "${IP}" | grep -q "^169\.254\."; then
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(TEXT "LINK LOCAL (No DHCP server detected.)")"
else
printf "\r%s(%s): %s\n" "${N}" "${MAC}@${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:%d\033[0m to configure the loader via web terminal.")" "${IP}" "${TTYD:-7681}")"
printf "\r%s(%s): %s\n" "${N}" "${DRIVER}" "$(printf "$(TEXT "Access \033[1;34mhttp://%s:%d\033[0m to configure the loader via web terminal.")" "${IP}" "${TTYD:-7681}")"
fi
break
fi
@ -240,7 +228,6 @@ printf "\n"
printf "$(TEXT "Call \033[1;32minit.sh\033[0m to re get init info\n")"
printf "$(TEXT "Call \033[1;32mmenu.sh\033[0m to configure loader\n")"
printf "\n"
[ -n "$(cat "${ADD_TIPS_FILE}" 2>/dev/null)" ] && printf "$(TEXT "%s\n")" "$(cat "${ADD_TIPS_FILE}" 2>/dev/null)"
printf "$(TEXT "User config is on \033[1;32m%s\033[0m\n")" "${USER_CONFIG_FILE}"
printf "$(TEXT "HTTP: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${HTTP:-7080}"
printf "$(TEXT "DUFS: \033[1;34mhttp://%s:%d\033[0m\n")" "rr" "${DUFS:-7304}"
@ -259,23 +246,14 @@ if [ "${DSMLOGO}" = "true" ] && [ -c "/dev/fb0" ] && [ ! "LOCALBUILD" = "${LOADE
echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:${TTYD:-7681}" || URL="http://rr:${TTYD:-7681}"
python3 "${WORK_PATH}/include/functions.py" makeqr -d "${URL}" -l "0" -o "${TMP_PATH}/qrcode_init.png"
[ -f "${TMP_PATH}/qrcode_init.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_init.png" >/dev/null 2>&1 || true
[ -f "${TMP_PATH}/qrcode_init.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_init.png" >/dev/null 2>/dev/null || true
python3 "${WORK_PATH}/include/functions.py" makeqr -f "${WORK_PATH}/include/qhxg.png" -l "7" -o "${TMP_PATH}/qrcode_qhxg.png"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>&1 || true
fi
WEBHOOKURL="$(readConfigKey "webhookurl" "${USER_CONFIG_FILE}")"
if [ -n "${WEBHOOKURL}" ] && [ ! -f "${TMP_PATH}/WebhookSent" ]; then
DMI="$(dmesg 2>/dev/null | grep -i "DMI:" | head -1 | sed 's/\[.*\] DMI: //i')"
IP="$(getIP)"
echo "${IP}" | grep -q "^169\.254\." && IP=""
[ -n "${IP}" ] && URL="http://${IP}:${TTYD:-7681}" || URL="http://rr:${TTYD:-7681}"
sendWebhook "${WEBHOOKURL}" "{\"RR\":\"${RR_TITLE}${RR_RELEASE:+(${RR_RELEASE})}\", \"DATE\":\"$(date +'%Y-%m-%d %H:%M:%S')\", \"DMI\":\"${DMI}\", \"URL\":\"${URL}\"}"
touch "${TMP_PATH}/WebhookSent"
[ -f "${TMP_PATH}/qrcode_qhxg.png" ] && echo | fbv -acufi "${TMP_PATH}/qrcode_qhxg.png" >/dev/null 2>/dev/null || true
fi
# Check memory
RAM="$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null)"
RAM=$(awk '/MemTotal:/ {printf "%.0f", $2 / 1024}' /proc/meminfo 2>/dev/null)
if [ "${RAM:-0}" -le 3500 ]; then
printf "\033[1;33m%s\033[0m\n" "$(TEXT "You have less than 4GB of RAM, if errors occur in loader creation, please increase the amount of memory.")"
fi

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,39 @@
#!/usr/bin/env sh
#
# WARNING: this file is also embedded in the post-init patcher, so don't go too crazy with the syntax/tools as it must
# be able to execute in the initramfs/preboot environment (so no bashisms etc.)
# All comments will be stripped, functions here should NOT start with brp_ as they're not part of the builder
if [ -z "${SED_PATH+x}" ]; then
echo "Your SED_PATH variable is not set/is empty!"
exit 1
fi
##$1 from, $2 to, $3 file to patch
_replace_in_file() {
if grep -q "${1}" "${3}"; then
"${SED_PATH}" -i "s#${1}#${2}#" "${3}" 2>/dev/null
fi
}
# Replace/remove/add values in .conf K=V file
#
# Args: $1 name, $2 new_val, $3 path
_set_conf_kv() {
# Delete
if [ -z "${2}" ]; then
"${SED_PATH}" -i "/^${1}=/d" "${3}" 2>/dev/null
return 0
fi
# Replace
if grep -q "^${1}=" "${3}"; then
"${SED_PATH}" -i "s#^${1}=.*#${1}=\"${2}\"#" "${3}" 2>/dev/null
return 0
fi
# Add if doesn't exist
echo "${1}=\"${2}\"" >>"${3}"
return 0
}
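Both helpers are meant to run against the mounted DSM root with SED_PATH pointing at a usable sed binary; the synoinfo key below is illustrative, the tty.conf call mirrors the init.post patches further down.
SED_PATH='/tmpRoot/usr/bin/sed'
_replace_in_file '^#start on' 'start on' /tmpRoot/usr/share/init/tty.conf
_set_conf_kv "maxdisks" "24" /tmpRoot/etc.defaults/synoinfo.conf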

View File

@ -1,11 +1,9 @@
#!/usr/bin/env sh
#!/usr/bin/sh
# This script is saved to /sbin/modprobe, which is a so-called UMH (user-mode-helper) for kmod (kernel/kmod.c)
# The kmod subsystem in the kernel is used to load modules from within the kernel. We exploit it a bit to load RP as soon as
# possible (which turns out to be via init/main.c => load_default_modules => load_default_elevator_module).
# When the kernel is booted with "elevator=elevator" it will attempt to load a module "elevator-iosched"... and the rest
# should be obvious from the code below. DO NOT print anything here (kernel doesn't attach STDOUT)
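# For reference (loader side, boot.sh above): on pre-5.x kernels RR plants the bogus entry with CMDLINE["elevator"]="elevator",
# so load_default_elevator_module() asks for "elevator-iosched" and the loop below answers by insmod'ing rp.ko.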
for arg in "$@"; do
if [ "${arg}" = "elevator-iosched" ]; then
insmod /usr/lib/modules/rp.ko

View File

@ -17,8 +17,6 @@ N acpi_call.ko
# misc
N check_signature.ko
N rfkill.ko
N rfkill-gpio.ko
# sensors
N coretemp.ko
@ -34,13 +32,7 @@ N nct6775.ko
F mii.ko
F cdc_ether.ko
F rndis_host.ko
F thunderbolt.ko
F thunderbolt-net.ko
F r8152.ko
F aqc111.ko
F ax88179_178a.ko
F aic_load_fw.ko
F aic8800_fdrv.ko
# iwlwifi
N libarc4.ko
@ -62,4 +54,4 @@ N iwldvm.ko
#N i915-compat.ko
#N intel-gtt.ko
#N ttm.ko
#N i915.ko
#N i915.ko

View File

@ -0,0 +1,11 @@
--- a/usr/syno/web/webman/get_state.cgi
+++ b/usr/syno/web/webman/get_state.cgi
@@ -1,7 +1,7 @@
#!/bin/sh
PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin"
-DisabledPortDisks=`/usr/syno/bin/synodiskport -portthawlist`
+DisabledPortDisks=""
partition=`/usr/syno/bin/synodiskport -installable_disk_list`
product=`/bin/get_key_value /etc.defaults/synoinfo.conf product`

View File

@ -0,0 +1,11 @@
--- a/usr/syno/web/webman/get_state.cgi
+++ b/usr/syno/web/webman/get_state.cgi
@@ -1,7 +1,7 @@
#!/bin/sh
PATH="$PATH:/bin:/sbin:/usr/bin:/usr/sbin"
-DisabledPortDisks="$(/usr/syno/bin/synodiskport -portthawlist)"
+DisabledPortDisks=""
partition="$(/usr/syno/bin/synodiskport -installable_disk_list)"
upnpmodelname="$(/bin/get_key_value /etc.defaults/synoinfo.conf upnpmodelname)"

View File

@ -0,0 +1,8 @@
--- a/etc/passwd
+++ b/etc/passwd
@@ -1,4 +1,4 @@
-root:x:0:0::/root:/bin/ash
+root::0:0::/root:/bin/ash
system:x:1:1::/usr/syno/synoman:/usr/bin/nologin
daemon:x:2:2::/:/bin/sh
lp:x:7:7::/var/spool/lpd:/bin/sh

View File

@ -0,0 +1,8 @@
--- a/etc/passwd
+++ b/etc/passwd
@@ -1,4 +1,4 @@
-root:x:0:0::/root:/bin/ash
+root::0:0::/root:/bin/ash
system:x:1:1::/usr/syno/synoman:/usr/bin/nologin
daemon:x:2:2::/:/bin/sh
SYSTEM_ADMIN:x:3:101::/nonexist:/usr/bin/nologin
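
Both variants of this patch blank out root's password field in /etc/passwd, so local console logins need no password. A hedged check of the result (field layout only; the path is the standard one):

```shell
# Hedged check: the second field for root should be empty after the patch is applied
awk -F: '$1 == "root" {print "root password field: [" $2 "]"}' /etc/passwd
# expected: root password field: []
```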

View File

@ -0,0 +1,8 @@
--- /etc/hosts
+++ /etc/hosts
@@ -2,3 +2,5 @@
# that require network functionality will fail.
127.0.0.1 localhost
::1 localhost
+127.0.0.1 update7.synology.com
+127.0.0.1 dataupdate7.synology.com
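
This patch pins the two Synology update hosts to the loopback address so that DSM's update checks never leave the machine. A hedged verification sketch:

```shell
# Hedged check: both update hosts should now resolve to 127.0.0.1 via /etc/hosts
grep -E 'update7\.synology\.com' /etc/hosts
# expected:
# 127.0.0.1 update7.synology.com
# 127.0.0.1 dataupdate7.synology.com
```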

View File

@ -0,0 +1,32 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -18,6 +18,29 @@ if [ "$UniqueRD" = "nextkvmx64" ]; then
fi
Mount "$RootDevice" /tmpRoot -o barrier=1
+############################################################################################
+SED_PATH='/tmpRoot/usr/bin/sed'
+
+@@@CONFIG-MANIPULATORS-TOOLS@@@
+
+@@@CONFIG-GENERATED@@@
+
+UPSTART="/tmpRoot/usr/share/init"
+
+if ! echo; then
+ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf
+ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf
+ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf
+ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevd.conf
+else
+ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf
+fi
+
+/addons/addons.sh late
+############################################################################################
+
Mkdir -p /tmpRoot/initrd
Umount /proc >/dev/null 2>&1
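
The @@@CONFIG-MANIPULATORS-TOOLS@@@ and @@@CONFIG-GENERATED@@@ markers are placeholders; the ramdisk patch step further below expands them with sed. A minimal sketch of that expansion, using example paths in place of ${WORK_PATH}, ${TMP_PATH} and ${RAMDISK_PATH}:

```shell
# Minimal sketch of the placeholder expansion (paths are examples; see the ramdisk patch script below)
grep -v -e '^[\t ]*#' -e '^$' patch/config-manipulators.sh >/tmp/rp.txt
sed -e "/@@@CONFIG-MANIPULATORS-TOOLS@@@/ {" -e "r /tmp/rp.txt" -e 'd' -e '}' -i ramdisk/sbin/init.post
rm -f /tmp/rp.txt
```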

View File

@ -1,10 +1,31 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -18,6 +18,8 @@
@@ -18,6 +18,29 @@
fi
mount $RootDevice /tmpRoot -o barrier=1
+############################################################################################
+SED_PATH='/tmpRoot/usr/bin/sed'
+
+@@@CONFIG-MANIPULATORS-TOOLS@@@
+
+@@@CONFIG-GENERATED@@@
+
+UPSTART="/tmpRoot/usr/share/init"
+
+if ! echo; then
+ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf
+ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf
+ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf
+ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevd.conf
+else
+ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf
+fi
+
+/addons/addons.sh late
+############################################################################################
+
mkdir -p /tmpRoot/initrd

View File

@ -1,11 +0,0 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -18,6 +18,8 @@
fi
Mount "$RootDevice" /tmpRoot -o barrier=1
+/addons/addons.sh late
+
Mkdir -p /tmpRoot/initrd
Umount /proc >/dev/null 2>&1

View File

@ -1,11 +1,31 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -32,6 +32,8 @@
@@ -31,7 +31,28 @@
OptPrjQuota="$(GetPQMountOpt "${RootMountPath}")"
# shellcheck disable=SC2046
Mount "${RootMountPath}" /tmpRoot -o barrier=1 ${OptPrjQuota}
+############################################################################################
+SED_PATH='/tmpRoot/usr/bin/sed'
+/addons/addons.sh late
+@@@CONFIG-MANIPULATORS-TOOLS@@@
+
+@@@CONFIG-GENERATED@@@
+
+UPSTART="/tmpRoot/usr/share/init"
+
+if ! echo; then
+ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf
+ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf
+ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf
+ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevd.conf
+else
+ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf
+fi
+
+/addons/addons.sh late
+############################################################################################
Mkdir -p /tmpRoot/initrd
Umount /proc >/dev/null 2>&1

View File

@ -1,11 +1,31 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -32,6 +32,8 @@
@@ -31,7 +31,28 @@
OptPrjQuota="$(GetPQMountOpt "${RootMountPath}")"
# shellcheck disable=SC2046
Mount "${RootMountPath}" /tmpRoot -o barrier=1,noatime ${OptPrjQuota}
+############################################################################################
+SED_PATH='/tmpRoot/usr/bin/sed'
+/addons/addons.sh late
+@@@CONFIG-MANIPULATORS-TOOLS@@@
+
+@@@CONFIG-GENERATED@@@
+
+UPSTART="/tmpRoot/usr/share/init"
+
+if ! echo; then
+ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf
+ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf
+ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf
+ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevd.conf
+else
+ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf
+fi
+
+/addons/addons.sh late
+############################################################################################
Mkdir -p /tmpRoot/initrd
Umount /proc >/dev/null 2>&1

View File

@ -1,11 +1,31 @@
--- a/usr/sbin/init.post
+++ b/usr/sbin/init.post
@@ -23,6 +23,8 @@
@@ -31,7 +31,28 @@
fi
Mount "$(GetRootMountOpt)" "$(GetRootMountPath)" /tmpRoot
+############################################################################################
+SED_PATH='/tmpRoot/usr/bin/sed'
+/addons/addons.sh late
+@@@CONFIG-MANIPULATORS-TOOLS@@@
+
+@@@CONFIG-GENERATED@@@
+
+UPSTART="/tmpRoot/usr/share/init"
+
+if ! echo; then
+ _replace_in_file '^start on' '#start on' $UPSTART/tty.conf
+ _replace_in_file "console output" "console none" $UPSTART/syno_poweroff_task.conf
+ _replace_in_file "console output" "console none" $UPSTART/burnin_loader.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevtrigger.conf
+ _replace_in_file "console output" "console none" $UPSTART/bs-poweroff.conf
+ _replace_in_file "console output" "console none" $UPSTART/udevd.conf
+else
+ _replace_in_file '^#start on' 'start on' $UPSTART/tty.conf
+fi
+
+/addons/addons.sh late
+############################################################################################
Mkdir -p /tmpRoot/initrd
Umount /proc >/dev/null 2>&1

View File

@ -4,9 +4,7 @@ synoinfo: &synoinfo
support_printer: "yes"
support_usb_printer: "yes"
support_disk_compatibility: "no"
support_synodrive_ability: "no" # DS925+
support_memory_compatibility: "no"
support_memory_limitation: "no" # DVA3219
support_led_brightness_adjustment: "no"
support_leds_atmega1608: "no"
support_leds_lp3943: "no"
@ -14,13 +12,10 @@ synoinfo: &synoinfo
support_syno_hybrid_raid: "no"
supportraidgroup: "no"
enableRCPower: "yes"
# mem_max_mb: "" # affects creating an SSD cache
# mem_min_mb: "" # affects creating an SSD cache
support_fan: "no"
support_fan_adjust_dual_mode: "no"
supportadt7490: "no"
maxlanport: "8"
netif_seq: ""
netif_seq: "0 1 2 3 4 5 6 7"
buzzeroffen: "0xffff"
productvers4: &productvers4
@ -117,25 +112,3 @@ platforms:
<<: *synoinfo
netif_seq_by_dts: "no"
productvers: *productvers5
geminilakenk:
dt: true
noflags: ["x2apic"]
synoinfo:
<<: *synoinfo
netif_seq_by_dts: "no"
show_autoupdatetype_notify: "yes"
productvers: *productvers5
r1000nk:
dt: true
synoinfo:
<<: *synoinfo
netif_seq_by_dts: "no"
show_autoupdatetype_notify: "yes"
productvers: *productvers5
v1000nk:
dt: true
synoinfo:
<<: *synoinfo
netif_seq_by_dts: "no"
show_autoupdatetype_notify: "yes"
productvers: *productvers5
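
The build scripts read these platform settings through the readConfigKey helper, which wraps a yq lookup. A hedged sketch of querying the same data directly with yq v4 (the platform and key names below are examples; explode resolves the <<: merge anchors):

```shell
# Hedged sketch: query platforms.yml directly with yq v4 (example platform/key names)
yq eval 'explode(.) | .platforms | keys' platforms.yml
yq eval 'explode(.) | .platforms.epyc7002.synoinfo.maxlanport' platforms.yml
```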

View File

@ -1,12 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# shellcheck disable=SC2034
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
@ -31,10 +23,11 @@ KEYMAP="$(readConfigKey "keymap" "${USER_CONFIG_FILE}")"
PATURL="$(readConfigKey "paturl" "${USER_CONFIG_FILE}")"
PATSUM="$(readConfigKey "patsum" "${USER_CONFIG_FILE}")"
ODP="$(readConfigKey "odp" "${USER_CONFIG_FILE}")" # official drivers priorities
HDDSORT="$(readConfigKey "hddsort" "${USER_CONFIG_FILE}")"
DT="$(readConfigKey "dt" "${USER_CONFIG_FILE}")"
KVER="$(readConfigKey "kver" "${USER_CONFIG_FILE}")"
KPRE="$(readConfigKey "kpre" "${USER_CONFIG_FILE}")"
# Read model data
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
# Sanity check
if [ -z "${PLATFORM}" ] || [ -z "${KVER}" ]; then
@ -60,7 +53,7 @@ mkdir -p "${RAMDISK_PATH}"
. "${RAMDISK_PATH}/etc/VERSION"
if [ -n "${PRODUCTVER}" ] && [ -n "${BUILDNUM}" ] && [ -n "${SMALLNUM}" ] &&
([ ! "${PRODUCTVER}" = "${majorversion:-0}.${minorversion:-0}" ] || [ ! "${BUILDNUM}" = "${buildnumber:-0}" ] || [ ! "${SMALLNUM}" = "${smallfixnumber:-0}" ]); then
([ ! "${PRODUCTVER}" = "${majorversion}.${minorversion}" ] || [ ! "${BUILDNUM}" = "${buildnumber}" ] || [ ! "${SMALLNUM}" = "${smallfixnumber}" ]); then
OLDVER="${PRODUCTVER}(${BUILDNUM}$([ ${SMALLNUM:-0} -ne 0 ] && echo "u${SMALLNUM}"))"
NEWVER="${majorversion}.${minorversion}(${buildnumber}$([ ${smallfixnumber:-0} -ne 0 ] && echo "u${smallfixnumber}"))"
echo -e "\033[A\n\033[1;32mBuild number changed from \033[1;31m${OLDVER}\033[1;32m to \033[1;31m${NEWVER}\033[0m"
@ -80,11 +73,14 @@ writeConfigKey "productver" "${PRODUCTVER}" "${USER_CONFIG_FILE}"
writeConfigKey "buildnum" "${BUILDNUM}" "${USER_CONFIG_FILE}"
writeConfigKey "smallnum" "${SMALLNUM}" "${USER_CONFIG_FILE}"
declare -A SYNOINFO
declare -A ADDONS
declare -A MODULES
declare -A SYNOINFO
# Read addons, modules and synoinfo from user config
# Read synoinfo and addons from config
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}")"
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && ADDONS["${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "addons" "${USER_CONFIG_FILE}")"
@ -94,23 +90,19 @@ while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && MODULES["${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "modules" "${USER_CONFIG_FILE}")"
# SYNOINFO["SN"]="${SN}"
while IFS=': ' read -r KEY VALUE; do
[ -n "${KEY}" ] && SYNOINFO["${KEY}"]="${VALUE}"
done <<<"$(readConfigMap "synoinfo" "${USER_CONFIG_FILE}")"
# Patches (diff -Naru OLDFILE NEWFILE > xxx.patch)
PATCHS=(
"ramdisk-etc-rc-*.patch"
"ramdisk-init-script-*.patch"
"ramdisk-post-init-script-*.patch"
"ramdisk-disable-root-pwd-*.patch"
"ramdisk-disable-disabled-ports-*.patch"
)
for PE in "${PATCHS[@]}"; do
RET=1
echo "Patching with ${PE}" >"${LOG_FILE}"
# ${PE} contains a *, so it must not be quoted (the glob has to expand)
for PF in ${WORK_PATH}/patch/${PE}; do
[ ! -e "${PF}" ] && continue
for PF in $(ls ${WORK_PATH}/patch/${PE} 2>/dev/null); do
echo "Patching with ${PF}" >>"${LOG_FILE}"
# busybox patch and GNU patch have different processing methods and parameters.
(cd "${RAMDISK_PATH}" && busybox patch -p1 -i "${PF}") >>"${LOG_FILE}" 2>&1
@ -120,11 +112,50 @@ for PE in "${PATCHS[@]}"; do
[ ${RET} -ne 0 ] && exit 1
done
mkdir -p "${RAMDISK_PATH}/addons"
# Patch /etc/synoinfo.conf /etc.defaults/synoinfo.conf
echo -n "."
# Add the serial number to synoinfo.conf, to help recover an installed DSM
echo "Set synoinfo SN" >"${LOG_FILE}"
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "SN" "${SN}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
for KEY in "${!SYNOINFO[@]}"; do
echo "Set synoinfo ${KEY}" >>"${LOG_FILE}"
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
_set_conf_kv "${KEY}" "${SYNOINFO[${KEY}]}" "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" >>"${LOG_FILE}" 2>&1 || exit 1
done
# Patch /sbin/init.post
# Apply config manipulators
grep -v -e '^[\t ]*#' -e '^$' "${WORK_PATH}/patch/config-manipulators.sh" >"${TMP_PATH}/rp.txt"
sed -e "/@@@CONFIG-MANIPULATORS-TOOLS@@@/ {" -e "r ${TMP_PATH}/rp.txt" -e 'd' -e '}' -i "${RAMDISK_PATH}/sbin/init.post"
rm -f "${TMP_PATH}/rp.txt"
# Generate synoinfo configurations
{
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc/synoinfo.conf'"
echo "_set_conf_kv 'SN' '${SN}' '/tmpRoot/etc.defaults/synoinfo.conf'"
for KEY in "${!SYNOINFO[@]}"; do
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc/synoinfo.conf'"
echo "_set_conf_kv '${KEY}' '${SYNOINFO[${KEY}]}' '/tmpRoot/etc.defaults/synoinfo.conf'"
done
} >"${TMP_PATH}/rp.txt"
sed -e "/@@@CONFIG-GENERATED@@@/ {" -e "r ${TMP_PATH}/rp.txt" -e 'd' -e '}' -i "${RAMDISK_PATH}/sbin/init.post"
rm -f "${TMP_PATH}/rp.txt"
# Extract ck modules to ramdisk
echo -n "."
installModules "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" "${!MODULES[@]}" || exit 1
# Copying fake modprobe
[ $(echo "${KVER:-4}" | cut -d'.' -f1) -lt 5 ] && cp -f "${WORK_PATH}/patch/iosched-trampoline.sh" "${RAMDISK_PATH}/usr/sbin/modprobe"
# Copying LKM to /usr/lib/modules
gzip -dc "${LKMS_PATH}/rp-${PLATFORM}-$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}-${LKM}.ko.gz" >"${RAMDISK_PATH}/usr/lib/modules/rp.ko" 2>"${LOG_FILE}" || exit 1
# Addons
echo -n "."
echo "Create addons.sh" >"${LOG_FILE}"
mkdir -p "${RAMDISK_PATH}/addons"
{
echo "#!/bin/sh"
echo 'echo "addons.sh called with params ${@}"'
@ -142,54 +173,30 @@ echo "Create addons.sh" >"${LOG_FILE}"
chmod +x "${RAMDISK_PATH}/addons/addons.sh"
# This order cannot be changed.
for ADDON in "redpill" "revert" "misc" "eudev" "disks" "localrss" "notify" "wol"; do
for ADDON in "redpill" "revert" "misc" "eudev" "disks" "localrss" "notify" "wol" "rndis"; do
PARAMS=""
if [ "${ADDON}" = "disks" ]; then
[ -f "${USER_UP_PATH}/model.dts" ] && cp -f "${USER_UP_PATH}/model.dts" "${RAMDISK_PATH}/addons/model.dts"
PARAMS=${HDDSORT}
[ -f "${USER_UP_PATH}/${MODEL}.dts" ] && cp -f "${USER_UP_PATH}/${MODEL}.dts" "${RAMDISK_PATH}/addons/model.dts"
fi
installAddon "${ADDON}" "${PLATFORM}" "${KPRE:+${KPRE}-}${KVER}" || exit 1
installAddon "${ADDON}" "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" || exit 1
echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >>"${RAMDISK_PATH}/addons/addons.sh" 2>>"${LOG_FILE}" || exit 1
done
# User addons
for ADDON in "${!ADDONS[@]}"; do
PARAMS=${ADDONS[${ADDON}]}
installAddon "${ADDON}" "${PLATFORM}" "${KPRE:+${KPRE}-}${KVER}" || exit 1
installAddon "${ADDON}" "${PLATFORM}" "$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}" || exit 1
echo "/addons/${ADDON}.sh \${1} ${PARAMS}" >>"${RAMDISK_PATH}/addons/addons.sh" 2>>"${LOG_FILE}" || exit 1
done
# Extract ck modules to ramdisk
echo -n "."
installModules "${PLATFORM}" "${KPRE:+${KPRE}-}${KVER}" "${!MODULES[@]}" || exit 1
# Copying fake modprobe
[ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ] && cp -f "${WORK_PATH}/patch/iosched-trampoline.sh" "${RAMDISK_PATH}/usr/sbin/modprobe"
# Copying LKM to /usr/lib/modules
gzip -dc "${LKMS_PATH}/rp-${PLATFORM}-${KPRE:+${KPRE}-}${KVER}-${LKM}.ko.gz" >"${RAMDISK_PATH}/usr/lib/modules/rp.ko" 2>"${LOG_FILE}" || exit 1
# Patch synoinfo.conf
echo -n "."
echo -n "" >"${RAMDISK_PATH}/addons/synoinfo.conf"
for KEY in "${!SYNOINFO[@]}"; do
echo "Set synoinfo ${KEY}" >>"${LOG_FILE}"
echo "${KEY}=\"${SYNOINFO[${KEY}]}\"" >>"${RAMDISK_PATH}/addons/synoinfo.conf"
_set_conf_kv "${RAMDISK_PATH}/etc/synoinfo.conf" "${KEY}" "${SYNOINFO[${KEY}]}" || exit 1
_set_conf_kv "${RAMDISK_PATH}/etc.defaults/synoinfo.conf" "${KEY}" "${SYNOINFO[${KEY}]}" || exit 1
done
if [ ! -x "${RAMDISK_PATH}/usr/bin/get_key_value" ]; then
printf '#!/bin/sh\n%s\n_get_conf_kv "$@"' "$(declare -f _get_conf_kv)" >"${RAMDISK_PATH}/usr/bin/get_key_value"
chmod a+x "${RAMDISK_PATH}/usr/bin/get_key_value"
fi
if [ ! -x "${RAMDISK_PATH}/usr/bin/set_key_value" ]; then
printf '#!/bin/sh\n%s\n_set_conf_kv "$@"' "$(declare -f _set_conf_kv)" >"${RAMDISK_PATH}/usr/bin/set_key_value"
chmod a+x "${RAMDISK_PATH}/usr/bin/set_key_value"
fi
# Enable Telnet
echo "inetd" >>"${RAMDISK_PATH}/addons/addons.sh"
echo -n "."
echo "Modify files" >"${LOG_FILE}"
# Remove the "function" keyword from addon scripts
[ "2" = "${BUILDNUM:0:1}" ] && find "${RAMDISK_PATH}/addons/" -type f -name "*.sh" -exec sed -i 's/function //g' {} \;
[ "2" = "${BUILDNUM:0:1}" ] && sed -i 's/function //g' $(find "${RAMDISK_PATH}/addons/" -type f -name "*.sh")
# Build module dependencies
# ${WORK_PATH}/depmod -a -b ${RAMDISK_PATH} 2>/dev/null # addon eudev will do this
@ -202,12 +209,6 @@ else
fi
# backup current loader configs
mkdir -p "${RAMDISK_PATH}/usr/rr"
{
echo "LOADERLABEL=\"RR\""
echo "LOADERRELEASE=\"${RR_RELEASE}\""
echo "LOADERVERSION=\"${RR_VERSION}\""
} >"${RAMDISK_PATH}/usr/rr/VERSION"
BACKUP_PATH="${RAMDISK_PATH}/usr/rr/backup"
rm -rf "${BACKUP_PATH}"
for F in "${USER_GRUB_CONFIG}" "${USER_CONFIG_FILE}" "${USER_LOCALE_FILE}" "${USER_UP_PATH}" "${SCRIPTS_PATH}"; do
@ -233,9 +234,7 @@ for N in $(seq 0 7); do
done
# issues/313
if [ "$(echo "${KVER:-4}" | cut -d'.' -f1)" -lt 5 ]; then
:
else
if [ "${PLATFORM}" = "epyc7002" ]; then
sed -i 's#/dev/console#/var/log/lrc#g' "${RAMDISK_PATH}/usr/bin/busybox"
sed -i '/^echo "START/a \\nmknod -m 0666 /dev/console c 1 3' "${RAMDISK_PATH}/linuxrc.syno"
fi
@ -246,10 +245,8 @@ fi
# Call user patch scripts
echo -n "."
for F in ${SCRIPTS_PATH}/*.sh; do
[ ! -e "${F}" ] && continue
for F in $(ls -1 "${SCRIPTS_PATH}/"*.sh 2>/dev/null); do
echo "Calling ${F}" >"${LOG_FILE}"
# shellcheck source=/dev/null
. "${F}" >>"${LOG_FILE}" 2>&1 || exit 1
done
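
Any *.sh dropped into ${SCRIPTS_PATH} is sourced by the loop above inside the ramdisk-patch environment. A hypothetical example of such a user patch script (the file name and the tweak are illustrative only; it uses the name/value/path signature of _set_conf_kv from config-manipulators.sh):

```shell
# Hypothetical ${SCRIPTS_PATH}/99-custom.sh, sourced by the loop above
echo "applying custom synoinfo tweak" >>"${LOG_FILE}"
_set_conf_kv "support_led_brightness_adjustment" "no" "${RAMDISK_PATH}/etc/synoinfo.conf"
```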

View File

@ -45,13 +45,6 @@
- "TQR"
suffix: "alpha"
macpre: 9009d0
"DS925+":
prefix:
- "2520"
middle:
- "YHR"
suffix: "alpha"
macpre: 9009d0
"DS1019+":
prefix:
- "1850"
@ -72,13 +65,6 @@
- "TRR"
suffix: "alpha"
macpre: 9009d0
"DS1525+":
prefix:
- "2540"
middle:
- "YJR"
suffix: "alpha"
macpre: 9009d0
"DS1621+":
prefix:
- "2080"
@ -109,13 +95,6 @@
middle:
- "V5R"
suffix: "alpha"
"DS1825+":
prefix:
- "2540"
middle:
- "WDR"
suffix: "alpha"
macpre: 9009d0
"DS2419+":
prefix:
- "1880"

View File

@ -1,15 +1,9 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
# Based on code and ideas from @jumkey
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
calc_run_size() {
calculate_run_size() {
NUM='\([0-9a-fA-F]*[ \t]*\)'
OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"${NUM}${NUM}${NUM}${NUM}"'.*/0x\1 0x\4/p')
@ -18,7 +12,7 @@ calc_run_size() {
return 1
fi
read -r sizeA offsetA sizeB offsetB <<<"$(echo "${OUT}" | awk '{printf "%d %d %d %d", strtonum($1), strtonum($2), strtonum($3), strtonum($4)}')"
read -r sizeA offsetA sizeB offsetB <<<$(echo ${OUT} | awk '{printf "%d %d %d %d", strtonum($1), strtonum($2), strtonum($3), strtonum($4)}')
runSize=$((offsetA + sizeA + sizeB))
@ -27,10 +21,10 @@ calc_run_size() {
# Gold linker shows them as consecutive.
endSize=$((offsetB + sizeB))
if [ "${endSize}" -ne "${runSize}" ]; then
printf "sizeA: 0x%x\n" "${sizeA}" >&2
printf "offsetA: 0x%x\n" "${offsetA}" >&2
printf "sizeB: 0x%x\n" "${sizeB}" >&2
printf "offsetB: 0x%x\n" "${offsetB}" >&2
printf "sizeA: 0x%x\n" ${sizeA} >&2
printf "offsetA: 0x%x\n" ${offsetA} >&2
printf "sizeB: 0x%x\n" ${sizeB} >&2
printf "offsetB: 0x%x\n" ${offsetB} >&2
echo ".bss and .brk are non-contiguous" >&2
return 1
fi
@ -44,23 +38,23 @@ calc_run_size() {
# Usage: file_size_le FILE [FILE2] [FILEn]...
# Output: combined size of the files as little-endian bytes (to STDOUT)
file_size_le() {
printf "$(
printf $(
local dec_size=0
for F in "$@"; do dec_size=$((dec_size + $(stat -c "%s" "${F}"))); done
printf "%08x\n" "${dec_size}" | sed 's/\(..\)/\1 /g' | {
read -r ch0 ch1 ch2 ch3
for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
}
)"
)
}
size_le() {
printf "$(
printf $(
printf "%08x\n" "${@}" | sed 's/\(..\)/\1 /g' | {
read -r ch0 ch1 ch2 ch3
for ch in "${ch3}" "${ch2}" "${ch1}" "${ch0}"; do printf '%s%03o' '\' "$((0x${ch}))"; done
}
)"
)
}
VMLINUX_MOD=${1}
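
Both helpers emit their result as four raw little-endian bytes rather than text. A worked illustration, assuming size_le has been sourced into an interactive shell:

```shell
# Worked illustration: 4660 = 0x00001234, stored little-endian
size_le 4660 | od -An -tx1
# expected output: 34 12 00 00
```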

View File

@ -1,10 +1,4 @@
#!/usr/bin/env bash
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
[ -z "${WORK_PATH}" ] || [ ! -d "${WORK_PATH}/include" ] && WORK_PATH="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
@ -22,14 +16,14 @@ echo -n "Patching zImage"
rm -f "${MOD_ZIMAGE_FILE}"
KERNEL="$(readConfigKey "kernel" "${USER_CONFIG_FILE}")"
if [ "${KERNEL}" = "custom" ]; then
echo -n "."
PLATFORM="$(readConfigKey "platform" "${USER_CONFIG_FILE}")"
KVER="$(readConfigKey "kver" "${USER_CONFIG_FILE}")"
KPRE="$(readConfigKey "kpre" "${USER_CONFIG_FILE}")"
PRODUCTVER="$(readConfigKey "productver" "${USER_CONFIG_FILE}")"
KVER="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kver" "${WORK_PATH}/platforms.yml")"
KPRE="$(readConfigKey "platforms.${PLATFORM}.productvers.\"${PRODUCTVER}\".kpre" "${WORK_PATH}/platforms.yml")"
# Extract bzImage
gzip -dc "${CKS_PATH}/bzImage-${PLATFORM}-${KPRE:+${KPRE}-}${KVER}.gz" >"${MOD_ZIMAGE_FILE}"
gzip -dc "${CKS_PATH}/bzImage-${PLATFORM}-$([ -n "${KPRE}" ] && echo "${KPRE}-")${KVER}.gz" >"${MOD_ZIMAGE_FILE}"
echo -n "..."
else
echo -n "."

View File

@ -1 +1 @@
25.9.1
25.1.0

View File

@ -1,20 +0,0 @@
# GRUB gfxpayload blacklist. The format is a sequence of lines of the
# following form, using lower-case hexadecimal for all ID components:
#
# vVENDORdDEVICEsvSUBVENDORsdSUBDEVICEbcBASECLASSscSUBCLASS
#
# Blacklist lines are regex-matched (currently using Lua's string.find with
# the line surrounded by ^ and $) against a corresponding PCI ID string. In
# practice this means that you can replace any part of the ID string with .*
# to match anything.
#
# There is no need to customise this file locally. If you need to disable
# gfxpayload=keep on your system, just add this line (uncommented) to
# /etc/default/grub:
#
# GRUB_GFXPAYLOAD_LINUX=text
v15add0710.*
v15add0405.*
v80eedbeef.*
v1002d6738.*
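
Each blacklist pattern is matched against an ID string of the form vVENDORdDEVICEsvSUBVENDORsdSUBDEVICEbcBASECLASSscSUBCLASS. A hedged sketch of composing such a string by hand to eyeball it against the patterns (the IDs below are examples):

```shell
# Hedged sketch: build the blacklist-style ID string for one device (example IDs)
VEN=15ad; DEV=0710; SVEN=15ad; SDEV=0710; BC=03; SC=00
printf 'v%sd%ssv%ssd%sbc%ssc%s\n' "$VEN" "$DEV" "$SVEN" "$SDEV" "$BC" "$SC"
# -> v15add0710sv15adsd0710bc03sc00, which the "v15add0710.*" pattern above matches
```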

View File

@ -1,64 +1,41 @@
set default="boot"
set timeout="5"
set timeout_style="menu"
set pager=1
set vesa_mode=1
set color_normal=white/black
set menu_color_normal=light-cyan/black
set menu_color_highlight=black/cyan
if [ -s ${prefix}/grubenv ]; then
load_env --skip-sig --file=${prefix}/grubenv
load_env --skip-sig
fi
if [ "${next_entry}" ]; then
set default="${next_entry}"
unset next_entry
save_env next_entry
else
set default="boot"
fi
if [ "${vesa_mode}" ]; then
set vesa_mode=${vesa_mode}
fi
if [ "${linux_gfx_mode}" ]; then
save_env linux_gfx_mode
terminal_input console
terminal_output console
if [ "${feature_all_video_module}" = "y" ]; then
insmod all_video
else
set linux_gfx_mode=keep
save_env linux_gfx_mode
insmod efi_gop
insmod efi_uga
insmod vbe
insmod vga
insmod video_bochs
insmod video_cirrus
fi
if [ x"${feature_menuentry_id}" = xy ]; then
menuentry_id_option="--id"
else
menuentry_id_option=""
fi
export menuentry_id_option
function load_video {
if [ x"${feature_all_video_module}" = xy ]; then
insmod all_video
else
insmod efi_gop
insmod efi_uga
insmod ieee1275_fb
insmod vbe
insmod vga
insmod video_bochs
insmod video_cirrus
fi
}
if [ x"${feature_default_font_path}" = xy ]; then
font=unicode
else
font=${prefix}/fonts/unicode.pf2
fi
terminal_output console
if loadfont ${font}; then
if loadfont unicode; then
set gfxmode=auto
load_video
insmod gfxterm
set locale_dir=$prefix/locale
set lang=en_US
insmod gettext
terminal_output --append gfxterm
terminal_output gfxterm
fi
if serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1; then
@ -66,29 +43,26 @@ if serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1; then
terminal_output --append serial
fi
set color_normal=white/black
set menu_color_normal=light-cyan/black
set menu_color_highlight=black/cyan
insmod png
background_image ${prefix}/logo.png
set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait intremap=off amd_iommu_intr=legacy net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable amd_pstate=disable nox2apic nomodeset nowatchdog"
function set_gfxpayload {
if [ ${vesa_mode} -eq 1 ]; then
set gfxpayload=keep
else
set gfxpayload=text
fi
}
insmod part_gpt
insmod lvm
search --set=root --label "RR3" --no-floppy
if [ -e /initrd-rru ]; then set RRU=/initrd-rru; fi
if [ -e /microcode.img ]; then set MCI=/microcode.img; fi
set RR_CMDLINE="earlyprintk earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 root=/dev/ram rootwait intremap=off amd_iommu_intr=legacy net.ifnames=0 panic=5 split_lock_detect=off pcie_aspm=off intel_pstate=disable amd_pstate=disable nox2apic nomodeset"
search --set=root --label "RR3"
if [ -s /zImage-dsm -a -s /initrd-dsm ]; then
if [ "${default}" = "direct" ]; then
set timeout="1"
if [ -s ${prefix}/rsysenv ]; then
load_env --skip-sig --file=${prefix}/rsysenv
fi
menuentry 'Boot DSM kernel directly' ${menuentry_id_option} direct {
set gfxpayload="${linux_gfx_mode}"
set pager=0
menuentry 'Boot DSM kernel directly' --id direct {
set_gfxpayload
echo "RRVersion: ${rr_version}"
echo "${rr_booting}"
echo -n "Boot Time: "; date
@ -107,79 +81,82 @@ if [ -s /zImage-dsm -a -s /initrd-dsm ]; then
echo "Loading DSM kernel..."
linux /zImage-dsm ${dsm_cmdline}
echo "Loading DSM initramfs..."
initrd ${MCI} /initrd-dsm
initrd /initrd-dsm
echo "Booting..."
echo "Access http://find.synology.com/ to connect the DSM via web."
}
fi
menuentry 'Boot DSM' ${menuentry_id_option} boot {
set gfxpayload="${linux_gfx_mode}"
menuentry 'Boot DSM' --id boot {
set_gfxpayload
echo "Loading kernel..."
linux /bzImage-rr ${RR_CMDLINE} ${rr_cmdline}
echo "Loading initramfs..."
initrd ${MCI} /initrd-rr ${RRU}
if [ -e /initrd-rru ]; then
initrd /initrd-rr /initrd-rru
else
initrd /initrd-rr
fi
echo "Booting..."
}
menuentry 'Boot Recovery' ${menuentry_id_option} recovery {
set gfxpayload="${linux_gfx_mode}"
menuentry 'Boot Recovery' --id recovery {
set_gfxpayload
echo "Loading kernel..."
linux /bzImage-rr ${RR_CMDLINE} ${rr_cmdline} recovery
echo "Loading initramfs..."
initrd ${MCI} /initrd-rr ${RRU}
if [ -e /initrd-rru ]; then
initrd /initrd-rr /initrd-rru
else
initrd /initrd-rr
fi
echo "Booting..."
}
menuentry 'Force re-install DSM' ${menuentry_id_option} junior {
set gfxpayload="${linux_gfx_mode}"
menuentry 'Force re-install DSM' --id junior {
set_gfxpayload
echo "Loading kernel..."
linux /bzImage-rr ${RR_CMDLINE} ${rr_cmdline} force_junior
echo "Loading initramfs..."
initrd ${MCI} /initrd-rr ${RRU}
if [ -e /initrd-rru ]; then
initrd /initrd-rr /initrd-rru
else
initrd /initrd-rr
fi
echo "Booting..."
}
fi
menuentry 'Configure loader' ${menuentry_id_option} config {
set gfxpayload="${linux_gfx_mode}"
menuentry 'Configure loader' --id config {
set_gfxpayload
echo "Loading kernel..."
linux /bzImage-rr earlycon=uart8250,io,0x3f8,115200n8 console=ttyS0,115200n8 ${RR_CMDLINE} ${rr_cmdline} IWANTTOCHANGETHECONFIG
linux /bzImage-rr ${RR_CMDLINE} ${rr_cmdline} IWANTTOCHANGETHECONFIG
echo "Loading initramfs..."
initrd ${MCI} /initrd-rr ${RRU}
if [ -e /initrd-rru ]; then
initrd /initrd-rr /initrd-rru
else
initrd /initrd-rr
fi
echo "Booting..."
}
menuentry 'Configure loader (verbose)' ${menuentry_id_option} verbose {
set gfxpayload="${linux_gfx_mode}"
echo "Loading kernel..."
linux /bzImage-rr ${RR_CMDLINE} ${rr_cmdline} earlycon=tty2 console=tty2 IWANTTOCHANGETHECONFIG
echo "Loading initramfs..."
initrd ${MCI} /initrd-rr ${RRU}
echo "Booting..."
menuentry 'Enter BIOS Setup' --id bios {
fwsetup
}
if [ "${grub_platform}" = "efi" ]; then
insmod bli
menuentry 'UEFI Firmware Settings' ${menuentry_id_option} uefi {
fwsetup
}
fi
menuentry 'Start Memtest86+' --id memtest {
echo "Loading memtest86+..."
linux ${prefix}/memtest
}
if [ -e ${prefix}/memtest ]; then
menuentry 'Start Memtest86+' ${menuentry_id_option} memtest {
echo "Loading memtest86+..."
linux ${prefix}/memtest
}
fi
if [ "${linux_gfx_mode}" = "keep" ]; then
menuentry 'Change vesa to text video mode' ${menuentry_id_option} videomode {
set linux_gfx_mode=text
save_env linux_gfx_mode
if [ ${vesa_mode} = 1 ]; then
menuentry 'Change vesa to text video mode' --id videomode {
set vesa_mode=0
save_env vesa_mode
configfile ${prefix}/grub.cfg
}
else
menuentry 'Change text to vesa video mode' ${menuentry_id_option} videomode {
set linux_gfx_mode=keep
save_env linux_gfx_mode
menuentry 'Change text to vesa video mode' --id videomode {
set vesa_mode=1
save_env vesa_mode
reboot
configfile ${prefix}/grub.cfg
}
fi
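
The newer config reads next_entry from grubenv to pick a one-shot default entry. A hedged sketch of setting it from a running system, assuming grub-editenv is available and grubenv sits next to grub.cfg on the mounted RR boot partition (the path below is an assumption):

```shell
# Hedged sketch: boot the "recovery" entry once on the next reboot (path is an assumption)
grub-editenv /mnt/p1/boot/grub/grubenv set next_entry=recovery
grub-editenv /mnt/p1/boot/grub/grubenv list
```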

View File

@ -6,7 +6,6 @@
* sftp 工具 WinSCP (下载: https://winscp.net/eng/index.php)
* 文本编辑工具 Notepad3 (下载: https://github.com/rizonesoft/Notepad3/releases)
* 镜像写盘工具 Rufus (下载: https://rufus.ie/zh/)
* 镜像转换工具 qemu-img (下载: https://cloudbase.it/qemu-img-windows/)
* 镜像转换工具 StarWind V2V Image Converter (下载: https://www.starwindsoftware.com/starwind-v2v-converter)
* 磁盘管理工具 Diskgenius (下载: https://www.diskgenius.com/)
@ -34,33 +33,12 @@
* https://dataupdate7.synology.com/toolchain/v1/get_download_list?identify=toolkit&version=7.2&platform=purley
# 安装条件
1. 引导盘:当前支持 SATA/SCSI/NVME/MMC/IDE or USB 设备, 且要大于 2GB. (SCSI 比较复杂, 并不是全部可用)
2. 安装盘: 至少需要 1 SATA 接口硬盘(DT 型号支持 NVME 安装) 或者 1 个 MMC 作为存储设备. 且要大于 32GB 才可创建存储池.
1. 引导盘:当前支持 SATA/SCSI/NVME/MMC/IDE or USB 设备, 且要大于 2GB. (SCSI比较复杂, 并不是全部可用)
2. 安装盘: 至少需要1个SATA接口硬盘 或者 1个 MMC 作为存储设备. 且要大于 32GB 才可创建存储池.
3. 内存: 需要大于 4GB.
4. DT 的型号目前不支持 HBA 扩展卡(较新版本的RR引导 SA6400 支持).
5. NVME 的 PCIPATH 有两种格式, 单层深度路径的仅兼容 DT 的型号, 多层深度路径的兼容 DT 和非 DT 等型号.
4. DT的型号目前不支持HBA扩展卡(较新版本的RR引导 SA6400 支持).
5. NVME的PCIPATH有两种格式, 单层路径的兼容 DT 的型号, 多层路径的兼容 DS918+ 等型号.
# 镜像格式
```shell
# 安装 qemu-img
# https://cloudbase.it/qemu-img-windows/ # Windows
# apt install qemu-img # Debian/Ubuntu
# yum install qemu-img # CentOS
# brew install qemu-img # MacOS
# img to vmdk (VMWare / ESXi6 / ESXi7)
qemu-img convert -O vmdk -o adapter_type=lsilogic,subformat=streamOptimized,compat6 rr.img rr.vmdk
# img to vmdk (ESXi8)
qemu-img convert -O vmdk -o adapter_type=lsilogic,subformat=monolithicFlat,compat6 rr.img rr.vmdk
# img to vhdx (Hyper-V)
qemu-img convert -O vhdx -o subformat=dynamic rr.img rr.vhdx
# img to vhd (Parallels Desktop)
qemu-img convert -O vpc rr.img rr.vhd
```
# GPU
* vGPU: https://blog.kkk.rs/
@ -79,7 +57,7 @@
# 解压 并写入到引导盘
# Decompress and write to the boot disk
# 获取当前的引导盘
LOADER_DISK="$(blkid -L RR3 2>/dev/null | cut -d3 -f1)"
LOADER_DISK="$(blkid | grep 'LABEL="RR3"' | cut -d3 -f1)"
unzip -p rr.zip | dd of=${LOADER_DISK} bs=1M conv=fsync
# 重启 reboot
reboot
@ -87,7 +65,7 @@
* RR 备份 (Any version):
```shell
# 备份为 disk.img.gz, 自行导出.
dd if="$(blkid -L RR3 2>/dev/null | cut -d3 -f1)" | gzip > disk.img.gz
dd if="$(blkid | grep 'LABEL="RR3"' | cut -d3 -f1)" | gzip > disk.img.gz
# 结合 transfer.sh 直接导出链接
curl -skL --insecure -w '\n' --upload-file disk.img.gz https://transfer.sh
```
@ -186,14 +164,9 @@
lsblk # 查看磁盘设备
lspci -Qnnk # 查看 PCI 设备
# 网卡
# 驱动相关
ls -ld /sys/class/net/*/device/driver # 查看已加载网卡和对应驱动
cat /sys/class/net/*/address # 查看已加载网卡的 MAC 地址
ethtool -i eth0 # 查看网卡驱动信息
ethtool -s eth0 wol g # 配置网卡进入低功耗模式
ethtool -s eth0 autoneg on # 开启网卡自动协商
ethtool -s eth0 speed 1000 # 设置网卡速度为 1000 Mbps
ethtool -s eth0 duplex full # 设置网卡全双工
# 串口
cat /proc/tty/drivers # 查看串口属性
@ -252,14 +225,13 @@
mdadm --zero-superblock /dev/sda1 # 清除 sda1 磁盘分区的 RAID 超级块 (使这个磁盘分区不再被识别为 RAID 设备的一部分)
# eudev
udevadm control --reload-rules # 重新加载 udev 规则
udevadm trigger # 触发 udev 事件
udevadm info --query all --name /dev/sata1 # 查看 udev 属性
udevadm control --reload-rules # 重新加载 udev 规则
udevadm trigger # 触发 udev 事件
udevadm info --query all --name /dev/sda1 # 查看 udev 属性
udevadm info --query all --path /sys/class/net/eth0 # 查看 udev 属性
udevadm info --attribute-walk --name=/dev/sata1 # 列出 udev 属性
udevadm monitor --property --udev # 监控 udev 事件
udevadm test /dev/sata1 # 测试 udev 规则
udevadm monitor --property --udev # 监控 udev 事件
udevadm test /dev/sda1 # 测试 udev 规则
# 服务相关
journalctl -xe # 查看服务日志
systemctl # 查看服务
@ -276,19 +248,14 @@
lsof -i :7681 # 查看 7681 端口占用情况
# CPU
cat /sys/devices/system/cpu/cpufreq/boost # 查看 CPU 睿频状态
echo 1 > /sys/devices/system/cpu/cpufreq/boost # 开启 CPU 睿频
echo 0 > /sys/devices/system/cpu/cpufreq/boost # 关闭 CPU 睿频
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_available_governors # 查看可用的 CPU 频率调节器状态
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor # 查看 CPU 频率调节器状态
echo userspace | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor # 设置 CPU 频率调节器状态为 userspace
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_cur_freq # 查看 CPU 当前频率
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq # 查看 CPU 最大频率
echo 2000000 | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_max_freq # 设置 CPU 最大频率
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq # 查看 CPU 最小频率
echo 1000000 | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_min_freq # 设置 CPU 最小频率
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_setspeed # 查看 CPU 设置频率
echo 1000000 | tee /sys/devices/system/cpu/cpu*/cpufreq/scaling_setspeed # 设置 CPU 设置频率
cat /sys/devices/system/cpu/cpufreq/boost # 查看 CPU 睿频状态
echo 1 > /sys/devices/system/cpu/cpufreq/boost # 开启 CPU 睿频
echo 0 > /sys/devices/system/cpu/cpufreq/boost # 关闭 CPU 睿频
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_available_governors # 查看可用的 CPU 频率调节器状态
cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor # 查看 CPU 频率调节器状态
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq # 查看 CPU 当前频率
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq # 查看 CPU 最大频率
cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq # 查看 CPU 最小频率
# 日志相关
@ -306,7 +273,7 @@
lspci -d ::302 # 查看 3D 控制器 (不是 VGA 兼容)
# Intel GPU
lspci -nd ::300 | cut -d' ' -f3 # PIDVID
lspci -nd ::300 | cut -d " " -f 3 # PIDVID
ls /dev/dri # 查看显卡设备
cat /sys/kernel/debug/dri/0/i915_frequency_info # 显卡驱动详细信息
@ -329,25 +296,11 @@
synodsdefault --reinstall # 重装系统
synodsdefault --factory-default # 重置系统 (清空全部数据)
# 虚拟机
virsh -h # 列出所有虚拟机命令
virsh list --all # 列出所有虚拟机
virsh console <guest_name> # 进入虚拟机控制台
etcdctl -h # 列出所有 etcd 命令
etcdctl ls /syno/live_cluster/guests/ # 列出所有虚拟机 (etcd)
# API
# 获取系统信息
synowebapi --exec api=SYNO.Core.System method=info version=3
synowebapi --exec api=SYNO.Core.System method=info version=3 type="firmware"
synowebapi --exec api=SYNO.Core.System method=info
# 获取设备信息
synowebapi --exec api=SYNO.Core.System.Utilization method=get version=1
# 关机
synowebapi --exec api=SYNO.Core.System method=shutdown version=2 local=true force=false
# 重启
synowebapi --exec api=SYNO.Core.System method=reboot version=2 local=true force=false
# 关闭 自动 https 重定向
synowebapi --exec api=SYNO.Core.Web.DSM method=set version=2 enable_https_redirect=false
# 开启 telnet/ssh

View File

@ -37,14 +37,14 @@ function create() {
sudo apt update
sudo apt install -y locales busybox dialog gettext sed gawk jq curl
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils dosfstools cpio xz-utils lz4 lzma bzip2 gzip zstd
sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# sudo snap install yq
if ! type yq >/dev/null 2>&1 || ! yq --version 2>/dev/null | grep -q "v4."; then
if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
fi
# Move the EXTERNALLY-MANAGED marker aside (keeping a backup) so pip3 can install packages system-wide.
sudo mv -f "$(realpath "$(which python3)")/EXTERNALLY-MANAGED" "$(realpath "$(which python3)")/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
sudo pip3 install -U click requests requests-toolbelt qrcode[pil] beautifulsoup4
sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
@ -52,11 +52,6 @@ function create() {
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
# Check partitions and ignore errors
fsck.vfat -aw "${LOOPX}p1" >/dev/null 2>&1 || true
fsck.ext2 -p "${LOOPX}p2" >/dev/null 2>&1 || true
fsck.ext4 -p "${LOOPX}p3" >/dev/null 2>&1 || true
echo "Mounting image file"
for i in {1..3}; do
rm -rf "/tmp/mnt/p${i}"
@ -112,11 +107,11 @@ function init() {
exit 1
fi
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
pushd "${CHROOT_PATH}/initrd/opt/rr" || exit 1
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
echo "init"
./init.sh
local RET=$?
popd || exit 1
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
exit ${RET}
}
@ -128,7 +123,7 @@ function config() {
fi
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" || exit 1
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do
if [ -z "${1}" ]; then
echo "menu"
@ -143,7 +138,7 @@ function config() {
fi
break
done
popd || exit 1
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
exit ${RET}
}
@ -155,7 +150,7 @@ function build() {
fi
. "$(dirname "${BASH_SOURCE[0]}")/rr.env"
local RET=1
pushd "${CHROOT_PATH}/initrd/opt/rr" || exit 1
pushd "${CHROOT_PATH}/initrd/opt/rr" >/dev/null
while true; do
echo "build"
./menu.sh make -1 || break
@ -164,7 +159,7 @@ function build() {
RET=0
break
done
popd || exit 1
popd >/dev/null
[ ${RET} -ne 0 ] && echo "Failed." || echo "Success."
exit ${RET}
}
@ -178,18 +173,14 @@ function pack() {
local RRIMGPATH LOOPX
RRIMGPATH="$(realpath "${1:-rr.img}")"
rm -f "${RRIMGPATH}"
gzip -dc "${CHROOT_PATH}/initrd/opt/rr/grub.img.gz" >"${RRIMGPATH}"
if [ ! -f "${RRIMGPATH}" ]; then
gzip -dc "${CHROOT_PATH}/initrd/opt/rr/grub.img.gz" >"${RRIMGPATH}"
fi
fdisk -l "${RRIMGPATH}"
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${RRIMGPATH}"
# Check partitions and ignore errors
fsck.vfat -aw "${LOOPX}p1" >/dev/null 2>&1 || true
fsck.ext2 -p "${LOOPX}p2" >/dev/null 2>&1 || true
fsck.ext4 -p "${LOOPX}p3" >/dev/null 2>&1 || true
echo "Mounting image file"
for i in {1..3}; do
rm -rf "/tmp/mnt/p${i}"
@ -219,33 +210,4 @@ function pack() {
exit 0
}
function resize() {
local INPUT_FILE="${1}"
local CHANGE_SIZE="${2}"
local OUTPUT_FILE="${3:-${INPUT_FILE}}"
[ -z "${INPUT_FILE}" ] || [ ! -f "${INPUT_FILE}" ] && exit 1
[ -z "${CHANGE_SIZE}" ] && exit 1
INPUT_FILE="$(realpath "${INPUT_FILE}")"
OUTPUT_FILE="$(realpath "${OUTPUT_FILE}")"
local SIZE=$(($(du -sm "${INPUT_FILE}" 2>/dev/null | awk '{print $1}')$(echo "${CHANGE_SIZE}" | sed 's/M//g; s/b//g')))
[ "${SIZE:-0}" -lt 0 ] && exit 1
if [ ! "${INPUT_FILE}" = "${OUTPUT_FILE}" ]; then
sudo cp -f "${INPUT_FILE}" "${OUTPUT_FILE}"
fi
sudo truncate -s ${SIZE}M "${OUTPUT_FILE}"
echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}" >/dev/null 2>&1
local LOOPX LOOPXPY
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${OUTPUT_FILE}"
LOOPXPY="$(find "${LOOPX}p"* -maxdepth 0 2>/dev/null | sort -n | tail -1)"
sudo e2fsck -fp "${LOOPXPY:-${LOOPX}p3}"
sudo resize2fs "${LOOPXPY:-${LOOPX}p3}"
sudo losetup -d "${LOOPX}"
}
"$@"
$@
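
The trailing "$@" dispatches the first command-line argument to one of the functions above. A hedged usage sketch of the pack and resize helpers (file names and the size delta are examples):

```shell
# Hedged usage sketch (example file names/sizes)
./localbuild.sh pack rr-new.img                    # repack the chroot into a fresh image
./localbuild.sh resize rr.img +1024M rr-large.img  # copy rr.img, grow it by ~1 GiB, expand the last partition
```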

View File

@ -1,290 +1,281 @@
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import os, re, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests, urllib3
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
from openpyxl import Workbook
@click.group()
def cli():
"""
The CLI provides commands for RR.
"""
pass
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodels(workpath, jsonpath, xlsxpath):
models = {}
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
P_data = yaml.safe_load(f)
P_platforms = P_data.get("platforms", [])
for P in P_platforms:
productvers = {}
for V in P_platforms[P]["productvers"]:
kpre = P_platforms[P]["productvers"][V].get("kpre", "")
kver = P_platforms[P]["productvers"][V].get("kver", "")
productvers[V] = f"{kpre}-{kver}" if kpre else kver
models[P] = {"productvers": productvers, "models": []}
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
url = "http://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
#url = "https://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
req = session.get(url, timeout=10, verify=False)
req.encoding = "utf-8"
p = re.compile(r"<mUnique>(.*?)</mUnique>.*?<mLink>(.*?)</mLink>", re.MULTILINE | re.DOTALL)
data = p.findall(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for item in data:
if not "DSM" in item[1]:
continue
arch = item[0].split("_")[1]
name = item[1].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in models:
continue
if name in (A for B in models for A in models[B]["models"]):
continue
models[arch]["models"].append(name)
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(models, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["platform", "productvers", "Model"])
for k, v in models.items():
ws.append([k, str(v["productvers"]), str(v["models"])])
wb.save(xlsxpath)
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getpats(workpath, jsonpath, xlsxpath):
def __fullversion(ver):
arr = ver.split('-')
a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
return f'{a}.{b}.{c}-{d}-{e}'
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
data = yaml.safe_load(f)
platforms = data.get("platforms", [])
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
url = "http://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
#url = "https://update7.synology.com/autoupdate/genRSS.php?include_beta=1"
req = session.get(url, timeout=10, verify=False)
req.encoding = "utf-8"
p = re.compile(r"<mUnique>(.*?)</mUnique>.*?<mLink>(.*?)</mLink>", re.MULTILINE | re.DOTALL)
data = p.findall(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
models = []
for item in data:
if not "DSM" in item[1]:
continue
arch = item[0].split("_")[1]
name = item[1].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in platforms:
continue
if name in models:
continue
models.append(name)
pats = {}
for M in models:
pats[M] = {}
version = '7'
urlInfo = "https://www.synology.com/api/support/findDownloadInfo?lang=en-us"
urlSteps = "https://www.synology.com/api/support/findUpgradeSteps?"
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
try:
req = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0].get('checksum', '0' * 32)
}
from_ver = min(I['build'] for I in data['info']['pubVers'])
for I in data['info']['productVers']:
if not I['version'].startswith(version):
continue
if not major or not minor:
majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
try:
reqTmp = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0].get('checksum', '0' * 32)
}
for J in I['versions']:
to_ver = J['build']
try:
reqSteps = session.get(f"{urlSteps}&product={M.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
for S in dataSteps['upgrade_steps']:
if not S.get('full_patch') or not S['build_ver'].startswith(version):
continue
V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if V not in pats[M]:
reqPat = session.head(S['files'][0]['url'].split('?')[0], timeout=10, verify=False)
if reqPat.status_code == 403:
continue
pats[M][V] = {
'url': S['files'][0]['url'].split('?')[0],
'sum': S['files'][0].get('checksum', '0' * 32)
}
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(pats, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Model", "version", "url", "sum"])
for k1, v1 in pats.items():
for k2, v2 in v1.items():
ws.append([k1, k2, v2["url"], v2["sum"]])
wb.save(xlsxpath)
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getaddons(workpath, jsonpath, xlsxpath):
AS = glob.glob(os.path.join(workpath, "mnt", "p3", "addons", "*", "manifest.yml"))
AS.sort()
addons = {}
for A in AS:
with open(A, "r") as file:
A_data = yaml.safe_load(file)
A_name = A_data.get("name", "")
A_system = A_data.get("system", False)
A_description = A_data.get("description", {"en_US": "Unknown", "zh_CN": "Unknown"})
addons[A_name] = {"system": A_system, "description": A_description}
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(addons, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Name", "system", "en_US", "zh_CN"])
for k1, v1 in addons.items():
ws.append([k1, v1.get("system", False), v1.get("description").get("en_US", ""), v1.get("description").get("zh_CN", "")])
wb.save(xlsxpath)
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodules(workpath, jsonpath, xlsxpath):
MS = glob.glob(os.path.join(workpath, "mnt", "p3", "modules", "*.tgz"))
MS.sort()
modules = {}
TMP_PATH = "/tmp/modules"
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
for M in MS:
M_name = os.path.splitext(os.path.basename(M))[0]
M_modules = {}
os.makedirs(TMP_PATH)
with tarfile.open(M, "r") as tar:
tar.extractall(TMP_PATH)
KS = glob.glob(os.path.join(TMP_PATH, "*.ko"))
KS.sort()
for K in KS:
K_name = os.path.splitext(os.path.basename(K))[0]
K_info = kmodule.modinfo(K, basedir=os.path.dirname(K), kernel=None)[0]
K_description = K_info.get("description", "")
K_depends = K_info.get("depends", "")
M_modules[K_name] = {"description": K_description, "depends": K_depends}
modules[M_name] = M_modules
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
if jsonpath:
with open(jsonpath, "w") as file:
json.dump(modules, file, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Name", "Arch", "description", "depends"])
for k1, v1 in modules.items():
for k2, v2 in v1.items():
ws.append([k2, k1, v2["description"], v2["depends"]])
wb.save(xlsxpath)
if __name__ == "__main__":
cli()
# -*- coding: utf-8 -*-
#
# Copyright (C) 2022 Ing <https://github.com/wjz304>
#
# This is free software, licensed under the MIT License.
# See /LICENSE for more information.
#
import os, sys, glob, json, yaml, click, shutil, tarfile, kmodule, requests, urllib3
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry # type: ignore
from openpyxl import Workbook
@click.group()
def cli():
"""
The CLI provides commands for RR.
"""
pass
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodels(workpath, jsonpath, xlsxpath):
models = {}
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
P_data = yaml.safe_load(f)
P_platforms = P_data.get("platforms", [])
for P in P_platforms:
productvers = {}
for V in P_platforms[P]["productvers"]:
kpre = P_platforms[P]["productvers"][V].get("kpre", "")
kver = P_platforms[P]["productvers"][V].get("kver", "")
productvers[V] = f"{kpre}-{kver}" if kpre else kver
models[P] = {"productvers": productvers, "models": []}
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
for model in item["model"]:
arch = model["mUnique"].split("_")[1].lower()
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in models:
continue
if name in (A for B in models for A in models[B]["models"]):
continue
models[arch]["models"].append(name)
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(models, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["platform", "productvers", "Model"])
for k, v in models.items():
ws.append([k, str(v["productvers"]), str(v["models"])])
wb.save(xlsxpath)
@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getpats(workpath, jsonpath, xlsxpath):
def __fullversion(ver):
arr = ver.split('-')
a, b, c = (arr[0].split('.') + ['0', '0', '0'])[:3]
d = arr[1] if len(arr) > 1 else '00000'
e = arr[2] if len(arr) > 2 else '0'
return f'{a}.{b}.{c}-{d}-{e}'
platforms_yml = os.path.join(workpath, "opt", "rr", "platforms.yml")
with open(platforms_yml, "r") as f:
data = yaml.safe_load(f)
platforms = data.get("platforms", [])
adapter = HTTPAdapter(max_retries=Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504]))
session = requests.Session()
session.mount("http://", adapter)
session.mount("https://", adapter)
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
try:
req = session.get("https://autoupdate.synology.com/os/v2", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
return
models = []
for item in data["channel"]["item"]:
if not item["title"].startswith("DSM"):
continue
for model in item["model"]:
arch = model["mUnique"].split("_")[1].lower()
name = model["mLink"].split("/")[-1].split("_")[1].replace("%2B", "+")
if arch not in platforms:
continue
if name in models:
continue
models.append(name)
pats = {}
for M in models:
pats[M] = {}
version = '7'
urlInfo = "https://www.synology.com/api/support/findDownloadInfo?lang=en-us"
urlSteps = "https://www.synology.com/api/support/findUpgradeSteps?"
#urlInfo = "https://www.synology.cn/api/support/findDownloadInfo?lang=zh-cn"
#urlSteps = "https://www.synology.cn/api/support/findUpgradeSteps?"
major = f"&major={version.split('.')[0]}" if len(version.split('.')) > 0 else ""
minor = f"&minor={version.split('.')[1]}" if len(version.split('.')) > 1 else ""
try:
req = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{major}{minor}", timeout=10, verify=False)
req.encoding = "utf-8"
data = json.loads(req.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = data['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = data['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = data['info']['system']['detail'][0]['items'][0]['nano']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': data['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': data['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
from_ver = min(I['build'] for I in data['info']['pubVers'])
for I in data['info']['productVers']:
if not I['version'].startswith(version):
continue
if not major or not minor:
majorTmp = f"&major={I['version'].split('.')[0]}" if len(I['version'].split('.')) > 0 else ""
minorTmp = f"&minor={I['version'].split('.')[1]}" if len(I['version'].split('.')) > 1 else ""
try:
reqTmp = session.get(f"{urlInfo}&product={M.replace('+', '%2B')}{majorTmp}{minorTmp}", timeout=10, verify=False)
reqTmp.encoding = "utf-8"
dataTmp = json.loads(reqTmp.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
build_ver = dataTmp['info']['system']['detail'][0]['items'][0]['build_ver']
build_num = dataTmp['info']['system']['detail'][0]['items'][0]['build_num']
buildnano = dataTmp['info']['system']['detail'][0]['items'][0]['nano']
V = __fullversion(f"{build_ver}-{build_num}-{buildnano}")
if V not in pats[M]:
pats[M][V] = {
'url': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['url'].split('?')[0],
'sum': dataTmp['info']['system']['detail'][0]['items'][0]['files'][0]['checksum']
}
for J in I['versions']:
to_ver = J['build']
try:
reqSteps = session.get(f"{urlSteps}&product={M.replace('+', '%2B')}&from_ver={from_ver}&to_ver={to_ver}", timeout=10, verify=False)
if reqSteps.status_code != 200:
continue
reqSteps.encoding = "utf-8"
dataSteps = json.loads(reqSteps.text)
except Exception as e:
click.echo(f"Error: {e}")
continue
for S in dataSteps['upgrade_steps']:
if not S.get('full_patch') or not S['build_ver'].startswith(version):
continue
V = __fullversion(f"{S['build_ver']}-{S['build_num']}-{S['nano']}")
if V not in pats[M]:
pats[M][V] = {
'url': S['files'][0]['url'].split('?')[0],
'sum': S['files'][0]['checksum']
}
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(pats, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Model", "version", "url", "sum"])
for k1, v1 in pats.items():
for k2, v2 in v1.items():
ws.append([k1, k2, v2["url"], v2["sum"]])
wb.save(xlsxpath)


@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getaddons(workpath, jsonpath, xlsxpath):
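    # Scan every addons/*/manifest.yml under the p3 partition and export each addon's name, system flag, and description.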
AS = glob.glob(os.path.join(workpath, "mnt", "p3", "addons", "*", "manifest.yml"))
AS.sort()
addons = {}
for A in AS:
with open(A, "r") as file:
A_data = yaml.safe_load(file)
A_name = A_data.get("name", "")
A_system = A_data.get("system", False)
A_description = A_data.get("description", {"en_US": "Unknown", "zh_CN": "Unknown"})
addons[A_name] = {"system": A_system, "description": A_description}
if jsonpath:
with open(jsonpath, "w") as f:
json.dump(addons, f, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Name", "system", "en_US", "zh_CN"])
for k1, v1 in addons.items():
ws.append([k1, v1.get("system", False), v1.get("description").get("en_US", ""), v1.get("description").get("zh_CN", "")])
wb.save(xlsxpath)


@cli.command()
@click.option("-w", "--workpath", type=str, required=True, help="The workpath of RR.")
@click.option("-j", "--jsonpath", type=str, required=True, help="The output path of jsonfile.")
@click.option("-x", "--xlsxpath", type=str, required=False, help="The output path of xlsxfile.")
def getmodules(workpath, jsonpath, xlsxpath):
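    # Unpack each modules/*.tgz from the p3 partition and record the description and depends fields of every .ko via kmodule.modinfo.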
MS = glob.glob(os.path.join(workpath, "mnt", "p3", "modules", "*.tgz"))
MS.sort()
modules = {}
TMP_PATH = "/tmp/modules"
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
for M in MS:
M_name = os.path.splitext(os.path.basename(M))[0]
M_modules = {}
os.makedirs(TMP_PATH)
with tarfile.open(M, "r") as tar:
tar.extractall(TMP_PATH)
KS = glob.glob(os.path.join(TMP_PATH, "*.ko"))
KS.sort()
for K in KS:
K_name = os.path.splitext(os.path.basename(K))[0]
K_info = kmodule.modinfo(K, basedir=os.path.dirname(K), kernel=None)[0]
K_description = K_info.get("description", "")
K_depends = K_info.get("depends", "")
M_modules[K_name] = {"description": K_description, "depends": K_depends}
modules[M_name] = M_modules
if os.path.exists(TMP_PATH):
shutil.rmtree(TMP_PATH)
if jsonpath:
with open(jsonpath, "w") as file:
json.dump(modules, file, indent=4, ensure_ascii=False)
if xlsxpath:
wb = Workbook()
ws = wb.active
ws.append(["Name", "Arch", "description", "depends"])
for k1, v1 in modules.items():
for k2, v2 in v1.items():
ws.append([k2, k1, v2["description"], v2["depends"]])
wb.save(xlsxpath)


if __name__ == "__main__":
cli()
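
The pats table written above maps each model name to its available DSM versions, and each version to a direct .pat download URL plus its checksum. A minimal sketch of reading that JSON back, assuming the example path, model, and version keys below (they are illustrative placeholders, not guaranteed entries):

import json

# Load the table written by `func.py getpats -j <jsonpath>` (example path used here).
with open("docs/pats.json") as f:
    pats = json.load(f)

# Hypothetical lookup: print the download URL and checksum for one model/version pair.
model, version = "DS3622xs+", "7.2.2-72806-0"  # placeholder keys
entry = pats.get(model, {}).get(version)
if entry:
    print(entry["url"], entry["sum"])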

View File

@@ -21,7 +21,7 @@ function convertpo2mo() {
# Use msgfmt command to compile the .po file into a binary .mo file
echo "msgfmt ${P} to ${P/.po/.mo}"
msgfmt "${P}" -o "${P/.po/.mo}"
done <<<"$(find "${DEST_PATH}" -type f -name 'rr.po')"
done <<<$(find "${DEST_PATH}" -type f -name 'rr.po')
echo "Convert po2mo end"
}
@@ -37,8 +37,7 @@ function getExtractor() {
# global.synologydownload.com, global.download.synology.com, cndl.synology.cn
local PAT_URL="https://global.synologydownload.com/download/DSM/release/7.0.1/42218/DSM_DS3622xs%2B_42218.pat"
local PAT_FILE="DSM_DS3622xs+_42218.pat"
local STATUS
STATUS=$(curl -#L -w "%{http_code}" "${PAT_URL}" -o "${CACHE_DIR}/${PAT_FILE}")
local STATUS=$(curl -#L -w "%{http_code}" "${PAT_URL}" -o "${CACHE_DIR}/${PAT_FILE}")
if [ $? -ne 0 ] || [ "${STATUS:-0}" -ne 200 ]; then
echo "[E] DSM_DS3622xs%2B_42218.pat download error!"
rm -rf "${CACHE_DIR}"
@@ -85,12 +84,11 @@ function getBuildroot() {
fi
while read -r ID NAME; do
if [ "${NAME}" = "buildroot-${TAG}.zip" ]; then
local STATUS
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-buildroot/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<"$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')"
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-buildroot/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
# Unzip Buildroot
rm -rf "${CACHE_DIR}"
mkdir -p "${CACHE_DIR}"
@@ -119,12 +117,11 @@ function getCKs() {
fi
while read -r ID NAME; do
if [ "${NAME}" = "rr-cks-${TAG}.zip" ]; then
local STATUS
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-cks/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<"$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')"
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-cks/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip CKs
rm -rf "${DEST_PATH}"
@@ -150,12 +147,11 @@ function getLKMs() {
fi
while read -r ID NAME; do
if [ "${NAME}" = "rp-lkms-${TAG}.zip" ]; then
local STATUS
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-lkms/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<"$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')"
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-lkms/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip LKMs
rm -rf "${DEST_PATH}"
@@ -181,12 +177,11 @@ function getAddons() {
fi
while read -r ID NAME; do
if [ "${NAME}" = "addons-${TAG}.zip" ]; then
local STATUS
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-addons/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<"$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')"
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-addons/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
rm -rf "${DEST_PATH}"
mkdir -p "${DEST_PATH}"
@@ -196,15 +191,11 @@ function getAddons() {
unzip "${CACHE_FILE}" -d "${CACHE_DIR}"
echo "Installing addons to ${DEST_PATH}"
[ -f "/tmp/addons/VERSION" ] && cp -f "/tmp/addons/VERSION" "${DEST_PATH}/"
for F in ${CACHE_DIR}/*.addon; do
[ ! -e "${F}" ] && continue
ADDON=$(basename "${F}" .addon)
# shellcheck disable=SC2115
rm -rf "${DEST_PATH}/${ADDON}"
for PKG in "${CACHE_DIR}"/*.addon; do
ADDON=$(basename "${PKG}" .addon)
mkdir -p "${DEST_PATH}/${ADDON}"
echo "Extracting ${F} to ${DEST_PATH}/${ADDON}"
tar -xaf "${F}" -C "${DEST_PATH}/${ADDON}"
rm -f "${F}"
echo "Extracting ${PKG} to ${DEST_PATH}/${ADDON}"
tar -xaf "${PKG}" -C "${DEST_PATH}/${ADDON}"
done
rm -rf "${CACHE_DIR}"
rm -f "${CACHE_FILE}"
@@ -227,12 +218,11 @@ function getModules() {
fi
while read -r ID NAME; do
if [ "${NAME}" = "modules-${TAG}.zip" ]; then
local STATUS
STATUS=$(curl -kL -w "%{http_code}" -H "Authorization: token ${TOKEN}" -H "Accept: application/octet-stream" "${REPO}/rr-modules/releases/assets/${ID}" -o "${CACHE_FILE}")
echo "TAG=${TAG}; Status=${STATUS}"
[ ${STATUS:-0} -ne 200 ] && exit 1
fi
done <<<"$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')"
done <<<$(curl -skL -H "Authorization: Bearer ${TOKEN}" "${REPO}/rr-modules/releases/tags/${TAG}" | jq -r '.assets[] | "\(.id) \(.name)"')
[ ! -f "${CACHE_FILE}" ] && exit 1
# Unzip Modules
rm -rf "${DEST_PATH}"
@@ -260,8 +250,7 @@ function repackInitrd() {
local RDXZ_PATH="rdxz_tmp"
mkdir -p "${RDXZ_PATH}"
local INITRD_FORMAT
INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
local INITRD_FORMAT=$(file -b --mime-type "${INITRD_FILE}")
case "${INITRD_FORMAT}" in
*'x-cpio'*) (cd "${RDXZ_PATH}" && sudo cpio -idm <"${INITRD_FILE}") >/dev/null 2>&1 ;;
@@ -276,7 +265,7 @@ function repackInitrd() {
sudo cp -rf "${PLUGIN_PATH}/"* "${RDXZ_PATH}/"
[ -f "${OUTPUT_PATH}" ] && rm -rf "${OUTPUT_PATH}"
# shellcheck disable=SC2024
case "${INITRD_FORMAT}" in
*'x-cpio'*) (cd "${RDXZ_PATH}" && sudo find . 2>/dev/null | sudo cpio -o -H newc -R root:root >"${OUTPUT_PATH}") >/dev/null 2>&1 ;;
*'x-xz'*) (cd "${RDXZ_PATH}" && sudo find . 2>/dev/null | sudo cpio -o -H newc -R root:root | xz -9 -C crc32 -c - >"${OUTPUT_PATH}") >/dev/null 2>&1 ;;
@@ -314,23 +303,21 @@ function resizeImg() {
sudo truncate -s ${SIZE}M "${OUTPUT_FILE}"
echo -e "d\n\nn\n\n\n\n\nn\nw" | sudo fdisk "${OUTPUT_FILE}" >/dev/null 2>&1
local LOOPX LOOPXPY
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${OUTPUT_FILE}"
LOOPXPY="$(find "${LOOPX}p"* -maxdepth 0 2>/dev/null | sort -n | tail -1)"
sudo e2fsck -fp "${LOOPXPY:-${LOOPX}p3}"
sudo resize2fs "${LOOPXPY:-${LOOPX}p3}"
sudo losetup -d "${LOOPX}"
local LOOPX=$(sudo losetup -f)
sudo losetup -P ${LOOPX} "${OUTPUT_FILE}"
sudo e2fsck -fp $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
sudo resize2fs $(ls ${LOOPX}* 2>/dev/null | sort -n | tail -1)
sudo losetup -d ${LOOPX}
}
# createvmx
# $1 bootloader file
# $2 vmx name
function createvmx() {
local BLIMAGE=${1}
local VMNAME=${2}
BLIMAGE=${1}
VMNAME=${2}
if ! type qemu-img >/dev/null 2>&1; then
if ! command -v qemu-img &>/dev/null; then
sudo apt install -y qemu-utils
fi
@@ -410,10 +397,10 @@ _EOF_
function convertvmx() {
local BLIMAGE=${1}
local VMXPATH=${2}
local VMNAME
BLIMAGE="$(realpath "${BLIMAGE}")"
VMXPATH="$(realpath "${VMXPATH}")"
VMNAME="$(basename "${VMXPATH}" .vmx)"
local VMNAME="$(basename "${VMXPATH}" .vmx)"
createvmx "${BLIMAGE}" "${VMNAME}"
@@ -427,11 +414,10 @@ function convertvmx() {
function convertova() {
local BLIMAGE=${1}
local OVAPATH=${2}
local VMNAME
BLIMAGE="$(realpath "${BLIMAGE}")"
OVAPATH="$(realpath "${OVAPATH}")"
VMNAME="$(basename "${OVAPATH}" .ova)"
local VMNAME="$(basename "${OVAPATH}" .ova)"
createvmx "${BLIMAGE}" "${VMNAME}"

View File

@@ -6,17 +6,17 @@
# See /LICENSE for more information.
#
# sudo apt update
# sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils dosfstools cpio xz-utils lz4 lzma bzip2 gzip zstd
# sudo apt install -y locales busybox dialog gettext sed gawk jq curl
# sudo apt install -y python-is-python3 python3-pip libelf-dev qemu-utils cpio xz-utils lz4 lzma bzip2 gzip zstd
# # sudo snap install yq
# if ! type yq >/dev/null 2>&1 || ! yq --version 2>/dev/null | grep -q "v4."; then
# if ! command -v yq &>/dev/null || ! yq --version 2>/dev/null | grep -q "v4."; then
# sudo curl -kL https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 -o /usr/bin/yq && sudo chmod a+x /usr/bin/yq
# fi
#
#
# # Backup the original python3 executable.
# sudo mv -f "$(realpath $(which python3))/EXTERNALLY-MANAGED" "$(realpath $(which python3))/EXTERNALLY-MANAGED.bak" 2>/dev/null || true
# sudo pip3 install -U click requests requests-toolbelt qrcode[pil] beautifulsoup4
#
#
# sudo locale-gen ar_SA.UTF-8 de_DE.UTF-8 en_US.UTF-8 es_ES.UTF-8 fr_FR.UTF-8 ja_JP.UTF-8 ko_KR.UTF-8 ru_RU.UTF-8 th_TH.UTF-8 tr_TR.UTF-8 uk_UA.UTF-8 vi_VN.UTF-8 zh_CN.UTF-8 zh_HK.UTF-8 zh_TW.UTF-8
#
# export TOKEN="${1}"
@@ -42,12 +42,10 @@ convertpo2mo "files/initrd/opt/rr/lang"
repackInitrd "files/mnt/p3/initrd-rr" "files/initrd"
if [ -n "${1}" ]; then
LOADER_DISK="LOCALBUILD"
CHROOT_PATH="$(realpath files)"
export LOADER_DISK="LOCALBUILD"
export CHROOT_PATH="${CHROOT_PATH}"
export CHROOT_PATH="$(realpath files)"
(
cd "${CHROOT_PATH}/initrd/opt/rr" || exit 1
cd "${CHROOT_PATH}/initrd/opt/rr"
./init.sh
./menu.sh modelMenu "${1}"
./menu.sh productversMenu "${2:-7.2}"
@@ -63,14 +61,9 @@ fdisk -l "${IMAGE_FILE}"
LOOPX=$(sudo losetup -f)
sudo losetup -P "${LOOPX}" "${IMAGE_FILE}"
# Check partitions and ignore errors
fsck.vfat -aw "${LOOPX}p1" >/dev/null 2>&1 || true
fsck.ext2 -p "${LOOPX}p2" >/dev/null 2>&1 || true
fsck.ext4 -p "${LOOPX}p3" >/dev/null 2>&1 || true
for i in {1..3}; do
[ ! -d "files/mnt/p${i}" ] && continue
rm -rf "/tmp/mnt/p${i}"
mkdir -p "/tmp/mnt/p${i}"
@@ -110,8 +103,8 @@ while read -r F; do
zip -9j "update.zip" "${FTGZ}"
rm -f "${FTGZ}"
else
(cd "$(dirname "${F}")" && sha256sum "$(basename "${F}")") >>sha256sum
(cd $(dirname "${F}") && sha256sum $(basename "${F}")) >>sha256sum
zip -9j "update.zip" "${F}"
fi
done <<<"$(yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.yml)"
done <<<$(yq '.replace | explode(.) | to_entries | map([.key])[] | .[]' update-list.yml)
zip -9j "update.zip" sha256sum