diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100755 index 0000000..f670969 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,404 @@ +name: a1to5x + +on: + workflow_dispatch: + inputs: + mode: + description: 'Download mode (aria2c, curl, axel, wget)' + required: true + default: 'aria2c' + output: + description: 'Output filename flag letter (o; use O for wget)' + required: true + default: 'o' + filename: + description: 'ROM NAME (optional)' + filelink: + description: 'ROM LINK (Google Drive links supported)' + required: true + release: + description: 'Upload ROM to Release' + required: true + default: 'true' + anon: + description: 'Upload ROM to Anonfile' + required: true + default: 'true' + we: + description: 'Upload ROM to WeTransfer' + required: true + default: 'true' + muse: + description: 'Upload ROM to MuseTransfer' + required: true + default: 'true' + fstab: + description: 'Fstab patching mode (dd or i)' + required: true + default: 'dd' + flags: + description: 'Add extra fstab entries (true/false)' + required: true + default: 'true' + +jobs: + a1to5x: + runs-on: ubuntu-18.04 + continue-on-error: false + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Initialization environment + run: | + sudo apt-get update -y + sudo apt-get install -y git wget curl aria2 python python3 python3-setuptools unzip axel cpio file + pip3 install protobuf + pip3 install gdown + + - id: var + name: Download + run: | + echo "Download" + if [[ ${{ github.event.inputs.filelink }} == *"drive.google.com"* ]]; then + if [[ ${{ github.event.inputs.filelink }} == *"file"* ]]; then + DOWNLOAD_LINK=$(echo ${{ github.event.inputs.filelink }} | cut -d / -f 6) + echo "DOWNLOAD_LINK: $DOWNLOAD_LINK" + gdown https://drive.google.com/uc?id=${DOWNLOAD_LINK} + elif [[ ${{ github.event.inputs.filelink }} == *"id"* ]]; then + DOWNLOAD_LINK1=$(echo ${{ github.event.inputs.filelink }} | cut -d = -f 2 | cut -d '&' -f 1) + DOWNLOAD_LINK2=$(echo ${{ github.event.inputs.filelink }} | cut -d = -f 4) + echo "DOWNLOAD_LINK1: $DOWNLOAD_LINK1" + echo "DOWNLOAD_LINK2: $DOWNLOAD_LINK2" + gdown https://drive.google.com/uc?id=${DOWNLOAD_LINK1} || gdown https://drive.google.com/uc?id=${DOWNLOAD_LINK2} + fi + else + if [[ !
-z "${{ github.event.inputs.filename }}" ]]; then + FILENAME="${{ github.event.inputs.filename }}" + ${{ github.event.inputs.mode }} ${{ github.event.inputs.filelink }} -${{ github.event.inputs.output }} "$FILENAME" + else + ${{ github.event.inputs.mode }} ${{ github.event.inputs.filelink }} + fi + fi + ls + FILENAME="$(ls -lS * | head -1 | awk '{print $9}')" + echo "FILENAME: $FILENAME" + FILENAME=$(basename ${FILENAME}) + NAME=$(basename $FILENAME .zip) + echo "::set-output name=filename::$FILENAME" + echo "::set-output name=name::$NAME" + + - name: Payload Extracting + run: | + echo "Payload Extracting" + unzip ${{ steps.var.outputs.filename }} payload.bin + rm -rf ${{ steps.var.outputs.filename }} + python3 update-payload-extractor/extract.py payload.bin --output_dir output/ + rm -rf payload.bin + ls + + - name: Fstab Patching + run: | + echo "system" + echo "**************************************" + ls -al + echo "**************************************" + ls -al output/system.img + sudo mkdir system + sudo mount output/system.img system + sudo du -sb system + cat system/system/vendor/etc/fstab.qcom + sudo cp -f system/system/vendor/etc/fstab.qcom fstab.qcom + fstab="system/system/vendor/etc/fstab.qcom" + echo "**************************************" + ls -al $fstab + echo "**************************************" + if [[ "${{ github.event.inputs.fstab }}" == "i" ]]; then + sudo sed -i 's/,slotselect//g' $fstab + sudo sed -i 's/,verify//g' $fstab + sudo sed -i 's/,avb//g' $fstab + sudo sed -i 's/forceencrypt/encryptable/g' $fstab + sudo sed -i 's/fileencryption=ice/encryptable=footer/g' $fstab + if [[ "${{ github.event.inputs.flags }}" == "true" ]]; then + sudo sed -i '/mnt_point/{n;/system/d;}' $fstab + sudo sed -i '/mnt_point/a\/dev/block/bootdevice/by-name/system /system ext4 ro wait,recoveryonly' $fstab + sudo sed -i '/mnt_point/G' $fstab + sudo sed -i '/mnt_point/a\/dev/block/bootdevice/by-name/recovery /recovery emmc defaults defaults' $fstab + sudo sed -i '/mnt_point/a\/dev/block/bootdevice/by-name/boot /boot emmc defaults defaults' $fstab + sudo sed -i '/persist/i\/dev/block/bootdevice/by-name/cache /cache f2fs nosuid,nodev,noatime,inline_xattr,flush_merge,data_flush wait,formattable,check' $fstab + sudo sed -i '/cache/a\/dev/block/bootdevice/by-name/cache /cache ext4 nosuid,nodev,noatime wait,formattable,check' $fstab + sudo sed -i '/modem/d' $fstab + sudo sed -i '/misc/i\/dev/block/bootdevice/by-name/modem /vendor/firmware_mnt vfat ro,context=u:object_r:firmware_file:s0,shortname=lower,uid=1000,gid=1000,dmask=227,fmask=337 wait' $fstab + fi + elif [[ "${{ github.event.inputs.fstab }}" == "dd" ]]; then + sudo chmod 777 $fstab + num_bytes=$(sudo sed 's/,slotselect//g' $fstab | wc -c) + sudo sed 's/,slotselect//g' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed 's/,verify//g' $fstab | wc -c) + sudo sed 's/,verify//g' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed 's/,avb//g' $fstab | wc -c) + sudo sed 's/,avb//g' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed 's/forceencrypt/encryptable/g' $fstab | wc -c) + sudo sed 's/forceencrypt/encryptable/g' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed 's/fileencryption=ice/encryptable=footer/g' $fstab | wc -c) + sudo sed 
's/fileencryption=ice/encryptable=footer/g' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + if [[ "${{ github.event.inputs.flags }}" == "true" ]]; then + num_bytes=$(sudo sed '/mnt_point/{n;/system/d;}' $fstab | wc -c) + sudo sed '/mnt_point/{n;/system/d;}' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/mnt_point/a\/dev/block/bootdevice/by-name/system /system ext4 ro wait,recoveryonly' $fstab | wc -c) + sudo sed '/mnt_point/a\/dev/block/bootdevice/by-name/system /system ext4 ro wait,recoveryonly' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/mnt_point/G' $fstab | wc -c) + sudo sed '/mnt_point/G' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/mnt_point/a\/dev/block/bootdevice/by-name/recovery /recovery emmc defaults defaults' $fstab | wc -c) + sudo sed '/mnt_point/a\/dev/block/bootdevice/by-name/recovery /recovery emmc defaults defaults' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/mnt_point/a\\n/dev/block/bootdevice/by-name/boot /boot emmc defaults defaults' $fstab | wc -c) + sudo sed '/mnt_point/a\/dev/block/bootdevice/by-name/boot /boot emmc defaults defaults' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/persist/i\/dev/block/bootdevice/by-name/cache /cache f2fs nosuid,nodev,noatime,inline_xattr,flush_merge,data_flush wait,formattable,check' $fstab | wc -c) + sudo sed '/persist/i\/dev/block/bootdevice/by-name/cache /cache f2fs nosuid,nodev,noatime,inline_xattr,flush_merge,data_flush wait,formattable,check' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/cache/a\\n/dev/block/bootdevice/by-name/cache /cache ext4 nosuid,nodev,noatime wait,formattable,check' $fstab | wc -c) + sudo sed '/cache/a\/dev/block/bootdevice/by-name/cache /cache ext4 nosuid,nodev,noatime wait,formattable,check' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/modem/d' $fstab | wc -c) + sudo sed '/modem/d' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '/dsp/a\/dev/block/bootdevice/by-name/modem /vendor/firmware_mnt vfat ro,context=u:object_r:firmware_file:s0,shortname=lower,uid=1000,gid=1000,dmask=227,fmask=337 wait' $fstab | wc -c) + sudo sed '/dsp/a\/dev/block/bootdevice/by-name/modem /vendor/firmware_mnt vfat ro,context=u:object_r:firmware_file:s0,shortname=lower,uid=1000,gid=1000,dmask=227,fmask=337 wait' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + num_bytes=$(sudo sed '$d' $fstab | wc -c) + sudo sed '$d' $fstab 1<> $fstab + sudo dd if=/dev/null of=$fstab bs="$num_bytes" seek=1 + echo "---------------------" + fi + sudo chmod 644 $fstab + ls -al fstab.qcom + echo "**************************************" + ls -al $fstab + echo "**************************************" + fi + cat system/system/vendor/etc/fstab.qcom + sudo cp system/system/vendor/etc/fstab.qcom output/fstab.qcom + echo "Patching fstab" + echo "**************************************"
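+ # Note: "sed '...' $fstab 1<> $fstab" rewrites the file in place through a read/write redirection, and the dd that follows truncates it to the freshly counted length; unlike sed -i (which writes a temp file and renames it), this keeps the original inode on the loop-mounted system.img, presumably the reason this mode is the default.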
+ diff fstab.qcom output/fstab.qcom || true + echo "**************************************" + sudo du -sb system + sudo umount system + sudo rm -rf system + ls -al output/system.img + + - name: Patching boot + run: | + echo "Patching boot" + mv output/boot.img mkbootimg/ + cd mkbootimg + ./mkboot boot.img boot + rm boot.img + cd boot + echo "Listing all files under boot" + echo "**************************************" + ls -al + echo "**************************************" + echo "Patching img_info" + cp -f img_info ../img_info + sed -i 's/veritykeyid=id:\w*//g' img_info + echo "**************************************" + diff img_info ../img_info || true + echo "**************************************" + mv kernel image.gz-dtb + ../split-appended-dtb image.gz-dtb + rm image.gz-dtb + for i in `find *.dtb`; do + if [[ ! -z "$(../../fdtget $i /firmware/android/fstab -p 2>/dev/null || true)" ]]; then + echo "**************************************" + ../../magiskboot dtb $i print -f + echo "**************************************" + ../../fdtget $i /firmware/android/vbmeta parts 2>/dev/null || true + ../../fdtput $i -r /firmware/android/vbmeta + ../../fdtput $i /firmware/android/fstab/system status -d + ../../fdtput $i /firmware/android/fstab/system fsmgr_flags -d + ../../fdtput $i /firmware/android/fstab/system status ok -ts + ../../fdtput $i /firmware/android/fstab/system fsmgr_flags wait -ts + ../../fdtput $i /firmware/android/fstab/system mnt_flags ro,barrier=1,discard -ts + ../../fdtput $i /firmware/android/fstab/system type ext4 -ts + ../../fdtput $i /firmware/android/fstab/system dev /dev/block/platform/soc/7824900.sdhci/by-name/system -ts + ../../fdtput $i /firmware/android/fstab/system compatible android,system -ts + ../../fdtput $i /firmware/android/fstab/vendor fsmgr_flags wait -ts + ../../fdtput $i /firmware/android/fstab/vendor status disable -ts + echo "Listing nodes under android" + ../../fdtget $i /firmware/android/vbmeta -l 2>/dev/null || true + ../../fdtget $i /firmware/android/vbmeta parts 2>/dev/null || true + echo "**************************************" + ../../magiskboot dtb $i print -f + echo "**************************************" + dtb=${i##* } + dts=${dtb%.*}.dts + echo "dtb $dtb" + echo "dts $dts" + ../../dtc -q -I dtb -O dts -o $dts $dtb + line="$(grep -n "firmware {" $dts)" + linea="$(echo $line | cut -d ":" -f 1)" + line="$(grep -n "reserved-memory" $dts)" + lineb="$(echo $line | cut -d ":" -f 1)" + lineb="`expr $lineb - 1`""p" + echo "**************************************" + sed -n "$linea,$lineb" $dts + echo "**************************************" + mv $dts ../../output/$dts + fi + done + echo "**************************************" + ls -al + echo "**************************************" + cat kernel *.dtb > image.gz-dtb + rm -f *.dtb + rm -f kernel + mv image.gz-dtb kernel + echo "Listing all files" + echo "**************************************" + ls -al + echo "**************************************" + cd .. + ./mkboot boot boot.img + cd ..
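+ # Next, magiskboot unpacks the rebuilt image and swaps the stock ramdisk for the repo's prebuilt cpio/ramdisk-sar.cpio before repacking.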
+ mv mkbootimg/boot.img tmp/boot.img + cd tmp + ../magiskboot unpack -h boot.img + echo "Listing files under tmp" + echo "**************************************" + ls -al + echo "**************************************" + echo "Replacing ramdisk.cpio" + echo "**************************************" + diff ../cpio/ramdisk-sar.cpio ramdisk.cpio || true + echo "**************************************" + cp -f ../cpio/ramdisk-sar.cpio ramdisk.cpio + ../magiskboot repack boot.img + echo "Repacking boot" + diff new-boot.img boot.img || true + echo "**************************************" + rm -rf boot.img + echo "Listing all files" + echo "**************************************" + ls -al + echo "**************************************" + mv new-boot.img ../output/boot.img + cd .. + + - name: Packing + run: | + mv output/boot.img template/boot.img + mv output/system.img template/system.img + cd template + echo "**************************************" + ls -al + echo "**************************************" + zip -q -r -9 ${{ steps.var.outputs.filename }} * + cd .. + mv template/${{ steps.var.outputs.filename }} ./ + echo "**************************************" + ls -al + echo "**************************************" + echo "" >> boby.txt + echo "> **Downloads:**" >> boby.txt + echo "" >> boby.txt + + - name: Upload ROM to Artifact + uses: actions/upload-artifact@v2 + with: + name: output + path: | + output + retention-days: 7 + + - name: Upload ROM to Anonfile + if: github.event.inputs.anon == 'true' + continue-on-error: true + run: | + echo "Upload ROM to Anonfile" + curl -fsSL git.io/file-transfer | sh + anontrans=$(./transfer anon --no-progress ${{ steps.var.outputs.filename }}) + anontrans=$(echo $anontrans | grep -o -E "https[^ ]*") + echo "Anonfile: $anontrans" + echo "- [ ] Anonfile:" >> boby.txt + echo " - $anontrans" >> boby.txt + + - name: Upload ROM to WeTransfer + if: github.event.inputs.we == 'true' + continue-on-error: true + run: | + echo "Upload to WeTransfer" + curl -fsSL git.io/file-transfer | sh + wetrans=$(./transfer wet -s -p 16 --no-progress ${{ steps.var.outputs.filename }}) + wetrans=$(echo $wetrans | grep -o -E "https[^ ]*") + echo "WeTransfer: $wetrans" + echo "- [ ] WeTransfer:" >> boby.txt + echo " - $wetrans" >> boby.txt + + - name: Upload ROM to MuseTransfer + if: github.event.inputs.muse == 'true' + continue-on-error: true + run: | + echo "Upload to MuseTransfer" + curl -fsSL git.io/file-transfer | sh + musetrans=$(./transfer muse -s -p 12 --no-progress ${{ steps.var.outputs.filename }}) + musetrans=$(echo $musetrans | grep -o -E "https[^ ]*") + echo "MuseTransfer: $musetrans" + echo "- [ ] MuseTransfer:" >> boby.txt + echo " - $musetrans" >> boby.txt + MD5SUM=$(md5sum ${{ steps.var.outputs.filename }} | cut -d' ' -f1) + SHA1SUM=$(sha1sum ${{ steps.var.outputs.filename }} | cut -d' ' -f1) + SHA256SUM=$(sha256sum ${{ steps.var.outputs.filename }} | cut -d' ' -f1) + echo "MD5SUM: $MD5SUM" + echo "SHA1SUM: $SHA1SUM" + echo "SHA256SUM: $SHA256SUM" + echo "" >> boby.txt + echo "> Verify:" >> boby.txt + echo "" >> boby.txt + echo "- [ ] MD5SUM:" >> boby.txt + echo "" >> boby.txt + echo " $MD5SUM" >> boby.txt + echo "- [ ] SHA1SUM:" >> boby.txt + echo "" >> boby.txt + echo " $SHA1SUM" >> boby.txt + echo "- [ ] SHA256SUM:" >> boby.txt + echo "" >> boby.txt + echo " $SHA256SUM" >> boby.txt + echo "" >> boby.txt + + - name: Upload ROM to Release + if: github.event.inputs.release == 'true' + uses: softprops/action-gh-release@v1 + with: + files: ${{ steps.var.outputs.filename }} + name: ${{ steps.var.outputs.name }}
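+ # github.run_id is used as the tag so each workflow run publishes its own release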
+ tag_name: ${{ github.run_id }} + body_path: boby.txt + env: + GITHUB_TOKEN: ${{ secrets.RELEASE }} + diff --git a/README.md b/README.md new file mode 100755 index 0000000..ee0d9ba --- /dev/null +++ b/README.md @@ -0,0 +1,27 @@ +# You only need the A1 ROM link to port the ROM to the 5x + +**How to use a1to5x to convert an A1 package into a flashable package for the 5X** + +* Log in to your GitHub account and fork this project + +## Instructions +### 1. Create a token +Go to your `repo settings > Developer settings > Personal access tokens > Generate new token` +- `note`: enter RELEASE +- `Expiration`: set to No expiration +- `Select scopes`: check everything +- `Generate token`: click to create the token +- `token`: copy the generated token + +### 2. Add a secret +Go to your `repo settings > secrets > new repository secret` +- `Name`: enter RELEASE +- `Value`: paste the token you just copied + +### 3. Running workflow +- Go to your repo's `Actions` tab +- Select the `a1to5x` workflow +- Open `Run workflow` +- Enter the ROM file name under `ROM NAME` (optional) +- Enter the ROM link under `ROM LINK` +- Click the `Run workflow` button to run the workflow diff --git a/boby.txt b/boby.txt new file mode 100644 index 0000000..dee5d31 --- /dev/null +++ b/boby.txt @@ -0,0 +1,13 @@ +> **First Time Installation:** + +- [ ] Format data (if encrypted) +- [ ] Reboot to recovery +- [ ] Back up your efs, persist and modem partitions (this backs up the 5X baseband) +- [ ] Wipe system, vendor, cache, dalvik, data +- [ ] Flash the ROM zip and Magisk (optional) +- [ ] Flash [A1-V10.0.24.0_Pie_Baseband.zip](https://t.me/TiffanyUpdates/523?single)**(First Time)** +- [ ] Reboot & Enjoy + +> **Notes:** + +- [ ] To restore the 5X baseband, use TWRP to restore the efs, persist and modem backups. diff --git a/cpio/ramdisk-sar.cpio b/cpio/ramdisk-sar.cpio new file mode 100644 index 0000000..2ac7c48 Binary files /dev/null and b/cpio/ramdisk-sar.cpio differ diff --git a/dtc b/dtc new file mode 100755 index 0000000..fe711b1 Binary files /dev/null and b/dtc differ diff --git a/fdtget b/fdtget new file mode 100755 index 0000000..f0d1853 Binary files /dev/null and b/fdtget differ diff --git a/fdtput b/fdtput new file mode 100755 index 0000000..99fd4f0 Binary files /dev/null and b/fdtput differ diff --git a/magiskboot b/magiskboot new file mode 100755 index 0000000..2dfbb8d Binary files /dev/null and b/magiskboot differ diff --git a/mkbootimg/dtbTool b/mkbootimg/dtbTool new file mode 100755 index 0000000..f9b7cb8 Binary files /dev/null and b/mkbootimg/dtbTool differ diff --git a/mkbootimg/dtbToolCM b/mkbootimg/dtbToolCM new file mode 100755 index 0000000..40c691e Binary files /dev/null and b/mkbootimg/dtbToolCM differ diff --git a/mkbootimg/dtc b/mkbootimg/dtc new file mode 100755 index 0000000..22cdad7 Binary files /dev/null and b/mkbootimg/dtc differ diff --git a/mkbootimg/lz4 b/mkbootimg/lz4 new file mode 100755 index 0000000..d28446b Binary files /dev/null and b/mkbootimg/lz4 differ diff --git a/mkbootimg/mkboot b/mkbootimg/mkboot new file mode 100755 index 0000000..f970496 --- /dev/null +++ b/mkbootimg/mkboot @@ -0,0 +1,305 @@ +#!/bin/bash +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
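+# mkboot either unpacks a boot.img into kernel, decompressed ramdisk and optional second/dt images (recording the header fields in img_info), or rebuilds a flashable boot.img from such an unpacked directory.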
+# +#boot.img tool +#by xiaolu +trap "clean" 2 3 4 +workdir=$(pwd) +toolpath=$(readlink -f $0) +tooldir=$(dirname $toolpath) +mkbootimg=$tooldir/mkbootimg +mkbootfs=$tooldir/mkbootfs +C_OUT="\033[0;1m" +C_ERR="\033[31;1m" +C_CLEAR="\033[0;0m" + +pout() { + printf "${C_OUT}${*}${C_CLEAR}\n" +} +perr() { + printf "${C_ERR}${*}${C_CLEAR}\n" +} +clean() +{ + rm -rf /tmp/mkboot.* + pout "..." + exit +} + +unpack_complete() +{ + [ ! -z $format ] && echo format=$format >> ../img_info + pout "Unpack completed." + exit +} + +zip_command() +{ + if [ "$1" == lzop ]; then + zcommand="lzop -n -f -9" + elif [ "$1" == lz4 ]; then + zcommand="lz4 -f -9" + elif [ "$1" == lzma ]; then + zcommand="lzma -f -c" + elif [ "$1" == xz ]; then + zcommand="xz -f -c" + else + zcommand="gzip -n -f" + fi +} + +usage() +{ + pout "" + pout "----------------------------------------------------------------------" + pout "Not enough parameters or parameter error!" + pout "unpack boot.img & decompress ramdisk:\n $(basename $0) [img] [output dir]" + pout " $(basename $0) boot.img boot20130905" + pout "Repack boot.img from an unpacked directory (img_info):\n $(basename $0) [unpacked dir] [newbootfile]" + pout " $(basename $0) boot20130905 newboot.img" + clean +} + +print_info() +{ + [ ! -z "$board" ] && pout " board : $board" + pout " kernel : $kernel" + pout " ramdisk : $ramdisk" + pout " page size : $page_size" + pout " kernel size : $kernel_size" + pout " ramdisk size : $ramdisk_size" + [ ! -z $second_size ] && [ $second_size -gt 0 ] && \ + pout " second_size : $second_size" + [ $dtb_size -gt 0 ] && pout " dtb size : $dtb_size" + pout " base : $base_addr" + pout " kernel offset : $kernel_offset" + pout " ramdisk offset : $ramdisk_offset" + [ ! -z $second_size ] && [ $second_size -gt 0 ] && \ + pout " second_offset : $second_offset" + pout " tags offset : $tags_offset" + [ $dtb_size -gt 0 ] && pout " dtb img : $dt" + pout " cmd line : $cmd_line" +} + +mkboot_img() +{ + error=0 + [ $second_size -gt 0 ] && second="--second ${second}" + [ $dtb_size -gt 0 ] && dtb="--dt ${dt}" + + $mkbootimg --kernel $kernel --ramdisk $ramdisk --board "$board" \ + --base $base_addr --ramdisk_offset $ramdisk_offset \ + --tags_offset $tags_offset --cmdline "$cmd_line" \ + --pagesize $page_size $second $dtb -o $1 || error=1 + + [ $error -eq 1 ] && return $error + ramdisk_size=$(stat -c "%s" $ramdisk) + boot_size=$(stat -c "%s" $1) + pout "Kernel size: $kernel_size, new ramdisk size: $ramdisk_size, $(basename $1): $boot_size." + pout "$(basename $1) has been created." +} + +#decide action +[ $# -lt 2 ] || [ $# -gt 3 ] && usage +if [ $# -eq 2 ] && [ -d $1 ]; then + mkboot_from_dir=1 +elif [ $# -eq 2 ] && [ -s $1 ]; then + split_boot_to_dir=1 +else + usage +fi + +#mkboot_from_dir, img_info +if [ ! -z $mkboot_from_dir ]; then + pout "mkbootimg from $1/img_info." + unpacked_dir=$(readlink -f $1) + new_img=$(readlink -f $2) + cd $unpacked_dir + if [ ! -s img_info ]; then + pout "img_info file not found! Can't rebuild the image." + clean + fi + eval $(cat img_info) + if [ -z $kernel ] || [ -z $ramdisk ] || [ -z $base_addr ]; then + pout "img_info file does not have enough parameters." + clean + fi + [ -z $second_size ] && second_size=0 + [ -z $dtb_size ] && dtb_size=0 + if [ -d $ramdisk ]; then + [ -z $format ] && format=gzip + zip_command $format + #cd $ramdisk; find .
| fakeroot cpio -R 0:0 -H newc -o 2>/dev/null \ + # | $zcommand > $unpacked_dir/new_ramdisk; cd $unpacked_dir + $mkbootfs $ramdisk | $zcommand > new_ramdisk + ramdisk=new_ramdisk + ramdisk_size=$(stat -c "%s" $ramdisk) + fi + print_info + pout "ramdisk is $format format." + rm -f $new_img + mkboot_img $new_img || perr "Make boot.img error! Please check the img_info file." + #pout "Add SEANDROIDENFORCE tag." + #printf SEANDROIDENFORCE >> $new_img + rm -f new_ramdisk + clean +fi + +#split boot.img to dir. +if [ -e $2 ]; then + read -p "$2 exists, delete? (N/y)" reply + case $reply in + y | Y) + rm -rf $2 + ;; + *) + exit + ;; + esac +fi +tempdir="$(readlink -f $2)" +mkdir -p $tempdir +pout "Unpack & decompress $1 to $2" + +#get boot.img info +cp -f $1 $tempdir/ +cd $tempdir +bootimg=$(basename $1) +offset=$(grep -abo ANDROID! $bootimg | cut -f 1 -d :) +[ -z $offset ] && clean +if [ $offset -gt 0 ]; then + dd if=$bootimg of=bootimg bs=$offset skip=1 2>/dev/null + bootimg=bootimg +fi + +kernel_addr=0x$(od -A n -X -j 12 -N 4 $bootimg | sed 's/ //g' | sed 's/^0*//g') +ramdisk_addr=0x$(od -A n -X -j 20 -N 4 $bootimg | sed 's/ //g' | sed 's/^0*//g') +second_addr=0x$(od -A n -X -j 28 -N 4 $bootimg | sed 's/ //g' | sed 's/^0*//g') +tags_addr=0x$(od -A n -X -j 32 -N 4 $bootimg | sed 's/ //g' | sed 's/^0*//g') + +kernel_size=$(od -A n -D -j 8 -N 4 $bootimg | sed 's/ //g') +#base_addr=0x$(od -A n -x -j 14 -N 2 $bootimg | sed 's/ //g')0000 +ramdisk_size=$(od -A n -D -j 16 -N 4 $bootimg | sed 's/ //g') +second_size=$(od -A n -D -j 24 -N 4 $bootimg | sed 's/ //g') +page_size=$(od -A n -D -j 36 -N 4 $bootimg | sed 's/ //g') +dtb_size=$(od -A n -D -j 40 -N 4 $bootimg | sed 's/ //g') +#cmd_line=$(od -A n --strings -j 64 -N 512 $bootimg) +#board=$(od -A n --strings -j 48 -N 16 $bootimg) +cmd_line=$(od -A n -S1 -j 64 -N 512 $bootimg) +board=$(od -A n -S1 -j 48 -N 16 $bootimg) + +base_addr=$((kernel_addr-0x00008000)) +kernel_offset=$((kernel_addr-base_addr)) +ramdisk_offset=$((ramdisk_addr-base_addr)) +second_offset=$((second_addr-base_addr)) +tags_offset=$((tags_addr-base_addr)) + +base_addr=$(printf "%08x" $base_addr) +kernel_offset=$(printf "%08x" $kernel_offset) +ramdisk_offset=$(printf "%08x" $ramdisk_offset) +second_offset=$(printf "%08x" $second_offset) +tags_offset=$(printf "%08x" $tags_offset) + +base_addr=0x${base_addr:0-8} +kernel_offset=0x${kernel_offset:0-8} +ramdisk_offset=0x${ramdisk_offset:0-8} +second_offset=0x${second_offset:0-8} +tags_offset=0x${tags_offset:0-8} + +k_count=$(((kernel_size+page_size-1)/page_size)) +r_count=$(((ramdisk_size+page_size-1)/page_size)) +s_count=$(((second_size+page_size-1)/page_size)) +d_count=$(((dtb_size+page_size-1)/page_size)) +k_offset=1 +r_offset=$((k_offset+k_count)) +s_offset=$((r_offset+r_count)) +d_offset=$((s_offset+s_count)) + +#kernel +dd if=$bootimg of=kernel_tmp bs=$page_size skip=$k_offset count=$k_count 2>/dev/null +dd if=kernel_tmp of=kernel bs=$kernel_size count=1 2>/dev/null +#ramdisk.packed +dd if=$bootimg of=ramdisk_tmp bs=$page_size skip=$r_offset count=$r_count 2>/dev/null +dd if=ramdisk_tmp of=ramdisk.packed bs=$ramdisk_size count=1 2>/dev/null +#second +if [ $second_size -gt 0 ]; then + dd if=$bootimg of=second.img.tmp bs=$page_size skip=$s_offset count=$s_count 2>/dev/null + dd if=second.img.tmp of=second.img bs=$second_size count=1 2>/dev/null + s_name="second=second.img\n" + s_size="second_size=$second_size\n" +fi +#dtb +if [ $dtb_size -gt 0 ]; then + dd if=$bootimg of=dt.img_tmp bs=$page_size skip=$d_offset count=$d_count 2>/dev/null + dd
if=dt.img_tmp of=dt.img bs=$dtb_size count=1 2>/dev/null + dt="$tempdir/dt.img" + dt=$(basename $dt) + dt_name="dt=$dt\n" + dt_size="dtb_size=$dtb_size\n" +fi +rm -f *_tmp $(basename $1) $bootimg + +kernel=kernel +ramdisk=ramdisk +[ ! -s $kernel ] && clean +#print boot.img info +print_info + +esq="'\"'\"'" +escaped_cmd_line=`echo $cmd_line | sed "s/'/$esq/g"` + +#write info to img_info,decompression ramdisk.packed +printf "kernel=kernel\nramdisk=ramdisk\n${s_name}${dt_name}page_size=$page_size\n\ +kernel_size=$kernel_size\nramdisk_size=$ramdisk_size\n${s_size}${dt_size}base_addr=$base_addr\nkernel_offset=$kernel_offset\n\ +ramdisk_offset=$ramdisk_offset\ntags_offset=$tags_offset\ncmd_line=\'$escaped_cmd_line\'\nboard=\"$board\"\n" > img_info +mkdir ramdisk +cd ramdisk + +gzip -t ../ramdisk.packed 2>/dev/null +if [ $? -eq 0 ]; then + pout "ramdisk is gzip format." + format=gzip + gzip -d -c ../ramdisk.packed | cpio -i -d -m --no-absolute-filenames 2>/dev/null + unpack_complete +fi +lzma -t ../ramdisk.packed 2>/dev/null +if [ $? -eq 0 ]; then + pout "ramdisk is lzma format." + format=lzma + lzma -d -c ../ramdisk.packed | cpio -i -d -m --no-absolute-filenames 2>/dev/null + unpack_complete +fi +xz -t ../ramdisk.packed 2>/dev/null +if [ $? -eq 0 ]; then + pout "ramdisk is xz format." + format=xz + xz -d -c ../ramdisk.packed | cpio -i -d -m --no-absolute-filenames 2>/dev/null + unpack_complete +fi +lzop -t ../ramdisk.packed 2>/dev/null +if [ $? -eq 0 ]; then + pout "ramdisk is lzo format." + format=lzop + lzop -d -c ../ramdisk.packed | cpio -i -d -m --no-absolute-filenames 2>/dev/null + unpack_complete +fi +$tooldir/lz4 -d ../ramdisk.packed 2>/dev/null | cpio -i -d -m --no-absolute-filenames 2>/dev/null +if [ $? -eq 0 ]; then + pout "ramdisk is lz4 format." 
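+ # (detection for lz4 is the extraction itself; the $? tested above reflects the cpio end of the pipeline, unlike the dedicated -t probes used for the other formats)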
+ format=lz4 +else + pout "ramdisk is in an unknown format, can't unpack ramdisk" +fi +unpack_complete + diff --git a/mkbootimg/mkbootfs b/mkbootimg/mkbootfs new file mode 100755 index 0000000..18f88c9 Binary files /dev/null and b/mkbootimg/mkbootfs differ diff --git a/mkbootimg/mkbootimg b/mkbootimg/mkbootimg new file mode 100755 index 0000000..4e8bdb8 Binary files /dev/null and b/mkbootimg/mkbootimg differ diff --git a/mkbootimg/split-appended-dtb b/mkbootimg/split-appended-dtb new file mode 100755 index 0000000..45bfb6e Binary files /dev/null and b/mkbootimg/split-appended-dtb differ diff --git a/mkdtimg b/mkdtimg new file mode 100755 index 0000000..d8fb9eb Binary files /dev/null and b/mkdtimg differ diff --git a/output/.gitkeep b/output/.gitkeep new file mode 100755 index 0000000..e69de29 diff --git a/template/META-INF/com/google/android/update-binary b/template/META-INF/com/google/android/update-binary new file mode 100755 index 0000000..fcd0b84 Binary files /dev/null and b/template/META-INF/com/google/android/update-binary differ diff --git a/template/META-INF/com/google/android/updater-script b/template/META-INF/com/google/android/updater-script new file mode 100755 index 0000000..e7a7c40 --- /dev/null +++ b/template/META-INF/com/google/android/updater-script @@ -0,0 +1,29 @@ +# +ui_print("--------------------------------------------------"); +ui_print("| |"); +ui_print("| A1 TO 5X |"); +ui_print("| |"); +ui_print("--------------------------------------------------"); +ui_print("--------------------------------------------------"); +ui_print("| TG:- @Eytan_Tan |"); +ui_print("--------------------------------------------------"); +ui_print(" "); +# +ui_print("- Flashing system image"); +show_progress(0.040000, 0); +package_extract_file("system.img", "/dev/block/bootdevice/by-name/system") || + abort("E1001: Failed to flash system image."); +# +ui_print("- Flashing boot image"); +show_progress(0.090000, 0); +package_extract_file("boot.img", "/dev/block/bootdevice/by-name/boot"); +# +ui_print("- Flashing tz image"); +package_extract_file("tz.img", "/dev/block/bootdevice/by-name/tz"); +# +ui_print("- Flashed successfully..."); +ui_print("- Enjoy!"); +ui_print(" "); +# +show_progress(0.100000, 10); +set_progress(1.000000); diff --git a/template/tz.img b/template/tz.img new file mode 100644 index 0000000..078d618 Binary files /dev/null and b/template/tz.img differ diff --git a/tmp/.gitkeep b/tmp/.gitkeep new file mode 100755 index 0000000..e69de29 diff --git a/update-payload-extractor/.gitignore b/update-payload-extractor/.gitignore new file mode 100755 index 0000000..0d20b64 --- /dev/null +++ b/update-payload-extractor/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/update-payload-extractor/extract.py b/update-payload-extractor/extract.py new file mode 100755 index 0000000..cc01051 --- /dev/null +++ b/update-payload-extractor/extract.py @@ -0,0 +1,63 @@ +#!/usr/bin/env python + +import argparse +import errno +import os + +import update_payload +from update_payload import applier + + +def list_content(payload_file_name): + with open(payload_file_name, 'rb') as payload_file: + payload = update_payload.Payload(payload_file) + payload.Init() + + for part in payload.manifest.partitions: + print("{} ({} bytes)".format(part.partition_name, + part.new_partition_info.size)) + + +def extract(payload_file_name, output_dir="output", partition_names=None): + try: + os.makedirs(output_dir) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + with open(payload_file_name, 'rb') as payload_file: +
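+ # Parse the payload header and manifest; delta payloads are rejected below, so only full payloads are extracted.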
payload = update_payload.Payload(payload_file) + payload.Init() + + if payload.IsDelta(): + print("Delta payloads are not supported") + exit(1) + + helper = applier.PayloadApplier(payload) + for part in payload.manifest.partitions: + if partition_names and part.partition_name not in partition_names: + continue + print("Extracting {}".format(part.partition_name)) + output_file = os.path.join(output_dir, part.partition_name + '.img') + helper._ApplyToPartition( + part.operations, part.partition_name, + 'install_operations', output_file, + part.new_partition_info) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument("payload", metavar="payload.bin", + help="Path to the payload.bin") + parser.add_argument("--output_dir", default="output", + help="Output directory") + parser.add_argument("--partitions", type=str, nargs='+', + help="Name of the partitions to extract") + parser.add_argument("--list_partitions", action="store_true", + help="List the partitions included in the payload.bin") + + args = parser.parse_args() + if args.list_partitions: + list_content(args.payload) + else: + extract(args.payload, args.output_dir, args.partitions) diff --git a/update-payload-extractor/update_payload/__init__.py b/update-payload-extractor/update_payload/__init__.py new file mode 100755 index 0000000..6e77678 --- /dev/null +++ b/update-payload-extractor/update_payload/__init__.py @@ -0,0 +1,24 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Library for processing, verifying and applying Chrome OS update payloads.""" + +# Just raise the interface classes to the root namespace. +from __future__ import absolute_import + +from update_payload.checker import CHECKS_TO_DISABLE +from update_payload.error import PayloadError +from update_payload.payload import Payload diff --git a/update-payload-extractor/update_payload/applier.py b/update-payload-extractor/update_payload/applier.py new file mode 100755 index 0000000..29ccb8e --- /dev/null +++ b/update-payload-extractor/update_payload/applier.py @@ -0,0 +1,621 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Applying a Chrome OS update payload. + +This module is used internally by the main Payload class for applying an update +payload. The interface for invoking the applier is as follows: + + applier = PayloadApplier(payload) + applier.Run(...) 
+ +""" + +from __future__ import absolute_import +from __future__ import print_function + +import array +import bz2 +import hashlib +# Not everywhere we can have the lzma library so we ignore it if we didn't have +# it because it is not going to be used. For example, 'cros flash' uses +# devserver code which eventually loads this file, but the lzma library is not +# included in the client test devices, and it is not necessary to do so. But +# lzma is not used in 'cros flash' so it should be fine. Python 3.x include +# lzma, but for backward compatibility with Python 2.7, backports-lzma is +# needed. +try: + import lzma +except ImportError: + try: + from backports import lzma + except ImportError: + pass +import os +import subprocess +import sys +import tempfile + +from update_payload import common +from update_payload.error import PayloadError + +# +# Helper functions. +# +def _VerifySha256(file_obj, expected_hash, name, length=-1): + """Verifies the SHA256 hash of a file. + + Args: + file_obj: file object to read + expected_hash: the hash digest we expect to be getting + name: name string of this hash, for error reporting + length: precise length of data to verify (optional) + + Raises: + PayloadError if computed hash doesn't match expected one, or if fails to + read the specified length of data. + """ + hasher = hashlib.sha256() + block_length = 1024 * 1024 + max_length = length if length >= 0 else sys.maxsize + + while max_length > 0: + read_length = min(max_length, block_length) + data = file_obj.read(read_length) + if not data: + break + max_length -= len(data) + hasher.update(data) + + if length >= 0 and max_length > 0: + raise PayloadError( + 'insufficient data (%d instead of %d) when verifying %s' % + (length - max_length, length, name)) + + actual_hash = hasher.digest() + if actual_hash != expected_hash: + raise PayloadError('%s hash (%s) not as expected (%s)' % + (name, common.FormatSha256(actual_hash), + common.FormatSha256(expected_hash))) + + +def _ReadExtents(file_obj, extents, block_size, max_length=-1): + """Reads data from file as defined by extent sequence. + + This tries to be efficient by not copying data as it is read in chunks. + + Args: + file_obj: file object + extents: sequence of block extents (offset and length) + block_size: size of each block + max_length: maximum length to read (optional) + + Returns: + A character array containing the concatenated read data. + """ + data = array.array('B') + if max_length < 0: + max_length = sys.maxsize + for ex in extents: + if max_length == 0: + break + read_length = min(max_length, ex.num_blocks * block_size) + + file_obj.seek(ex.start_block * block_size) + data.fromfile(file_obj, read_length) + + max_length -= read_length + + return data + + +def _WriteExtents(file_obj, data, extents, block_size, base_name): + """Writes data to file as defined by extent sequence. + + This tries to be efficient by not copy data as it is written in chunks. + + Args: + file_obj: file object + data: data to write + extents: sequence of block extents (offset and length) + block_size: size of each block + base_name: name string of extent sequence for error reporting + + Raises: + PayloadError when things don't add up. 
+ """ + data_offset = 0 + data_length = len(data) + for ex, ex_name in common.ExtentIter(extents, base_name): + if not data_length: + raise PayloadError('%s: more write extents than data' % ex_name) + write_length = min(data_length, ex.num_blocks * block_size) + file_obj.seek(ex.start_block * block_size) + file_obj.write(data[data_offset:(data_offset + write_length)]) + + data_offset += write_length + data_length -= write_length + + if data_length: + raise PayloadError('%s: more data than write extents' % base_name) + + +def _ExtentsToBspatchArg(extents, block_size, base_name, data_length=-1): + """Translates an extent sequence into a bspatch-compatible string argument. + + Args: + extents: sequence of block extents (offset and length) + block_size: size of each block + base_name: name string of extent sequence for error reporting + data_length: the actual total length of the data in bytes (optional) + + Returns: + A tuple consisting of (i) a string of the form + "off_1:len_1,...,off_n:len_n", (ii) an offset where zero padding is needed + for filling the last extent, (iii) the length of the padding (zero means no + padding is needed and the extents cover the full length of data). + + Raises: + PayloadError if data_length is too short or too long. + """ + arg = '' + pad_off = pad_len = 0 + if data_length < 0: + data_length = sys.maxsize + for ex, ex_name in common.ExtentIter(extents, base_name): + if not data_length: + raise PayloadError('%s: more extents than total data length' % ex_name) + + start_byte = ex.start_block * block_size + num_bytes = ex.num_blocks * block_size + if data_length < num_bytes: + # We're only padding a real extent. + pad_off = start_byte + data_length + pad_len = num_bytes - data_length + num_bytes = data_length + + arg += '%s%d:%d' % (arg and ',', start_byte, num_bytes) + data_length -= num_bytes + + if data_length: + raise PayloadError('%s: extents not covering full data length' % base_name) + + return arg, pad_off, pad_len + + +# +# Payload application. +# +class PayloadApplier(object): + """Applying an update payload. + + This is a short-lived object whose purpose is to isolate the logic used for + applying an update payload. + """ + + def __init__(self, payload, bsdiff_in_place=True, bspatch_path=None, + puffpatch_path=None, truncate_to_expected_size=True): + """Initialize the applier. + + Args: + payload: the payload object to check + bsdiff_in_place: whether to perform BSDIFF operation in-place (optional) + bspatch_path: path to the bspatch binary (optional) + puffpatch_path: path to the puffpatch binary (optional) + truncate_to_expected_size: whether to truncate the resulting partitions + to their expected sizes, as specified in the + payload (optional) + """ + assert payload.is_init, 'uninitialized update payload' + self.payload = payload + self.block_size = payload.manifest.block_size + self.minor_version = payload.manifest.minor_version + self.bsdiff_in_place = bsdiff_in_place + self.bspatch_path = bspatch_path or 'bspatch' + self.puffpatch_path = puffpatch_path or 'puffin' + self.truncate_to_expected_size = truncate_to_expected_size + + def _ApplyReplaceOperation(self, op, op_name, out_data, part_file, part_size): + """Applies a REPLACE{,_BZ,_XZ} operation. + + Args: + op: the operation object + op_name: name string for error reporting + out_data: the data to be written + part_file: the partition file object + part_size: the size of the partition + + Raises: + PayloadError if something goes wrong. 
+ """ + block_size = self.block_size + data_length = len(out_data) + + # Decompress data if needed. + if op.type == common.OpType.REPLACE_BZ: + out_data = bz2.decompress(out_data) + data_length = len(out_data) + elif op.type == common.OpType.REPLACE_XZ: + # pylint: disable=no-member + out_data = lzma.decompress(out_data) + data_length = len(out_data) + + # Write data to blocks specified in dst extents. + data_start = 0 + for ex, ex_name in common.ExtentIter(op.dst_extents, + '%s.dst_extents' % op_name): + start_block = ex.start_block + num_blocks = ex.num_blocks + count = num_blocks * block_size + + data_end = data_start + count + + # Make sure we're not running past partition boundary. + if (start_block + num_blocks) * block_size > part_size: + raise PayloadError( + '%s: extent (%s) exceeds partition size (%d)' % + (ex_name, common.FormatExtent(ex, block_size), + part_size)) + + # Make sure that we have enough data to write. + if data_end >= data_length + block_size: + raise PayloadError( + '%s: more dst blocks than data (even with padding)') + + # Pad with zeros if necessary. + if data_end > data_length: + padding = data_end - data_length + out_data += b'\0' * padding + + self.payload.payload_file.seek(start_block * block_size) + part_file.seek(start_block * block_size) + part_file.write(out_data[data_start:data_end]) + + data_start += count + + # Make sure we wrote all data. + if data_start < data_length: + raise PayloadError('%s: wrote fewer bytes (%d) than expected (%d)' % + (op_name, data_start, data_length)) + + def _ApplyZeroOperation(self, op, op_name, part_file): + """Applies a ZERO operation. + + Args: + op: the operation object + op_name: name string for error reporting + part_file: the partition file object + + Raises: + PayloadError if something goes wrong. + """ + block_size = self.block_size + base_name = '%s.dst_extents' % op_name + + # Iterate over the extents and write zero. + # pylint: disable=unused-variable + for ex, ex_name in common.ExtentIter(op.dst_extents, base_name): + part_file.seek(ex.start_block * block_size) + part_file.write(b'\0' * (ex.num_blocks * block_size)) + + def _ApplySourceCopyOperation(self, op, op_name, old_part_file, + new_part_file): + """Applies a SOURCE_COPY operation. + + Args: + op: the operation object + op_name: name string for error reporting + old_part_file: the old partition file object + new_part_file: the new partition file object + + Raises: + PayloadError if something goes wrong. + """ + if not old_part_file: + raise PayloadError( + '%s: no source partition file provided for operation type (%d)' % + (op_name, op.type)) + + block_size = self.block_size + + # Gather input raw data from src extents. + in_data = _ReadExtents(old_part_file, op.src_extents, block_size) + + # Dump extracted data to dst extents. + _WriteExtents(new_part_file, in_data, op.dst_extents, block_size, + '%s.dst_extents' % op_name) + + def _BytesInExtents(self, extents, base_name): + """Counts the length of extents in bytes. + + Args: + extents: The list of Extents. + base_name: For error reporting. + + Returns: + The number of bytes in extents. + """ + + length = 0 + # pylint: disable=unused-variable + for ex, ex_name in common.ExtentIter(extents, base_name): + length += ex.num_blocks * self.block_size + return length + + def _ApplyDiffOperation(self, op, op_name, patch_data, old_part_file, + new_part_file): + """Applies a SOURCE_BSDIFF, BROTLI_BSDIFF or PUFFDIFF operation. 
+ + Args: + op: the operation object + op_name: name string for error reporting + patch_data: the binary patch content + old_part_file: the source partition file object + new_part_file: the target partition file object + + Raises: + PayloadError if something goes wrong. + """ + if not old_part_file: + raise PayloadError( + '%s: no source partition file provided for operation type (%d)' % + (op_name, op.type)) + + block_size = self.block_size + + # Dump patch data to file. + with tempfile.NamedTemporaryFile(delete=False) as patch_file: + patch_file_name = patch_file.name + patch_file.write(patch_data) + + if (hasattr(new_part_file, 'fileno') and + ((not old_part_file) or hasattr(old_part_file, 'fileno'))): + # Construct input and output extents argument for bspatch. + + in_extents_arg, _, _ = _ExtentsToBspatchArg( + op.src_extents, block_size, '%s.src_extents' % op_name, + data_length=op.src_length if op.src_length else + self._BytesInExtents(op.src_extents, "%s.src_extents")) + out_extents_arg, pad_off, pad_len = _ExtentsToBspatchArg( + op.dst_extents, block_size, '%s.dst_extents' % op_name, + data_length=op.dst_length if op.dst_length else + self._BytesInExtents(op.dst_extents, "%s.dst_extents")) + + new_file_name = '/dev/fd/%d' % new_part_file.fileno() + # Diff from source partition. + old_file_name = '/dev/fd/%d' % old_part_file.fileno() + + # In python3, file descriptors(fd) are not passed to child processes by + # default. To pass the fds to the child processes, we need to set the flag + # 'inheritable' in the fds and make the subprocess calls with the argument + # close_fds set to False. + if sys.version_info.major >= 3: + os.set_inheritable(new_part_file.fileno(), True) + os.set_inheritable(old_part_file.fileno(), True) + + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): + # Invoke bspatch on partition file with extents args. + bspatch_cmd = [self.bspatch_path, old_file_name, new_file_name, + patch_file_name, in_extents_arg, out_extents_arg] + subprocess.check_call(bspatch_cmd, close_fds=False) + elif op.type == common.OpType.PUFFDIFF: + # Invoke puffpatch on partition file with extents args. + puffpatch_cmd = [self.puffpatch_path, + "--operation=puffpatch", + "--src_file=%s" % old_file_name, + "--dst_file=%s" % new_file_name, + "--patch_file=%s" % patch_file_name, + "--src_extents=%s" % in_extents_arg, + "--dst_extents=%s" % out_extents_arg] + subprocess.check_call(puffpatch_cmd, close_fds=False) + else: + raise PayloadError("Unknown operation %s" % op.type) + + # Pad with zeros past the total output length. + if pad_len: + new_part_file.seek(pad_off) + new_part_file.write(b'\0' * pad_len) + else: + # Gather input raw data and write to a temp file. + input_part_file = old_part_file if old_part_file else new_part_file + in_data = _ReadExtents(input_part_file, op.src_extents, block_size, + max_length=op.src_length if op.src_length else + self._BytesInExtents(op.src_extents, + "%s.src_extents")) + with tempfile.NamedTemporaryFile(delete=False) as in_file: + in_file_name = in_file.name + in_file.write(in_data) + + # Allocate temporary output file. + with tempfile.NamedTemporaryFile(delete=False) as out_file: + out_file_name = out_file.name + + if op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.BROTLI_BSDIFF): + # Invoke bspatch. + bspatch_cmd = [self.bspatch_path, in_file_name, out_file_name, + patch_file_name] + subprocess.check_call(bspatch_cmd) + elif op.type == common.OpType.PUFFDIFF: + # Invoke puffpatch. 
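+ # Fallback path without direct fds: the source extents were already flattened into plain temp files above, so no extent arguments are passed.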
+ puffpatch_cmd = [self.puffpatch_path, + "--operation=puffpatch", + "--src_file=%s" % in_file_name, + "--dst_file=%s" % out_file_name, + "--patch_file=%s" % patch_file_name] + subprocess.check_call(puffpatch_cmd) + else: + raise PayloadError("Unknown operation %s" % op.type) + + # Read output. + with open(out_file_name, 'rb') as out_file: + out_data = out_file.read() + if len(out_data) != op.dst_length: + raise PayloadError( + '%s: actual patched data length (%d) not as expected (%d)' % + (op_name, len(out_data), op.dst_length)) + + # Write output back to partition, with padding. + unaligned_out_len = len(out_data) % block_size + if unaligned_out_len: + out_data += b'\0' * (block_size - unaligned_out_len) + _WriteExtents(new_part_file, out_data, op.dst_extents, block_size, + '%s.dst_extents' % op_name) + + # Delete input/output files. + os.remove(in_file_name) + os.remove(out_file_name) + + # Delete patch file. + os.remove(patch_file_name) + + def _ApplyOperations(self, operations, base_name, old_part_file, + new_part_file, part_size): + """Applies a sequence of update operations to a partition. + + Args: + operations: the sequence of operations + base_name: the name of the operation sequence + old_part_file: the old partition file object, open for reading/writing + new_part_file: the new partition file object, open for reading/writing + part_size: the partition size + + Raises: + PayloadError if anything goes wrong while processing the payload. + """ + for op, op_name in common.OperationIter(operations, base_name): + # Read data blob. + data = self.payload.ReadDataBlob(op.data_offset, op.data_length) + + if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, + common.OpType.REPLACE_XZ): + self._ApplyReplaceOperation(op, op_name, data, new_part_file, part_size) + elif op.type == common.OpType.ZERO: + self._ApplyZeroOperation(op, op_name, new_part_file) + elif op.type == common.OpType.SOURCE_COPY: + self._ApplySourceCopyOperation(op, op_name, old_part_file, + new_part_file) + elif op.type in (common.OpType.SOURCE_BSDIFF, common.OpType.PUFFDIFF, + common.OpType.BROTLI_BSDIFF): + self._ApplyDiffOperation(op, op_name, data, old_part_file, + new_part_file) + else: + raise PayloadError('%s: unknown operation type (%d)' % + (op_name, op.type)) + + def _ApplyToPartition(self, operations, part_name, base_name, + new_part_file_name, new_part_info, + old_part_file_name=None, old_part_info=None): + """Applies an update to a partition. + + Args: + operations: the sequence of update operations to apply + part_name: the name of the partition, for error reporting + base_name: the name of the operation sequence + new_part_file_name: file name to write partition data to + new_part_info: size and expected hash of dest partition + old_part_file_name: file name of source partition (optional) + old_part_info: size and expected hash of source partition (optional) + + Raises: + PayloadError if anything goes wrong with the update. + """ + # Do we have a source partition? + if old_part_file_name: + # Verify the source partition. + with open(old_part_file_name, 'rb') as old_part_file: + _VerifySha256(old_part_file, old_part_info.hash, + 'old ' + part_name, length=old_part_info.size) + new_part_file_mode = 'r+b' + open(new_part_file_name, 'w').close() + + else: + # We need to create/truncate the dst partition file. + new_part_file_mode = 'w+b' + + # Apply operations. 
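+ # Mode is r+b when patching against a verified source partition (the file was pre-created above), w+b when creating a fresh image.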
+ with open(new_part_file_name, new_part_file_mode) as new_part_file: + old_part_file = (open(old_part_file_name, 'r+b') + if old_part_file_name else None) + try: + self._ApplyOperations(operations, base_name, old_part_file, + new_part_file, new_part_info.size) + finally: + if old_part_file: + old_part_file.close() + + # Truncate the result, if so instructed. + if self.truncate_to_expected_size: + new_part_file.seek(0, 2) + if new_part_file.tell() > new_part_info.size: + new_part_file.seek(new_part_info.size) + new_part_file.truncate() + + # Verify the resulting partition. + with open(new_part_file_name, 'rb') as new_part_file: + _VerifySha256(new_part_file, new_part_info.hash, + 'new ' + part_name, length=new_part_info.size) + + def Run(self, new_parts, old_parts=None): + """Applier entry point, invoking all update operations. + + Args: + new_parts: map of partition name to dest partition file + old_parts: map of partition name to source partition file (optional) + + Raises: + PayloadError if payload application failed. + """ + if old_parts is None: + old_parts = {} + + self.payload.ResetFile() + + new_part_info = {} + old_part_info = {} + install_operations = [] + + manifest = self.payload.manifest + for part in manifest.partitions: + name = part.partition_name + new_part_info[name] = part.new_partition_info + old_part_info[name] = part.old_partition_info + install_operations.append((name, part.operations)) + + part_names = set(new_part_info.keys()) # Equivalently, old_part_info.keys() + + # Make sure the arguments are sane and match the payload. + new_part_names = set(new_parts.keys()) + if new_part_names != part_names: + raise PayloadError('missing dst partition(s) %s' % + ', '.join(part_names - new_part_names)) + + old_part_names = set(old_parts.keys()) + if part_names - old_part_names: + if self.payload.IsDelta(): + raise PayloadError('trying to apply a delta update without src ' + 'partition(s) %s' % + ', '.join(part_names - old_part_names)) + elif old_part_names == part_names: + if self.payload.IsFull(): + raise PayloadError('trying to apply a full update onto src partitions') + else: + raise PayloadError('not all src partitions provided') + + for name, operations in install_operations: + # Apply update to partition. + self._ApplyToPartition( + operations, name, '%s_install_operations' % name, new_parts[name], + new_part_info[name], old_parts.get(name, None), old_part_info[name]) diff --git a/update-payload-extractor/update_payload/checker.py b/update-payload-extractor/update_payload/checker.py new file mode 100755 index 0000000..4c65516 --- /dev/null +++ b/update-payload-extractor/update_payload/checker.py @@ -0,0 +1,1265 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Verifying the integrity of a Chrome OS update payload. + +This module is used internally by the main Payload class for verifying the +integrity of an update payload. 
The interface for invoking the checks is as +follows: + + checker = PayloadChecker(payload) + checker.Run(...) +""" + +from __future__ import absolute_import +from __future__ import print_function + +import array +import base64 +import collections +import hashlib +import itertools +import os +import subprocess + +from six.moves import range + +from update_payload import common +from update_payload import error +from update_payload import format_utils +from update_payload import histogram +from update_payload import update_metadata_pb2 + +# +# Constants. +# + +_CHECK_MOVE_SAME_SRC_DST_BLOCK = 'move-same-src-dst-block' +_CHECK_PAYLOAD_SIG = 'payload-sig' +CHECKS_TO_DISABLE = ( + _CHECK_MOVE_SAME_SRC_DST_BLOCK, + _CHECK_PAYLOAD_SIG, +) + +_TYPE_FULL = 'full' +_TYPE_DELTA = 'delta' + +_DEFAULT_BLOCK_SIZE = 4096 + +_DEFAULT_PUBKEY_BASE_NAME = 'update-payload-key.pub.pem' +_DEFAULT_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), + _DEFAULT_PUBKEY_BASE_NAME) + +# Supported minor version map to payload types allowed to be using them. +_SUPPORTED_MINOR_VERSIONS = { + 0: (_TYPE_FULL,), + 2: (_TYPE_DELTA,), + 3: (_TYPE_DELTA,), + 4: (_TYPE_DELTA,), + 5: (_TYPE_DELTA,), + 6: (_TYPE_DELTA,), +} + + +# +# Helper functions. +# + +def _IsPowerOfTwo(val): + """Returns True iff val is a power of two.""" + return val > 0 and (val & (val - 1)) == 0 + + +def _AddFormat(format_func, value): + """Adds a custom formatted representation to ordinary string representation. + + Args: + format_func: A value formatter. + value: Value to be formatted and returned. + + Returns: + A string 'x (y)' where x = str(value) and y = format_func(value). + """ + ret = str(value) + formatted_str = format_func(value) + if formatted_str: + ret += ' (%s)' % formatted_str + return ret + + +def _AddHumanReadableSize(size): + """Adds a human readable representation to a byte size value.""" + return _AddFormat(format_utils.BytesToHumanReadable, size) + + +# +# Payload report generator. +# + +class _PayloadReport(object): + """A payload report generator. + + A report is essentially a sequence of nodes, which represent data points. It + is initialized to have a "global", untitled section. A node may be a + sub-report itself. + """ + + # Report nodes: Field, sub-report, section. + class Node(object): + """A report node interface.""" + + @staticmethod + def _Indent(indent, line): + """Indents a line by a given indentation amount. + + Args: + indent: The indentation amount. + line: The line content (string). + + Returns: + The properly indented line (string). + """ + return '%*s%s' % (indent, '', line) + + def GenerateLines(self, base_indent, sub_indent, curr_section): + """Generates the report lines for this node. + + Args: + base_indent: Base indentation for each line. + sub_indent: Additional indentation for sub-nodes. + curr_section: The current report section object. + + Returns: + A pair consisting of a list of properly indented report lines and a new + current section object. 
+ """ + raise NotImplementedError + + class FieldNode(Node): + """A field report node, representing a (name, value) pair.""" + + def __init__(self, name, value, linebreak, indent): + super(_PayloadReport.FieldNode, self).__init__() + self.name = name + self.value = value + self.linebreak = linebreak + self.indent = indent + + def GenerateLines(self, base_indent, sub_indent, curr_section): + """Generates a properly formatted 'name : value' entry.""" + report_output = '' + if self.name: + report_output += self.name.ljust(curr_section.max_field_name_len) + ' :' + value_lines = str(self.value).splitlines() + if self.linebreak and self.name: + report_output += '\n' + '\n'.join( + ['%*s%s' % (self.indent, '', line) for line in value_lines]) + else: + if self.name: + report_output += ' ' + report_output += '%*s' % (self.indent, '') + cont_line_indent = len(report_output) + indented_value_lines = [value_lines[0]] + indented_value_lines.extend(['%*s%s' % (cont_line_indent, '', line) + for line in value_lines[1:]]) + report_output += '\n'.join(indented_value_lines) + + report_lines = [self._Indent(base_indent, line + '\n') + for line in report_output.split('\n')] + return report_lines, curr_section + + class SubReportNode(Node): + """A sub-report node, representing a nested report.""" + + def __init__(self, title, report): + super(_PayloadReport.SubReportNode, self).__init__() + self.title = title + self.report = report + + def GenerateLines(self, base_indent, sub_indent, curr_section): + """Recurse with indentation.""" + report_lines = [self._Indent(base_indent, self.title + ' =>\n')] + report_lines.extend(self.report.GenerateLines(base_indent + sub_indent, + sub_indent)) + return report_lines, curr_section + + class SectionNode(Node): + """A section header node.""" + + def __init__(self, title=None): + super(_PayloadReport.SectionNode, self).__init__() + self.title = title + self.max_field_name_len = 0 + + def GenerateLines(self, base_indent, sub_indent, curr_section): + """Dump a title line, return self as the (new) current section.""" + report_lines = [] + if self.title: + report_lines.append(self._Indent(base_indent, + '=== %s ===\n' % self.title)) + return report_lines, self + + def __init__(self): + self.report = [] + self.last_section = self.global_section = self.SectionNode() + self.is_finalized = False + + def GenerateLines(self, base_indent, sub_indent): + """Generates the lines in the report, properly indented. + + Args: + base_indent: The indentation used for root-level report lines. + sub_indent: The indentation offset used for sub-reports. + + Returns: + A list of indented report lines. + """ + report_lines = [] + curr_section = self.global_section + for node in self.report: + node_report_lines, curr_section = node.GenerateLines( + base_indent, sub_indent, curr_section) + report_lines.extend(node_report_lines) + + return report_lines + + def Dump(self, out_file, base_indent=0, sub_indent=2): + """Dumps the report to a file. + + Args: + out_file: File object to output the content to. + base_indent: Base indentation for report lines. + sub_indent: Added indentation for sub-reports. + """ + report_lines = self.GenerateLines(base_indent, sub_indent) + if report_lines and not self.is_finalized: + report_lines.append('(incomplete report)\n') + + for line in report_lines: + out_file.write(line) + + def AddField(self, name, value, linebreak=False, indent=0): + """Adds a field/value pair to the payload report. + + Args: + name: The field's name. + value: The field's value. 
+ linebreak: Whether the value should be printed on a new line. + indent: Amount of extra indent for each line of the value. + """ + assert not self.is_finalized + if name and self.last_section.max_field_name_len < len(name): + self.last_section.max_field_name_len = len(name) + self.report.append(self.FieldNode(name, value, linebreak, indent)) + + def AddSubReport(self, title): + """Adds and returns a sub-report with a title.""" + assert not self.is_finalized + sub_report = self.SubReportNode(title, type(self)()) + self.report.append(sub_report) + return sub_report.report + + def AddSection(self, title): + """Adds a new section title.""" + assert not self.is_finalized + self.last_section = self.SectionNode(title) + self.report.append(self.last_section) + + def Finalize(self): + """Seals the report, marking it as complete.""" + self.is_finalized = True + + +# +# Payload verification. +# + +class PayloadChecker(object): + """Checking the integrity of an update payload. + + This is a short-lived object whose purpose is to isolate the logic used for + verifying the integrity of an update payload. + """ + + def __init__(self, payload, assert_type=None, block_size=0, + allow_unhashed=False, disabled_tests=()): + """Initialize the checker. + + Args: + payload: The payload object to check. + assert_type: Assert that payload is either 'full' or 'delta' (optional). + block_size: Expected filesystem / payload block size (optional). + allow_unhashed: Allow operations with unhashed data blobs. + disabled_tests: Sequence of tests to disable. + """ + if not payload.is_init: + raise ValueError('Uninitialized update payload.') + + # Set checker configuration. + self.payload = payload + self.block_size = block_size if block_size else _DEFAULT_BLOCK_SIZE + if not _IsPowerOfTwo(self.block_size): + raise error.PayloadError( + 'Expected block (%d) size is not a power of two.' % self.block_size) + if assert_type not in (None, _TYPE_FULL, _TYPE_DELTA): + raise error.PayloadError('Invalid assert_type value (%r).' % + assert_type) + self.payload_type = assert_type + self.allow_unhashed = allow_unhashed + + # Disable specific tests. + self.check_move_same_src_dst_block = ( + _CHECK_MOVE_SAME_SRC_DST_BLOCK not in disabled_tests) + self.check_payload_sig = _CHECK_PAYLOAD_SIG not in disabled_tests + + # Reset state; these will be assigned when the manifest is checked. + self.sigs_offset = 0 + self.sigs_size = 0 + self.old_part_info = {} + self.new_part_info = {} + self.new_fs_sizes = collections.defaultdict(int) + self.old_fs_sizes = collections.defaultdict(int) + self.minor_version = None + self.major_version = None + + @staticmethod + def _CheckElem(msg, name, report, is_mandatory, is_submsg, convert=str, + msg_name=None, linebreak=False, indent=0): + """Adds an element from a protobuf message to the payload report. + + Checks to see whether a message contains a given element, and if so adds + the element value to the provided report. A missing mandatory element + causes an exception to be raised. + + Args: + msg: The message containing the element. + name: The name of the element. + report: A report object to add the element name/value to. + is_mandatory: Whether or not this element must be present. + is_submsg: Whether this element is itself a message. + convert: A function for converting the element value for reporting. + msg_name: The name of the message object (for error reporting). + linebreak: Whether the value report should induce a line break. + indent: Amount of indent used for reporting the value. 
+ + Returns: + A pair consisting of the element value and the generated sub-report for + it (if the element is a sub-message, None otherwise). If the element is + missing, returns (None, None). + + Raises: + error.PayloadError if a mandatory element is missing. + """ + element_result = collections.namedtuple('element_result', ['msg', 'report']) + + if not msg.HasField(name): + if is_mandatory: + raise error.PayloadError('%smissing mandatory %s %r.' % + (msg_name + ' ' if msg_name else '', + 'sub-message' if is_submsg else 'field', + name)) + return element_result(None, None) + + value = getattr(msg, name) + if is_submsg: + return element_result(value, report and report.AddSubReport(name)) + else: + if report: + report.AddField(name, convert(value), linebreak=linebreak, + indent=indent) + return element_result(value, None) + + @staticmethod + def _CheckRepeatedElemNotPresent(msg, field_name, msg_name): + """Checks that a repeated element is not specified in the message. + + Args: + msg: The message containing the element. + field_name: The name of the element. + msg_name: The name of the message object (for error reporting). + + Raises: + error.PayloadError if the repeated element is present or non-empty. + """ + if getattr(msg, field_name, None): + raise error.PayloadError('%sfield %r not empty.' % + (msg_name + ' ' if msg_name else '', field_name)) + + @staticmethod + def _CheckElemNotPresent(msg, field_name, msg_name): + """Checks that an element is not specified in the message. + + Args: + msg: The message containing the element. + field_name: The name of the element. + msg_name: The name of the message object (for error reporting). + + Raises: + error.PayloadError if the repeated element is present. + """ + if msg.HasField(field_name): + raise error.PayloadError('%sfield %r exists.' % + (msg_name + ' ' if msg_name else '', field_name)) + + @staticmethod + def _CheckMandatoryField(msg, field_name, report, msg_name, convert=str, + linebreak=False, indent=0): + """Adds a mandatory field; returning first component from _CheckElem.""" + return PayloadChecker._CheckElem(msg, field_name, report, True, False, + convert=convert, msg_name=msg_name, + linebreak=linebreak, indent=indent)[0] + + @staticmethod + def _CheckOptionalField(msg, field_name, report, convert=str, + linebreak=False, indent=0): + """Adds an optional field; returning first component from _CheckElem.""" + return PayloadChecker._CheckElem(msg, field_name, report, False, False, + convert=convert, linebreak=linebreak, + indent=indent)[0] + + @staticmethod + def _CheckMandatorySubMsg(msg, submsg_name, report, msg_name): + """Adds a mandatory sub-message; wrapper for _CheckElem.""" + return PayloadChecker._CheckElem(msg, submsg_name, report, True, True, + msg_name) + + @staticmethod + def _CheckOptionalSubMsg(msg, submsg_name, report): + """Adds an optional sub-message; wrapper for _CheckElem.""" + return PayloadChecker._CheckElem(msg, submsg_name, report, False, True) + + @staticmethod + def _CheckPresentIff(val1, val2, name1, name2, obj_name): + """Checks that val1 is None iff val2 is None. + + Args: + val1: first value to be compared. + val2: second value to be compared. + name1: name of object holding the first value. + name2: name of object holding the second value. + obj_name: Name of the object containing these values. + + Raises: + error.PayloadError if assertion does not hold. 
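+
+    Example:
+      _CheckPresentIff(1, None, 'signatures_offset', 'signatures_size',
+                       'manifest') raises, since only the first value is set.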
+ """ + if None in (val1, val2) and val1 is not val2: + present, missing = (name1, name2) if val2 is None else (name2, name1) + raise error.PayloadError('%r present without %r%s.' % + (present, missing, + ' in ' + obj_name if obj_name else '')) + + @staticmethod + def _CheckPresentIffMany(vals, name, obj_name): + """Checks that a set of vals and names imply every other element. + + Args: + vals: The set of values to be compared. + name: The name of the objects holding the corresponding value. + obj_name: Name of the object containing these values. + + Raises: + error.PayloadError if assertion does not hold. + """ + if any(vals) and not all(vals): + raise error.PayloadError('%r is not present in all values%s.' % + (name, ' in ' + obj_name if obj_name else '')) + + @staticmethod + def _Run(cmd, send_data=None): + """Runs a subprocess, returns its output. + + Args: + cmd: Sequence of command-line argument for invoking the subprocess. + send_data: Data to feed to the process via its stdin. + + Returns: + A tuple containing the stdout and stderr output of the process. + """ + run_process = subprocess.Popen(cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + try: + result = run_process.communicate(input=send_data) + finally: + exit_code = run_process.wait() + + if exit_code: + raise RuntimeError('Subprocess %r failed with code %r.' % + (cmd, exit_code)) + + return result + + @staticmethod + def _CheckSha256Signature(sig_data, pubkey_file_name, actual_hash, sig_name): + """Verifies an actual hash against a signed one. + + Args: + sig_data: The raw signature data. + pubkey_file_name: Public key used for verifying signature. + actual_hash: The actual hash digest. + sig_name: Signature name for error reporting. + + Raises: + error.PayloadError if signature could not be verified. + """ + if len(sig_data) != 256: + raise error.PayloadError( + '%s: signature size (%d) not as expected (256).' % + (sig_name, len(sig_data))) + signed_data, _ = PayloadChecker._Run( + ['openssl', 'rsautl', '-verify', '-pubin', '-inkey', pubkey_file_name], + send_data=sig_data) + + if len(signed_data) != len(common.SIG_ASN1_HEADER) + 32: + raise error.PayloadError('%s: unexpected signed data length (%d).' % + (sig_name, len(signed_data))) + + if not signed_data.startswith(common.SIG_ASN1_HEADER): + raise error.PayloadError('%s: not containing standard ASN.1 prefix.' % + sig_name) + + signed_hash = signed_data[len(common.SIG_ASN1_HEADER):] + if signed_hash != actual_hash: + raise error.PayloadError( + '%s: signed hash (%s) different from actual (%s).' % + (sig_name, common.FormatSha256(signed_hash), + common.FormatSha256(actual_hash))) + + @staticmethod + def _CheckBlocksFitLength(length, num_blocks, block_size, length_name, + block_name=None): + """Checks that a given length fits given block space. + + This ensures that the number of blocks allocated is appropriate for the + length of the data residing in these blocks. + + Args: + length: The actual length of the data. + num_blocks: The number of blocks allocated for it. + block_size: The size of each block in bytes. + length_name: Name of length (used for error reporting). + block_name: Name of block (used for error reporting). + + Raises: + error.PayloadError if the aforementioned invariant is not satisfied. + """ + # Check: length <= num_blocks * block_size. + if length > num_blocks * block_size: + raise error.PayloadError( + '%s (%d) > num %sblocks (%d) * block_size (%d).' 
% + (length_name, length, block_name or '', num_blocks, block_size)) + + # Check: length > (num_blocks - 1) * block_size. + if length <= (num_blocks - 1) * block_size: + raise error.PayloadError( + '%s (%d) <= (num %sblocks - 1 (%d)) * block_size (%d).' % + (length_name, length, block_name or '', num_blocks - 1, block_size)) + + def _CheckManifestMinorVersion(self, report): + """Checks the payload manifest minor_version field. + + Args: + report: The report object to add to. + + Raises: + error.PayloadError if any of the checks fail. + """ + self.minor_version = self._CheckOptionalField(self.payload.manifest, + 'minor_version', report) + if self.minor_version in _SUPPORTED_MINOR_VERSIONS: + if self.payload_type not in _SUPPORTED_MINOR_VERSIONS[self.minor_version]: + raise error.PayloadError( + 'Minor version %d not compatible with payload type %s.' % + (self.minor_version, self.payload_type)) + elif self.minor_version is None: + raise error.PayloadError('Minor version is not set.') + else: + raise error.PayloadError('Unsupported minor version: %d' % + self.minor_version) + + def _CheckManifest(self, report, part_sizes=None): + """Checks the payload manifest. + + Args: + report: A report object to add to. + part_sizes: Map of partition label to partition size in bytes. + + Returns: + A tuple consisting of the partition block size used during the update + (integer), the signatures block offset and size. + + Raises: + error.PayloadError if any of the checks fail. + """ + self.major_version = self.payload.header.version + + part_sizes = part_sizes or collections.defaultdict(int) + manifest = self.payload.manifest + report.AddSection('manifest') + + # Check: block_size must exist and match the expected value. + actual_block_size = self._CheckMandatoryField(manifest, 'block_size', + report, 'manifest') + if actual_block_size != self.block_size: + raise error.PayloadError('Block_size (%d) not as expected (%d).' % + (actual_block_size, self.block_size)) + + # Check: signatures_offset <==> signatures_size. + self.sigs_offset = self._CheckOptionalField(manifest, 'signatures_offset', + report) + self.sigs_size = self._CheckOptionalField(manifest, 'signatures_size', + report) + self._CheckPresentIff(self.sigs_offset, self.sigs_size, + 'signatures_offset', 'signatures_size', 'manifest') + + for part in manifest.partitions: + name = part.partition_name + self.old_part_info[name] = self._CheckOptionalSubMsg( + part, 'old_partition_info', report) + self.new_part_info[name] = self._CheckMandatorySubMsg( + part, 'new_partition_info', report, 'manifest.partitions') + + # Check: Old-style partition infos should not be specified. + for _, part in common.CROS_PARTITIONS: + self._CheckElemNotPresent(manifest, 'old_%s_info' % part, 'manifest') + self._CheckElemNotPresent(manifest, 'new_%s_info' % part, 'manifest') + + # Check: If old_partition_info is specified anywhere, it must be + # specified everywhere. + old_part_msgs = [part.msg for part in self.old_part_info.values() if part] + self._CheckPresentIffMany(old_part_msgs, 'old_partition_info', + 'manifest.partitions') + + is_delta = any(part and part.msg for part in self.old_part_info.values()) + if is_delta: + # Assert/mark delta payload. + if self.payload_type == _TYPE_FULL: + raise error.PayloadError( + 'Apparent full payload contains old_{kernel,rootfs}_info.') + self.payload_type = _TYPE_DELTA + + for part, (msg, part_report) in self.old_part_info.items(): + # Check: {size, hash} present in old_{kernel,rootfs}_info. 
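+        # (Illustration: for a partition named 'root', the field reported
+        # here is 'old_root_info'; both its 'size' and 'hash' sub-fields are
+        # mandatory once old_partition_info is present.)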
+ field = 'old_%s_info' % part + self.old_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', + part_report, field) + self._CheckMandatoryField(msg, 'hash', part_report, field, + convert=common.FormatSha256) + + # Check: old_{kernel,rootfs} size must fit in respective partition. + if self.old_fs_sizes[part] > part_sizes[part] > 0: + raise error.PayloadError( + 'Old %s content (%d) exceed partition size (%d).' % + (part, self.old_fs_sizes[part], part_sizes[part])) + else: + # Assert/mark full payload. + if self.payload_type == _TYPE_DELTA: + raise error.PayloadError( + 'Apparent delta payload missing old_{kernel,rootfs}_info.') + self.payload_type = _TYPE_FULL + + # Check: new_{kernel,rootfs}_info present; contains {size, hash}. + for part, (msg, part_report) in self.new_part_info.items(): + field = 'new_%s_info' % part + self.new_fs_sizes[part] = self._CheckMandatoryField(msg, 'size', + part_report, field) + self._CheckMandatoryField(msg, 'hash', part_report, field, + convert=common.FormatSha256) + + # Check: new_{kernel,rootfs} size must fit in respective partition. + if self.new_fs_sizes[part] > part_sizes[part] > 0: + raise error.PayloadError( + 'New %s content (%d) exceed partition size (%d).' % + (part, self.new_fs_sizes[part], part_sizes[part])) + + # Check: minor_version makes sense for the payload type. This check should + # run after the payload type has been set. + self._CheckManifestMinorVersion(report) + + def _CheckLength(self, length, total_blocks, op_name, length_name): + """Checks whether a length matches the space designated in extents. + + Args: + length: The total length of the data. + total_blocks: The total number of blocks in extents. + op_name: Operation name (for error reporting). + length_name: Length name (for error reporting). + + Raises: + error.PayloadError is there a problem with the length. + """ + # Check: length is non-zero. + if length == 0: + raise error.PayloadError('%s: %s is zero.' % (op_name, length_name)) + + # Check that length matches number of blocks. + self._CheckBlocksFitLength(length, total_blocks, self.block_size, + '%s: %s' % (op_name, length_name)) + + def _CheckExtents(self, extents, usable_size, block_counters, name): + """Checks a sequence of extents. + + Args: + extents: The sequence of extents to check. + usable_size: The usable size of the partition to which the extents apply. + block_counters: Array of counters corresponding to the number of blocks. + name: The name of the extent block. + + Returns: + The total number of blocks in the extents. + + Raises: + error.PayloadError if any of the entailed checks fails. + """ + total_num_blocks = 0 + for ex, ex_name in common.ExtentIter(extents, name): + # Check: Mandatory fields. + start_block = PayloadChecker._CheckMandatoryField(ex, 'start_block', + None, ex_name) + num_blocks = PayloadChecker._CheckMandatoryField(ex, 'num_blocks', None, + ex_name) + end_block = start_block + num_blocks + + # Check: num_blocks > 0. + if num_blocks == 0: + raise error.PayloadError('%s: extent length is zero.' % ex_name) + + # Check: Make sure we're within the partition limit. + if usable_size and end_block * self.block_size > usable_size: + raise error.PayloadError( + '%s: extent (%s) exceeds usable partition size (%d).' % + (ex_name, common.FormatExtent(ex, self.block_size), usable_size)) + + # Record block usage. 
+ for i in range(start_block, end_block): + block_counters[i] += 1 + + total_num_blocks += num_blocks + + return total_num_blocks + + def _CheckReplaceOperation(self, op, data_length, total_dst_blocks, op_name): + """Specific checks for REPLACE/REPLACE_BZ/REPLACE_XZ operations. + + Args: + op: The operation object from the manifest. + data_length: The length of the data blob associated with the operation. + total_dst_blocks: Total number of blocks in dst_extents. + op_name: Operation name for error reporting. + + Raises: + error.PayloadError if any check fails. + """ + # Check: total_dst_blocks is not a floating point. + if isinstance(total_dst_blocks, float): + raise error.PayloadError('%s: contains invalid data type of ' + 'total_dst_blocks.' % op_name) + + # Check: Does not contain src extents. + if op.src_extents: + raise error.PayloadError('%s: contains src_extents.' % op_name) + + # Check: Contains data. + if data_length is None: + raise error.PayloadError('%s: missing data_{offset,length}.' % op_name) + + if op.type == common.OpType.REPLACE: + PayloadChecker._CheckBlocksFitLength(data_length, total_dst_blocks, + self.block_size, + op_name + '.data_length', 'dst') + else: + # Check: data_length must be smaller than the allotted dst blocks. + if data_length >= total_dst_blocks * self.block_size: + raise error.PayloadError( + '%s: data_length (%d) must be less than allotted dst block ' + 'space (%d * %d).' % + (op_name, data_length, total_dst_blocks, self.block_size)) + + def _CheckZeroOperation(self, op, op_name): + """Specific checks for ZERO operations. + + Args: + op: The operation object from the manifest. + op_name: Operation name for error reporting. + + Raises: + error.PayloadError if any check fails. + """ + # Check: Does not contain src extents, data_length and data_offset. + if op.src_extents: + raise error.PayloadError('%s: contains src_extents.' % op_name) + if op.data_length: + raise error.PayloadError('%s: contains data_length.' % op_name) + if op.data_offset: + raise error.PayloadError('%s: contains data_offset.' % op_name) + + def _CheckAnyDiffOperation(self, op, data_length, total_dst_blocks, op_name): + """Specific checks for SOURCE_BSDIFF, PUFFDIFF and BROTLI_BSDIFF + operations. + + Args: + op: The operation. + data_length: The length of the data blob associated with the operation. + total_dst_blocks: Total number of blocks in dst_extents. + op_name: Operation name for error reporting. + + Raises: + error.PayloadError if any check fails. + """ + # Check: data_{offset,length} present. + if data_length is None: + raise error.PayloadError('%s: missing data_{offset,length}.' % op_name) + + # Check: data_length is strictly smaller than the allotted dst blocks. + if data_length >= total_dst_blocks * self.block_size: + raise error.PayloadError( + '%s: data_length (%d) must be smaller than allotted dst space ' + '(%d * %d = %d).' % + (op_name, data_length, total_dst_blocks, self.block_size, + total_dst_blocks * self.block_size)) + + # Check the existence of src_length and dst_length for legacy bsdiffs. + if op.type == common.OpType.SOURCE_BSDIFF and self.minor_version <= 3: + if not op.HasField('src_length') or not op.HasField('dst_length'): + raise error.PayloadError('%s: require {src,dst}_length.' % op_name) + else: + if op.HasField('src_length') or op.HasField('dst_length'): + raise error.PayloadError('%s: unneeded {src,dst}_length.' 
% op_name) + + def _CheckSourceCopyOperation(self, data_offset, total_src_blocks, + total_dst_blocks, op_name): + """Specific checks for SOURCE_COPY. + + Args: + data_offset: The offset of a data blob for the operation. + total_src_blocks: Total number of blocks in src_extents. + total_dst_blocks: Total number of blocks in dst_extents. + op_name: Operation name for error reporting. + + Raises: + error.PayloadError if any check fails. + """ + # Check: No data_{offset,length}. + if data_offset is not None: + raise error.PayloadError('%s: contains data_{offset,length}.' % op_name) + + # Check: total_src_blocks == total_dst_blocks. + if total_src_blocks != total_dst_blocks: + raise error.PayloadError( + '%s: total src blocks (%d) != total dst blocks (%d).' % + (op_name, total_src_blocks, total_dst_blocks)) + + def _CheckAnySourceOperation(self, op, total_src_blocks, op_name): + """Specific checks for SOURCE_* operations. + + Args: + op: The operation object from the manifest. + total_src_blocks: Total number of blocks in src_extents. + op_name: Operation name for error reporting. + + Raises: + error.PayloadError if any check fails. + """ + # Check: total_src_blocks != 0. + if total_src_blocks == 0: + raise error.PayloadError('%s: no src blocks in a source op.' % op_name) + + # Check: src_sha256_hash present in minor version >= 3. + if self.minor_version >= 3 and op.src_sha256_hash is None: + raise error.PayloadError('%s: source hash missing.' % op_name) + + def _CheckOperation(self, op, op_name, old_block_counters, new_block_counters, + old_usable_size, new_usable_size, prev_data_offset, + blob_hash_counts): + """Checks a single update operation. + + Args: + op: The operation object. + op_name: Operation name string for error reporting. + old_block_counters: Arrays of block read counters. + new_block_counters: Arrays of block write counters. + old_usable_size: The overall usable size for src data in bytes. + new_usable_size: The overall usable size for dst data in bytes. + prev_data_offset: Offset of last used data bytes. + blob_hash_counts: Counters for hashed/unhashed blobs. + + Returns: + The amount of data blob associated with the operation. + + Raises: + error.PayloadError if any check has failed. + """ + # Check extents. + total_src_blocks = self._CheckExtents( + op.src_extents, old_usable_size, old_block_counters, + op_name + '.src_extents') + total_dst_blocks = self._CheckExtents( + op.dst_extents, new_usable_size, new_block_counters, + op_name + '.dst_extents') + + # Check: data_offset present <==> data_length present. + data_offset = self._CheckOptionalField(op, 'data_offset', None) + data_length = self._CheckOptionalField(op, 'data_length', None) + self._CheckPresentIff(data_offset, data_length, 'data_offset', + 'data_length', op_name) + + # Check: At least one dst_extent. + if not op.dst_extents: + raise error.PayloadError('%s: dst_extents is empty.' % op_name) + + # Check {src,dst}_length, if present. + if op.HasField('src_length'): + self._CheckLength(op.src_length, total_src_blocks, op_name, 'src_length') + if op.HasField('dst_length'): + self._CheckLength(op.dst_length, total_dst_blocks, op_name, 'dst_length') + + if op.HasField('data_sha256_hash'): + blob_hash_counts['hashed'] += 1 + + # Check: Operation carries data. + if data_offset is None: + raise error.PayloadError( + '%s: data_sha256_hash present but no data_{offset,length}.' % + op_name) + + # Check: Hash verifies correctly. 
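+      # (The blob is re-read from the payload's data section and its SHA-256
+      # digest is compared against the operation's data_sha256_hash.)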
+ actual_hash = hashlib.sha256(self.payload.ReadDataBlob(data_offset, + data_length)) + if op.data_sha256_hash != actual_hash.digest(): + raise error.PayloadError( + '%s: data_sha256_hash (%s) does not match actual hash (%s).' % + (op_name, common.FormatSha256(op.data_sha256_hash), + common.FormatSha256(actual_hash.digest()))) + elif data_offset is not None: + if self.allow_unhashed: + blob_hash_counts['unhashed'] += 1 + else: + raise error.PayloadError('%s: unhashed operation not allowed.' % + op_name) + + if data_offset is not None: + # Check: Contiguous use of data section. + if data_offset != prev_data_offset: + raise error.PayloadError( + '%s: data offset (%d) not matching amount used so far (%d).' % + (op_name, data_offset, prev_data_offset)) + + # Type-specific checks. + if op.type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, + common.OpType.REPLACE_XZ): + self._CheckReplaceOperation(op, data_length, total_dst_blocks, op_name) + elif op.type == common.OpType.ZERO and self.minor_version >= 4: + self._CheckZeroOperation(op, op_name) + elif op.type == common.OpType.SOURCE_COPY and self.minor_version >= 2: + self._CheckSourceCopyOperation(data_offset, total_src_blocks, + total_dst_blocks, op_name) + self._CheckAnySourceOperation(op, total_src_blocks, op_name) + elif op.type == common.OpType.SOURCE_BSDIFF and self.minor_version >= 2: + self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) + self._CheckAnySourceOperation(op, total_src_blocks, op_name) + elif op.type == common.OpType.BROTLI_BSDIFF and self.minor_version >= 4: + self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) + self._CheckAnySourceOperation(op, total_src_blocks, op_name) + elif op.type == common.OpType.PUFFDIFF and self.minor_version >= 5: + self._CheckAnyDiffOperation(op, data_length, total_dst_blocks, op_name) + self._CheckAnySourceOperation(op, total_src_blocks, op_name) + else: + raise error.PayloadError( + 'Operation %s (type %d) not allowed in minor version %d' % + (op_name, op.type, self.minor_version)) + return data_length if data_length is not None else 0 + + def _SizeToNumBlocks(self, size): + """Returns the number of blocks needed to contain a given byte size.""" + return (size + self.block_size - 1) // self.block_size + + def _AllocBlockCounters(self, total_size): + """Returns a freshly initialized array of block counters. + + Note that the generated array is not portable as is due to byte-ordering + issues, hence it should not be serialized. + + Args: + total_size: The total block size in bytes. + + Returns: + An array of unsigned short elements initialized to zero, one for each of + the blocks necessary for containing the partition. + """ + return array.array('H', + itertools.repeat(0, self._SizeToNumBlocks(total_size))) + + def _CheckOperations(self, operations, report, base_name, old_fs_size, + new_fs_size, old_usable_size, new_usable_size, + prev_data_offset): + """Checks a sequence of update operations. + + Args: + operations: The sequence of operations to check. + report: The report object to add to. + base_name: The name of the operation block. + old_fs_size: The old filesystem size in bytes. + new_fs_size: The new filesystem size in bytes. + old_usable_size: The overall usable size of the old partition in bytes. + new_usable_size: The overall usable size of the new partition in bytes. + prev_data_offset: Offset of last used data bytes. + + Returns: + The total data blob size used. + + Raises: + error.PayloadError if any of the checks fails. 
+ """ + # The total size of data blobs used by operations scanned thus far. + total_data_used = 0 + # Counts of specific operation types. + op_counts = { + common.OpType.REPLACE: 0, + common.OpType.REPLACE_BZ: 0, + common.OpType.REPLACE_XZ: 0, + common.OpType.ZERO: 0, + common.OpType.SOURCE_COPY: 0, + common.OpType.SOURCE_BSDIFF: 0, + common.OpType.PUFFDIFF: 0, + common.OpType.BROTLI_BSDIFF: 0, + } + # Total blob sizes for each operation type. + op_blob_totals = { + common.OpType.REPLACE: 0, + common.OpType.REPLACE_BZ: 0, + common.OpType.REPLACE_XZ: 0, + # SOURCE_COPY operations don't have blobs. + common.OpType.SOURCE_BSDIFF: 0, + common.OpType.PUFFDIFF: 0, + common.OpType.BROTLI_BSDIFF: 0, + } + # Counts of hashed vs unhashed operations. + blob_hash_counts = { + 'hashed': 0, + 'unhashed': 0, + } + + # Allocate old and new block counters. + old_block_counters = (self._AllocBlockCounters(old_usable_size) + if old_fs_size else None) + new_block_counters = self._AllocBlockCounters(new_usable_size) + + # Process and verify each operation. + op_num = 0 + for op, op_name in common.OperationIter(operations, base_name): + op_num += 1 + + # Check: Type is valid. + if op.type not in op_counts: + raise error.PayloadError('%s: invalid type (%d).' % (op_name, op.type)) + op_counts[op.type] += 1 + + curr_data_used = self._CheckOperation( + op, op_name, old_block_counters, new_block_counters, + old_usable_size, new_usable_size, + prev_data_offset + total_data_used, blob_hash_counts) + if curr_data_used: + op_blob_totals[op.type] += curr_data_used + total_data_used += curr_data_used + + # Report totals and breakdown statistics. + report.AddField('total operations', op_num) + report.AddField( + None, + histogram.Histogram.FromCountDict(op_counts, + key_names=common.OpType.NAMES), + indent=1) + report.AddField('total blobs', sum(blob_hash_counts.values())) + report.AddField(None, + histogram.Histogram.FromCountDict(blob_hash_counts), + indent=1) + report.AddField('total blob size', _AddHumanReadableSize(total_data_used)) + report.AddField( + None, + histogram.Histogram.FromCountDict(op_blob_totals, + formatter=_AddHumanReadableSize, + key_names=common.OpType.NAMES), + indent=1) + + # Report read/write histograms. + if old_block_counters: + report.AddField('block read hist', + histogram.Histogram.FromKeyList(old_block_counters), + linebreak=True, indent=1) + + new_write_hist = histogram.Histogram.FromKeyList( + new_block_counters[:self._SizeToNumBlocks(new_fs_size)]) + report.AddField('block write hist', new_write_hist, linebreak=True, + indent=1) + + # Check: Full update must write each dst block once. + if self.payload_type == _TYPE_FULL and new_write_hist.GetKeys() != [1]: + raise error.PayloadError( + '%s: not all blocks written exactly once during full update.' % + base_name) + + return total_data_used + + def _CheckSignatures(self, report, pubkey_file_name): + """Checks a payload's signature block.""" + sigs_raw = self.payload.ReadDataBlob(self.sigs_offset, self.sigs_size) + sigs = update_metadata_pb2.Signatures() + sigs.ParseFromString(sigs_raw) + report.AddSection('signatures') + + # Check: At least one signature present. + if not sigs.signatures: + raise error.PayloadError('Signature block is empty.') + + # Check that we don't have the signature operation blob at the end (used to + # be for major version 1). 
+ last_partition = self.payload.manifest.partitions[-1] + if last_partition.operations: + last_op = last_partition.operations[-1] + # Check: signatures_{offset,size} must match the last (fake) operation. + if (last_op.type == common.OpType.REPLACE and + last_op.data_offset == self.sigs_offset and + last_op.data_length == self.sigs_size): + raise error.PayloadError('It seems like the last operation is the ' + 'signature blob. This is an invalid payload.') + + # Compute the checksum of all data up to signature blob. + # TODO(garnold) we're re-reading the whole data section into a string + # just to compute the checksum; instead, we could do it incrementally as + # we read the blobs one-by-one, under the assumption that we're reading + # them in order (which currently holds). This should be reconsidered. + payload_hasher = self.payload.manifest_hasher.copy() + common.Read(self.payload.payload_file, self.sigs_offset, + offset=self.payload.data_offset, hasher=payload_hasher) + + for sig, sig_name in common.SignatureIter(sigs.signatures, 'signatures'): + sig_report = report.AddSubReport(sig_name) + + # Check: Signature contains mandatory fields. + self._CheckMandatoryField(sig, 'version', sig_report, sig_name) + self._CheckMandatoryField(sig, 'data', None, sig_name) + sig_report.AddField('data len', len(sig.data)) + + # Check: Signatures pertains to actual payload hash. + if sig.version == 1: + self._CheckSha256Signature(sig.data, pubkey_file_name, + payload_hasher.digest(), sig_name) + else: + raise error.PayloadError('Unknown signature version (%d).' % + sig.version) + + def Run(self, pubkey_file_name=None, metadata_sig_file=None, metadata_size=0, + part_sizes=None, report_out_file=None): + """Checker entry point, invoking all checks. + + Args: + pubkey_file_name: Public key used for signature verification. + metadata_sig_file: Metadata signature, if verification is desired. + metadata_size: Metadata size, if verification is desired. + part_sizes: Mapping of partition label to size in bytes (default: infer + based on payload type and version or filesystem). + report_out_file: File object to dump the report to. + + Raises: + error.PayloadError if payload verification failed. + """ + if not pubkey_file_name: + pubkey_file_name = _DEFAULT_PUBKEY_FILE_NAME + + report = _PayloadReport() + + # Get payload file size. + self.payload.payload_file.seek(0, 2) + payload_file_size = self.payload.payload_file.tell() + self.payload.ResetFile() + + try: + # Check metadata_size (if provided). + if metadata_size and self.payload.metadata_size != metadata_size: + raise error.PayloadError('Invalid payload metadata size in payload(%d) ' + 'vs given(%d)' % (self.payload.metadata_size, + metadata_size)) + + # Check metadata signature (if provided). + if metadata_sig_file: + metadata_sig = base64.b64decode(metadata_sig_file.read()) + self._CheckSha256Signature(metadata_sig, pubkey_file_name, + self.payload.manifest_hasher.digest(), + 'metadata signature') + + # Part 1: Check the file header. + report.AddSection('header') + # Check: Payload version is valid. + if self.payload.header.version not in (1, 2): + raise error.PayloadError('Unknown payload version (%d).' % + self.payload.header.version) + report.AddField('version', self.payload.header.version) + report.AddField('manifest len', self.payload.header.manifest_len) + + # Part 2: Check the manifest. 
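+      # (_CheckManifest() also settles self.payload_type -- full vs. delta --
+      # which the assertion and the operation checks below rely on.)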
+ self._CheckManifest(report, part_sizes) + assert self.payload_type, 'payload type should be known by now' + + # Make sure deprecated values are not present in the payload. + for field in ('install_operations', 'kernel_install_operations'): + self._CheckRepeatedElemNotPresent(self.payload.manifest, field, + 'manifest') + for field in ('old_kernel_info', 'old_rootfs_info', + 'new_kernel_info', 'new_rootfs_info'): + self._CheckElemNotPresent(self.payload.manifest, field, 'manifest') + + total_blob_size = 0 + for part, operations in ((p.partition_name, p.operations) + for p in self.payload.manifest.partitions): + report.AddSection('%s operations' % part) + + new_fs_usable_size = self.new_fs_sizes[part] + old_fs_usable_size = self.old_fs_sizes[part] + + if part_sizes is not None and part_sizes.get(part, None): + new_fs_usable_size = old_fs_usable_size = part_sizes[part] + + # TODO(chromium:243559) only default to the filesystem size if no + # explicit size provided *and* the partition size is not embedded in the + # payload; see issue for more details. + total_blob_size += self._CheckOperations( + operations, report, '%s_install_operations' % part, + self.old_fs_sizes[part], self.new_fs_sizes[part], + old_fs_usable_size, new_fs_usable_size, total_blob_size) + + # Check: Operations data reach the end of the payload file. + used_payload_size = self.payload.data_offset + total_blob_size + # Major versions 2 and higher have a signature at the end, so it should be + # considered in the total size of the image. + if self.sigs_size: + used_payload_size += self.sigs_size + + if used_payload_size != payload_file_size: + raise error.PayloadError( + 'Used payload size (%d) different from actual file size (%d).' % + (used_payload_size, payload_file_size)) + + # Part 4: Handle payload signatures message. + if self.check_payload_sig and self.sigs_size: + self._CheckSignatures(report, pubkey_file_name) + + # Part 5: Summary. + report.AddSection('summary') + report.AddField('update type', self.payload_type) + + report.Finalize() + finally: + if report_out_file: + report.Dump(report_out_file) diff --git a/update-payload-extractor/update_payload/checker_unittest.py b/update-payload-extractor/update_payload/checker_unittest.py new file mode 100755 index 0000000..993b785 --- /dev/null +++ b/update-payload-extractor/update_payload/checker_unittest.py @@ -0,0 +1,1252 @@ +#!/usr/bin/env python +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""Unit testing checker.py.""" + +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + +import array +import collections +import hashlib +import io +import itertools +import os +import unittest + +from six.moves import zip + +import mock # pylint: disable=import-error + +from update_payload import checker +from update_payload import common +from update_payload import test_utils +from update_payload import update_metadata_pb2 +from update_payload.error import PayloadError +from update_payload.payload import Payload # Avoid name conflicts later. + + +def _OpTypeByName(op_name): + """Returns the type of an operation from its name.""" + op_name_to_type = { + 'REPLACE': common.OpType.REPLACE, + 'REPLACE_BZ': common.OpType.REPLACE_BZ, + 'SOURCE_COPY': common.OpType.SOURCE_COPY, + 'SOURCE_BSDIFF': common.OpType.SOURCE_BSDIFF, + 'ZERO': common.OpType.ZERO, + 'DISCARD': common.OpType.DISCARD, + 'REPLACE_XZ': common.OpType.REPLACE_XZ, + 'PUFFDIFF': common.OpType.PUFFDIFF, + 'BROTLI_BSDIFF': common.OpType.BROTLI_BSDIFF, + } + return op_name_to_type[op_name] + + +def _GetPayloadChecker(payload_gen_write_to_file_func, payload_gen_dargs=None, + checker_init_dargs=None): + """Returns a payload checker from a given payload generator.""" + if payload_gen_dargs is None: + payload_gen_dargs = {} + if checker_init_dargs is None: + checker_init_dargs = {} + + payload_file = io.BytesIO() + payload_gen_write_to_file_func(payload_file, **payload_gen_dargs) + payload_file.seek(0) + payload = Payload(payload_file) + payload.Init() + return checker.PayloadChecker(payload, **checker_init_dargs) + + +def _GetPayloadCheckerWithData(payload_gen): + """Returns a payload checker from a given payload generator.""" + payload_file = io.BytesIO() + payload_gen.WriteToFile(payload_file) + payload_file.seek(0) + payload = Payload(payload_file) + payload.Init() + return checker.PayloadChecker(payload) + + +# This class doesn't need an __init__(). +# pylint: disable=W0232 +# Unit testing is all about running protected methods. +# pylint: disable=W0212 +# Don't bark about missing members of classes you cannot import. +# pylint: disable=E1101 +class PayloadCheckerTest(unittest.TestCase): + """Tests the PayloadChecker class. + + In addition to ordinary testFoo() methods, which are automatically invoked by + the unittest framework, in this class we make use of DoBarTest() calls that + implement parametric tests of certain features. In order to invoke each test, + which embodies a unique combination of parameter values, as a complete unit + test, we perform explicit enumeration of the parameter space and create + individual invocation contexts for each, which are then bound as + testBar__param1=val1__param2=val2(). The enumeration of parameter spaces for + all such tests is done in AddAllParametricTests(). + """ + + def setUp(self): + """setUp function for unittest testcase""" + self.mock_checks = [] + + def tearDown(self): + """tearDown function for unittest testcase""" + # Verify that all mock functions were called. + for check in self.mock_checks: + check.mock_fn.assert_called_once_with(*check.exp_args, **check.exp_kwargs) + + class MockChecksAtTearDown(object): + """Mock data storage. + + This class stores the mock functions and its arguments to be checked at a + later point. 
+ """ + def __init__(self, mock_fn, *args, **kwargs): + self.mock_fn = mock_fn + self.exp_args = args + self.exp_kwargs = kwargs + + def addPostCheckForMockFunction(self, mock_fn, *args, **kwargs): + """Store a mock function and its arguments to self.mock_checks + + Args: + mock_fn: mock function object + args: expected positional arguments for the mock_fn + kwargs: expected named arguments for the mock_fn + """ + self.mock_checks.append(self.MockChecksAtTearDown(mock_fn, *args, **kwargs)) + + def MockPayload(self): + """Create a mock payload object, complete with a mock manifest.""" + payload = mock.create_autospec(Payload) + payload.is_init = True + payload.manifest = mock.create_autospec( + update_metadata_pb2.DeltaArchiveManifest) + return payload + + @staticmethod + def NewExtent(start_block, num_blocks): + """Returns an Extent message. + + Each of the provided fields is set iff it is >= 0; otherwise, it's left at + its default state. + + Args: + start_block: The starting block of the extent. + num_blocks: The number of blocks in the extent. + + Returns: + An Extent message. + """ + ex = update_metadata_pb2.Extent() + if start_block >= 0: + ex.start_block = start_block + if num_blocks >= 0: + ex.num_blocks = num_blocks + return ex + + @staticmethod + def NewExtentList(*args): + """Returns an list of extents. + + Args: + *args: (start_block, num_blocks) pairs defining the extents. + + Returns: + A list of Extent objects. + """ + ex_list = [] + for start_block, num_blocks in args: + ex_list.append(PayloadCheckerTest.NewExtent(start_block, num_blocks)) + return ex_list + + @staticmethod + def AddToMessage(repeated_field, field_vals): + for field_val in field_vals: + new_field = repeated_field.add() + new_field.CopyFrom(field_val) + + def SetupAddElemTest(self, is_present, is_submsg, convert=str, + linebreak=False, indent=0): + """Setup for testing of _CheckElem() and its derivatives. + + Args: + is_present: Whether or not the element is found in the message. + is_submsg: Whether the element is a sub-message itself. + convert: A representation conversion function. + linebreak: Whether or not a linebreak is to be used in the report. + indent: Indentation used for the report. + + Returns: + msg: A mock message object. + report: A mock report object. + subreport: A mock sub-report object. + name: An element name to check. + val: Expected element value. + """ + name = 'foo' + val = 'fake submsg' if is_submsg else 'fake field' + subreport = 'fake subreport' + + # Create a mock message. + msg = mock.create_autospec(update_metadata_pb2._message.Message) + self.addPostCheckForMockFunction(msg.HasField, name) + msg.HasField.return_value = is_present + setattr(msg, name, val) + # Create a mock report. + report = mock.create_autospec(checker._PayloadReport) + if is_present: + if is_submsg: + self.addPostCheckForMockFunction(report.AddSubReport, name) + report.AddSubReport.return_value = subreport + else: + self.addPostCheckForMockFunction(report.AddField, name, convert(val), + linebreak=linebreak, indent=indent) + + return (msg, report, subreport, name, val) + + def DoAddElemTest(self, is_present, is_mandatory, is_submsg, convert, + linebreak, indent): + """Parametric testing of _CheckElem(). + + Args: + is_present: Whether or not the element is found in the message. + is_mandatory: Whether or not it's a mandatory element. + is_submsg: Whether the element is a sub-message itself. + convert: A representation conversion function. + linebreak: Whether or not a linebreak is to be used in the report. 
+ indent: Indentation used for the report. + """ + msg, report, subreport, name, val = self.SetupAddElemTest( + is_present, is_submsg, convert, linebreak, indent) + + args = (msg, name, report, is_mandatory, is_submsg) + kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent} + if is_mandatory and not is_present: + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckElem, *args, **kwargs) + else: + ret_val, ret_subreport = checker.PayloadChecker._CheckElem(*args, + **kwargs) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present and is_submsg else None, + ret_subreport) + + def DoAddFieldTest(self, is_mandatory, is_present, convert, linebreak, + indent): + """Parametric testing of _Check{Mandatory,Optional}Field(). + + Args: + is_mandatory: Whether we're testing a mandatory call. + is_present: Whether or not the element is found in the message. + convert: A representation conversion function. + linebreak: Whether or not a linebreak is to be used in the report. + indent: Indentation used for the report. + """ + msg, report, _, name, val = self.SetupAddElemTest( + is_present, False, convert, linebreak, indent) + + # Prepare for invocation of the tested method. + args = [msg, name, report] + kwargs = {'convert': convert, 'linebreak': linebreak, 'indent': indent} + if is_mandatory: + args.append('bar') + tested_func = checker.PayloadChecker._CheckMandatoryField + else: + tested_func = checker.PayloadChecker._CheckOptionalField + + # Test the method call. + if is_mandatory and not is_present: + self.assertRaises(PayloadError, tested_func, *args, **kwargs) + else: + ret_val = tested_func(*args, **kwargs) + self.assertEqual(val if is_present else None, ret_val) + + def DoAddSubMsgTest(self, is_mandatory, is_present): + """Parametrized testing of _Check{Mandatory,Optional}SubMsg(). + + Args: + is_mandatory: Whether we're testing a mandatory call. + is_present: Whether or not the element is found in the message. + """ + msg, report, subreport, name, val = self.SetupAddElemTest(is_present, True) + + # Prepare for invocation of the tested method. + args = [msg, name, report] + if is_mandatory: + args.append('bar') + tested_func = checker.PayloadChecker._CheckMandatorySubMsg + else: + tested_func = checker.PayloadChecker._CheckOptionalSubMsg + + # Test the method call. + if is_mandatory and not is_present: + self.assertRaises(PayloadError, tested_func, *args) + else: + ret_val, ret_subreport = tested_func(*args) + self.assertEqual(val if is_present else None, ret_val) + self.assertEqual(subreport if is_present else None, ret_subreport) + + def testCheckPresentIff(self): + """Tests _CheckPresentIff().""" + self.assertIsNone(checker.PayloadChecker._CheckPresentIff( + None, None, 'foo', 'bar', 'baz')) + self.assertIsNone(checker.PayloadChecker._CheckPresentIff( + 'a', 'b', 'foo', 'bar', 'baz')) + self.assertRaises(PayloadError, checker.PayloadChecker._CheckPresentIff, + 'a', None, 'foo', 'bar', 'baz') + self.assertRaises(PayloadError, checker.PayloadChecker._CheckPresentIff, + None, 'b', 'foo', 'bar', 'baz') + + def DoCheckSha256SignatureTest(self, expect_pass, expect_subprocess_call, + sig_data, sig_asn1_header, + returned_signed_hash, expected_signed_hash): + """Parametric testing of _CheckSha256SignatureTest(). + + Args: + expect_pass: Whether or not it should pass. + expect_subprocess_call: Whether to expect the openssl call to happen. + sig_data: The signature raw data. + sig_asn1_header: The ASN1 header. 
+ returned_signed_hash: The signed hash data retuned by openssl. + expected_signed_hash: The signed hash data to compare against. + """ + # Stub out the subprocess invocation. + with mock.patch.object(checker.PayloadChecker, '_Run') \ + as mock_payload_checker: + if expect_subprocess_call: + mock_payload_checker([], send_data=sig_data) + mock_payload_checker.return_value = ( + sig_asn1_header + returned_signed_hash, None) + + if expect_pass: + self.assertIsNone(checker.PayloadChecker._CheckSha256Signature( + sig_data, 'foo', expected_signed_hash, 'bar')) + else: + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckSha256Signature, + sig_data, 'foo', expected_signed_hash, 'bar') + + def testCheckSha256Signature_Pass(self): + """Tests _CheckSha256Signature(); pass case.""" + sig_data = 'fake-signature'.ljust(256) + signed_hash = hashlib.sha256(b'fake-data').digest() + self.DoCheckSha256SignatureTest(True, True, sig_data, + common.SIG_ASN1_HEADER, signed_hash, + signed_hash) + + def testCheckSha256Signature_FailBadSignature(self): + """Tests _CheckSha256Signature(); fails due to malformed signature.""" + sig_data = 'fake-signature' # Malformed (not 256 bytes in length). + signed_hash = hashlib.sha256(b'fake-data').digest() + self.DoCheckSha256SignatureTest(False, False, sig_data, + common.SIG_ASN1_HEADER, signed_hash, + signed_hash) + + def testCheckSha256Signature_FailBadOutputLength(self): + """Tests _CheckSha256Signature(); fails due to unexpected output length.""" + sig_data = 'fake-signature'.ljust(256) + signed_hash = b'fake-hash' # Malformed (not 32 bytes in length). + self.DoCheckSha256SignatureTest(False, True, sig_data, + common.SIG_ASN1_HEADER, signed_hash, + signed_hash) + + def testCheckSha256Signature_FailBadAsnHeader(self): + """Tests _CheckSha256Signature(); fails due to bad ASN1 header.""" + sig_data = 'fake-signature'.ljust(256) + signed_hash = hashlib.sha256(b'fake-data').digest() + bad_asn1_header = b'bad-asn-header'.ljust(len(common.SIG_ASN1_HEADER)) + self.DoCheckSha256SignatureTest(False, True, sig_data, bad_asn1_header, + signed_hash, signed_hash) + + def testCheckSha256Signature_FailBadHash(self): + """Tests _CheckSha256Signature(); fails due to bad hash returned.""" + sig_data = 'fake-signature'.ljust(256) + expected_signed_hash = hashlib.sha256(b'fake-data').digest() + returned_signed_hash = hashlib.sha256(b'bad-fake-data').digest() + self.DoCheckSha256SignatureTest(False, True, sig_data, + common.SIG_ASN1_HEADER, + expected_signed_hash, returned_signed_hash) + + def testCheckBlocksFitLength_Pass(self): + """Tests _CheckBlocksFitLength(); pass case.""" + self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength( + 64, 4, 16, 'foo')) + self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength( + 60, 4, 16, 'foo')) + self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength( + 49, 4, 16, 'foo')) + self.assertIsNone(checker.PayloadChecker._CheckBlocksFitLength( + 48, 3, 16, 'foo')) + + def testCheckBlocksFitLength_TooManyBlocks(self): + """Tests _CheckBlocksFitLength(); fails due to excess blocks.""" + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 64, 5, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 60, 5, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 49, 5, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 48, 4, 16, 'foo') + + def 
testCheckBlocksFitLength_TooFewBlocks(self): + """Tests _CheckBlocksFitLength(); fails due to insufficient blocks.""" + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 64, 3, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 60, 3, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 49, 3, 16, 'foo') + self.assertRaises(PayloadError, + checker.PayloadChecker._CheckBlocksFitLength, + 48, 2, 16, 'foo') + + def DoCheckManifestTest(self, fail_mismatched_block_size, fail_bad_sigs, + fail_mismatched_oki_ori, fail_bad_oki, fail_bad_ori, + fail_bad_nki, fail_bad_nri, fail_old_kernel_fs_size, + fail_old_rootfs_fs_size, fail_new_kernel_fs_size, + fail_new_rootfs_fs_size): + """Parametric testing of _CheckManifest(). + + Args: + fail_mismatched_block_size: Simulate a missing block_size field. + fail_bad_sigs: Make signatures descriptor inconsistent. + fail_mismatched_oki_ori: Make old rootfs/kernel info partially present. + fail_bad_oki: Tamper with old kernel info. + fail_bad_ori: Tamper with old rootfs info. + fail_bad_nki: Tamper with new kernel info. + fail_bad_nri: Tamper with new rootfs info. + fail_old_kernel_fs_size: Make old kernel fs size too big. + fail_old_rootfs_fs_size: Make old rootfs fs size too big. + fail_new_kernel_fs_size: Make new kernel fs size too big. + fail_new_rootfs_fs_size: Make new rootfs fs size too big. + """ + # Generate a test payload. For this test, we only care about the manifest + # and don't need any data blobs, hence we can use a plain paylaod generator + # (which also gives us more control on things that can be screwed up). + payload_gen = test_utils.PayloadGenerator() + + # Tamper with block size, if required. + if fail_mismatched_block_size: + payload_gen.SetBlockSize(test_utils.KiB(1)) + else: + payload_gen.SetBlockSize(test_utils.KiB(4)) + + # Add some operations. + payload_gen.AddOperation(common.ROOTFS, common.OpType.SOURCE_COPY, + src_extents=[(0, 16), (16, 497)], + dst_extents=[(16, 496), (0, 16)]) + payload_gen.AddOperation(common.KERNEL, common.OpType.SOURCE_COPY, + src_extents=[(0, 8), (8, 8)], + dst_extents=[(8, 8), (0, 8)]) + + # Set an invalid signatures block (offset but no size), if required. + if fail_bad_sigs: + payload_gen.SetSignatures(32, None) + + # Set partition / filesystem sizes. + rootfs_part_size = test_utils.MiB(8) + kernel_part_size = test_utils.KiB(512) + old_rootfs_fs_size = new_rootfs_fs_size = rootfs_part_size + old_kernel_fs_size = new_kernel_fs_size = kernel_part_size + if fail_old_kernel_fs_size: + old_kernel_fs_size += 100 + if fail_old_rootfs_fs_size: + old_rootfs_fs_size += 100 + if fail_new_kernel_fs_size: + new_kernel_fs_size += 100 + if fail_new_rootfs_fs_size: + new_rootfs_fs_size += 100 + + # Add old kernel/rootfs partition info, as required. + if fail_mismatched_oki_ori or fail_old_kernel_fs_size or fail_bad_oki: + oki_hash = (None if fail_bad_oki + else hashlib.sha256(b'fake-oki-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, False, old_kernel_fs_size, + oki_hash) + if not fail_mismatched_oki_ori and (fail_old_rootfs_fs_size or + fail_bad_ori): + ori_hash = (None if fail_bad_ori + else hashlib.sha256(b'fake-ori-content').digest()) + payload_gen.SetPartInfo(common.ROOTFS, False, old_rootfs_fs_size, + ori_hash) + + # Add new kernel/rootfs partition info. 
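+    # (Unlike the old_* info above, which is only added for the
+    # delta-flavored failure cases, new_partition_info is mandatory and is
+    # therefore set unconditionally.)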
+ payload_gen.SetPartInfo( + common.KERNEL, True, new_kernel_fs_size, + None if fail_bad_nki else hashlib.sha256(b'fake-nki-content').digest()) + payload_gen.SetPartInfo( + common.ROOTFS, True, new_rootfs_fs_size, + None if fail_bad_nri else hashlib.sha256(b'fake-nri-content').digest()) + + # Set the minor version. + payload_gen.SetMinorVersion(0) + + # Create the test object. + payload_checker = _GetPayloadChecker(payload_gen.WriteToFile) + report = checker._PayloadReport() + + should_fail = (fail_mismatched_block_size or fail_bad_sigs or + fail_mismatched_oki_ori or fail_bad_oki or fail_bad_ori or + fail_bad_nki or fail_bad_nri or fail_old_kernel_fs_size or + fail_old_rootfs_fs_size or fail_new_kernel_fs_size or + fail_new_rootfs_fs_size) + part_sizes = { + common.ROOTFS: rootfs_part_size, + common.KERNEL: kernel_part_size + } + + if should_fail: + self.assertRaises(PayloadError, payload_checker._CheckManifest, report, + part_sizes) + else: + self.assertIsNone(payload_checker._CheckManifest(report, part_sizes)) + + def testCheckLength(self): + """Tests _CheckLength().""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + + # Passes. + self.assertIsNone(payload_checker._CheckLength( + int(3.5 * block_size), 4, 'foo', 'bar')) + # Fails, too few blocks. + self.assertRaises(PayloadError, payload_checker._CheckLength, + int(3.5 * block_size), 3, 'foo', 'bar') + # Fails, too many blocks. + self.assertRaises(PayloadError, payload_checker._CheckLength, + int(3.5 * block_size), 5, 'foo', 'bar') + + def testCheckExtents(self): + """Tests _CheckExtents().""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + + # Passes w/ all real extents. + extents = self.NewExtentList((0, 4), (8, 3), (1024, 16)) + self.assertEqual( + 23, + payload_checker._CheckExtents(extents, (1024 + 16) * block_size, + collections.defaultdict(int), 'foo')) + + # Fails, extent missing a start block. + extents = self.NewExtentList((-1, 4), (8, 3), (1024, 16)) + self.assertRaises( + PayloadError, payload_checker._CheckExtents, extents, + (1024 + 16) * block_size, collections.defaultdict(int), 'foo') + + # Fails, extent missing block count. + extents = self.NewExtentList((0, -1), (8, 3), (1024, 16)) + self.assertRaises( + PayloadError, payload_checker._CheckExtents, extents, + (1024 + 16) * block_size, collections.defaultdict(int), 'foo') + + # Fails, extent has zero blocks. + extents = self.NewExtentList((0, 4), (8, 3), (1024, 0)) + self.assertRaises( + PayloadError, payload_checker._CheckExtents, extents, + (1024 + 16) * block_size, collections.defaultdict(int), 'foo') + + # Fails, extent exceeds partition boundaries. + extents = self.NewExtentList((0, 4), (8, 3), (1024, 16)) + self.assertRaises( + PayloadError, payload_checker._CheckExtents, extents, + (1024 + 15) * block_size, collections.defaultdict(int), 'foo') + + def testCheckReplaceOperation(self): + """Tests _CheckReplaceOperation() where op.type == REPLACE.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + data_length = 10000 + + op = mock.create_autospec(update_metadata_pb2.InstallOperation) + op.type = common.OpType.REPLACE + + # Pass. + op.src_extents = [] + self.assertIsNone( + payload_checker._CheckReplaceOperation( + op, data_length, (data_length + block_size - 1) // block_size, + 'foo')) + + # Fail, src extents found.
+ op.src_extents = ['bar'] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, missing data. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, None, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, length / block number mismatch. + op.src_extents = ['bar'] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size + 1, + 'foo') + + def testCheckReplaceBzOperation(self): + """Tests _CheckReplaceOperation() where op.type == REPLACE_BZ.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + data_length = block_size * 3 + + op = mock.create_autospec( + update_metadata_pb2.InstallOperation) + op.type = common.OpType.REPLACE_BZ + + # Pass. + op.src_extents = [] + self.assertIsNone( + payload_checker._CheckReplaceOperation( + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo')) + + # Fail, src extents found. + op.src_extents = ['bar'] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') + + # Fail, missing data. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, None, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, too few blocks to justify BZ. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, total_dst_blocks is a floating point value. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) / block_size, 'foo') + + def testCheckReplaceXzOperation(self): + """Tests _CheckReplaceOperation() where op.type == REPLACE_XZ.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + data_length = block_size * 3 + + op = mock.create_autospec( + update_metadata_pb2.InstallOperation) + op.type = common.OpType.REPLACE_XZ + + # Pass. + op.src_extents = [] + self.assertIsNone( + payload_checker._CheckReplaceOperation( + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo')) + + # Fail, src extents found. + op.src_extents = ['bar'] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size + 5, + 'foo') + + # Fail, missing data. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, None, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, too few blocks to justify XZ. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) // block_size, 'foo') + + # Fail, total_dst_blocks is a floating point value. + op.src_extents = [] + self.assertRaises( + PayloadError, payload_checker._CheckReplaceOperation, + op, data_length, (data_length + block_size - 1) / block_size, 'foo') + + def testCheckAnyDiff(self): + """Tests _CheckAnyDiffOperation().""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + op = update_metadata_pb2.InstallOperation() + + # Pass.
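+ # A diff blob is only justified if it is smaller than the data it produces. + # Assuming the usual 4 KiB test block size, 3 dst blocks hold 12288 bytes, + # so the 10000-byte blob below passes, whereas against 2 blocks (8192 + # bytes) the same blob is too big and must fail.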
+ self.assertIsNone( + payload_checker._CheckAnyDiffOperation(op, 10000, 3, 'foo')) + + # Fail, missing data blob. + self.assertRaises( + PayloadError, payload_checker._CheckAnyDiffOperation, + op, None, 3, 'foo') + + # Fail, too big of a diff blob (unjustified). + self.assertRaises( + PayloadError, payload_checker._CheckAnyDiffOperation, + op, 10000, 2, 'foo') + + def testCheckSourceCopyOperation_Pass(self): + """Tests _CheckSourceCopyOperation(); pass case.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + self.assertIsNone( + payload_checker._CheckSourceCopyOperation(None, 134, 134, 'foo')) + + def testCheckSourceCopyOperation_FailContainsData(self): + """Tests _CheckSourceCopyOperation(); message contains data.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation, + 134, 0, 0, 'foo') + + def testCheckSourceCopyOperation_FailBlockCountsMismatch(self): + """Tests _CheckSourceCopyOperation(); src and dst block totals not equal.""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + self.assertRaises(PayloadError, payload_checker._CheckSourceCopyOperation, + None, 0, 1, 'foo') + + def DoCheckOperationTest(self, op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, + fail_mismatched_data_offset_length, + fail_missing_dst_extents, fail_src_length, + fail_dst_length, fail_data_hash, + fail_prev_data_offset, fail_bad_minor_version): + """Parametric testing of _CheckOperation(). + + Args: + op_type_name: 'REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', + 'SOURCE_COPY', 'SOURCE_BSDIFF', 'BROTLI_BSDIFF' or 'PUFFDIFF'. + allow_unhashed: Whether unhashed data blobs are allowed. + fail_src_extents: Tamper with src extents. + fail_dst_extents: Tamper with dst extents. + fail_mismatched_data_offset_length: Make data_{offset,length} + inconsistent. + fail_missing_dst_extents: Do not include dst extents. + fail_src_length: Make src length inconsistent. + fail_dst_length: Make dst length inconsistent. + fail_data_hash: Tamper with the data blob hash. + fail_prev_data_offset: Make data blob offsets non-contiguous. + fail_bad_minor_version: Make minor version incompatible with op. + """ + op_type = _OpTypeByName(op_type_name) + + # Create the test object. + payload = self.MockPayload() + payload_checker = checker.PayloadChecker(payload, + allow_unhashed=allow_unhashed) + block_size = payload_checker.block_size + + # Create auxiliary arguments. + old_part_size = test_utils.MiB(4) + new_part_size = test_utils.MiB(8) + old_block_counters = array.array( + 'B', [0] * ((old_part_size + block_size - 1) // block_size)) + new_block_counters = array.array( + 'B', [0] * ((new_part_size + block_size - 1) // block_size)) + prev_data_offset = 1876 + blob_hash_counts = collections.defaultdict(int) + + # Create the operation object for the test.
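+ # The minor-version assignments below mirror the version at which each op + # type became available (REPLACE/REPLACE_BZ: 0, SOURCE_COPY/SOURCE_BSDIFF: + # 2, REPLACE_XZ: 3, ZERO/DISCARD/BROTLI_BSDIFF: 4, PUFFDIFF: 5); + # fail_bad_minor_version selects the version just below each cutoff.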
+ op = update_metadata_pb2.InstallOperation() + op.type = op_type + + total_src_blocks = 0 + if op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF, + common.OpType.PUFFDIFF, common.OpType.BROTLI_BSDIFF): + if fail_src_extents: + self.AddToMessage(op.src_extents, + self.NewExtentList((1, 0))) + else: + self.AddToMessage(op.src_extents, + self.NewExtentList((1, 16))) + total_src_blocks = 16 + + payload_checker.major_version = common.BRILLO_MAJOR_PAYLOAD_VERSION + if op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ): + payload_checker.minor_version = 0 + elif op_type in (common.OpType.SOURCE_COPY, common.OpType.SOURCE_BSDIFF): + payload_checker.minor_version = 1 if fail_bad_minor_version else 2 + if op_type == common.OpType.REPLACE_XZ: + payload_checker.minor_version = 2 if fail_bad_minor_version else 3 + elif op_type in (common.OpType.ZERO, common.OpType.DISCARD, + common.OpType.BROTLI_BSDIFF): + payload_checker.minor_version = 3 if fail_bad_minor_version else 4 + elif op_type == common.OpType.PUFFDIFF: + payload_checker.minor_version = 4 if fail_bad_minor_version else 5 + + if op_type != common.OpType.SOURCE_COPY: + if not fail_mismatched_data_offset_length: + op.data_length = 16 * block_size - 8 + if fail_prev_data_offset: + op.data_offset = prev_data_offset + 16 + else: + op.data_offset = prev_data_offset + + fake_data = 'fake-data'.ljust(op.data_length) + if not allow_unhashed and not fail_data_hash: + # Create a valid data blob hash. + op.data_sha256_hash = hashlib.sha256(fake_data.encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') + + elif fail_data_hash: + # Create an invalid data blob hash. + op.data_sha256_hash = hashlib.sha256( + fake_data.replace(' ', '-').encode('utf-8')).digest() + payload.ReadDataBlob.return_value = fake_data.encode('utf-8') + + total_dst_blocks = 0 + if not fail_missing_dst_extents: + total_dst_blocks = 16 + if fail_dst_extents: + self.AddToMessage(op.dst_extents, + self.NewExtentList((4, 16), (32, 0))) + else: + self.AddToMessage(op.dst_extents, + self.NewExtentList((4, 8), (64, 8))) + + if total_src_blocks: + if fail_src_length: + op.src_length = total_src_blocks * block_size + 8 + elif (op_type == common.OpType.SOURCE_BSDIFF and + payload_checker.minor_version <= 3): + op.src_length = total_src_blocks * block_size + elif fail_src_length: + # Add an orphaned src_length. + op.src_length = 16 + + if total_dst_blocks: + if fail_dst_length: + op.dst_length = total_dst_blocks * block_size + 8 + elif (op_type == common.OpType.SOURCE_BSDIFF and + payload_checker.minor_version <= 3): + op.dst_length = total_dst_blocks * block_size + + should_fail = (fail_src_extents or fail_dst_extents or + fail_mismatched_data_offset_length or + fail_missing_dst_extents or fail_src_length or + fail_dst_length or fail_data_hash or fail_prev_data_offset or + fail_bad_minor_version) + args = (op, 'foo', old_block_counters, new_block_counters, + old_part_size, new_part_size, prev_data_offset, + blob_hash_counts) + if should_fail: + self.assertRaises(PayloadError, payload_checker._CheckOperation, *args) + else: + self.assertEqual(op.data_length if op.HasField('data_length') else 0, + payload_checker._CheckOperation(*args)) + + def testAllocBlockCounters(self): + """Tests _AllocBlockCounters().""" + payload_checker = checker.PayloadChecker(self.MockPayload()) + block_size = payload_checker.block_size + + # Check allocation for block-aligned partition size, ensure the counters are integers.
+ result = payload_checker._AllocBlockCounters(16 * block_size) + self.assertEqual(16, len(result)) + self.assertEqual(int, type(result[0])) + + # Check allocation of unaligned partition sizes. + result = payload_checker._AllocBlockCounters(16 * block_size - 1) + self.assertEqual(16, len(result)) + result = payload_checker._AllocBlockCounters(16 * block_size + 1) + self.assertEqual(17, len(result)) + + def DoCheckOperationsTest(self, fail_nonexhaustive_full_update): + """Tests _CheckOperations().""" + # Generate a test payload. For this test, we only care about one + # (arbitrary) set of operations, so we'll only be generating rootfs + # operations and testing with them. + payload_gen = test_utils.PayloadGenerator() + + block_size = test_utils.KiB(4) + payload_gen.SetBlockSize(block_size) + + rootfs_part_size = test_utils.MiB(8) + + # Fake rootfs operations in a full update, tampered with as required. + rootfs_op_type = common.OpType.REPLACE + rootfs_data_length = rootfs_part_size + if fail_nonexhaustive_full_update: + rootfs_data_length -= block_size + + payload_gen.AddOperation(common.ROOTFS, rootfs_op_type, + dst_extents= + [(0, rootfs_data_length // block_size)], + data_offset=0, + data_length=rootfs_data_length) + + # Create the test object. + payload_checker = _GetPayloadChecker(payload_gen.WriteToFile, + checker_init_dargs={ + 'allow_unhashed': True}) + payload_checker.payload_type = checker._TYPE_FULL + report = checker._PayloadReport() + partition = next((p for p in payload_checker.payload.manifest.partitions + if p.partition_name == common.ROOTFS), None) + args = (partition.operations, report, 'foo', + 0, rootfs_part_size, rootfs_part_size, rootfs_part_size, 0) + if fail_nonexhaustive_full_update: + self.assertRaises(PayloadError, payload_checker._CheckOperations, *args) + else: + self.assertEqual(rootfs_data_length, + payload_checker._CheckOperations(*args)) + + def DoCheckSignaturesTest(self, fail_empty_sigs_blob, fail_sig_missing_fields, + fail_unknown_sig_version, fail_incorrect_sig): + """Tests _CheckSignatures().""" + # Generate a test payload. For this test, we only care about the signature + # block and how it relates to the payload hash. Therefore, we're generating + # a random (otherwise useless) payload for this purpose.
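+ # When a signatures blob is forged below, it is appended at the generator's + # current data offset and referenced from the manifest via SetSignatures(), + # mirroring how real payloads place the signatures block after all data + # blobs.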
+ payload_gen = test_utils.EnhancedPayloadGenerator() + block_size = test_utils.KiB(4) + payload_gen.SetBlockSize(block_size) + rootfs_part_size = test_utils.MiB(2) + kernel_part_size = test_utils.KiB(16) + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_part_size, + hashlib.sha256(b'fake-new-rootfs-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, True, kernel_part_size, + hashlib.sha256(b'fake-new-kernel-content').digest()) + payload_gen.SetMinorVersion(0) + payload_gen.AddOperationWithData( + common.ROOTFS, common.OpType.REPLACE, + dst_extents=[(0, rootfs_part_size // block_size)], + data_blob=os.urandom(rootfs_part_size)) + + do_forge_sigs_data = (fail_empty_sigs_blob or fail_sig_missing_fields or + fail_unknown_sig_version or fail_incorrect_sig) + + sigs_data = None + if do_forge_sigs_data: + sigs_gen = test_utils.SignaturesGenerator() + if not fail_empty_sigs_blob: + if fail_sig_missing_fields: + sig_data = None + else: + sig_data = test_utils.SignSha256(b'fake-payload-content', + test_utils._PRIVKEY_FILE_NAME) + sigs_gen.AddSig(5 if fail_unknown_sig_version else 1, sig_data) + + sigs_data = sigs_gen.ToBinary() + payload_gen.SetSignatures(payload_gen.curr_offset, len(sigs_data)) + + # Generate payload (complete w/ signature) and create the test object. + payload_checker = _GetPayloadChecker( + payload_gen.WriteToFileWithData, + payload_gen_dargs={ + 'sigs_data': sigs_data, + 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME}) + payload_checker.payload_type = checker._TYPE_FULL + report = checker._PayloadReport() + + # We have to check the manifest first in order to set signature attributes. + payload_checker._CheckManifest(report, { + common.ROOTFS: rootfs_part_size, + common.KERNEL: kernel_part_size + }) + + should_fail = (fail_empty_sigs_blob or fail_sig_missing_fields or + fail_unknown_sig_version or fail_incorrect_sig) + args = (report, test_utils._PUBKEY_FILE_NAME) + if should_fail: + self.assertRaises(PayloadError, payload_checker._CheckSignatures, *args) + else: + self.assertIsNone(payload_checker._CheckSignatures(*args)) + + def DoCheckManifestMinorVersionTest(self, minor_version, payload_type): + """Parametric testing for CheckManifestMinorVersion(). + + Args: + minor_version: The payload minor version to test with. + payload_type: The type of the payload we're testing, delta or full. + """ + # Create the test object. + payload = self.MockPayload() + payload.manifest.minor_version = minor_version + payload_checker = checker.PayloadChecker(payload) + payload_checker.payload_type = payload_type + report = checker._PayloadReport() + + should_succeed = ( + (minor_version == 0 and payload_type == checker._TYPE_FULL) or + (minor_version == 2 and payload_type == checker._TYPE_DELTA) or + (minor_version == 3 and payload_type == checker._TYPE_DELTA) or + (minor_version == 4 and payload_type == checker._TYPE_DELTA) or + (minor_version == 5 and payload_type == checker._TYPE_DELTA)) + args = (report,) + + if should_succeed: + self.assertIsNone(payload_checker._CheckManifestMinorVersion(*args)) + else: + self.assertRaises(PayloadError, + payload_checker._CheckManifestMinorVersion, *args) + + def DoRunTest(self, rootfs_part_size_provided, kernel_part_size_provided, + fail_wrong_payload_type, fail_invalid_block_size, + fail_mismatched_metadata_size, fail_mismatched_block_size, + fail_excess_data, fail_rootfs_part_size_exceeded, + fail_kernel_part_size_exceeded): + """Tests Run().""" + # Generate a test payload. 
For this test, we generate a full update that + # has sample kernel and rootfs operations. Since most testing is done with + # internal PayloadChecker methods that are tested elsewhere, here we only + # tamper with what's actually being manipulated and/or tested in the Run() + # method itself. Note that the checker doesn't verify partition hashes, so + # they're safe to fake. + payload_gen = test_utils.EnhancedPayloadGenerator() + block_size = test_utils.KiB(4) + payload_gen.SetBlockSize(block_size) + kernel_filesystem_size = test_utils.KiB(16) + rootfs_filesystem_size = test_utils.MiB(2) + payload_gen.SetPartInfo(common.ROOTFS, True, rootfs_filesystem_size, + hashlib.sha256(b'fake-new-rootfs-content').digest()) + payload_gen.SetPartInfo(common.KERNEL, True, kernel_filesystem_size, + hashlib.sha256(b'fake-new-kernel-content').digest()) + payload_gen.SetMinorVersion(0) + + rootfs_part_size = 0 + if rootfs_part_size_provided: + rootfs_part_size = rootfs_filesystem_size + block_size + rootfs_op_size = rootfs_part_size or rootfs_filesystem_size + if fail_rootfs_part_size_exceeded: + rootfs_op_size += block_size + payload_gen.AddOperationWithData( + common.ROOTFS, common.OpType.REPLACE, + dst_extents=[(0, rootfs_op_size // block_size)], + data_blob=os.urandom(rootfs_op_size)) + + kernel_part_size = 0 + if kernel_part_size_provided: + kernel_part_size = kernel_filesystem_size + block_size + kernel_op_size = kernel_part_size or kernel_filesystem_size + if fail_kernel_part_size_exceeded: + kernel_op_size += block_size + payload_gen.AddOperationWithData( + common.KERNEL, common.OpType.REPLACE, + dst_extents=[(0, kernel_op_size // block_size)], + data_blob=os.urandom(kernel_op_size)) + + # Generate payload (complete w/ signature) and create the test object. + if fail_invalid_block_size: + use_block_size = block_size + 5 # Not a power of two. + elif fail_mismatched_block_size: + use_block_size = block_size * 2 # Different from what the payload stated. + else: + use_block_size = block_size + + # For these unit tests, 237 is the metadata size generated for the payload. + metadata_size = 237 + if fail_mismatched_metadata_size: + metadata_size += 1 + + kwargs = { + 'payload_gen_dargs': { + 'privkey_file_name': test_utils._PRIVKEY_FILE_NAME, + 'padding': os.urandom(1024) if fail_excess_data else None}, + 'checker_init_dargs': { + 'assert_type': 'delta' if fail_wrong_payload_type else 'full', + 'block_size': use_block_size}} + if fail_invalid_block_size: + self.assertRaises(PayloadError, _GetPayloadChecker, + payload_gen.WriteToFileWithData, **kwargs) + else: + payload_checker = _GetPayloadChecker(payload_gen.WriteToFileWithData, + **kwargs) + + kwargs2 = { + 'pubkey_file_name': test_utils._PUBKEY_FILE_NAME, + 'metadata_size': metadata_size, + 'part_sizes': { + common.KERNEL: kernel_part_size, + common.ROOTFS: rootfs_part_size}} + + should_fail = (fail_wrong_payload_type or fail_mismatched_block_size or + fail_mismatched_metadata_size or fail_excess_data or + fail_rootfs_part_size_exceeded or + fail_kernel_part_size_exceeded) + if should_fail: + self.assertRaises(PayloadError, payload_checker.Run, **kwargs2) + else: + self.assertIsNone(payload_checker.Run(**kwargs2)) + + +# This implements a generic API, hence the occasional unused args.
+# pylint: disable=W0613 +def ValidateCheckOperationTest(op_type_name, allow_unhashed, + fail_src_extents, fail_dst_extents, + fail_mismatched_data_offset_length, + fail_missing_dst_extents, fail_src_length, + fail_dst_length, fail_data_hash, + fail_prev_data_offset, fail_bad_minor_version): + """Returns True iff the combination of arguments represents a valid test.""" + op_type = _OpTypeByName(op_type_name) + + # REPLACE/REPLACE_BZ/REPLACE_XZ operations don't read data from src + # partition. They are compatible with all valid minor versions, so we don't + # need to check that. + if (op_type in (common.OpType.REPLACE, common.OpType.REPLACE_BZ, + common.OpType.REPLACE_XZ) and (fail_src_extents or + fail_src_length or + fail_bad_minor_version)): + return False + + # SOURCE_COPY operation does not carry data. + if (op_type == common.OpType.SOURCE_COPY and ( + fail_mismatched_data_offset_length or fail_data_hash or + fail_prev_data_offset)): + return False + + return True + + +def TestMethodBody(run_method_name, run_dargs): + """Returns a function that invokes a named method with named arguments.""" + return lambda self: getattr(self, run_method_name)(**run_dargs) + + +def AddParametricTests(tested_method_name, arg_space, validate_func=None): + """Enumerates and adds specific parametric tests to PayloadCheckerTest. + + This function enumerates a space of test parameters (defined by arg_space), + then binds a new, unique method name in PayloadCheckerTest to a test function + that gets handed the said parameters. This is a preferable approach to doing + the enumeration and invocation during the tests because this way each test is + treated as a complete run by the unittest framework, and so benefits from the + usual setUp/tearDown mechanics. + + Args: + tested_method_name: Name of the tested PayloadChecker method. + arg_space: A dictionary containing variables (keys) and lists of values + (values) associated with them. + validate_func: A function used for validating test argument combinations. + """ + for value_tuple in itertools.product(*iter(arg_space.values())): + run_dargs = dict(zip(iter(arg_space.keys()), value_tuple)) + if validate_func and not validate_func(**run_dargs): + continue + run_method_name = 'Do%sTest' % tested_method_name + test_method_name = 'test%s' % tested_method_name + for arg_key, arg_val in run_dargs.items(): + if arg_val or isinstance(arg_val, int): + test_method_name += '__%s=%s' % (arg_key, arg_val) + setattr(PayloadCheckerTest, test_method_name, + TestMethodBody(run_method_name, run_dargs)) + + +def AddAllParametricTests(): + """Enumerates and adds all parametric tests to PayloadCheckerTest.""" + # Add all _CheckElem() test cases. + AddParametricTests('AddElem', + {'linebreak': (True, False), + 'indent': (0, 1, 2), + 'convert': (str, lambda s: s[::-1]), + 'is_present': (True, False), + 'is_mandatory': (True, False), + 'is_submsg': (True, False)}) + + # Add all _Add{Mandatory,Optional}Field tests. + AddParametricTests('AddField', + {'is_mandatory': (True, False), + 'linebreak': (True, False), + 'indent': (0, 1, 2), + 'convert': (str, lambda s: s[::-1]), + 'is_present': (True, False)}) + + # Add all _Add{Mandatory,Optional}SubMsg tests. + AddParametricTests('AddSubMsg', + {'is_mandatory': (True, False), + 'is_present': (True, False)}) + + # Add all _CheckManifest() test cases. 
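+ # As an illustration of the enumeration above: a hypothetical entry + # {'fail_bad_sigs': (True, False)} for 'CheckManifest' would bind two + # methods, testCheckManifest__fail_bad_sigs=True and + # testCheckManifest__fail_bad_sigs=False, each dispatching to + # DoCheckManifestTest() with the corresponding keyword argument.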
+ AddParametricTests('CheckManifest', + {'fail_mismatched_block_size': (True, False), + 'fail_bad_sigs': (True, False), + 'fail_mismatched_oki_ori': (True, False), + 'fail_bad_oki': (True, False), + 'fail_bad_ori': (True, False), + 'fail_bad_nki': (True, False), + 'fail_bad_nri': (True, False), + 'fail_old_kernel_fs_size': (True, False), + 'fail_old_rootfs_fs_size': (True, False), + 'fail_new_kernel_fs_size': (True, False), + 'fail_new_rootfs_fs_size': (True, False)}) + + # Add all _CheckOperation() test cases. + AddParametricTests('CheckOperation', + {'op_type_name': ('REPLACE', 'REPLACE_BZ', 'REPLACE_XZ', + 'SOURCE_COPY', 'SOURCE_BSDIFF', + 'PUFFDIFF', 'BROTLI_BSDIFF'), + 'allow_unhashed': (True, False), + 'fail_src_extents': (True, False), + 'fail_dst_extents': (True, False), + 'fail_mismatched_data_offset_length': (True, False), + 'fail_missing_dst_extents': (True, False), + 'fail_src_length': (True, False), + 'fail_dst_length': (True, False), + 'fail_data_hash': (True, False), + 'fail_prev_data_offset': (True, False), + 'fail_bad_minor_version': (True, False)}, + validate_func=ValidateCheckOperationTest) + + # Add all _CheckOperations() test cases. + AddParametricTests('CheckOperations', + {'fail_nonexhaustive_full_update': (True, False)}) + + # Add all _CheckSignatures() test cases. + AddParametricTests('CheckSignatures', + {'fail_empty_sigs_blob': (True, False), + 'fail_sig_missing_fields': (True, False), + 'fail_unknown_sig_version': (True, False), + 'fail_incorrect_sig': (True, False)}) + + # Add all _CheckManifestMinorVersion() test cases. + AddParametricTests('CheckManifestMinorVersion', + {'minor_version': (None, 0, 2, 3, 4, 5, 555), + 'payload_type': (checker._TYPE_FULL, + checker._TYPE_DELTA)}) + + # Add all Run() test cases. + AddParametricTests('Run', + {'rootfs_part_size_provided': (True, False), + 'kernel_part_size_provided': (True, False), + 'fail_wrong_payload_type': (True, False), + 'fail_invalid_block_size': (True, False), + 'fail_mismatched_metadata_size': (True, False), + 'fail_mismatched_block_size': (True, False), + 'fail_excess_data': (True, False), + 'fail_rootfs_part_size_exceeded': (True, False), + 'fail_kernel_part_size_exceeded': (True, False)}) + + +if __name__ == '__main__': + AddAllParametricTests() + unittest.main() diff --git a/update-payload-extractor/update_payload/common.py b/update-payload-extractor/update_payload/common.py new file mode 100755 index 0000000..b934cf8 --- /dev/null +++ b/update-payload-extractor/update_payload/common.py @@ -0,0 +1,218 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Utilities for update payload processing.""" + +from __future__ import absolute_import +from __future__ import print_function + +import base64 + +from update_payload import update_metadata_pb2 +from update_payload.error import PayloadError + + +# +# Constants.
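+# SIG_ASN1_HEADER below is the DER-encoded ASN.1 DigestInfo prefix for a +# SHA-256 digest; RSA signature verification via openssl recovers this header +# in front of the raw 32-byte hash, and the checker validates and strips it.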
+# +SIG_ASN1_HEADER = ( + b'\x30\x31\x30\x0d\x06\x09\x60\x86' + b'\x48\x01\x65\x03\x04\x02\x01\x05' + b'\x00\x04\x20' +) + +BRILLO_MAJOR_PAYLOAD_VERSION = 2 + +SOURCE_MINOR_PAYLOAD_VERSION = 2 +OPSRCHASH_MINOR_PAYLOAD_VERSION = 3 +BROTLI_BSDIFF_MINOR_PAYLOAD_VERSION = 4 +PUFFDIFF_MINOR_PAYLOAD_VERSION = 5 + +KERNEL = 'kernel' +ROOTFS = 'root' +# Tuple of (name in system, name in protobuf). +CROS_PARTITIONS = ((KERNEL, KERNEL), (ROOTFS, 'rootfs')) + + +# +# Payload operation types. +# +class OpType(object): + """Container for operation type constants.""" + _CLASS = update_metadata_pb2.InstallOperation + REPLACE = _CLASS.REPLACE + REPLACE_BZ = _CLASS.REPLACE_BZ + SOURCE_COPY = _CLASS.SOURCE_COPY + SOURCE_BSDIFF = _CLASS.SOURCE_BSDIFF + ZERO = _CLASS.ZERO + DISCARD = _CLASS.DISCARD + REPLACE_XZ = _CLASS.REPLACE_XZ + PUFFDIFF = _CLASS.PUFFDIFF + BROTLI_BSDIFF = _CLASS.BROTLI_BSDIFF + ALL = (REPLACE, REPLACE_BZ, SOURCE_COPY, SOURCE_BSDIFF, ZERO, + DISCARD, REPLACE_XZ, PUFFDIFF, BROTLI_BSDIFF) + NAMES = { + REPLACE: 'REPLACE', + REPLACE_BZ: 'REPLACE_BZ', + SOURCE_COPY: 'SOURCE_COPY', + SOURCE_BSDIFF: 'SOURCE_BSDIFF', + ZERO: 'ZERO', + DISCARD: 'DISCARD', + REPLACE_XZ: 'REPLACE_XZ', + PUFFDIFF: 'PUFFDIFF', + BROTLI_BSDIFF: 'BROTLI_BSDIFF', + } + + def __init__(self): + pass + + +# +# Checked and hashed reading of data. +# +def IntPackingFmtStr(size, is_unsigned): + """Returns an integer format string for use by the struct module. + + Args: + size: the integer size in bytes (2, 4 or 8) + is_unsigned: whether the integer is unsigned + + Returns: + A format string for packing/unpacking integer values; assumes network byte + order (big-endian). + + Raises: + PayloadError if something is wrong with the arguments. + """ + # Determine the base conversion format. + if size == 2: + fmt = 'h' + elif size == 4: + fmt = 'i' + elif size == 8: + fmt = 'q' + else: + raise PayloadError('unsupported numeric field size (%s)' % size) + + # Signed or unsigned? + if is_unsigned: + fmt = fmt.upper() + + # Make it network byte order (big-endian). + fmt = '!' + fmt + + return fmt + + +def Read(file_obj, length, offset=None, hasher=None): + """Reads binary data from a file. + + Args: + file_obj: an open file object + length: the length of the data to read + offset: an offset to seek to prior to reading; this is an absolute offset + from either the beginning (non-negative) or end (negative) of the + file. (optional) + hasher: a hashing object to pass the read data through (optional) + + Returns: + A string containing the read data. + + Raises: + PayloadError if a read error occurred or not enough data was read. + """ + if offset is not None: + if offset >= 0: + file_obj.seek(offset) + else: + file_obj.seek(offset, 2) + + try: + data = file_obj.read(length) + except IOError as e: + raise PayloadError('error reading from file (%s): %s' % (file_obj.name, e)) + + if len(data) != length: + raise PayloadError( + 'reading from file (%s) too short (%d instead of %d bytes)' % + (file_obj.name, len(data), length)) + + if hasher: + hasher.update(data) + + return data + + +# +# Formatting functions. +# +def FormatExtent(ex, block_size=0): + end_block = ex.start_block + ex.num_blocks + if block_size: + return '%d->%d * %d' % (ex.start_block, end_block, block_size) + return '%d->%d' % (ex.start_block, end_block) + + +def FormatSha256(digest): + """Returns a canonical string representation of a SHA256 digest.""" + return base64.b64encode(digest).decode('utf-8') + + +# +# Useful iterators.
+# +def _ObjNameIter(items, base_name, reverse=False, name_format_func=None): + """A generic (item, name) tuple iterator. + + Args: + items: the sequence of objects to iterate on + base_name: the base name for all objects + reverse: whether iteration should be in reverse order + name_format_func: a function to apply to the name string + + Yields: + Tuples (item, name), where name == base_name + '[i]' (with a formatting + function optionally applied to it). + """ + idx, inc = (len(items), -1) if reverse else (1, 1) + if reverse: + items = reversed(items) + for item in items: + item_name = '%s[%d]' % (base_name, idx) + if name_format_func: + item_name = name_format_func(item, item_name) + yield (item, item_name) + idx += inc + + +def _OperationNameFormatter(op, op_name): + return '%s(%s)' % (op_name, OpType.NAMES.get(op.type, '?')) + + +def OperationIter(operations, base_name, reverse=False): + """An (item, name) iterator for update operations.""" + return _ObjNameIter(operations, base_name, reverse=reverse, + name_format_func=_OperationNameFormatter) + + +def ExtentIter(extents, base_name, reverse=False): + """An (item, name) iterator for operation extents.""" + return _ObjNameIter(extents, base_name, reverse=reverse) + + +def SignatureIter(sigs, base_name, reverse=False): + """An (item, name) iterator for signatures.""" + return _ObjNameIter(sigs, base_name, reverse=reverse) diff --git a/update-payload-extractor/update_payload/error.py b/update-payload-extractor/update_payload/error.py new file mode 100755 index 0000000..6f95433 --- /dev/null +++ b/update-payload-extractor/update_payload/error.py @@ -0,0 +1,21 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Payload handling errors.""" + + +class PayloadError(Exception): + """An update payload general processing error.""" diff --git a/update-payload-extractor/update_payload/format_utils.py b/update-payload-extractor/update_payload/format_utils.py new file mode 100755 index 0000000..e73badf --- /dev/null +++ b/update-payload-extractor/update_payload/format_utils.py @@ -0,0 +1,111 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Various formatting functions.""" + +from __future__ import division + + +def NumToPercent(num, total, min_precision=1, max_precision=5): + """Returns the percentage (string) of |num| out of |total|.
+ + If the percentage includes a fraction, it will be computed down to the least + precision that yields a non-zero value, ranging between |min_precision| and + |max_precision|. Values are always rounded down. All arithmetic operations + are integer built-ins. Examples (using default precision): + + (1, 1) => 100% + (3, 10) => 30% + (3, 9) => 33.3% + (3, 900) => 0.3% + (3, 9000000) => 0.00003% + (3, 900000000) => 0% + (5, 2) => 250% + + Args: + num: the value of the part + total: the value of the whole + min_precision: minimum precision for fractional percentage + max_precision: maximum precision for fractional percentage + Returns: + Percentage string, or None if percent cannot be computed (i.e. total is + zero). + + """ + if total == 0: + return None + + percent = 0 + precision = min(min_precision, max_precision) + factor = 10 ** precision + while precision <= max_precision: + percent = num * 100 * factor // total + if percent: + break + factor *= 10 + precision += 1 + + whole, frac = divmod(percent, factor) + while frac and not frac % 10: + frac //= 10 + precision -= 1 + + return '%d%s%%' % (whole, '.%0*d' % (precision, frac) if frac else '') + + +def BytesToHumanReadable(size, precision=1, decimal=False): + """Returns a human readable representation of a given |size|. + + The returned string includes unit notations in either binary (KiB, MiB, etc) + or decimal (kB, MB, etc), based on the value of |decimal|. The chosen unit is + the largest that yields a whole (or mixed) number. It may contain up to + |precision| fractional digits. Values are always rounded down. Largest unit + is an exabyte. All arithmetic operations are integer built-ins. Examples + (using default precision and binary units): + + 4096 => 4 KiB + 5000 => 4.8 KiB + 500000 => 488.2 KiB + 5000000 => 4.7 MiB + + Args: + size: the size in bytes + precision: the number of digits past the decimal point + decimal: whether to compute/present decimal or binary units + Returns: + Readable size string, or None if no conversion is applicable (i.e. size is + less than the smallest unit). + + """ + constants = ( + (('KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB'), 1024), + (('kB', 'MB', 'GB', 'TB', 'PB', 'EB'), 1000) + ) + suffixes, base = constants[decimal] + exp, magnitude = 0, 1 + while exp < len(suffixes): + next_magnitude = magnitude * base + if size < next_magnitude: + break + exp += 1 + magnitude = next_magnitude + + if exp != 0: + whole = size // magnitude + frac = (size % magnitude) * (10 ** precision) // magnitude + while frac and not frac % 10: + frac //= 10 + return '%d%s %s' % (whole, '.%d' % frac if frac else '', suffixes[exp - 1]) diff --git a/update-payload-extractor/update_payload/format_utils_unittest.py b/update-payload-extractor/update_payload/format_utils_unittest.py new file mode 100755 index 0000000..4dcd652 --- /dev/null +++ b/update-payload-extractor/update_payload/format_utils_unittest.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for format_utils.py.""" + +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + +import unittest + +from update_payload import format_utils + + +class NumToPercentTest(unittest.TestCase): + """ Tests number conversion to percentage format.""" + def testHundredPercent(self): + self.assertEqual(format_utils.NumToPercent(1, 1), '100%') + + def testOverHundredPercent(self): + self.assertEqual(format_utils.NumToPercent(5, 2), '250%') + + def testWholePercent(self): + self.assertEqual(format_utils.NumToPercent(3, 10), '30%') + + def testDefaultMinPrecision(self): + self.assertEqual(format_utils.NumToPercent(3, 9), '33.3%') + self.assertEqual(format_utils.NumToPercent(3, 900), '0.3%') + + def testDefaultMaxPrecision(self): + self.assertEqual(format_utils.NumToPercent(3, 9000000), '0.00003%') + self.assertEqual(format_utils.NumToPercent(3, 90000000), '0%') + + def testCustomMinPrecision(self): + self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=3), + '33.333%') + self.assertEqual(format_utils.NumToPercent(3, 9, min_precision=0), + '33%') + + def testCustomMaxPrecision(self): + self.assertEqual(format_utils.NumToPercent(3, 900, max_precision=1), + '0.3%') + self.assertEqual(format_utils.NumToPercent(3, 9000, max_precision=1), + '0%') + + +class BytesToHumanReadableTest(unittest.TestCase): + """ Tests number conversion to human readable format.""" + def testBaseTwo(self): + self.assertEqual(format_utils.BytesToHumanReadable(0x1000), '4 KiB') + self.assertEqual(format_utils.BytesToHumanReadable(0x400000), '4 MiB') + self.assertEqual(format_utils.BytesToHumanReadable(0x100000000), '4 GiB') + self.assertEqual(format_utils.BytesToHumanReadable(0x40000000000), '4 TiB') + + def testDecimal(self): + self.assertEqual(format_utils.BytesToHumanReadable(5000, decimal=True), + '5 kB') + self.assertEqual(format_utils.BytesToHumanReadable(5000000, decimal=True), + '5 MB') + self.assertEqual(format_utils.BytesToHumanReadable(5000000000, + decimal=True), + '5 GB') + + def testDefaultPrecision(self): + self.assertEqual(format_utils.BytesToHumanReadable(5000), '4.8 KiB') + self.assertEqual(format_utils.BytesToHumanReadable(500000), '488.2 KiB') + self.assertEqual(format_utils.BytesToHumanReadable(5000000), '4.7 MiB') + + def testCustomPrecision(self): + self.assertEqual(format_utils.BytesToHumanReadable(5000, precision=3), + '4.882 KiB') + self.assertEqual(format_utils.BytesToHumanReadable(500000, precision=0), + '488 KiB') + self.assertEqual(format_utils.BytesToHumanReadable(5000000, precision=5), + '4.76837 MiB') + + +if __name__ == '__main__': + unittest.main() diff --git a/update-payload-extractor/update_payload/histogram.py b/update-payload-extractor/update_payload/histogram.py new file mode 100755 index 0000000..bad2dc3 --- /dev/null +++ b/update-payload-extractor/update_payload/histogram.py @@ -0,0 +1,132 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Histogram generation tools.""" + +from __future__ import absolute_import +from __future__ import division + +from collections import defaultdict + +from update_payload import format_utils + + +class Histogram(object): + """A histogram generating object. + + This object serves the sole purpose of formatting (key, val) pairs as an + ASCII histogram, including bars and percentage markers, and taking care of + label alignment, scaling, etc. In addition to the standard __init__ + interface, two static methods are provided for conveniently converting data + in different formats into a histogram. Histogram generation is exported via + its __str__ method, and looks as follows: + + Yes |################ | 5 (83.3%) + No |### | 1 (16.6%) + + TODO(garnold) we may want to add actual methods for adding data or tweaking + the output layout and formatting. For now, though, this is fine. + + """ + + def __init__(self, data, scale=20, formatter=None): + """Initialize a histogram object. + + Args: + data: list of (key, count) pairs constituting the histogram + scale: number of characters used to indicate 100% + formatter: function used for formatting raw histogram values + + """ + self.data = data + self.scale = scale + self.formatter = formatter or str + self.max_key_len = max([len(str(key)) for key, count in self.data]) + self.total = sum([count for key, count in self.data]) + + @staticmethod + def FromCountDict(count_dict, scale=20, formatter=None, key_names=None): + """Takes a dictionary of counts and returns a histogram object. + + This simply converts a mapping from names to counts into a list of (key, + count) pairs, optionally translating keys into name strings, then + generating and returning a histogram for them. This is a useful convenience + call for clients that update a dictionary of counters as they (say) scan a + data stream. + + Args: + count_dict: dictionary mapping keys to occurrence counts + scale: number of characters used to indicate 100% + formatter: function used for formatting raw histogram values + key_names: dictionary mapping keys to name strings + Returns: + A histogram object based on the given data. + + """ + namer = None + if key_names: + namer = lambda key: key_names[key] + else: + namer = lambda key: key + + hist = [(namer(key), count) for key, count in count_dict.items()] + return Histogram(hist, scale, formatter) + + @staticmethod + def FromKeyList(key_list, scale=20, formatter=None, key_names=None): + """Takes a list of (possibly recurring) keys and returns a histogram object. + + This converts the list into a dictionary of counters, then uses + FromCountDict() to generate the actual histogram. For example: + + ['a', 'a', 'b', 'a', 'b'] --> {'a': 3, 'b': 2} --> ... + + Args: + key_list: list of (possibly recurring) keys + scale: number of characters used to indicate 100% + formatter: function used for formatting raw histogram values + key_names: dictionary mapping keys to name strings + Returns: + A histogram object based on the given data. 
+ + """ + count_dict = defaultdict(int) # Unset items default to zero + for key in key_list: + count_dict[key] += 1 + return Histogram.FromCountDict(count_dict, scale, formatter, key_names) + + def __str__(self): + hist_lines = [] + hist_bar = '|' + for key, count in self.data: + if self.total: + bar_len = count * self.scale // self.total + hist_bar = '|%s|' % ('#' * bar_len).ljust(self.scale) + + line = '%s %s %s' % ( + str(key).ljust(self.max_key_len), + hist_bar, + self.formatter(count)) + percent_str = format_utils.NumToPercent(count, self.total) + if percent_str: + line += ' (%s)' % percent_str + hist_lines.append(line) + + return '\n'.join(hist_lines) + + def GetKeys(self): + """Returns the keys of the histogram.""" + return [key for key, _ in self.data] diff --git a/update-payload-extractor/update_payload/histogram_unittest.py b/update-payload-extractor/update_payload/histogram_unittest.py new file mode 100755 index 0000000..ccde2bb --- /dev/null +++ b/update-payload-extractor/update_payload/histogram_unittest.py @@ -0,0 +1,77 @@ +#!/usr/bin/env python +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Unit tests for histogram.py.""" + +# Disable check for function names to avoid errors based on old code +# pylint: disable-msg=invalid-name + +from __future__ import absolute_import + +import unittest + +from update_payload import format_utils +from update_payload import histogram + + +class HistogramTest(unittest.TestCase): + """ Tests histogram""" + + @staticmethod + def AddHumanReadableSize(size): + fmt = format_utils.BytesToHumanReadable(size) + return '%s (%s)' % (size, fmt) if fmt else str(size) + + def CompareToExpectedDefault(self, actual_str): + expected_str = ( + 'Yes |################ | 5 (83.3%)\n' + 'No |### | 1 (16.6%)' + ) + self.assertEqual(actual_str, expected_str) + + def testExampleHistogram(self): + self.CompareToExpectedDefault(str(histogram.Histogram( + [('Yes', 5), ('No', 1)]))) + + def testFromCountDict(self): + self.CompareToExpectedDefault(str(histogram.Histogram.FromCountDict( + {'Yes': 5, 'No': 1}))) + + def testFromKeyList(self): + self.CompareToExpectedDefault(str(histogram.Histogram.FromKeyList( + ['Yes', 'Yes', 'No', 'Yes', 'Yes', 'Yes']))) + + def testCustomScale(self): + expected_str = ( + 'Yes |#### | 5 (83.3%)\n' + 'No | | 1 (16.6%)' + ) + actual_str = str(histogram.Histogram([('Yes', 5), ('No', 1)], scale=5)) + self.assertEqual(actual_str, expected_str) + + def testCustomFormatter(self): + expected_str = ( + 'Yes |################ | 5000 (4.8 KiB) (83.3%)\n' + 'No |### | 1000 (16.6%)' + ) + actual_str = str(histogram.Histogram( + [('Yes', 5000), ('No', 1000)], formatter=self.AddHumanReadableSize)) + self.assertEqual(actual_str, expected_str) + + +if __name__ == '__main__': + unittest.main() diff --git a/update-payload-extractor/update_payload/payload-test-key.pem b/update-payload-extractor/update_payload/payload-test-key.pem new file mode 100755 index 0000000..342e923 --- /dev/null +++ 
b/update-payload-extractor/update_payload/payload-test-key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAvtGHtqO21Uhy2wGz9fluIpIUR8G7dZoCZhZukGkm4mlfgL71 +xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3HCkCOurZLpi2L5Ver6qrxKFh6WBVZ +0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+RazfrsXhd4cy3dBMxouGwH7R7QQXTFCo +Cc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP0bfPwH9cAXuMjHXiZatim0tF+ivp +kM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c0mNmBNFaV54cHEUW2SlNIiRun7L0 +1nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5cQIDAQABAoIBADmE2X7hbJxwAUcp +BUExFdTP6dMTf9lcOjrhqiRXvgPjtYkOhvD+rsdWq/cf2zhiKibTdEEzUMr+BM3N +r7eyntvlR+DaUIVgF1pjigvryVPbD837aZ5NftRv194PC5FInttq1Dsf0ZEz8p8X +uS/xg1+ggG1SUK/yOSJkLpNZ5xelbclQJ9bnJST8PR8XbEieA83xt5M2DcooPzq0 +/99m/daA5hmSWs6n8sFrIZDQxDhLyyW4J72jjoNTE87eCpwK855yXMelpEPDZNQi +nB3x5Y/bGbl81PInqL2q14lekrVYdYZ7bOBVlsmyvz6f1e4OOE1aaAM+w6ArA4az +6elZQE0CgYEA4GOU6BBu9jLqFdqV9jIkWsgz5ZWINz8PLJPtZzk5I9KO1m+GAUy2 +h/1IGGR6qRQR49hMtq4C0lUifxquq0xivzJ87U9oxKC9yEeTxkmDe5csVHsnAtqT +xRgVM7Ysrut5NLU1zm0q3jBmkDu7d99LvscM/3n7eJ6RiYpnA54O6I8CgYEA2bNA +34PTvxBS2deRoxKQNlVU14FtirE+q0+k0wcE85wr7wIMpR13al8T1TpE8J1yvvZM +92HMGFGfYNDB46b8VfJ5AxEUFwdruec6sTVVfkMZMOqM/A08yiaLzQ1exDxNwaja +fLuG5FAVRD/2g7fLBcsmosyNgcgNr1XA8Q/nvf8CgYEAwaSOg7py19rWcqehlMZu +4z00tCNYWzz7LmA2l0clzYlPJTU3MvXt6+ujhRFpXXJpgfRPN7Nx0ewQihoPtNqF +uTSr5OwLoOyK+0Tx/UPByS2L3xgscWUJ8yQ2X9sOMqIZhmf/mDZTsU2ZpU03GlrE +dk43JF4zq0NEm6qp/dAwU3cCgYEAvECl+KKmmLIk8vvWlI2Y52Mi2rixYR2kc7+L +aHDJd1+1HhlHlgDFItbU765Trz5322phZArN0rnCeJYNFC9yRWBIBL7gAIoKPdgW +iOb15xlez04EXHGV/7kVa1wEdu0u0CiTxwjivMwDl+E36u8kQP5LirwYIgI800H0 +doCqhUECgYEAjvA38OS7hy56Q4LQtmHFBuRIn4E5SrIGMwNIH6TGbEKQix3ajTCQ +0fSoLDGTkU6dH+T4v0WheveN2a2Kofqm0UQx5V2rfnY/Ut1fAAWgL/lsHLDnzPUZ +bvTOANl8TbT49xAfNXTaGWe7F7nYz+bK0UDif1tJNDLQw7USD5I8lbQ= +-----END RSA PRIVATE KEY----- diff --git a/update-payload-extractor/update_payload/payload-test-key.pub b/update-payload-extractor/update_payload/payload-test-key.pub new file mode 100755 index 0000000..fdae963 --- /dev/null +++ b/update-payload-extractor/update_payload/payload-test-key.pub @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvtGHtqO21Uhy2wGz9flu +IpIUR8G7dZoCZhZukGkm4mlfgL71xPSArjx02/w/FhYxOusV6/XQeKgL3i8cni3H +CkCOurZLpi2L5Ver6qrxKFh6WBVZ0Dj7N6P/Mf5jZdhfvVyweLlsNK8Ypeb+Razf +rsXhd4cy3dBMxouGwH7R7QQXTFCoCc8kgJBTxILl3jfvY8OrNKgYiCETa7tQdFkP +0bfPwH9cAXuMjHXiZatim0tF+ivpkM2v/6LTxtD6Rq1wks/N6CHi8efrRaviFp7c +0mNmBNFaV54cHEUW2SlNIiRun7L01nAz/D8kuoHfx4E3Mtj0DbvngZJMX/X+rJQ5 +cQIDAQAB +-----END PUBLIC KEY----- diff --git a/update-payload-extractor/update_payload/payload.py b/update-payload-extractor/update_payload/payload.py new file mode 100755 index 0000000..ea5ed30 --- /dev/null +++ b/update-payload-extractor/update_payload/payload.py @@ -0,0 +1,330 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +"""Tools for reading, verifying and applying Chrome OS update payloads.""" + +from __future__ import absolute_import +from __future__ import print_function + +import hashlib +import struct + +from update_payload import applier +from update_payload import checker +from update_payload import common +from update_payload import update_metadata_pb2 +from update_payload.error import PayloadError + + +# +# Helper functions. +# +def _ReadInt(file_obj, size, is_unsigned, hasher=None): + """Reads a binary-encoded integer from a file. + + It will do the correct conversion based on the reported size and whether or + not a signed number is expected. Assumes a network (big-endian) byte + ordering. + + Args: + file_obj: a file object + size: the integer size in bytes (2, 4 or 8) + is_unsigned: whether it is signed or not + hasher: an optional hasher to pass the value through + + Returns: + An "unpacked" (Python) integer value. + + Raises: + PayloadError if an read error occurred. + """ + return struct.unpack(common.IntPackingFmtStr(size, is_unsigned), + common.Read(file_obj, size, hasher=hasher))[0] + + +# +# Update payload. +# +class Payload(object): + """Chrome OS update payload processor.""" + + class _PayloadHeader(object): + """Update payload header struct.""" + + # Header constants; sizes are in bytes. + _MAGIC = b'CrAU' + _VERSION_SIZE = 8 + _MANIFEST_LEN_SIZE = 8 + _METADATA_SIGNATURE_LEN_SIZE = 4 + + def __init__(self): + self.version = None + self.manifest_len = None + self.metadata_signature_len = None + self.size = None + + def ReadFromPayload(self, payload_file, hasher=None): + """Reads the payload header from a file. + + Reads the payload header from the |payload_file| and updates the |hasher| + if one is passed. The parsed header is stored in the _PayloadHeader + instance attributes. + + Args: + payload_file: a file object + hasher: an optional hasher to pass the value through + + Returns: + None. + + Raises: + PayloadError if a read error occurred or the header is invalid. + """ + # Verify magic + magic = common.Read(payload_file, len(self._MAGIC), hasher=hasher) + if magic != self._MAGIC: + raise PayloadError('invalid payload magic: %s' % magic) + + self.version = _ReadInt(payload_file, self._VERSION_SIZE, True, + hasher=hasher) + self.manifest_len = _ReadInt(payload_file, self._MANIFEST_LEN_SIZE, True, + hasher=hasher) + self.size = (len(self._MAGIC) + self._VERSION_SIZE + + self._MANIFEST_LEN_SIZE) + self.metadata_signature_len = 0 + + if self.version == common.BRILLO_MAJOR_PAYLOAD_VERSION: + self.size += self._METADATA_SIGNATURE_LEN_SIZE + self.metadata_signature_len = _ReadInt( + payload_file, self._METADATA_SIGNATURE_LEN_SIZE, True, + hasher=hasher) + + def __init__(self, payload_file, payload_file_offset=0): + """Initialize the payload object. + + Args: + payload_file: update payload file object open for reading + payload_file_offset: the offset of the actual payload + """ + self.payload_file = payload_file + self.payload_file_offset = payload_file_offset + self.manifest_hasher = None + self.is_init = False + self.header = None + self.manifest = None + self.data_offset = None + self.metadata_signature = None + self.metadata_size = None + + def _ReadHeader(self): + """Reads and returns the payload header. + + Returns: + A payload header object. + + Raises: + PayloadError if a read error occurred. 
+ """ + header = self._PayloadHeader() + header.ReadFromPayload(self.payload_file, self.manifest_hasher) + return header + + def _ReadManifest(self): + """Reads and returns the payload manifest. + + Returns: + A string containing the payload manifest in binary form. + + Raises: + PayloadError if a read error occurred. + """ + if not self.header: + raise PayloadError('payload header not present') + + return common.Read(self.payload_file, self.header.manifest_len, + hasher=self.manifest_hasher) + + def _ReadMetadataSignature(self): + """Reads and returns the metadata signatures. + + Returns: + A string containing the metadata signatures protobuf in binary form or + an empty string if no metadata signature found in the payload. + + Raises: + PayloadError if a read error occurred. + """ + if not self.header: + raise PayloadError('payload header not present') + + return common.Read( + self.payload_file, self.header.metadata_signature_len, + offset=self.payload_file_offset + self.header.size + + self.header.manifest_len) + + def ReadDataBlob(self, offset, length): + """Reads and returns a single data blob from the update payload. + + Args: + offset: offset to the beginning of the blob from the end of the manifest + length: the blob's length + + Returns: + A string containing the raw blob data. + + Raises: + PayloadError if a read error occurred. + """ + return common.Read(self.payload_file, length, + offset=self.payload_file_offset + self.data_offset + + offset) + + def Init(self): + """Initializes the payload object. + + This is a prerequisite for any other public API call. + + Raises: + PayloadError if object already initialized or fails to initialize + correctly. + """ + if self.is_init: + raise PayloadError('payload object already initialized') + + self.manifest_hasher = hashlib.sha256() + + # Read the file header. + self.payload_file.seek(self.payload_file_offset) + self.header = self._ReadHeader() + + # Read the manifest. + manifest_raw = self._ReadManifest() + self.manifest = update_metadata_pb2.DeltaArchiveManifest() + self.manifest.ParseFromString(manifest_raw) + + # Read the metadata signature (if any). 
+ metadata_signature_raw = self._ReadMetadataSignature() + if metadata_signature_raw: + self.metadata_signature = update_metadata_pb2.Signatures() + self.metadata_signature.ParseFromString(metadata_signature_raw) + + self.metadata_size = self.header.size + self.header.manifest_len + self.data_offset = self.metadata_size + self.header.metadata_signature_len + + self.is_init = True + + def Describe(self): + """Emits the payload embedded description data to standard output.""" + def _DescribeImageInfo(description, image_info): + """Displays info about the image.""" + def _DisplayIndentedValue(name, value): + print(' {:<14} {}'.format(name + ':', value)) + + print('%s:' % description) + _DisplayIndentedValue('Channel', image_info.channel) + _DisplayIndentedValue('Board', image_info.board) + _DisplayIndentedValue('Version', image_info.version) + _DisplayIndentedValue('Key', image_info.key) + + if image_info.build_channel != image_info.channel: + _DisplayIndentedValue('Build channel', image_info.build_channel) + + if image_info.build_version != image_info.version: + _DisplayIndentedValue('Build version', image_info.build_version) + + if self.manifest.HasField('old_image_info'): + _DescribeImageInfo('Old Image', self.manifest.old_image_info) + + if self.manifest.HasField('new_image_info'): + _DescribeImageInfo('New Image', self.manifest.new_image_info) + + def _AssertInit(self): + """Raises an exception if the object was not initialized.""" + if not self.is_init: + raise PayloadError('payload object not initialized') + + def ResetFile(self): + """Resets the offset of the payload file to right past the manifest.""" + self.payload_file.seek(self.payload_file_offset + self.data_offset) + + def IsDelta(self): + """Returns True iff the payload appears to be a delta.""" + self._AssertInit() + return (any(partition.HasField('old_partition_info') + for partition in self.manifest.partitions)) + + def IsFull(self): + """Returns True iff the payload appears to be full.""" + return not self.IsDelta() + + def Check(self, pubkey_file_name=None, metadata_sig_file=None, + metadata_size=0, report_out_file=None, assert_type=None, + block_size=0, part_sizes=None, allow_unhashed=False, + disabled_tests=()): + """Checks the payload integrity. + + Args: + pubkey_file_name: public key used for signature verification + metadata_sig_file: metadata signature, if verification is desired + metadata_size: metadata size, if verification is desired + report_out_file: file object to dump the report to + assert_type: assert that payload is either 'full' or 'delta' + block_size: expected filesystem / payload block size + part_sizes: map of partition label to (physical) size in bytes + allow_unhashed: allow unhashed operation blobs + disabled_tests: list of tests to disable + + Raises: + PayloadError if payload verification failed. + """ + self._AssertInit() + + # Create a short-lived payload checker object and run it. + helper = checker.PayloadChecker( + self, assert_type=assert_type, block_size=block_size, + allow_unhashed=allow_unhashed, disabled_tests=disabled_tests) + helper.Run(pubkey_file_name=pubkey_file_name, + metadata_sig_file=metadata_sig_file, + metadata_size=metadata_size, + part_sizes=part_sizes, + report_out_file=report_out_file) + + def Apply(self, new_parts, old_parts=None, bsdiff_in_place=True, + bspatch_path=None, puffpatch_path=None, + truncate_to_expected_size=True): + """Applies the update payload. 
+ + Args: + new_parts: map of partition name to dest partition file + old_parts: map of partition name to partition file (optional) + bsdiff_in_place: whether to perform BSDIFF operations in-place (optional) + bspatch_path: path to the bspatch binary (optional) + puffpatch_path: path to the puffpatch binary (optional) + truncate_to_expected_size: whether to truncate the resulting partitions + to their expected sizes, as specified in the + payload (optional) + + Raises: + PayloadError if payload application failed. + """ + self._AssertInit() + + # Create a short-lived payload applier object and run it. + helper = applier.PayloadApplier( + self, bsdiff_in_place=bsdiff_in_place, bspatch_path=bspatch_path, + puffpatch_path=puffpatch_path, + truncate_to_expected_size=truncate_to_expected_size) + helper.Run(new_parts, old_parts=old_parts) diff --git a/update-payload-extractor/update_payload/test_utils.py b/update-payload-extractor/update_payload/test_utils.py new file mode 100755 index 0000000..e153669 --- /dev/null +++ b/update-payload-extractor/update_payload/test_utils.py @@ -0,0 +1,359 @@ +# +# Copyright (C) 2013 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +"""Utilities for unit testing.""" + +from __future__ import absolute_import +from __future__ import print_function + +import io +import hashlib +import os +import struct +import subprocess + +from update_payload import common +from update_payload import payload +from update_payload import update_metadata_pb2 + + +class TestError(Exception): + """An error during testing of update payload code.""" + + +# Private/public RSA keys used for testing. +_PRIVKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), + 'payload-test-key.pem') +_PUBKEY_FILE_NAME = os.path.join(os.path.dirname(__file__), + 'payload-test-key.pub') + + +def KiB(count): + return count << 10 + + +def MiB(count): + return count << 20 + + +def GiB(count): + return count << 30 + + +def _WriteInt(file_obj, size, is_unsigned, val): + """Writes a binary-encoded integer to a file. + + It will do the correct conversion based on the reported size and whether or + not a signed number is expected. Assumes a network (big-endian) byte + ordering. + + Args: + file_obj: a file object + size: the integer size in bytes (2, 4 or 8) + is_unsigned: whether the integer is unsigned + val: integer value to encode + + Raises: + PayloadError if a write error occurred. + """ + try: + file_obj.write(struct.pack(common.IntPackingFmtStr(size, is_unsigned), val)) + except IOError as e: + raise payload.PayloadError('error writing to file (%s): %s' % + (file_obj.name, e)) + + +def _SetMsgField(msg, field_name, val): + """Sets or clears a field in a protobuf message.""" + if val is None: + msg.ClearField(field_name) + else: + setattr(msg, field_name, val) + + +def SignSha256(data, privkey_file_name): + """Signs the data's SHA256 hash with an RSA private key. 
+ + Args: + data: the data whose SHA256 hash we want to sign + privkey_file_name: private key used for signing data + + Returns: + The RSA signature of the data's ASN1-header-prefixed SHA256 hash. + + Raises: + TestError if something goes wrong. + """ + data_sha256_hash = common.SIG_ASN1_HEADER + hashlib.sha256(data).digest() + sign_cmd = ['openssl', 'rsautl', '-sign', '-inkey', privkey_file_name] + try: + sign_process = subprocess.Popen(sign_cmd, stdin=subprocess.PIPE, + stdout=subprocess.PIPE) + sig, _ = sign_process.communicate(input=data_sha256_hash) + except Exception as e: + raise TestError('signing subprocess failed: %s' % e) + + return sig + + +class SignaturesGenerator(object): + """Generates a payload signatures data block.""" + + def __init__(self): + self.sigs = update_metadata_pb2.Signatures() + + def AddSig(self, version, data): + """Adds a signature to the signature sequence. + + Args: + version: signature version (None means do not assign) + data: signature binary data (None means do not assign) + """ + sig = self.sigs.signatures.add() + if version is not None: + sig.version = version + if data is not None: + sig.data = data + + def ToBinary(self): + """Returns the binary representation of the signature block.""" + return self.sigs.SerializeToString() + + +class PayloadGenerator(object): + """Generates an update payload allowing low-level control. + + Attributes: + manifest: the protobuf containing the payload manifest + version: the payload version identifier + block_size: the block size pertaining to update operations + + """ + + def __init__(self, version=1): + self.manifest = update_metadata_pb2.DeltaArchiveManifest() + self.version = version + self.block_size = 0 + + @staticmethod + def _WriteExtent(ex, val): + """Populates an Extent message from a (start_block, num_blocks) pair.""" + start_block, num_blocks = val + _SetMsgField(ex, 'start_block', start_block) + _SetMsgField(ex, 'num_blocks', num_blocks) + + @staticmethod + def _AddValuesToRepeatedField(repeated_field, values, write_func): + """Adds values to a repeated message field.""" + if values: + for val in values: + new_item = repeated_field.add() + write_func(new_item, val) + + @staticmethod + def _AddExtents(extents_field, values): + """Adds extents to an extents field.""" + PayloadGenerator._AddValuesToRepeatedField( + extents_field, values, PayloadGenerator._WriteExtent) + + def SetBlockSize(self, block_size): + """Sets the payload's block size.""" + self.block_size = block_size + _SetMsgField(self.manifest, 'block_size', block_size) + + def SetPartInfo(self, part_name, is_new, part_size, part_hash): + """Set the partition info entry. + + Args: + part_name: The name of the partition. + is_new: Whether to set old (False) or new (True) info. + part_size: The partition size (in fact, filesystem size). + part_hash: The partition hash. 
+ """ + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + + part_info = (partition.new_partition_info if is_new + else partition.old_partition_info) + _SetMsgField(part_info, 'size', part_size) + _SetMsgField(part_info, 'hash', part_hash) + + def AddOperation(self, part_name, op_type, data_offset=None, + data_length=None, src_extents=None, src_length=None, + dst_extents=None, dst_length=None, data_sha256_hash=None): + """Adds an InstallOperation entry.""" + partition = next((x for x in self.manifest.partitions + if x.partition_name == part_name), None) + if partition is None: + partition = self.manifest.partitions.add() + partition.partition_name = part_name + + operations = partition.operations + op = operations.add() + op.type = op_type + + _SetMsgField(op, 'data_offset', data_offset) + _SetMsgField(op, 'data_length', data_length) + + self._AddExtents(op.src_extents, src_extents) + _SetMsgField(op, 'src_length', src_length) + + self._AddExtents(op.dst_extents, dst_extents) + _SetMsgField(op, 'dst_length', dst_length) + + _SetMsgField(op, 'data_sha256_hash', data_sha256_hash) + + def SetSignatures(self, sigs_offset, sigs_size): + """Set the payload's signature block descriptors.""" + _SetMsgField(self.manifest, 'signatures_offset', sigs_offset) + _SetMsgField(self.manifest, 'signatures_size', sigs_size) + + def SetMinorVersion(self, minor_version): + """Set the payload's minor version field.""" + _SetMsgField(self.manifest, 'minor_version', minor_version) + + def _WriteHeaderToFile(self, file_obj, manifest_len): + """Writes a payload heaer to a file.""" + # We need to access protected members in Payload for writing the header. + # pylint: disable=W0212 + file_obj.write(payload.Payload._PayloadHeader._MAGIC) + _WriteInt(file_obj, payload.Payload._PayloadHeader._VERSION_SIZE, True, + self.version) + _WriteInt(file_obj, payload.Payload._PayloadHeader._MANIFEST_LEN_SIZE, True, + manifest_len) + + def WriteToFile(self, file_obj, manifest_len=-1, data_blobs=None, + sigs_data=None, padding=None): + """Writes the payload content to a file. + + Args: + file_obj: a file object open for writing + manifest_len: manifest len to dump (otherwise computed automatically) + data_blobs: a list of data blobs to be concatenated to the payload + sigs_data: a binary Signatures message to be concatenated to the payload + padding: stuff to dump past the normal data blobs provided (optional) + """ + manifest = self.manifest.SerializeToString() + if manifest_len < 0: + manifest_len = len(manifest) + self._WriteHeaderToFile(file_obj, manifest_len) + file_obj.write(manifest) + if data_blobs: + for data_blob in data_blobs: + file_obj.write(data_blob) + if sigs_data: + file_obj.write(sigs_data) + if padding: + file_obj.write(padding) + + +class EnhancedPayloadGenerator(PayloadGenerator): + """Payload generator with automatic handling of data blobs. 
+ + Attributes: + data_blobs: a list of blobs, in the order they were added + curr_offset: the currently consumed offset of blobs added to the payload + """ + + def __init__(self): + super(EnhancedPayloadGenerator, self).__init__() + self.data_blobs = [] + self.curr_offset = 0 + + def AddData(self, data_blob): + """Adds a (possibly orphan) data blob.""" + data_length = len(data_blob) + data_offset = self.curr_offset + self.curr_offset += data_length + self.data_blobs.append(data_blob) + return data_length, data_offset + + def AddOperationWithData(self, part_name, op_type, src_extents=None, + src_length=None, dst_extents=None, dst_length=None, + data_blob=None, do_hash_data_blob=True): + """Adds an install operation and associated data blob. + + This takes care of obtaining a hash of the data blob (if so instructed) + and appending it to the internally maintained list of blobs, including the + necessary offset/length accounting. + + Args: + part_name: The name of the partition (e.g. kernel or root). + op_type: one of REPLACE, REPLACE_BZ, REPLACE_XZ. + src_extents: list of (start, length) pairs indicating src block ranges + src_length: size of the src data in bytes (needed for diff operations) + dst_extents: list of (start, length) pairs indicating dst block ranges + dst_length: size of the dst data in bytes (needed for diff operations) + data_blob: a data blob associated with this operation + do_hash_data_blob: whether or not to compute and add a data blob hash + """ + data_offset = data_length = data_sha256_hash = None + if data_blob is not None: + if do_hash_data_blob: + data_sha256_hash = hashlib.sha256(data_blob).digest() + data_length, data_offset = self.AddData(data_blob) + + self.AddOperation(part_name, op_type, data_offset=data_offset, + data_length=data_length, src_extents=src_extents, + src_length=src_length, dst_extents=dst_extents, + dst_length=dst_length, data_sha256_hash=data_sha256_hash) + + def WriteToFileWithData(self, file_obj, sigs_data=None, + privkey_file_name=None, padding=None): + """Writes the payload content to a file, optionally signing the content. + + Args: + file_obj: a file object open for writing + sigs_data: signatures blob to be appended to the payload (optional; + payload signature fields assumed to be preset by the caller) + privkey_file_name: key used for signing the payload (optional; used only + if explicit signatures blob not provided) + padding: stuff to dump past the normal data blobs provided (optional) + + Raises: + TestError: if arguments are inconsistent or something goes wrong. + """ + sigs_len = len(sigs_data) if sigs_data else 0 + + # Do we need to generate a genuine signatures blob? + do_generate_sigs_data = sigs_data is None and privkey_file_name + + if do_generate_sigs_data: + # First, sign some arbitrary data to obtain the size of a signature blob. + fake_sig = SignSha256(b'fake-payload-data', privkey_file_name) + fake_sigs_gen = SignaturesGenerator() + fake_sigs_gen.AddSig(1, fake_sig) + sigs_len = len(fake_sigs_gen.ToBinary()) + + # Update the payload with proper signature attributes. + self.SetSignatures(self.curr_offset, sigs_len) + + if do_generate_sigs_data: + # Once all payload fields are updated, dump and sign it. 
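+ # (Second pass: serialize the payload to an in-memory buffer, sign its + # SHA256 hash for real, and check that the resulting blob matches the + # size reserved above by the fake signature.)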
+ temp_payload_file = io.BytesIO() + self.WriteToFile(temp_payload_file, data_blobs=self.data_blobs) + sig = SignSha256(temp_payload_file.getvalue(), privkey_file_name) + sigs_gen = SignaturesGenerator() + sigs_gen.AddSig(1, sig) + sigs_data = sigs_gen.ToBinary() + assert len(sigs_data) == sigs_len, 'signature blob lengths mismatch' + + # Dump the whole thing, complete with data and signature blob, to a file. + self.WriteToFile(file_obj, data_blobs=self.data_blobs, sigs_data=sigs_data, + padding=padding) diff --git a/update-payload-extractor/update_payload/update-payload-key.pub.pem b/update-payload-extractor/update_payload/update-payload-key.pub.pem new file mode 100755 index 0000000..7ac369f --- /dev/null +++ b/update-payload-extractor/update_payload/update-payload-key.pub.pem @@ -0,0 +1,9 @@ +-----BEGIN PUBLIC KEY----- +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1Bg9BnjWhX3jJyECeXqF +O28nkYTF1NHWLlFHgzAGg+ysva22BL3S5LlsNejnYVg/xzx3izvAQyOF3I1TJVOy +2fH1DoZOWyKuckMyUrFQbO6OV1VIvPUPKckHadWcXSsHj2lBdDPH9xRDEBsXeztf +nAGBD8GlAyTU7iH+Bf+xzyK9k4BmITf4Nx4xWhRZ6gm2Fc2SEP3x5N5fohkLv5ZP +kFr0fj5wUK+0XF95rkGFBLIq2XACS3dmxMFToFl1HMM1HonUg9TAH+3dVH93zue1 +y81mkTuGnNX+zYya5ov2kD8zW1V10iTOSJfOlho5T8FpKbG37o3yYcUiyMHKO1Iv +PQIDAQAB +-----END PUBLIC KEY----- diff --git a/update-payload-extractor/update_payload/update_metadata_pb2.py b/update-payload-extractor/update_payload/update_metadata_pb2.py new file mode 100755 index 0000000..bcd8187 --- /dev/null +++ b/update-payload-extractor/update_payload/update_metadata_pb2.py @@ -0,0 +1,824 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: update_metadata.proto + +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='update_metadata.proto', + package='chromeos_update_engine', + syntax='proto2', + serialized_options=b'H\003', + serialized_pb=b'\n\x15update_metadata.proto\x12\x16\x63hromeos_update_engine\"1\n\x06\x45xtent\x12\x13\n\x0bstart_block\x18\x01 \x01(\x04\x12\x12\n\nnum_blocks\x18\x02 \x01(\x04\"z\n\nSignatures\x12@\n\nsignatures\x18\x01 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x1a*\n\tSignature\x12\x0f\n\x07version\x18\x01 \x01(\r\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\"+\n\rPartitionInfo\x12\x0c\n\x04size\x18\x01 \x01(\x04\x12\x0c\n\x04hash\x18\x02 \x01(\x0c\"w\n\tImageInfo\x12\r\n\x05\x62oard\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0f\n\x07\x63hannel\x18\x03 \x01(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\x12\x15\n\rbuild_channel\x18\x05 \x01(\t\x12\x15\n\rbuild_version\x18\x06 \x01(\t\"\xe6\x03\n\x10InstallOperation\x12;\n\x04type\x18\x01 \x02(\x0e\x32-.chromeos_update_engine.InstallOperation.Type\x12\x13\n\x0b\x64\x61ta_offset\x18\x02 \x01(\r\x12\x13\n\x0b\x64\x61ta_length\x18\x03 \x01(\r\x12\x33\n\x0bsrc_extents\x18\x04 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\nsrc_length\x18\x05 \x01(\x04\x12\x33\n\x0b\x64st_extents\x18\x06 \x03(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x12\n\ndst_length\x18\x07 \x01(\x04\x12\x18\n\x10\x64\x61ta_sha256_hash\x18\x08 \x01(\x0c\x12\x17\n\x0fsrc_sha256_hash\x18\t 
\x01(\x0c\"\xa5\x01\n\x04Type\x12\x0b\n\x07REPLACE\x10\x00\x12\x0e\n\nREPLACE_BZ\x10\x01\x12\x08\n\x04MOVE\x10\x02\x12\n\n\x06\x42SDIFF\x10\x03\x12\x0f\n\x0bSOURCE_COPY\x10\x04\x12\x11\n\rSOURCE_BSDIFF\x10\x05\x12\x0e\n\nREPLACE_XZ\x10\x08\x12\x08\n\x04ZERO\x10\x06\x12\x0b\n\x07\x44ISCARD\x10\x07\x12\x11\n\rBROTLI_BSDIFF\x10\n\x12\x0c\n\x08PUFFDIFF\x10\t\"\xd7\x05\n\x0fPartitionUpdate\x12\x16\n\x0epartition_name\x18\x01 \x02(\t\x12\x17\n\x0frun_postinstall\x18\x02 \x01(\x08\x12\x18\n\x10postinstall_path\x18\x03 \x01(\t\x12\x17\n\x0f\x66ilesystem_type\x18\x04 \x01(\t\x12M\n\x17new_partition_signature\x18\x05 \x03(\x0b\x32,.chromeos_update_engine.Signatures.Signature\x12\x41\n\x12old_partition_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x41\n\x12new_partition_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12<\n\noperations\x18\x08 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x1c\n\x14postinstall_optional\x18\t \x01(\x08\x12=\n\x15hash_tree_data_extent\x18\n \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x38\n\x10hash_tree_extent\x18\x0b \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x1b\n\x13hash_tree_algorithm\x18\x0c \x01(\t\x12\x16\n\x0ehash_tree_salt\x18\r \x01(\x0c\x12\x37\n\x0f\x66\x65\x63_data_extent\x18\x0e \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x32\n\nfec_extent\x18\x0f \x01(\x0b\x32\x1e.chromeos_update_engine.Extent\x12\x14\n\tfec_roots\x18\x10 \x01(\r:\x01\x32\"L\n\x15\x44ynamicPartitionGroup\x12\x0c\n\x04name\x18\x01 \x02(\t\x12\x0c\n\x04size\x18\x02 \x01(\x04\x12\x17\n\x0fpartition_names\x18\x03 \x03(\t\"Y\n\x18\x44ynamicPartitionMetadata\x12=\n\x06groups\x18\x01 \x03(\x0b\x32-.chromeos_update_engine.DynamicPartitionGroup\"\xdb\x05\n\x14\x44\x65ltaArchiveManifest\x12\x44\n\x12install_operations\x18\x01 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12K\n\x19kernel_install_operations\x18\x02 \x03(\x0b\x32(.chromeos_update_engine.InstallOperation\x12\x18\n\nblock_size\x18\x03 \x01(\r:\x04\x34\x30\x39\x36\x12\x19\n\x11signatures_offset\x18\x04 \x01(\x04\x12\x17\n\x0fsignatures_size\x18\x05 \x01(\x04\x12>\n\x0fold_kernel_info\x18\x06 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_kernel_info\x18\x07 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fold_rootfs_info\x18\x08 \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12>\n\x0fnew_rootfs_info\x18\t \x01(\x0b\x32%.chromeos_update_engine.PartitionInfo\x12\x39\n\x0eold_image_info\x18\n \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x39\n\x0enew_image_info\x18\x0b \x01(\x0b\x32!.chromeos_update_engine.ImageInfo\x12\x18\n\rminor_version\x18\x0c \x01(\r:\x01\x30\x12;\n\npartitions\x18\r \x03(\x0b\x32\'.chromeos_update_engine.PartitionUpdate\x12\x15\n\rmax_timestamp\x18\x0e \x01(\x03\x42\x02H\x03' +) + + + +_INSTALLOPERATION_TYPE = _descriptor.EnumDescriptor( + name='Type', + full_name='chromeos_update_engine.InstallOperation.Type', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='REPLACE', index=0, number=0, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REPLACE_BZ', index=1, number=1, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='MOVE', index=2, number=2, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BSDIFF', index=3, number=3, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SOURCE_COPY', index=4, number=4, + 
serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='SOURCE_BSDIFF', index=5, number=5, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='REPLACE_XZ', index=6, number=8, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='ZERO', index=7, number=6, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='DISCARD', index=8, number=7, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='BROTLI_BSDIFF', index=9, number=10, + serialized_options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='PUFFDIFF', index=10, number=9, + serialized_options=None, + type=None), + ], + containing_type=None, + serialized_options=None, + serialized_start=712, + serialized_end=877, +) +_sym_db.RegisterEnumDescriptor(_INSTALLOPERATION_TYPE) + + +_EXTENT = _descriptor.Descriptor( + name='Extent', + full_name='chromeos_update_engine.Extent', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='start_block', full_name='chromeos_update_engine.Extent.start_block', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='num_blocks', full_name='chromeos_update_engine.Extent.num_blocks', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=49, + serialized_end=98, +) + + +_SIGNATURES_SIGNATURE = _descriptor.Descriptor( + name='Signature', + full_name='chromeos_update_engine.Signatures.Signature', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='version', full_name='chromeos_update_engine.Signatures.Signature.version', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data', full_name='chromeos_update_engine.Signatures.Signature.data', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=180, + serialized_end=222, +) + +_SIGNATURES = _descriptor.Descriptor( + name='Signatures', + full_name='chromeos_update_engine.Signatures', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='signatures', full_name='chromeos_update_engine.Signatures.signatures', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, 
containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_SIGNATURES_SIGNATURE, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=100, + serialized_end=222, +) + + +_PARTITIONINFO = _descriptor.Descriptor( + name='PartitionInfo', + full_name='chromeos_update_engine.PartitionInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='size', full_name='chromeos_update_engine.PartitionInfo.size', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash', full_name='chromeos_update_engine.PartitionInfo.hash', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=224, + serialized_end=267, +) + + +_IMAGEINFO = _descriptor.Descriptor( + name='ImageInfo', + full_name='chromeos_update_engine.ImageInfo', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='board', full_name='chromeos_update_engine.ImageInfo.board', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='key', full_name='chromeos_update_engine.ImageInfo.key', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='channel', full_name='chromeos_update_engine.ImageInfo.channel', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='version', full_name='chromeos_update_engine.ImageInfo.version', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='build_channel', full_name='chromeos_update_engine.ImageInfo.build_channel', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='build_version', 
full_name='chromeos_update_engine.ImageInfo.build_version', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=269, + serialized_end=388, +) + + +_INSTALLOPERATION = _descriptor.Descriptor( + name='InstallOperation', + full_name='chromeos_update_engine.InstallOperation', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='chromeos_update_engine.InstallOperation.type', index=0, + number=1, type=14, cpp_type=8, label=2, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data_offset', full_name='chromeos_update_engine.InstallOperation.data_offset', index=1, + number=2, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data_length', full_name='chromeos_update_engine.InstallOperation.data_length', index=2, + number=3, type=13, cpp_type=3, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='src_extents', full_name='chromeos_update_engine.InstallOperation.src_extents', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='src_length', full_name='chromeos_update_engine.InstallOperation.src_length', index=4, + number=5, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='dst_extents', full_name='chromeos_update_engine.InstallOperation.dst_extents', index=5, + number=6, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='dst_length', full_name='chromeos_update_engine.InstallOperation.dst_length', index=6, + number=7, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='data_sha256_hash', full_name='chromeos_update_engine.InstallOperation.data_sha256_hash', index=7, + number=8, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + 
is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='src_sha256_hash', full_name='chromeos_update_engine.InstallOperation.src_sha256_hash', index=8, + number=9, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _INSTALLOPERATION_TYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=391, + serialized_end=877, +) + + +_PARTITIONUPDATE = _descriptor.Descriptor( + name='PartitionUpdate', + full_name='chromeos_update_engine.PartitionUpdate', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='partition_name', full_name='chromeos_update_engine.PartitionUpdate.partition_name', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='run_postinstall', full_name='chromeos_update_engine.PartitionUpdate.run_postinstall', index=1, + number=2, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='postinstall_path', full_name='chromeos_update_engine.PartitionUpdate.postinstall_path', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='filesystem_type', full_name='chromeos_update_engine.PartitionUpdate.filesystem_type', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='new_partition_signature', full_name='chromeos_update_engine.PartitionUpdate.new_partition_signature', index=4, + number=5, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='old_partition_info', full_name='chromeos_update_engine.PartitionUpdate.old_partition_info', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='new_partition_info', full_name='chromeos_update_engine.PartitionUpdate.new_partition_info', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='operations', full_name='chromeos_update_engine.PartitionUpdate.operations', index=7, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='postinstall_optional', full_name='chromeos_update_engine.PartitionUpdate.postinstall_optional', index=8, + number=9, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_data_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_data_extent', index=9, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_extent', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_extent', index=10, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_algorithm', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_algorithm', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='hash_tree_salt', full_name='chromeos_update_engine.PartitionUpdate.hash_tree_salt', index=12, + number=13, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fec_data_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_data_extent', index=13, + number=14, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fec_extent', full_name='chromeos_update_engine.PartitionUpdate.fec_extent', index=14, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='fec_roots', full_name='chromeos_update_engine.PartitionUpdate.fec_roots', index=15, + number=16, type=13, cpp_type=3, label=1, + has_default_value=True, default_value=2, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + 
serialized_start=880, + serialized_end=1607, +) + + +_DYNAMICPARTITIONGROUP = _descriptor.Descriptor( + name='DynamicPartitionGroup', + full_name='chromeos_update_engine.DynamicPartitionGroup', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='chromeos_update_engine.DynamicPartitionGroup.name', index=0, + number=1, type=9, cpp_type=9, label=2, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='size', full_name='chromeos_update_engine.DynamicPartitionGroup.size', index=1, + number=2, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='partition_names', full_name='chromeos_update_engine.DynamicPartitionGroup.partition_names', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1609, + serialized_end=1685, +) + + +_DYNAMICPARTITIONMETADATA = _descriptor.Descriptor( + name='DynamicPartitionMetadata', + full_name='chromeos_update_engine.DynamicPartitionMetadata', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='groups', full_name='chromeos_update_engine.DynamicPartitionMetadata.groups', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1687, + serialized_end=1776, +) + + +_DELTAARCHIVEMANIFEST = _descriptor.Descriptor( + name='DeltaArchiveManifest', + full_name='chromeos_update_engine.DeltaArchiveManifest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.install_operations', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='kernel_install_operations', full_name='chromeos_update_engine.DeltaArchiveManifest.kernel_install_operations', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='block_size', full_name='chromeos_update_engine.DeltaArchiveManifest.block_size', index=2, + number=3, type=13, cpp_type=3, label=1, + 
has_default_value=True, default_value=4096, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='signatures_offset', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_offset', index=3, + number=4, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='signatures_size', full_name='chromeos_update_engine.DeltaArchiveManifest.signatures_size', index=4, + number=5, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='old_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_kernel_info', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='new_kernel_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_kernel_info', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='old_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_rootfs_info', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='new_rootfs_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_rootfs_info', index=8, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='old_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.old_image_info', index=9, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='new_image_info', full_name='chromeos_update_engine.DeltaArchiveManifest.new_image_info', index=10, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='minor_version', full_name='chromeos_update_engine.DeltaArchiveManifest.minor_version', index=11, + number=12, type=13, cpp_type=3, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, 
file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='partitions', full_name='chromeos_update_engine.DeltaArchiveManifest.partitions', index=12, + number=13, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='max_timestamp', full_name='chromeos_update_engine.DeltaArchiveManifest.max_timestamp', index=13, + number=14, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1779, + serialized_end=2510, +) + +_SIGNATURES_SIGNATURE.containing_type = _SIGNATURES +_SIGNATURES.fields_by_name['signatures'].message_type = _SIGNATURES_SIGNATURE +_INSTALLOPERATION.fields_by_name['type'].enum_type = _INSTALLOPERATION_TYPE +_INSTALLOPERATION.fields_by_name['src_extents'].message_type = _EXTENT +_INSTALLOPERATION.fields_by_name['dst_extents'].message_type = _EXTENT +_INSTALLOPERATION_TYPE.containing_type = _INSTALLOPERATION +_PARTITIONUPDATE.fields_by_name['new_partition_signature'].message_type = _SIGNATURES_SIGNATURE +_PARTITIONUPDATE.fields_by_name['old_partition_info'].message_type = _PARTITIONINFO +_PARTITIONUPDATE.fields_by_name['new_partition_info'].message_type = _PARTITIONINFO +_PARTITIONUPDATE.fields_by_name['operations'].message_type = _INSTALLOPERATION +_PARTITIONUPDATE.fields_by_name['hash_tree_data_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['hash_tree_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['fec_data_extent'].message_type = _EXTENT +_PARTITIONUPDATE.fields_by_name['fec_extent'].message_type = _EXTENT +_DYNAMICPARTITIONMETADATA.fields_by_name['groups'].message_type = _DYNAMICPARTITIONGROUP +_DELTAARCHIVEMANIFEST.fields_by_name['install_operations'].message_type = _INSTALLOPERATION +_DELTAARCHIVEMANIFEST.fields_by_name['kernel_install_operations'].message_type = _INSTALLOPERATION +_DELTAARCHIVEMANIFEST.fields_by_name['old_kernel_info'].message_type = _PARTITIONINFO +_DELTAARCHIVEMANIFEST.fields_by_name['new_kernel_info'].message_type = _PARTITIONINFO +_DELTAARCHIVEMANIFEST.fields_by_name['old_rootfs_info'].message_type = _PARTITIONINFO +_DELTAARCHIVEMANIFEST.fields_by_name['new_rootfs_info'].message_type = _PARTITIONINFO +_DELTAARCHIVEMANIFEST.fields_by_name['old_image_info'].message_type = _IMAGEINFO +_DELTAARCHIVEMANIFEST.fields_by_name['new_image_info'].message_type = _IMAGEINFO +_DELTAARCHIVEMANIFEST.fields_by_name['partitions'].message_type = _PARTITIONUPDATE +DESCRIPTOR.message_types_by_name['Extent'] = _EXTENT +DESCRIPTOR.message_types_by_name['Signatures'] = _SIGNATURES +DESCRIPTOR.message_types_by_name['PartitionInfo'] = _PARTITIONINFO +DESCRIPTOR.message_types_by_name['ImageInfo'] = _IMAGEINFO +DESCRIPTOR.message_types_by_name['InstallOperation'] = _INSTALLOPERATION +DESCRIPTOR.message_types_by_name['PartitionUpdate'] = _PARTITIONUPDATE +DESCRIPTOR.message_types_by_name['DynamicPartitionGroup'] = _DYNAMICPARTITIONGROUP +DESCRIPTOR.message_types_by_name['DynamicPartitionMetadata'] = _DYNAMICPARTITIONMETADATA +DESCRIPTOR.message_types_by_name['DeltaArchiveManifest'] = 
_DELTAARCHIVEMANIFEST +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Extent = _reflection.GeneratedProtocolMessageType('Extent', (_message.Message,), { + 'DESCRIPTOR' : _EXTENT, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.Extent) + }) +_sym_db.RegisterMessage(Extent) + +Signatures = _reflection.GeneratedProtocolMessageType('Signatures', (_message.Message,), { + + 'Signature' : _reflection.GeneratedProtocolMessageType('Signature', (_message.Message,), { + 'DESCRIPTOR' : _SIGNATURES_SIGNATURE, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures.Signature) + }) + , + 'DESCRIPTOR' : _SIGNATURES, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.Signatures) + }) +_sym_db.RegisterMessage(Signatures) +_sym_db.RegisterMessage(Signatures.Signature) + +PartitionInfo = _reflection.GeneratedProtocolMessageType('PartitionInfo', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONINFO, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionInfo) + }) +_sym_db.RegisterMessage(PartitionInfo) + +ImageInfo = _reflection.GeneratedProtocolMessageType('ImageInfo', (_message.Message,), { + 'DESCRIPTOR' : _IMAGEINFO, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.ImageInfo) + }) +_sym_db.RegisterMessage(ImageInfo) + +InstallOperation = _reflection.GeneratedProtocolMessageType('InstallOperation', (_message.Message,), { + 'DESCRIPTOR' : _INSTALLOPERATION, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.InstallOperation) + }) +_sym_db.RegisterMessage(InstallOperation) + +PartitionUpdate = _reflection.GeneratedProtocolMessageType('PartitionUpdate', (_message.Message,), { + 'DESCRIPTOR' : _PARTITIONUPDATE, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.PartitionUpdate) + }) +_sym_db.RegisterMessage(PartitionUpdate) + +DynamicPartitionGroup = _reflection.GeneratedProtocolMessageType('DynamicPartitionGroup', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONGROUP, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionGroup) + }) +_sym_db.RegisterMessage(DynamicPartitionGroup) + +DynamicPartitionMetadata = _reflection.GeneratedProtocolMessageType('DynamicPartitionMetadata', (_message.Message,), { + 'DESCRIPTOR' : _DYNAMICPARTITIONMETADATA, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.DynamicPartitionMetadata) + }) +_sym_db.RegisterMessage(DynamicPartitionMetadata) + +DeltaArchiveManifest = _reflection.GeneratedProtocolMessageType('DeltaArchiveManifest', (_message.Message,), { + 'DESCRIPTOR' : _DELTAARCHIVEMANIFEST, + '__module__' : 'update_metadata_pb2' + # @@protoc_insertion_point(class_scope:chromeos_update_engine.DeltaArchiveManifest) + }) +_sym_db.RegisterMessage(DeltaArchiveManifest) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope)
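
Usage note (illustrative, not part of the diff): a minimal sketch of how the Payload class from update_payload/payload.py above is typically driven when unpacking a payload. It assumes a Brillo-style (major version 2) full payload whose manifest uses the partitions field; the input file name payload.bin and the output directory are hypothetical, and only the Payload API itself comes from the code above.

    import os

    from update_payload.payload import Payload

    os.makedirs('output', exist_ok=True)      # assumed output directory

    with open('payload.bin', 'rb') as f:      # assumed input file name
        pl = Payload(f)
        pl.Init()       # parse header, manifest and (optional) metadata signature
        pl.Describe()   # print any embedded old/new image info

        # Map every partition named in the manifest to a destination image file;
        # a full payload carries no old_partition_info, so old_parts is omitted.
        new_parts = {p.partition_name: os.path.join('output', p.partition_name + '.img')
                     for p in pl.manifest.partitions}
        pl.Apply(new_parts)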