// roughly based on https://github.com/balena-io-modules/etcher-sdk/blob/f7d318fa67a10601a703b6b9758118825de9dc7a/examples/multi-destination.ts
// and https://github.com/balena-io/etcher/blob/b2c4f7a25004bd83a2dc74ccb31be96f49523223/lib/gui/modules/child-writer.ts#L114

// This file is meant to be invoked by runNodeWithElevatedPermissions.js.
// It is spawned as a separate process so it can have admin permissions to write disks.
// It connects to a node-ipc server hosted by the main electron process to get its instructions,
// and it reports its status back to that server.
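
// IPC protocol, as implemented below:
//   emitted to the server:    'ready' (once connected), 'log' (forwarded log lines),
//                             'status' ({ progress, phase, bytesPerSecond }), 'error' (serialized error)
//   received from the server: 'args' ([imagePath, overlay, blockDevicePath]),
//                             plus node-ipc's own 'connect', 'disconnect' and 'error' events.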

/*
 * Copyright 2017 balena.io
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

const fs = require('fs');
const util = require('util');
const path = require('path');
const Readable = require('stream').Readable;
const tar = require('tar');
const etcherSDK = require('etcher-sdk');
const balenaImageFS = require('balena-image-fs');
const balenaPartitionInfo = require('partitioninfo');
const balenaFileDisk = require('file-disk');
const cleanupTmpFiles = require('etcher-sdk/build/tmp').cleanupTmpFiles;
const nodeIPC = require('node-ipc');

const myUtils = require('./utils.js');

// https://en.wikipedia.org/wiki/Partition_type
// These are hexadecimal integer ids representing the ext filesystem
// in an MBR partition record.
const extFileSystemMBRPartitionTypes = {
  "93": 1,
  "83": 1,
  "43": 1,
};
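// (Keys are partition.type.toString(16); the commented-out check in the partition
// loop of writeImageToBlockDevice() below would consult this map to skip
// non-ext partitions.)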

//const rootsystemLocation = "/opt";

nodeIPC.config.id = process.env.IPC_CLIENT_ID;
nodeIPC.config.socketRoot = process.env.IPC_SOCKET_ROOT;

// NOTE: Ensure this isn't disabled, as it will cause
// the stdout maxBuffer size to be exceeded when flashing.
nodeIPC.config.silent = true;

// > If set to 0, the client will NOT try to reconnect.
// See https://github.com/RIAEvangelist/node-ipc/
//
// The purpose behind this change is for this process
// to emit a "disconnect" event as soon as the GUI
// process is closed, so we can kill this process as well.
nodeIPC.config.stopRetrying = 0;
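
// Milliseconds to wait before exiting after an uncaught exception; this presumably
// gives the 'error' message emitted in globalUncaughtError() time to reach the
// server before the process terminates.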
const DISCONNECT_DELAY = 100;
const IPC_SERVER_ID = process.env.IPC_SERVER_ID;

function terminate(exitCode) {
  nodeIPC.disconnect(IPC_SERVER_ID);
  process.nextTick(() => {
    process.exit(exitCode || 0);
  });
}

myUtils.startLoggingToFile("seedpacket-write-image-to-block-device.log");
let myLog = myUtils.myLog;
let isLoggingToServer = false;

const globalUncaughtError = (err) => {
  myLog("uncaughtException: " + err.stack);
  if(nodeIPC.of[IPC_SERVER_ID]) {
    nodeIPC.of[IPC_SERVER_ID].emit('error', errorToJSON(err));
  }
  setTimeout(() => {
    terminate(1);
  }, DISCONNECT_DELAY);
};

myLog("writeImageToBlockDevice.js starting, nodejs version: " + process.version);

// see runNodeWithElevatedPermissions.js

nodeIPC.connectTo(IPC_SERVER_ID, () => {

  // Monkey patch myLog to send log messages to the main process.
  const originalMyLog = myLog;
  myLog = (message) => {
    originalMyLog(message);

    // isLoggingToServer mutex to prevent stack overflow
    // when .emit() causes the error event to be raised,
    // which in turn causes myLog() to be called again.
    if(!isLoggingToServer) {
      isLoggingToServer = true;
      if(nodeIPC.of[IPC_SERVER_ID]) {
        nodeIPC.of[IPC_SERVER_ID].emit('log', message);
      }
      isLoggingToServer = false;
    }
  };
myLog("connected to " + IPC_SERVER_ID); |
|
|
|
// Remove leftover tmp files older than 1 hour |
|
cleanupTmpFiles(Date.now() - 60 * 60 * 1000); |
|
|
|
process.once('uncaughtException', globalUncaughtError); |
|
|
|
// Gracefully exit on the following cases. If the parent |
|
// process detects that child exit successfully but |
|
// no flashing information is available, then it will |
|
// assume that the child died halfway through. |
|
process.once('SIGINT', () => { |
|
myLog("SIGINT"); |
|
terminate(); |
|
}); |
|
process.once('SIGTERM', () => { |
|
myLog("SIGTERM"); |
|
terminate(); |
|
}); |
|
// The IPC server failed. Abort. |
|
nodeIPC.of[IPC_SERVER_ID].on('error', () => { |
|
myLog(`got error event from ${IPC_SERVER_ID}`); |
|
terminate(); |
|
}); |
|
nodeIPC.of[IPC_SERVER_ID].on('disconnect', () => { |
|
myLog(`got disconnect event from ${IPC_SERVER_ID}`); |
|
terminate(); |
|
}); |
|
|
|

  nodeIPC.of[IPC_SERVER_ID].on('args', (argumentsArray) => {
    const argsString = JSON.stringify({
      imagePath: argumentsArray[0],
      overlay: "{...}",
      blockDevicePath: argumentsArray[2]
    });
    myLog(`got args event from ${IPC_SERVER_ID}: ${argsString}`);
    writeImageToBlockDevice(...argumentsArray).catch(err => {
      myLog(`writeImageToBlockDevice rejected with error: ${err.stack}`);
      terminate(1);
    });
  });

  nodeIPC.of[IPC_SERVER_ID].on('connect', () => {
    myLog(
      `Successfully connected to IPC server: ${IPC_SERVER_ID}, socket root ${nodeIPC.config.socketRoot}`,
    );
    nodeIPC.of[IPC_SERVER_ID].emit('ready', {});
  });

});
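
/**
 * Flashes the image at imagePath onto the block device at blockDevicePath, then
 * locates the main (root) partition of the freshly written disk and applies the
 * configuration overlay to its filesystem.
 *
 * overlay maps absolute paths on the target filesystem to file descriptors.
 * Each entry may carry `mode` (used for the written file and for any parent
 * directories that have to be created) and must have either `content` (written
 * verbatim) or `replaceLine` ({ match, replace }: every line matching the `match`
 * regex is replaced with `replace`). A hypothetical example, values for
 * illustration only:
 *
 *   {
 *     "/etc/hostname": { mode: 0o644, content: "my-device\n" },
 *     "/etc/ssh/sshd_config": {
 *       mode: 0o600,
 *       replaceLine: { match: "^#?PermitRootLogin", replace: "PermitRootLogin no" }
 *     }
 *   }
 */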
async function writeImageToBlockDevice(imagePath, overlay, blockDevicePath) {

  myLog(`writeImageToBlockDevice('${imagePath}', '${blockDevicePath}')`);

  const adapters = [
    new etcherSDK.scanner.adapters.BlockDeviceAdapter({
      includeSystemDrives: () => false,
      unmountOnSuccess: false,
      write: true,
      direct: true,
    }),
  ];
  const deviceScanner = new etcherSDK.scanner.Scanner(adapters);

  // Wait for the deviceScanner to be ready.
  await new Promise((resolve, reject) => {
    deviceScanner.on('error', reject);
    deviceScanner.on('ready', resolve);
    deviceScanner.start();
  });

  myLog(`deviceScanner ready`);

  const destinationDrives = Array.from(deviceScanner.drives.values())
    .filter(x => x.path == blockDevicePath);

  myLog(`found ${Array.from(deviceScanner.drives.values()).length} drives. `);
  myLog(`destinationDrives: [${destinationDrives.map(x => x.path).join(', ')}]`);

  if(destinationDrives.length == 0) {
    throw new Error(`block device ${blockDevicePath} not found`);
  }

  if(destinationDrives.length > 1) {
    throw new Error(`more than 1 block device found matching ${blockDevicePath}`);
  }

  // default values lifted from:
  // https://github.com/balena-io/etcher/blob/a24be20e952ac041755b8e29c84cd72d1149d6c9/lib/gui/app/models/settings.ts#L75

  // decompressThenFlash usage example lifted from:
  // https://github.com/balena-io/etcher/blob/b2c4f7a25004bd83a2dc74ccb31be96f49523223/lib/gui/modules/child-writer.ts#L114

  let source = new etcherSDK.sourceDestination.File({ path: imagePath });

  source = await source.getInnerSource();

  myLog(`decompressThenFlash(...) `);
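
  // decompressThenFlash() reports per-destination write failures through its onFail
  // callback; wrapping the call in a Promise lets onFail reject (aborting this
  // function), while resolve() runs once the awaited call has returned.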
  await new Promise(async (resolve, reject) => {
    await etcherSDK.multiWrite.decompressThenFlash({
      source: source, // etcherSDK.sourceDestination.SourceDestination
      destinations: destinationDrives, // etcherSDK.sourceDestination.BlockDevice[];
      onProgress: (progress) => { // OnProgressFunction: (progress: MultiDestinationProgress) => void;
        // too verbose:
        // myLog(`onProgress: ${JSON.stringify(progress, null, " ")}`);
        const progressTypeStringIds = {
          "flashing": "description_Flashing2of3",
          "extracting": "description_Extracting1of3",
          "decompressing": "description_Extracting1of3",
          "verifying": "description_Verifying3of3"
        };

        if(!progressTypeStringIds[progress.type]) {
          myLog(`progressTypeStringIds does not include: ${progress.type}`);
        }

        nodeIPC.of[IPC_SERVER_ID].emit('status', {
          progress: progress.percentage / 100,
          phase: progressTypeStringIds[progress.type],
          bytesPerSecond: progress.speed,
        });
      },
      onFail: (destination, err) => {
        myLog(`decompressThenFlash onFail(): ${JSON.stringify(destination)}: ${err}`);
        reject(err);
      },
      verify: true, // boolean;
      trim: true, // autoBlockmapping: true,
      numBuffers: 256, // number of 1mb buffers to use when writing.
      decompressFirst: true,
    });

    resolve();
  });

  myLog(`decompressThenFlash resolved! Now inspecting the disk ${destinationDrives[0].path} `);

  await balenaFileDisk.withOpenFile(destinationDrives[0].path, 'r+', async (handle) => {

    myLog(`balenaFileDisk.withOpenFile('${destinationDrives[0].path}', 'r+', (handle) => {`);
    myLog(`  called back with a file handle!`);

    const disk = new balenaFileDisk.FileDisk(handle);

    const partitionInfo = await balenaPartitionInfo.getPartitions(disk);

    const mainPartitions = [];
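
    // Heuristic for finding the root filesystem: try to open each partition and
    // look for top-level 'bin' and 'etc' directories; exactly one partition is
    // expected to match.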
    for(const partition of partitionInfo.partitions) {

      myLog(`checking partition #${partition.index}, type ${partition.type.toString(16)}, offset ${partition.offset}`);

      // if( extFileSystemMBRPartitionTypes[partition.type.toString(16)] ) {
      // }
      const thePartition = partition.index;
      try {
        await balenaImageFS.interact(disk, thePartition, async (imageFs) => {
          const files = await util.promisify(imageFs.readdir)("/");
          if(files.includes('bin') && files.includes('etc')) {
            myLog(`balenaImageFS.interact(fs => fs.readdir('/')): located main file system on partition #${thePartition}`);
            mainPartitions.push(thePartition);
          }
        });
      } catch (err) {
        // no one cares as long as we get 1 mainPartition
        myLog(`  partition #${partition.index} failed to mount/readdir: ${err}`);
      }

    }

    if(mainPartitions.length == 0) {
      throw new Error(`No main partition was found on ${destinationDrives[0].path}`);
    }
    if(mainPartitions.length > 1) {
      throw new Error(`More than one main partition was found on ${destinationDrives[0].path}`);
    }

    myLog(`writing configuration overlay onto the disk...`);

    await balenaImageFS.interact(disk, mainPartitions[0], async (imageFs) => {

      const imageFsReadDirAsync = util.promisify(imageFs.readdir);
      const imageFsMkDirAsync = util.promisify(imageFs.mkdir);
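
      // knownPaths is a lazily-populated tree of the directories seen so far on the
      // image filesystem: each node maps a child directory name to a nested node, and
      // the "$unexplored" flag marks directories whose contents have not been listed
      // yet. This lets the loop below create missing parent directories for each
      // overlay path without re-listing directories it has already visited.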
      const knownPaths = {"/": true};
      (await imageFsReadDirAsync("/")).forEach(x => knownPaths[x] = {"$unexplored": true});

      for(const [path, file] of Object.entries(overlay)) {

        myLog(`trying to ensure directories exist for: ${path}\n`);
        //myLog(`knownPaths: ${JSON.stringify(knownPaths, null, " ")}\n`);

        let pathSplit = path.split("/").filter(x => x != "");
        pathSplit = pathSplit.splice(0, pathSplit.length - 1);

        //myLog(`pathSplit: ${JSON.stringify(pathSplit)}\n`);
        let currentKnownPaths = knownPaths;
        let currentPath = [];
        while(pathSplit.length > 0) {
          const nextPath = pathSplit.shift();
          currentPath.push(nextPath);

          //myLog(`currentPath: /${currentPath.join("/")}, nextPath: ${nextPath} \n`);
          if(!currentKnownPaths[nextPath]) {
            const toCreate = `/${currentPath.join("/")}`;
            const mode = file[toCreate] !== undefined ? file[toCreate] : file.mode;

            myLog(`mkdir: ${toCreate} mode: ${mode.toString(8)}`);
            await imageFsMkDirAsync(toCreate, mode);
            currentKnownPaths[nextPath] = {"$unexplored": false};
          } else if(currentKnownPaths[nextPath]["$unexplored"]) {
            myLog(`listing: /${currentPath.join("/")}`);
            (await imageFsReadDirAsync(`/${currentPath.join("/")}`)).forEach(x => currentKnownPaths[nextPath][x] = {"$unexplored": true});
            currentKnownPaths[nextPath]["$unexplored"] = false;
          }

          currentKnownPaths = currentKnownPaths[nextPath];
        }
        myLog(`writeFile: ${path}`);

        let originalContents = '';

        if(file.replaceLine) {
          originalContents = await util.promisify(imageFs.readFile)(path);
          //myLog(`originalContents: ${originalContents}`);
        }

        const writeStream = imageFs.createWriteStream(path, { mode: file.mode });
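
        // File contents are pushed through an in-memory Readable piped into the image
        // filesystem's write stream, so the literal-content and replace-line cases
        // below share the same write path.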
        const readStream = new Readable();
        readStream._read = () => {}; // _read is required but you can noop it
        readStream.pipe(writeStream);

        if(file.content) {
          readStream.push(Buffer.from(file.content, "utf8"));
        } else if(file.replaceLine) {
          let replaced = false;
          const newContents = String(originalContents).split("\n").map(line => {
            if(line.match(new RegExp(file.replaceLine.match))) {
              myLog(`${path}: replacing line\n${line}\nwith\n${file.replaceLine.replace}\n`);
              replaced = true;
              return file.replaceLine.replace;
            }
            return line;
          }).join("\n");

          if(!replaced) {
            throw new Error(`file.replaceLine was specified but no line matching ${file.replaceLine.match} was found.`);
          }

          readStream.push(Buffer.from(newContents, "utf8"));
        } else {
          throw new Error("file.content or file.replaceLine is required on an overlay file");
        }

        readStream.push(null);

        await new Promise((resolve, reject) => {
          writeStream.on('error', reject);
          writeStream.on('close', resolve);
        });

      }

    });

  });

  myLog(`configuration of disk image complete!`);

  terminate(0);

}

function errorToJSON(err) {
  return {
    name: err.name,
    message: err.message,
    description: err.description,
    stack: err.stack,
    report: err.report,
    code: err.code,
    syscall: err.syscall,
    errno: err.errno,
    stdout: err.stdout,
    stderr: err.stderr,
    device: err.device,
  };
}