diff --git a/Cargo.toml b/Cargo.toml
index 28d85d25a75263d7f41065d568c4d3b606a6c880..3c944a2d54a471256b3e7ec1ca6defe6336e911e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,8 +1,8 @@
 [package]
 name = "redox-initfs"
-version = "0.1.0"
-authors = ["4lDO2 <4lDO2@protonmail.com>"]
-edition = "2018"
+version = "0.2.0"
+authors = ["4lDO2 <4lDO2@protonmail.com>", "Kamil Koczurek <koczurekk@gmail.com>"]
+edition = "2021"
 license = "MIT"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
@@ -17,11 +17,13 @@ std = []
 
 [dev-dependencies]
 # FIXME remove loggers
-log = "0.4"
-env_logger = "0.8"
 anyhow = "1"
+archive-common = {path = "archive-common"}
+env_logger = "0.8"
+log = "0.4"
 
 [workspace]
 members = [
+    "archive-common",
     "tools",
 ]
diff --git a/archive-common/Cargo.toml b/archive-common/Cargo.toml
new file mode 100644
index 0000000000000000000000000000000000000000..cb40b568b6fa7a3a1bb35e4dae263c44ea2e7e90
--- /dev/null
+++ b/archive-common/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "archive-common"
+version = "0.1.0"
+authors = ["4lDO2 <4lDO2@protonmail.com>", "Kamil Koczurek <koczurekk@gmail.com>"]
+edition = "2021"
+
+[dependencies]
+anyhow = "1"
+log = "0.4"
+pathdiff = "0.2.1"
+plain = "0.2"
+redox-initfs = {path = ".."}
diff --git a/tools/src/archive_common.rs b/archive-common/src/lib.rs
similarity index 76%
rename from tools/src/archive_common.rs
rename to archive-common/src/lib.rs
index b7a0b250b94219fa354e1ef63698defdbcd1791f..25fd8f4e799a260c2f96cbac00a7a4034fdeb5ea 100644
--- a/tools/src/archive_common.rs
+++ b/archive-common/src/lib.rs
@@ -1,12 +1,12 @@
 use std::convert::{TryFrom, TryInto};
 use std::fs::{DirEntry, File, Metadata, OpenOptions};
 use std::io::{prelude::*, SeekFrom};
-use std::path::Path;
+use std::path::{Path, PathBuf};
 
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::fs::{FileExt, FileTypeExt, MetadataExt};
 
-use anyhow::{anyhow, Context, Result};
+use anyhow::{anyhow, bail, Context, Result};
 
 use redox_initfs::types as initfs;
 
@@ -17,6 +17,7 @@ pub const DEFAULT_MAX_SIZE: u64 = 64 * MEBIBYTE;
 enum EntryKind {
     File(File),
     Dir(Dir),
+    Link(PathBuf),
 }
 
 struct Entry {
@@ -39,11 +40,16 @@ struct State<'path> {
 
 fn write_all_at(file: &File, buf: &[u8], offset: u64, r#where: &str) -> Result<()> {
     file.write_all_at(buf, offset)?;
-    log::trace!("Wrote {}..{} within {}", offset, offset + buf.len() as u64, r#where);
+    log::trace!(
+        "Wrote {}..{} within {}",
+        offset,
+        offset + buf.len() as u64,
+        r#where
+    );
     Ok(())
 }
 
-fn read_directory(state: &mut State, path: &Path) -> Result<Dir> {
+fn read_directory(state: &mut State, path: &Path, root_path: &Path) -> Result<Dir> {
     let read_dir = path
         .read_dir()
         .with_context(|| anyhow!("failed to read directory `{}`", path.to_string_lossy(),))?;
@@ -79,9 +85,7 @@ fn read_directory(state: &mut State, path: &Path) -> Result<Dir> {
             .as_bytes()
             .to_owned();
 
-        let entry_kind = if file_type.is_symlink() {
-            return unsupported_type("symlink", &entry);
-        } else if file_type.is_socket() {
+        let entry_kind = if file_type.is_socket() {
             return unsupported_type("socket", &entry);
         } else if file_type.is_fifo() {
             return unsupported_type("FIFO", &entry);
@@ -90,11 +94,35 @@ fn read_directory(state: &mut State, path: &Path) -> Result<Dir> {
         } else if file_type.is_char_device() {
             return unsupported_type("character device", &entry);
         } else if file_type.is_file() {
-            EntryKind::File(File::open(&entry.path()).with_context(|| {
+            EntryKind::File(File::open(entry.path()).with_context(|| {
                 anyhow!("failed to open file `{}`", entry.path().to_string_lossy(),)
             })?)
         } else if file_type.is_dir() {
-            EntryKind::Dir(read_directory(state, &entry.path())?)
+            EntryKind::Dir(read_directory(state, &entry.path(), root_path)?)
+        } else if file_type.is_symlink() {
+            let link_file_path = entry.path();
+
+            let link_path = std::fs::read_link(&link_file_path)?;
+            let cannonical = if link_path.is_absolute() {
+                link_path.clone()
+            } else {
+                let Some(link_parent) = link_file_path.parent() else {
+                    bail!("Link at `{}` has no parent", link_file_path.display())
+                };
+                link_parent.canonicalize()?.join(link_path.clone())
+            };
+
+            let root_path = root_path
+                .canonicalize()
+                .context("Failed to cannonicalize root path")?;
+            let path = pathdiff::diff_paths(cannonical, &root_path).ok_or_else(|| {
+                anyhow!(
+                    "Failed to diff symlink path [{}] to root path [{}]",
+                    link_path.display(),
+                    root_path.display()
+                )
+            })?;
+            EntryKind::Link(path)
         } else {
             return Err(anyhow!(
                 "unknown file type at `{}`",
@@ -164,14 +192,40 @@ fn allocate_and_write_file(state: &mut State, mut file: &File) -> Result<WriteRe
         file.read(&mut state.buffer[..allowed_length])
             .context("failed to read from source file")?;
 
-        write_all_at(&*state.file, &state.buffer[..allowed_length], u64::from(offset + relative_offset), "allocate_and_write_file buffer chunk")
-            .context("failed to write source file into destination image")?;
+        write_all_at(
+            &state.file,
+            &state.buffer[..allowed_length],
+            u64::from(offset + relative_offset),
+            "allocate_and_write_file buffer chunk",
+        )
+        .context("failed to write source file into destination image")?;
 
         relative_offset += buffer_size;
     }
 
     Ok(WriteResult { size, offset })
 }
+
+fn allocate_and_write_link(state: &mut State, link: &Path) -> Result<WriteResult> {
+    let data = link.as_os_str().as_bytes();
+    let size: u32 = data.len().try_into().unwrap();
+
+    let offset: u32 = bump_alloc(state, size.into(), "allocate space for file")
+        .context("failed to allocate space for file")?
+        .try_into()
+        .context("file offset too high")?;
+
+    write_all_at(
+        &state.file,
+        data,
+        u64::from(offset),
+        "allocate_and_write_link target path",
+    )
+    .context("failed to write source file into destination image")?;
+
+    Ok(WriteResult { size, offset })
+}
+
 fn write_inode(
     state: &mut State,
     ty: initfs::InodeType,
@@ -183,7 +237,7 @@ fn write_inode(
         .try_into()
         .expect("inode header length cannot fit within u32");
 
-    let type_and_mode = ((ty as u32) << initfs::TYPE_SHIFT) | u32::from(metadata.mode() & 0xFFF);
+    let type_and_mode = ((ty as u32) << initfs::TYPE_SHIFT) | (metadata.mode() & 0xFFF);
 
     // TODO: Use main buffer and write in bulk.
     let mut inode_buf = [0_u8; std::mem::size_of::<initfs::InodeHeader>()];
@@ -196,13 +250,17 @@ fn write_inode(
         length: write_result.size.into(),
         offset: initfs::Offset(write_result.offset.into()),
 
-        gid: 0.into(),//metadata.gid().into(),
-        uid: 0.into(),//metadata.uid().into(),
+        gid: 0.into(), //metadata.gid().into(),
+        uid: 0.into(), //metadata.uid().into(),
     };
 
-    log::debug!("Writing inode index {} from offset {}", inode, state.inode_table_offset);
+    log::debug!(
+        "Writing inode index {} from offset {}",
+        inode,
+        state.inode_table_offset
+    );
     write_all_at(
-        &*state.file,
+        &state.file,
         &inode_buf,
         u64::from(state.inode_table_offset + u32::from(inode) * inode_size),
         "write_inode",
@@ -222,22 +280,22 @@ fn allocate_and_write_dir(
         .checked_mul(u32::from(entry_size))
         .ok_or_else(|| anyhow!("entry table length too large when multiplying by size"))?;
 
-    let entry_table_offset: u32 = bump_alloc(state, entry_table_length.into(), "allocate entry table")
-        .context("failed to allocate entry table")?
-        .try_into()
-        .context("directory entries offset too high")?;
+    let entry_table_offset: u32 =
+        bump_alloc(state, entry_table_length.into(), "allocate entry table")
+            .context("failed to allocate entry table")?
+            .try_into()
+            .context("directory entries offset too high")?;
 
     for (index, entry) in dir.entries.iter().enumerate() {
         let (write_result, ty) = match entry.kind {
             EntryKind::Dir(ref subdir) => {
-                let write_result =
-                    allocate_and_write_dir(state, subdir, current_inode)
-                        .with_context(|| {
-                            anyhow!(
-                                "failed to copy directory entries from `{}` into image",
-                                String::from_utf8_lossy(&entry.name)
-                            )
-                        })?;
+                let write_result = allocate_and_write_dir(state, subdir, current_inode)
+                    .with_context(|| {
+                        anyhow!(
+                            "failed to copy directory entries from `{}` into image",
+                            String::from_utf8_lossy(&entry.name)
+                        )
+                    })?;
 
                 (write_result, initfs::InodeType::Dir)
             }
@@ -248,6 +306,12 @@ fn allocate_and_write_dir(
 
                 (write_result, initfs::InodeType::RegularFile)
             }
+
+            EntryKind::Link(ref path) => {
+                let write_result = allocate_and_write_link(state, path)
+                    .context("failed to copy symbolic link into image")?;
+                (write_result, initfs::InodeType::Link)
+            }
         };
 
         let index: u16 = index
@@ -255,13 +319,7 @@ fn allocate_and_write_dir(
             .expect("expected dir entry count not to exceed u32");
 
         *current_inode += 1;
-        write_inode(
-            state,
-            ty,
-            &entry.metadata,
-            write_result,
-            *current_inode,
-        )?;
+        write_inode(state, ty, &entry.metadata, write_result, *current_inode)?;
 
         let (name_offset, name_len) = {
             let name_len: u16 = entry.name.len().try_into().context("file name too long")?;
@@ -271,7 +329,8 @@ fn allocate_and_write_dir(
                 .try_into()
                 .context("file name offset too high up")?;
 
-            write_all_at(&*state.file, &entry.name, offset.into(), "writing file name").context("failed to write file name")?;
+            write_all_at(&state.file, &entry.name, offset.into(), "writing file name")
+                .context("failed to write file name")?;
 
             (offset, name_len)
         };
@@ -281,7 +340,12 @@ fn allocate_and_write_dir(
         let direntry = plain::from_mut_bytes::<initfs::DirEntry>(&mut direntry_buf)
             .expect("expected dir entry struct to have alignment 1, and buffer size to match");
 
-        log::debug!("Linking inode {} into dir entry index {}, file name `{}`", current_inode, index, String::from_utf8_lossy(&entry.name));
+        log::debug!(
+            "Linking inode {} into dir entry index {}, file name `{}`",
+            current_inode,
+            index,
+            String::from_utf8_lossy(&entry.name)
+        );
 
         *direntry = initfs::DirEntry {
             inode: (*current_inode).into(),
@@ -290,7 +354,7 @@ fn allocate_and_write_dir(
         };
 
         write_all_at(
-            &*state.file,
+            &state.file,
             &direntry_buf,
             u64::from(entry_table_offset + u32::from(index) * u32::from(entry_size)),
             "allocate_and_write_dir entry",
@@ -384,6 +448,7 @@ pub fn archive(
         .read(false)
         .write(true)
         .create(true)
+        .truncate(true)
        .create_new(false)
         .open(&destination_temp_path)
         .context("failed to open destination file")?;
@@ -410,7 +475,7 @@ pub fn archive(
     let root_metadata = root_path
         .metadata()
         .context("failed to obtain metadata for root")?;
-    let root = read_directory(&mut state, root_path).context("failed to read root")?;
+    let root = read_directory(&mut state, root_path, root_path).context("failed to read root")?;
 
     log::debug!("there are {} inodes", state.inode_count);
 
@@ -461,11 +526,7 @@ pub fn archive(
 
     state.inode_table_offset = inode_table_offset.0.get();
 
-    allocate_contents_and_write_inodes(
-        &mut state,
-        &root,
-        root_metadata,
-    )?;
+    allocate_contents_and_write_inodes(&mut state, &root, root_metadata)?;
 
     let current_system_time = std::time::SystemTime::now();
 
@@ -487,9 +548,14 @@ pub fn archive(
         inode_count: state.inode_count.into(),
         inode_table_offset,
         bootstrap_entry: bootstrap_entry.into(),
-        initfs_size: state.file.metadata().context("failed to get initfs size")?.len().into(),
+        initfs_size: state
+            .file
+            .metadata()
+            .context("failed to get initfs size")?
+            .len()
+            .into(),
     };
 
-    write_all_at(&*state.file, &header_bytes, header_offset, "writing header")
+    write_all_at(&state.file, &header_bytes, header_offset, "writing header")
         .context("failed to write header")?;
 }
diff --git a/data/foo/file-link.txt b/data/foo/file-link.txt
new file mode 120000
index 0000000000000000000000000000000000000000..4c330738cc959751fb6760a91a50d9e58cfe5cb9
--- /dev/null
+++ b/data/foo/file-link.txt
@@ -0,0 +1 @@
+file.txt
\ No newline at end of file
diff --git a/data/file.txt b/data/foo/file.txt
similarity index 100%
rename from data/file.txt
rename to data/foo/file.txt
diff --git a/src/lib.rs b/src/lib.rs
index a250ebb6d78ca671d34205b42eff4110021a5ef0..0e77a0dcc7041382f89fdbe198b26ea9649a3e05 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,5 +1,4 @@
 #![no_std]
-#![feature(offset_of)]
 
 //! A super simple initfs, only meant to be loaded into RAM by the bootloader, and then directly be
 //! read.
@@ -91,6 +90,20 @@ impl<'initfs> InodeDir<'initfs> {
     }
 }
 
+#[derive(Clone, Copy)]
+pub struct InodeLink<'initfs> {
+    inner: InodeStruct<'initfs>,
+}
+
+impl<'initfs> InodeLink<'initfs> {
+    pub fn inode(self) -> InodeStruct<'initfs> {
+        self.inner
+    }
+    pub fn data(&self) -> Result<&'initfs [u8]> {
+        self.inner.data()
+    }
+}
+
 #[derive(Clone, Copy)]
 pub struct Entry<'initfs> {
     initfs: InitFs<'initfs>,
@@ -109,7 +122,7 @@ impl<'initfs> Entry<'initfs> {
             .get()
             .try_into()
             .map_err(|_| Error)?;
-        let name_length: usize = self.entry.name_len.get().try_into().map_err(|_| Error)?;
+        let name_length: usize = self.entry.name_len.get().into();
 
         let name_end = name_offset.checked_add(name_length).ok_or(Error)?;
 
@@ -121,6 +134,7 @@ impl<'initfs> Entry<'initfs> {
 pub enum InodeKind<'initfs> {
     File(InodeFile<'initfs>),
     Dir(InodeDir<'initfs>),
+    Link(InodeLink<'initfs>),
     Unknown,
 }
 
@@ -150,14 +164,18 @@ impl<'initfs> InodeStruct<'initfs> {
             InodeType::Dir
         } else if raw == InodeType::RegularFile as u32 {
             InodeType::RegularFile
+        } else if raw == InodeType::Link as u32 {
+            InodeType::Link
         } else {
             return None;
         })
     }
     pub fn kind(&self) -> InodeKind<'initfs> {
+        let inner = *self;
         match self.ty() {
-            Some(InodeType::Dir) => InodeKind::Dir(InodeDir { inner: *self }),
-            Some(InodeType::RegularFile) => InodeKind::File(InodeFile { inner: *self }),
+            Some(InodeType::Dir) => InodeKind::Dir(InodeDir { inner }),
+            Some(InodeType::RegularFile) => InodeKind::File(InodeFile { inner }),
+            Some(InodeType::Link) => InodeKind::Link(InodeLink { inner }),
             None => InodeKind::Unknown,
         }
     }
diff --git a/src/types.rs b/src/types.rs
index c08458adf5c8fb9ecf9c97b7f94d5d76f1f6d7a7..6951bd86dc49b0fba7dbdbc15532f555386b2afe 100644
--- a/src/types.rs
+++ b/src/types.rs
@@ -101,7 +101,8 @@ pub const TYPE_MASK: u32 = 0xF000_0000;
 pub enum InodeType {
     RegularFile = 0x0,
     Dir = 0x1,
-    // All other bit patterns are reserved... for now. TODO: Add symlinks?
+    Link = 0x2,
+    // All other bit patterns are reserved... for now.
 }
 
 #[repr(C, packed)]
diff --git a/tests/archive_and_read.rs b/tests/archive_and_read.rs
index ac4f87caafa36da9ad7c35cb5b6faf5b8dda1a2f..294d9defe0539c841dbc6240bdda0e4632e5219b 100644
--- a/tests/archive_and_read.rs
+++ b/tests/archive_and_read.rs
@@ -1,47 +1,107 @@
-#![feature(array_methods)]
+use std::{collections::HashMap, path::Path};
 
-use std::path::Path;
+use anyhow::{anyhow, Context, Result};
+use redox_initfs::{InitFs, InodeKind, InodeStruct};
 
-use anyhow::{anyhow, bail, Context, Result};
+#[derive(Debug, Clone, PartialEq)]
+enum Node {
+    Link { to: Vec<u8> },
+    File { data: Vec<u8> },
+    Dir(HashMap<Vec<u8>, Node>),
+    Unknown,
+}
+
+impl Node {
+    fn link(to: impl Into<Vec<u8>>) -> Self {
+        Node::Link { to: to.into() }
+    }
+
+    fn file(data: impl Into<Vec<u8>>) -> Self {
+        Node::File { data: data.into() }
+    }
+
+    fn dir(entries: impl IntoIterator<Item = (impl Into<Vec<u8>>, Node)>) -> Self {
+        Self::Dir(
+            entries
+                .into_iter()
+                .map(|(name, node)| (name.into(), node))
+                .collect(),
+        )
+    }
+}
+
+fn build_tree<'a>(fs: InitFs<'a>, inode: InodeStruct<'a>) -> anyhow::Result<Node> {
+    use InodeKind::*;
+    let node = match inode.kind() {
+        File(file) => {
+            let data = file.data().context("failed to get file data")?.to_owned();
+            Node::File { data }
+        }
+        Link(link) => {
+            let data = link.data().context("failed to get link data")?.to_owned();
+            Node::Link { to: data }
+        }
+        Dir(dir) => {
+            let mut entries = HashMap::new();
+            for idx in 0..dir
+                .entry_count()
+                .context("failed to get inode entry count")?
+            {
+                let entry = dir
+                    .get_entry(idx)
+                    .context("failed to get entry for index")?
+                    .ok_or_else(|| anyhow!("no entry found"))?;
 
-#[path = "../tools/src/archive_common.rs"]
-mod archive;
+                let entry_name = entry.name().context("failed to get entry name")?;
+                let inode = fs
+                    .get_inode(entry.inode())
+                    .context("failed to load file inode")?;
+
+                let entry_node = build_tree(fs, inode)?;
+
+                entries.insert(entry_name.to_owned(), entry_node);
+            }
+
+            Node::Dir(entries)
+        }
+        Unknown => Node::Unknown,
+    };
+
+    Ok(node)
+}
 
 #[test]
 fn archive_and_read() -> Result<()> {
     env_logger::init();
 
-    let args = self::archive::Args {
+    let args = archive_common::Args {
         destination_path: Path::new("out.img"),
         source: Path::new("data"),
         bootstrap_code: None,
-        max_size: self::archive::DEFAULT_MAX_SIZE,
+        max_size: archive_common::DEFAULT_MAX_SIZE,
     };
-    self::archive::archive(&args).context("failed to archive")?;
+    archive_common::archive(&args).context("failed to archive")?;
 
     let data = std::fs::read(args.destination_path).context("failed to read new archive")?;
     let filesystem = redox_initfs::InitFs::new(&data).context("failed to parse archive header")?;
-    let root_inode = filesystem.get_inode(redox_initfs::InitFs::ROOT_INODE).ok_or_else(|| anyhow!("Failed to get root inode"))?;
-
-    let dir = match root_inode.kind() {
-        redox_initfs::InodeKind::Dir(dir) => dir,
-        _ => bail!("root inode was not a directory"),
-    };
+    let inode = filesystem
+        .get_inode(redox_initfs::InitFs::ROOT_INODE)
+        .ok_or_else(|| anyhow!("Failed to get root inode"))?;
 
-    for idx in 0..dir.entry_count().context("failed to get inode entry count")? {
-        let entry = dir.get_entry(idx).context("failed to get entry for index")?.ok_or_else(|| anyhow!("no entry found"))?;
+    let tree = build_tree(filesystem, inode)?;
 
-        if entry.name().context("failed to get entry name")? == b"file.txt".as_slice() {
-            let inode = filesystem.get_inode(entry.inode()).context("failed to load file inode")?;
+    let reference_tree = Node::dir([(
+        b"foo",
+        Node::dir([
+            (
+                b"file.txt".as_slice(),
+                Node::file(b"This is a file meant to be used in a redox-initfs test.\n"),
+            ),
+            (b"file-link.txt".as_slice(), Node::link(b"foo/file.txt")),
+        ]),
+    )]);
 
-            let file = match inode.kind() {
-                redox_initfs::InodeKind::File(file) => file,
-                _ => bail!("file.txt was a directory"),
-            };
-            let data = file.data().context("failed to get file.txt data")?;
-            assert_eq!(data, std::fs::read("data/file.txt").context("failed to read the real file.txt")?);
-        }
-    }
+    assert_eq!(tree, reference_tree);
 
     Ok(())
 }
diff --git a/tools/Cargo.toml b/tools/Cargo.toml
index fb9acfae241e1df04c805b47ab95af95b343ced7..e201f04c1aeabe530450e1327013367d49ac6b10 100644
--- a/tools/Cargo.toml
+++ b/tools/Cargo.toml
@@ -1,27 +1,28 @@
 [package]
-name = "redox-initfs-ar"
-version = "0.1.0"
-authors = ["4lDO2 <4lDO2@protonmail.com>"]
-edition = "2018"
+name = "redox-initfs-tools"
+version = "0.2.0"
+authors = ["4lDO2 <4lDO2@protonmail.com>", "Kamil Koczurek <koczurekk@gmail.com>"]
+edition = "2021"
 description = "Archive a directory into a Redox initfs image"
 license = "MIT"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [[bin]]
-path = "src/bin/archive.rs"
 name = "redox-initfs-ar"
+path = "src/bin/archive.rs"
 
 [[bin]]
-path = "src/bin/dump.rs"
 name = "redox-initfs-dump"
+path = "src/bin/dump.rs"
 
 [dependencies]
 anyhow = "1"
-clap = { version = "4", features = ["cargo"] }
+clap = {version = "4", features = ["cargo"]}
 env_logger = "0.8"
 log = "0.4"
 plain = "0.2"
 twox-hash = "1.6"
-redox-initfs = { path = ".." }
+archive-common = {path = "../archive-common"}
+redox-initfs = {path = ".."}
 
diff --git a/tools/src/bin/archive.rs b/tools/src/bin/archive.rs
index 5258f75ee3598cd5fbbdd5685e5f1d32d479dd5f..5e6291417a6ab7121cdcb6de31281fb71ef929cc 100644
--- a/tools/src/bin/archive.rs
+++ b/tools/src/bin/archive.rs
@@ -3,9 +3,7 @@ use std::path::Path;
 use anyhow::{Context, Result};
 use clap::{Arg, Command};
 
-#[path = "../archive_common.rs"]
-mod archive_common;
-use self::archive_common::{self as archive, Args, DEFAULT_MAX_SIZE};
+use archive_common::{self as archive, Args, DEFAULT_MAX_SIZE};
 
 fn main() -> Result<()> {
     let matches = Command::new("redox-initfs-ar")
@@ -19,6 +17,7 @@ fn main() -> Result<()> {
                 .required(false)
                 .help("Set the upper limit for how large the image can become (default 64 MiB)."),
         )
+        // TODO: support non-utf8 paths (applies to other paths as well)
         .arg(
             Arg::new("SOURCE")
                 .required(true)
@@ -27,7 +26,7 @@ fn main() -> Result<()> {
         .arg(
             Arg::new("BOOTSTRAP_CODE")
                 .required(false)
-                .help("Specify the bootstrap ELF file to include in the image.")
+                .help("Specify the bootstrap ELF file to include in the image."),
         )
         .arg(
             Arg::new("OUTPUT")
@@ -60,7 +59,7 @@ fn main() -> Result<()> {
 
     let args = Args {
         source: Path::new(source),
-        bootstrap_code: bootstrap_code.map(|bootstrap_code| Path::new(bootstrap_code)),
+        bootstrap_code: bootstrap_code.map(Path::new),
         destination_path: Path::new(destination),
         max_size,
     };
diff --git a/tools/src/bin/dump.rs b/tools/src/bin/dump.rs
index 1c0a06e33083c25184716674f7bbbd95592f170c..414d58be0d5b2c8df1d141bcc35c4c4beca0ce85 100644
--- a/tools/src/bin/dump.rs
+++ b/tools/src/bin/dump.rs
@@ -1,4 +1,4 @@
-use std::ffi::OsString;
+use std::{ffi::OsStr, path::Path};
 
 use anyhow::{Context, Result};
 use clap::{Arg, Command};
@@ -17,8 +17,9 @@ fn main() -> Result<()> {
         )
         .get_matches();
 
+    // TODO: support non-utf8 paths
     let source = matches
-        .get_one::<OsString>("IMAGE")
+        .get_one::<String>("IMAGE")
        .expect("expected the required arg IMAGE to exist");
 
     let bytes = std::fs::read(source).context("failed to read image into memory")?;
@@ -99,6 +100,19 @@ fn main() -> Result<()> {
 
                 println!("}}");
             }
+            InodeKind::Link(link) => {
+                print!("link{{");
+                match link.data().ok() {
+                    Some(d) => {
+                        use std::os::unix::ffi::OsStrExt;
+                        print!("dst={}", Path::new(OsStr::from_bytes(d)).display());
+                    }
+                    None => {
+                        print!("(failed to get data)");
+                    }
+                }
+                println!("}}");
+            }
         }
     }