diff --git a/benches/benchmark.rs b/benches/benchmark.rs
index c31f4b0..f6eafd6 100755
--- a/benches/benchmark.rs
+++ b/benches/benchmark.rs
@@ -8,7 +8,4 @@ fn bench_calculate_hash() {
     IndexFile::calculate_hash("exd/root.exl");
 }
 
-brunch::benches!(
-    Bench::new("hash c alc")
-        .run(bench_calculate_hash),
-);
+brunch::benches!(Bench::new("hash c alc").run(bench_calculate_hash),);
diff --git a/benches/retail_benchmark.rs b/benches/retail_benchmark.rs
index c1d0566..fbfffe8 100644
--- a/benches/retail_benchmark.rs
+++ b/benches/retail_benchmark.rs
@@ -8,13 +8,20 @@ use physis::common::Platform;
 fn reload_repos() {
     let game_dir = env::var("FFXIV_GAME_DIR").unwrap();
 
-    physis::gamedata::GameData::from_existing(Platform::Win32, format!("{}/game", game_dir).as_str()).unwrap();
+    physis::gamedata::GameData::from_existing(
+        Platform::Win32,
+        format!("{}/game", game_dir).as_str(),
+    )
+    .unwrap();
 }
 
 fn fetch_data() {
     let game_dir = env::var("FFXIV_GAME_DIR").unwrap();
 
-    let mut gamedata =
-        physis::gamedata::GameData::from_existing(Platform::Win32, format!("{}/game", game_dir).as_str()).unwrap();
+    let mut gamedata = physis::gamedata::GameData::from_existing(
+        Platform::Win32,
+        format!("{}/game", game_dir).as_str(),
+    )
+    .unwrap();
 
     gamedata.extract("exd/root.exl");
 }
diff --git a/build.rs b/build.rs
index c86621c..0c9e8e3 100644
--- a/build.rs
+++ b/build.rs
@@ -5,4 +5,3 @@ fn main() {
     #[cfg(feature = "game_install")]
     println!("cargo::rustc-link-lib=unshield");
 }
-
diff --git a/src/blowfish.rs b/src/blowfish.rs
index 5cb0d8f..d82d953 100755
--- a/src/blowfish.rs
+++ b/src/blowfish.rs
@@ -171,9 +171,17 @@ mod tests {
     fn test_encrypt_decrypt() {
         let blowfish = Blowfish::new(b"test_case");
 
-        let expected_encrypted = [63, 149, 97, 229, 5, 35, 46, 128, 194, 107, 69, 132, 85, 202, 2, 126];
+        let expected_encrypted = [
+            63, 149, 97, 229, 5, 35, 46, 128, 194, 107, 69, 132, 85, 202, 2, 126,
+        ];
 
-        assert_eq!(blowfish.encrypt(b"hello, world!").unwrap(), expected_encrypted);
-        assert_eq!(String::from_utf8(blowfish.decrypt(&expected_encrypted).unwrap()).unwrap(), "hello, world!\0\0\0");
+        assert_eq!(
+            blowfish.encrypt(b"hello, world!").unwrap(),
+            expected_encrypted
+        );
+        assert_eq!(
+            String::from_utf8(blowfish.decrypt(&expected_encrypted).unwrap()).unwrap(),
+            "hello, world!\0\0\0"
+        );
     }
 }
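For readers skimming this patch: the reflowed test above round-trips the `Blowfish` helper. A minimal usage sketch of the same API, assuming the module is exported as `physis::blowfish` (the zero padding on decrypt is inferred from the `"hello, world!\0\0\0"` assertion):

```rust
use physis::blowfish::Blowfish;

fn main() {
    let blowfish = Blowfish::new(b"test_case");

    // encrypt() pads the plaintext out to the 8-byte Blowfish block size.
    let encrypted = blowfish.encrypt(b"hello, world!").unwrap();

    // decrypt() returns the padded plaintext, so trim the trailing NULs.
    let decrypted = String::from_utf8(blowfish.decrypt(&encrypted).unwrap()).unwrap();
    assert_eq!(decrypted.trim_end_matches('\0'), "hello, world!");
}
```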
diff --git a/src/cfg.rs b/src/cfg.rs
index 3f30f64..0e6fbcd 100644
--- a/src/cfg.rs
+++ b/src/cfg.rs
@@ -1,9 +1,9 @@
 // SPDX-FileCopyrightText: 2023 Joshua Goins
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+use crate::{ByteBuffer, ByteSpan};
 use std::collections::HashMap;
 use std::io::{BufRead, BufReader, BufWriter, Cursor, Write};
-use crate::{ByteBuffer, ByteSpan};
 
 /// Represents a collection of keys, mapped to their values.
 #[derive(Debug)]
@@ -26,14 +26,14 @@ impl ConfigFile {
     pub fn from_existing(buffer: ByteSpan) -> Option<ConfigFile> {
         let mut cfg = ConfigFile {
             categories: Vec::new(),
-            settings: HashMap::new()
+            settings: HashMap::new(),
         };
 
         let cursor = Cursor::new(buffer);
         let reader = BufReader::new(cursor);
 
         let mut current_category: Option<String> = None;
-
+
         for line in reader.lines().map_while(Result::ok) {
             if !line.is_empty() && line != "\0" {
                 if line.contains('<') || line.contains('>') {
@@ -41,10 +41,17 @@ impl ConfigFile {
                     let name = &line[1..line.len() - 1];
                     current_category = Some(String::from(name));
                     cfg.categories.push(String::from(name));
-                } else if let (Some(category), Some((key, value))) = (&current_category, line.split_once('\t')) {
+                } else if let (Some(category), Some((key, value))) =
+                    (&current_category, line.split_once('\t'))
+                {
                     // Key-value pair
-                    cfg.settings.entry(category.clone()).or_insert_with(|| ConfigMap{ keys: Vec::new() });
-                    cfg.settings.get_mut(category)?.keys.push((key.to_string(), value.to_string()));
+                    cfg.settings
+                        .entry(category.clone())
+                        .or_insert_with(|| ConfigMap { keys: Vec::new() });
+                    cfg.settings
+                        .get_mut(category)?
+                        .keys
+                        .push((key.to_string(), value.to_string()));
                 }
             }
         }
@@ -61,11 +68,15 @@ impl ConfigFile {
         let mut writer = BufWriter::new(cursor);
 
         for category in &self.categories {
-            writer.write_all(format!("\r\n<{}>\r\n", category).as_ref()).ok()?;
+            writer
+                .write_all(format!("\r\n<{}>\r\n", category).as_ref())
+                .ok()?;
 
             if self.settings.contains_key(category) {
                 for key in &self.settings[category].keys {
-                    writer.write_all(format!("{}\t{}\r\n", key.0, key.1).as_ref()).ok()?;
+                    writer
+                        .write_all(format!("{}\t{}\r\n", key.0, key.1).as_ref())
+                        .ok()?;
                 }
             }
         }
@@ -73,7 +84,6 @@ impl ConfigFile {
             writer.write_all(b"\0").ok()?;
         }
-
 
         Some(buffer)
     }
@@ -107,7 +117,6 @@ impl ConfigFile {
     }
 }
-
 
 #[cfg(test)]
 mod tests {
     use std::fs::read;
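The `ConfigFile` hunks above show the format: `<Category>` headers followed by tab-separated key/value pairs, CRLF-terminated. A round-trip sketch, assuming the module is exported as `physis::cfg` (the filenames are illustrative):

```rust
use physis::cfg::ConfigFile;

fn main() {
    // Any FFXIV.cfg-style file works here.
    let buffer = std::fs::read("FFXIV.cfg").unwrap();

    // from_existing() returns None on malformed input rather than panicking.
    let cfg = ConfigFile::from_existing(&buffer).unwrap();

    // write_to_buffer() re-emits "<Category>\r\n" headers and
    // "key\tvalue\r\n" pairs in parse order, ending with a NUL.
    let roundtrip = cfg.write_to_buffer().unwrap();
    std::fs::write("FFXIV.cfg.out", &roundtrip).unwrap();
}
```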
diff --git a/src/chardat.rs b/src/chardat.rs
index dee11bb..6619bab 100644
--- a/src/chardat.rs
+++ b/src/chardat.rs
@@ -3,10 +3,10 @@
 
 use std::io::{BufWriter, Cursor};
 
-use binrw::{BinRead, BinWrite};
-use binrw::binrw;
-use crate::{ByteBuffer, ByteSpan};
 use crate::common_file_operations::{read_bool_from, write_bool_as};
+use crate::{ByteBuffer, ByteSpan};
+use binrw::binrw;
+use binrw::{BinRead, BinWrite};
 
 use crate::race::{Gender, Race, Subrace};
@@ -20,7 +20,7 @@ fn convert_dat_race(x: u8) -> Race {
         6 => Race::AuRa,
         7 => Race::Hrothgar,
         8 => Race::Viera,
-        _ => Race::Hyur
+        _ => Race::Hyur,
     }
 }
 
@@ -33,7 +33,7 @@ fn convert_race_dat(race: &Race) -> u8 {
         Race::Roegadyn => 5,
         Race::AuRa => 6,
         Race::Hrothgar => 7,
-        Race::Viera => 8
+        Race::Viera => 8,
     }
 }
 
@@ -41,7 +41,7 @@ fn convert_dat_gender(x: u8) -> Gender {
     match x {
         0 => Gender::Male,
         1 => Gender::Female,
-        _ => Gender::Male
+        _ => Gender::Male,
     }
 }
 
@@ -62,7 +62,7 @@ fn convert_dat_subrace(x: u8) -> Subrace {
         6 => Subrace::Dunesfolk,
         7 => Subrace::Seeker,
         8 => Subrace::Keeper,
-        9 => Subrace:: SeaWolf,
+        9 => Subrace::SeaWolf,
         10 => Subrace::Hellsguard,
         11 => Subrace::Raen,
         12 => Subrace::Xaela,
@@ -70,7 +70,7 @@ fn convert_dat_subrace(x: u8) -> Subrace {
         14 => Subrace::Lost,
         15 => Subrace::Rava,
         16 => Subrace::Veena,
-        _ => Subrace::Midlander
+        _ => Subrace::Midlander,
     }
 }
 
@@ -84,14 +84,14 @@ fn convert_subrace_dat(subrace: &Subrace) -> u8 {
         Subrace::Dunesfolk => 6,
         Subrace::Seeker => 7,
         Subrace::Keeper => 8,
-        Subrace:: SeaWolf => 9,
+        Subrace::SeaWolf => 9,
         Subrace::Hellsguard => 10,
         Subrace::Raen => 11,
         Subrace::Xaela => 12,
         Subrace::Hellion => 13,
         Subrace::Lost => 14,
         Subrace::Rava => 15,
-        Subrace::Veena => 16
+        Subrace::Veena => 16,
     }
 }
 
@@ -101,7 +101,8 @@ fn convert_subrace_dat(subrace: &Subrace) -> u8 {
 #[repr(C)]
 #[br(magic = 0x2013FF14u32)]
 #[derive(Debug)]
-pub struct CharacterData { // version 4
+pub struct CharacterData {
+    // version 4
     /// The version of the character data, the only supported version right now is 4.
     pub version: u32,
 
@@ -197,7 +198,7 @@ pub struct CharacterData { // version 4
 
     /// The timestamp when the preset was created.
     #[br(pad_before = 1)]
-    pub timestamp: [u8; 4]
+    pub timestamp: [u8; 4],
 }
 
 impl CharacterData {
@@ -229,7 +230,7 @@ mod tests {
     use std::path::PathBuf;
 
     use super::*;
-
+
     #[test]
     fn test_invalid() {
         let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
diff --git a/src/cmp.rs b/src/cmp.rs
index 7b5ccd1..0406c1f 100644
--- a/src/cmp.rs
+++ b/src/cmp.rs
@@ -3,9 +3,9 @@
 
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::BinRead;
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::BinRead;
 
 #[binrw]
 #[br(little)]
@@ -44,13 +44,13 @@ pub struct RacialScalingParameters {
     /// Maximum bust size on the Y-axis
     pub bust_max_y: f32,
     /// Maximum bust size on the Z-axis
-    pub bust_max_z: f32
+    pub bust_max_z: f32,
 }
 
 #[derive(Debug)]
 pub struct CMP {
     /// The racial scaling parameters
-    pub parameters: Vec<RacialScalingParameters>
+    pub parameters: Vec<RacialScalingParameters>,
 }
 
 impl CMP {
@@ -69,9 +69,7 @@ impl CMP {
             parameters.push(RacialScalingParameters::read(&mut cursor).ok()?);
         }
 
-        Some(CMP {
-            parameters
-        })
+        Some(CMP { parameters })
     }
 }
diff --git a/src/common.rs b/src/common.rs
index 6a2e576..eb0971f 100755
--- a/src/common.rs
+++ b/src/common.rs
@@ -74,4 +74,4 @@ pub fn get_platform_string(id: &Platform) -> &'static str {
         Platform::PS3 => "ps3",
         Platform::PS4 => "ps4", // TODO: confirm if this "ps4" is correct
     }
-}
\ No newline at end of file
+}
diff --git a/src/common_file_operations.rs b/src/common_file_operations.rs
index 5c293ef..140675f 100644
--- a/src/common_file_operations.rs
+++ b/src/common_file_operations.rs
@@ -6,7 +6,11 @@ pub(crate) fn read_bool_from<T: std::convert::From<u8> + std::cmp::PartialEq>(x:
 }
 
 pub(crate) fn write_bool_as<T: From<u8>>(x: &bool) -> T {
-    if *x { T::from(1u8) } else { T::from(0u8) }
+    if *x {
+        T::from(1u8)
+    } else {
+        T::from(0u8)
+    }
 }
 
 #[cfg(test)]
@@ -28,4 +32,4 @@ mod tests {
         assert_eq!(write_bool_as::<u8>(&false), DATA[0]);
         assert_eq!(write_bool_as::<u8>(&true), DATA[1]);
     }
-}
\ No newline at end of file
+}
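The two helpers reformatted above convert between numeric flags and booleans for binrw fields. A standalone sketch of their semantics; the body of `read_bool_from` is an assumption based on its `From<u8> + PartialEq` bounds:

```rust
// Assumed equivalent of the crate-private helper: a field is "true" iff it equals 1.
fn read_bool_from<T: std::convert::From<u8> + std::cmp::PartialEq>(x: T) -> bool {
    x == T::from(1u8)
}

// Mirrors the hunk above: true maps back to 1, false to 0.
fn write_bool_as<T: From<u8>>(x: &bool) -> T {
    if *x {
        T::from(1u8)
    } else {
        T::from(0u8)
    }
}

fn main() {
    assert!(read_bool_from(1u8));
    assert!(!read_bool_from(0u32));
    assert_eq!(write_bool_as::<u8>(&true), 1u8);
    assert_eq!(write_bool_as::<u8>(&false), 0u8);
}
```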
diff --git a/src/dat.rs b/src/dat.rs
index 3ce7d1d..824472b 100755
--- a/src/dat.rs
+++ b/src/dat.rs
@@ -1,18 +1,18 @@
 // SPDX-FileCopyrightText: 2023 Joshua Goins
 // SPDX-License-Identifier: GPL-3.0-or-later
 
-use std::io::{Cursor, Read, Seek, SeekFrom};
 use std::io::Write;
+use std::io::{Cursor, Read, Seek, SeekFrom};
 
-use binrw::{BinReaderExt, binrw};
+use crate::ByteBuffer;
 use binrw::BinRead;
 use binrw::BinWrite;
-use crate::ByteBuffer;
+use binrw::{binrw, BinReaderExt};
 
+use crate::common_file_operations::read_bool_from;
 #[cfg(feature = "visual_data")]
 use crate::model::ModelFileHeader;
 use crate::sqpack::read_data_block;
-use crate::common_file_operations::read_bool_from;
 
 #[binrw]
 #[brw(repr = i32)]
@@ -46,13 +46,23 @@ struct TextureLodBlock {
     block_count: u32,
 }
 
-pub trait AnyNumberType<'a>: BinRead<Args<'a> = ()> + BinWrite<Args<'a> = ()> + std::ops::AddAssign + Copy + Default + 'static {}
+pub trait AnyNumberType<'a>:
+    BinRead<Args<'a> = ()> + BinWrite<Args<'a> = ()> + std::ops::AddAssign + Copy + Default + 'static
+{
+}
 
-impl<'a, T> AnyNumberType<'a> for T where T: BinRead<Args<'a> = ()> + BinWrite<Args<'a> = ()> + std::ops::AddAssign + Copy + Default + 'static {}
+impl<'a, T> AnyNumberType<'a> for T where
+    T: BinRead<Args<'a> = ()>
+        + BinWrite<Args<'a> = ()>
+        + std::ops::AddAssign
+        + Copy
+        + Default
+        + 'static
+{
+}
 
 #[derive(BinRead, BinWrite)]
-pub struct ModelMemorySizes<T: for<'a> AnyNumberType<'a>>
-{
+pub struct ModelMemorySizes<T: for<'a> AnyNumberType<'a>> {
     pub stack_size: T,
     pub runtime_size: T,
 
@@ -61,8 +71,7 @@ pub struct ModelMemorySizes<T: for<'a> AnyNumberType<'a>> {
     pub index_buffer_size: [T; 3],
 }
 
-impl<T: for<'a> AnyNumberType<'a>> ModelMemorySizes<T>
-{
+impl<T: for<'a> AnyNumberType<'a>> ModelMemorySizes<T> {
     pub fn total(&self) -> T {
         let mut total: T = T::default();
 
@@ -214,7 +223,7 @@ impl DatFile {
             {
                 panic!("Tried to extract a model without the visual_data feature enabled!")
             }
-        },
+        }
         FileType::Texture => self.read_texture_file(offset, &file_info),
     }
 }
diff --git a/src/dic.rs b/src/dic.rs
index f665309..039f0c6 100644
--- a/src/dic.rs
+++ b/src/dic.rs
@@ -4,9 +4,9 @@
 use std::collections::HashMap;
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::{BinRead, BinReaderExt};
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::{BinRead, BinReaderExt};
 
 // Based off of https://github.com/Lotlab/ffxiv-vulgar-words-reader/
 // Credit goes to Jim Kirisame for documenting this format
@@ -19,7 +19,7 @@ pub struct EntryItem {
     flag: u32,
     sibling: u32,
     child: u32,
-    offset: u32
+    offset: u32,
 }
 
 #[binrw]
@@ -64,7 +64,7 @@ struct DictionaryHeader {
 
 pub struct Dictionary {
     header: DictionaryHeader,
-    pub words: Vec<String>
+    pub words: Vec<String>,
 }
 
 impl Dictionary {
@@ -113,7 +113,7 @@ impl Dictionary {
 
         let mut dict = Dictionary {
             header: dict,
-            words: Vec::new()
+            words: Vec::new(),
         };
 
         // TODO: lol
@@ -158,7 +158,7 @@ impl Dictionary {
             (((*new_val as u32) << 8) + lower) as i32
         } else {
             0
-        }
+        };
     }
 
     fn dump_dict_node(&self, vec: &mut Vec<String>, entry_id: i32, prev: String) {
@@ -238,4 +238,4 @@ mod tests {
         // Feeding it invalid data should not panic
         Dictionary::from_existing(&read(d).unwrap());
     }
-}
\ No newline at end of file
+}
diff --git a/src/equipment.rs b/src/equipment.rs
index ebc2d2f..bb503c0 100755
--- a/src/equipment.rs
+++ b/src/equipment.rs
@@ -1,7 +1,7 @@
 // SPDX-FileCopyrightText: 2023 Joshua Goins
 // SPDX-License-Identifier: GPL-3.0-or-later
 
-use crate::race::{Gender, get_race_id, Race, Subrace};
+use crate::race::{get_race_id, Gender, Race, Subrace};
 
 #[repr(u8)]
 #[derive(Debug, PartialEq, Eq)]
@@ -100,7 +100,7 @@ pub enum CharacterCategory {
     Hair,
     Face,
     Tail,
-    Ear
+    Ear,
 }
 
 pub fn get_character_category_path(category: CharacterCategory) -> &'static str {
@@ -109,7 +109,7 @@ pub fn get_character_category_path(category: CharacterCategory) -> &'static str
         CharacterCategory::Hair => "hair",
         CharacterCategory::Face => "face",
         CharacterCategory::Tail => "tail",
-        CharacterCategory::Ear => "zear"
+        CharacterCategory::Ear => "zear",
     }
 }
 
@@ -119,7 +119,7 @@ pub fn get_character_category_abbreviation(category: CharacterCategory) -> &'sta
         CharacterCategory::Hair => "hir",
         CharacterCategory::Face => "fac",
         CharacterCategory::Tail => "til",
-        CharacterCategory::Ear => "ear"
+        CharacterCategory::Ear => "ear",
     }
 }
 
@@ -129,7 +129,7 @@ pub fn get_character_category_prefix(category: CharacterCategory) -> &'static st
         CharacterCategory::Hair => "h",
         CharacterCategory::Face => "f",
         CharacterCategory::Tail => "t",
-        CharacterCategory::Ear => "e"
+        CharacterCategory::Ear => "e",
     }
 }
 
@@ -139,7 +139,7 @@ pub fn build_character_path(
     body_ver: i32,
     race: Race,
     subrace: Subrace,
-    gender: Gender
+    gender: Gender,
 ) -> String {
     let category_path = get_character_category_path(category);
     let race_id = get_race_id(race, subrace, gender).unwrap();
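`build_character_path` (whose signature gains a trailing comma above) assembles game paths from the category tables in this file. A usage sketch; the version number is illustrative and the printed path depends on the race tables:

```rust
use physis::equipment::{build_character_path, CharacterCategory};
use physis::race::{Gender, Race, Subrace};

fn main() {
    // Body model, version 1, for a Midlander Hyur male; values illustrative.
    let path = build_character_path(
        CharacterCategory::Body,
        1,
        Race::Hyur,
        Subrace::Midlander,
        Gender::Male,
    );
    println!("{path}");
}
```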
diff --git a/src/exd.rs b/src/exd.rs
index 5a8b1d0..28c4e95 100644
--- a/src/exd.rs
+++ b/src/exd.rs
@@ -3,8 +3,8 @@
 
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::{BinRead, Endian};
 use binrw::binrw;
+use binrw::{BinRead, Endian};
 
 use crate::common::Language;
 use crate::exh::{ColumnDataType, ExcelColumnDefinition, ExcelDataPagination, EXH};
@@ -81,7 +81,7 @@ impl EXD {
 
                 let row_header = ExcelDataRowHeader::read(&mut cursor).ok()?;
 
-                let header_offset = offset.offset + 6;// std::mem::size_of::<ExcelDataRowHeader>() as u32;
+                let header_offset = offset.offset + 6; // std::mem::size_of::<ExcelDataRowHeader>() as u32;
 
                 let mut read_row = |row_offset: u32| -> Option<ExcelRow> {
                     let mut subrow = ExcelRow {
@@ -93,9 +93,9 @@ impl EXD {
                             .seek(SeekFrom::Start((row_offset + column.offset as u32).into()))
                             .ok()?;
 
-                        subrow
-                            .data
-                            .push(Self::read_column(&mut cursor, exh, row_offset, column).unwrap());
+                        subrow.data.push(
+                            Self::read_column(&mut cursor, exh, row_offset, column).unwrap(),
+                        );
                     }
 
                     Some(subrow)
@@ -117,14 +117,9 @@ impl EXD {
 
         Some(exd)
     }
-
-    fn read_data_raw<Z: BinRead<Args<'static> = ()>>(cursor: &mut Cursor<ByteSpan>) -> Option<Z>
-    {
-        Z::read_options(
-            cursor,
-            Endian::Big,
-            (),
-        ).ok()
+
+    fn read_data_raw<Z: BinRead<Args<'static> = ()>>(cursor: &mut Cursor<ByteSpan>) -> Option<Z> {
+        Z::read_options(cursor, Endian::Big, ()).ok()
     }
 
     fn read_column(
@@ -214,9 +209,9 @@ impl EXD {
 
 #[cfg(test)]
 mod tests {
+    use crate::exh::EXHHeader;
     use std::fs::read;
     use std::path::PathBuf;
-    use crate::exh::EXHHeader;
 
     use super::*;
 
diff --git a/src/execlookup.rs b/src/execlookup.rs
index 632cf07..247cbc4 100644
--- a/src/execlookup.rs
+++ b/src/execlookup.rs
@@ -20,7 +20,7 @@ fn find_needle(installer_file: &[u8], needle: &str) -> Option<String> {
     let mut position = installer_file
         .windows(bytes.len())
         .position(|window| window == bytes)?;
-
+
     let parse_char_at_position = |position: usize| {
         let upper = installer_file[position];
         let lower = installer_file[position + 1];
@@ -60,4 +60,4 @@ pub fn extract_frontier_url(launcher_path: &str) -> Option<String> {
     }
 
     None
-}
\ No newline at end of file
+}
diff --git a/src/exh.rs b/src/exh.rs
index 4f71be6..c7cfd5b 100644
--- a/src/exh.rs
+++ b/src/exh.rs
@@ -5,8 +5,8 @@
 
 use std::io::Cursor;
 
-use binrw::BinRead;
 use binrw::binrw;
+use binrw::BinRead;
 
 use crate::common::Language;
 use crate::ByteSpan;
@@ -107,4 +107,3 @@ mod tests {
         EXH::from_existing(&read(d).unwrap());
     }
 }
-
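For orientation: an EXH header defines the columns and pagination that EXD data pages are decoded against, which is why `read_column` above takes `exh`. A low-level sketch; the exact `EXD::from_existing(&exh, ...)` signature is an assumption inferred from those calls:

```rust
use physis::exd::EXD;
use physis::exh::EXH;

// Parse one sheet page given its header (.exh) and data (.exd) buffers.
fn parse_sheet(exh_bytes: &[u8], exd_bytes: &[u8]) -> Option<EXD> {
    let exh = EXH::from_existing(exh_bytes)?;
    EXD::from_existing(&exh, exd_bytes)
}
```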
diff --git a/src/exl.rs b/src/exl.rs
index 662c303..396954d 100755
--- a/src/exl.rs
+++ b/src/exl.rs
@@ -1,8 +1,8 @@
 // SPDX-FileCopyrightText: 2023 Joshua Goins
 // SPDX-License-Identifier: GPL-3.0-or-later
 
-use std::io::{BufRead, BufReader, BufWriter, Cursor, Write};
 use crate::{ByteBuffer, ByteSpan};
+use std::io::{BufRead, BufReader, BufWriter, Cursor, Write};
 
 /// Represents an Excel List.
 pub struct EXL {
@@ -47,7 +49,9 @@ impl EXL {
         let cursor = Cursor::new(&mut buffer);
         let mut writer = BufWriter::new(cursor);
 
-        writer.write_all(format!("EXLT,{}", self.version).as_ref()).ok()?;
+        writer
+            .write_all(format!("EXLT,{}", self.version).as_ref())
+            .ok()?;
 
         for (key, value) in &self.entries {
             writer.write_all(format!("\n{key},{value}").as_ref()).ok()?;
@@ -120,12 +122,13 @@ mod tests {
         let exl = read(d).unwrap();
 
         let mut out = std::io::stdout();
-        out.write_all(&existing_exl.write_to_buffer().unwrap()).unwrap();
+        out.write_all(&existing_exl.write_to_buffer().unwrap())
+            .unwrap();
         out.flush().unwrap();
 
         assert_eq!(existing_exl.write_to_buffer().unwrap(), exl);
     }
-
+
     #[test]
     fn test_invalid() {
         let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
diff --git a/src/fiin.rs b/src/fiin.rs
index 4f53928..ce6ead9 100644
--- a/src/fiin.rs
+++ b/src/fiin.rs
@@ -5,9 +5,9 @@ use std::fs::read;
 use std::io::Cursor;
 use std::path::Path;
 
-use binrw::{BinRead, BinWrite};
-use binrw::binrw;
 use crate::{ByteBuffer, ByteSpan};
+use binrw::binrw;
+use binrw::{BinRead, BinWrite};
 
 use crate::sha1::Sha1;
 
@@ -74,7 +74,7 @@ impl FileInfo {
     /// Creates a new FileInfo structure from a list of filenames. These filenames must be present in
     /// the current working directory in order to be read properly, since it also generates SHA1
     /// hashes.
-    ///
+    ///
     /// These paths are converted to just their filenames.
     ///
     /// The new FileInfo structure can then be serialized back into retail-compatible form.
@@ -135,14 +135,11 @@ mod tests {
         d3.push("resources/tests");
         d3.push("test.exl");
 
-        let testing_fiin = FileInfo::new(&[
-            d2.to_str().unwrap(),
-            d3.to_str().unwrap()
-        ]).unwrap();
+        let testing_fiin = FileInfo::new(&[d2.to_str().unwrap(), d3.to_str().unwrap()]).unwrap();
 
         assert_eq!(*valid_fiin, testing_fiin.write_to_buffer().unwrap());
     }
-
+
     #[test]
     fn test_invalid() {
         let mut d = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
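Per the doc comment in the `fiin.rs` hunk above, `FileInfo::new` reads the named files from the current working directory and hashes them. A sketch mirroring the test (filenames illustrative):

```rust
use physis::fiin::FileInfo;

fn main() {
    // Both files must exist in the current working directory, since
    // FileInfo::new reads them to compute SHA1 hashes.
    let fiin = FileInfo::new(&["test.exl", "test.cfg"]).unwrap();

    // Serialize back into the retail-compatible fileinfo.fiin form.
    std::fs::write("fileinfo.fiin", fiin.write_to_buffer().unwrap()).unwrap();
}
```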
diff --git a/src/gamedata.rs b/src/gamedata.rs
index 9766fcb..f110344 100755
--- a/src/gamedata.rs
+++ b/src/gamedata.rs
@@ -8,15 +8,15 @@ use std::path::PathBuf;
 
 use tracing::{debug, warn};
 
-use crate::common::{Language, Platform, read_version};
+use crate::common::{read_version, Language, Platform};
 use crate::dat::DatFile;
 use crate::exd::EXD;
 use crate::exh::EXH;
 use crate::exl::EXL;
 use crate::index::{Index2File, IndexFile, IndexHashBitfield};
-use crate::ByteBuffer;
 use crate::patch::{apply_patch, PatchError};
-use crate::repository::{Category, Repository, string_to_category};
+use crate::repository::{string_to_category, Category, Repository};
+use crate::ByteBuffer;
 
 /// Framework for operating on game data.
 pub struct GameData {
@@ -27,7 +27,7 @@ pub struct GameData {
     pub repositories: Vec<Repository>,
 
     index_files: HashMap<String, IndexFile>,
-    index2_files: HashMap<String, Index2File>
+    index2_files: HashMap<String, Index2File>,
 }
 
 fn is_valid(path: &str) -> bool {
@@ -79,7 +79,7 @@ impl GameData {
                     game_directory: String::from(directory),
                     repositories: vec![],
                     index_files: HashMap::new(),
-                    index2_files: HashMap::new()
+                    index2_files: HashMap::new(),
                 };
                 data.reload_repositories(platform);
                 Some(data)
@@ -90,14 +90,16 @@ impl GameData {
             }
         }
     }
-
+
     fn reload_repositories(&mut self, platform: Platform) {
         self.repositories.clear();
 
         let mut d = PathBuf::from(self.game_directory.as_str());
 
         // add initial ffxiv directory
-        if let Some(base_repository) = Repository::from_existing_base(platform.clone(), d.to_str().unwrap()) {
+        if let Some(base_repository) =
+            Repository::from_existing_base(platform.clone(), d.to_str().unwrap())
+        {
             self.repositories.push(base_repository);
         }
 
@@ -105,15 +107,18 @@ impl GameData {
         d.push("sqpack");
 
         if let Ok(repository_paths) = fs::read_dir(d.as_path()) {
-            let repository_paths : ReadDir = repository_paths;
+            let repository_paths: ReadDir = repository_paths;
 
-            let repository_paths : Vec<DirEntry> = repository_paths
+            let repository_paths: Vec<DirEntry> = repository_paths
                 .filter_map(Result::ok)
                 .filter(|s| s.file_type().unwrap().is_dir())
                 .collect();
 
             for repository_path in repository_paths {
-                if let Some(expansion_repository) = Repository::from_existing_expansion(platform.clone(), repository_path.path().to_str().unwrap()) {
+                if let Some(expansion_repository) = Repository::from_existing_expansion(
+                    platform.clone(),
+                    repository_path.path().to_str().unwrap(),
+                ) {
                     self.repositories.push(expansion_repository);
                 }
             }
@@ -183,7 +188,7 @@ impl GameData {
     /// file.write(data.as_slice()).unwrap();
     /// ```
     pub fn extract(&mut self, path: &str) -> Option<ByteBuffer> {
-        debug!(file=path, "Extracting file");
+        debug!(file = path, "Extracting file");
 
         let slice = self.find_entry(path);
         match slice {
@@ -223,8 +228,8 @@ impl GameData {
             &repository.name,
             &repository.index_filename(category),
         ]
-            .iter()
-            .collect();
+        .iter()
+        .collect();
 
         let index2_path: PathBuf = [
             &self.game_directory,
@@ -232,10 +237,13 @@ impl GameData {
             &repository.name,
             &repository.index2_filename(category),
         ]
-            .iter()
-            .collect();
+        .iter()
+        .collect();
 
-        (index_path.into_os_string().into_string().unwrap(), index2_path.into_os_string().into_string().unwrap())
+        (
+            index_path.into_os_string().into_string().unwrap(),
+            index2_path.into_os_string().into_string().unwrap(),
+        )
     }
 
     /// Read an excel sheet by name (e.g. "Achievement")
@@ -387,7 +395,7 @@ impl GameData {
         Ok(())
     }
 
-    fn cache_index_file(&mut self, filenames: (&str, &str)) {
+    fn cache_index_file(&mut self, filenames: (&str, &str)) {
         if !self.index_files.contains_key(filenames.0) {
             if let Some(index_file) = IndexFile::from_existing(filenames.0) {
                 self.index_files.insert(filenames.0.to_string(), index_file);
@@ -396,7 +404,8 @@ impl GameData {
 
         if !self.index2_files.contains_key(filenames.1) {
             if let Some(index_file) = Index2File::from_existing(filenames.1) {
-                self.index2_files.insert(filenames.1.to_string(), index_file);
+                self.index2_files
+                    .insert(filenames.1.to_string(), index_file);
             }
         }
     }
@@ -411,7 +420,11 @@ impl GameData {
     fn find_entry(&mut self, path: &str) -> Option {
         let index_path = self.get_index_filenames(path);
 
-        debug!("Trying index files {index_path}, {index2_path}", index_path=index_path.0, index2_path=index_path.1);
+        debug!(
+            "Trying index files {index_path}, {index2_path}",
+            index_path = index_path.0,
+            index2_path = index_path.1
+        );
 
         self.cache_index_file((&index_path.0, &index_path.1));
 
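The `extract` doc example touched above shows the intended entry point for most of this file. Expanded into a standalone sketch (install path illustrative):

```rust
use physis::common::Platform;
use physis::gamedata::GameData;

fn main() {
    // Point this at the "game" folder of an install.
    let mut game_data = GameData::from_existing(Platform::Win32, "C:\\FFXIV\\game").unwrap();

    // extract() resolves the path through the cached .index/.index2 files
    // and reads the entry out of the matching .dat file.
    let data = game_data.extract("exd/root.exl").unwrap();
    std::fs::write("root.exl", data.as_slice()).unwrap();
}
```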
"Achievement") @@ -387,7 +395,7 @@ impl GameData { Ok(()) } - fn cache_index_file(&mut self, filenames: (&str, &str)) { + fn cache_index_file(&mut self, filenames: (&str, &str)) { if !self.index_files.contains_key(filenames.0) { if let Some(index_file) = IndexFile::from_existing(filenames.0) { self.index_files.insert(filenames.0.to_string(), index_file); @@ -396,7 +404,8 @@ impl GameData { if !self.index2_files.contains_key(filenames.1) { if let Some(index_file) = Index2File::from_existing(filenames.1) { - self.index2_files.insert(filenames.1.to_string(), index_file); + self.index2_files + .insert(filenames.1.to_string(), index_file); } } } @@ -411,7 +420,11 @@ impl GameData { fn find_entry(&mut self, path: &str) -> Option { let index_path = self.get_index_filenames(path); - debug!("Trying index files {index_path}, {index2_path}", index_path=index_path.0, index2_path=index_path.1); + debug!( + "Trying index files {index_path}, {index2_path}", + index_path = index_path.0, + index2_path = index_path.1 + ); self.cache_index_file((&index_path.0, &index_path.1)); diff --git a/src/havok/animation_binding.rs b/src/havok/animation_binding.rs index 91ffd23..392f751 100644 --- a/src/havok/animation_binding.rs +++ b/src/havok/animation_binding.rs @@ -1,11 +1,11 @@ // SPDX-FileCopyrightText: 2020 Inseok Lee // SPDX-License-Identifier: MIT -use core::cell::RefCell; -use std::sync::Arc; -use crate::havok::HavokAnimation; use crate::havok::object::HavokObject; use crate::havok::spline_compressed_animation::HavokSplineCompressedAnimation; +use crate::havok::HavokAnimation; +use core::cell::RefCell; +use std::sync::Arc; #[repr(u8)] pub enum HavokAnimationBlendHint { @@ -33,14 +33,20 @@ impl HavokAnimationBinding { pub fn new(object: Arc>) -> Self { let root = object.borrow(); - let raw_transform_track_to_bone_indices = root.get("transformTrackToBoneIndices").as_array(); - let transform_track_to_bone_indices = raw_transform_track_to_bone_indices.iter().map(|x| x.as_int() as u16).collect::>(); + let raw_transform_track_to_bone_indices = + root.get("transformTrackToBoneIndices").as_array(); + let transform_track_to_bone_indices = raw_transform_track_to_bone_indices + .iter() + .map(|x| x.as_int() as u16) + .collect::>(); let blend_hint = HavokAnimationBlendHint::from_raw(root.get("blendHint").as_int() as u8); let raw_animation = root.get("animation").as_object(); let animation = match &*raw_animation.borrow().object_type.name { - "hkaSplineCompressedAnimation" => Box::new(HavokSplineCompressedAnimation::new(raw_animation.clone())), + "hkaSplineCompressedAnimation" => { + Box::new(HavokSplineCompressedAnimation::new(raw_animation.clone())) + } _ => panic!(), }; diff --git a/src/havok/animation_container.rs b/src/havok/animation_container.rs index 773fb2b..018dd2b 100644 --- a/src/havok/animation_container.rs +++ b/src/havok/animation_container.rs @@ -1,11 +1,11 @@ // SPDX-FileCopyrightText: 2020 Inseok Lee // SPDX-License-Identifier: MIT -use std::cell::RefCell; -use std::sync::Arc; use crate::havok::animation_binding::HavokAnimationBinding; use crate::havok::object::HavokObject; use crate::havok::skeleton::HavokSkeleton; +use std::cell::RefCell; +use std::sync::Arc; pub struct HavokAnimationContainer { pub skeletons: Vec, @@ -17,11 +17,20 @@ impl HavokAnimationContainer { let root = object.borrow(); let raw_skeletons = root.get("skeletons").as_array(); - let skeletons = raw_skeletons.iter().map(|x| HavokSkeleton::new(x.as_object())).collect::>(); + let skeletons = raw_skeletons + .iter() + .map(|x| 
diff --git a/src/havok/binary_tag_file_reader.rs b/src/havok/binary_tag_file_reader.rs
index 853e9b2..3965dfd 100644
--- a/src/havok/binary_tag_file_reader.rs
+++ b/src/havok/binary_tag_file_reader.rs
@@ -3,12 +3,15 @@
 
 #![allow(clippy::arc_with_non_send_sync)]
 
+use crate::havok::byte_reader::ByteReader;
+use crate::havok::object::{
+    HavokInteger, HavokObject, HavokObjectType, HavokObjectTypeMember, HavokRootObject, HavokValue,
+    HavokValueType,
+};
+use crate::havok::slice_ext::SliceByteOrderExt;
 use core::cell::RefCell;
 use std::collections::HashMap;
 use std::sync::Arc;
-use crate::havok::byte_reader::ByteReader;
-use crate::havok::object::{HavokInteger, HavokObject, HavokObjectType, HavokObjectTypeMember, HavokRootObject, HavokValue, HavokValueType};
-use crate::havok::slice_ext::SliceByteOrderExt;
 
 #[repr(i8)]
 enum HavokTagType {
@@ -59,7 +62,11 @@ impl<'a> HavokBinaryTagFileReader<'a> {
     fn new(reader: ByteReader<'a>) -> Self {
         let file_version = 0;
         let remembered_strings = vec![Arc::from("string"), Arc::from("")];
-        let remembered_types = vec![Arc::new(HavokObjectType::new(Arc::from("object"), None, Vec::new()))];
+        let remembered_types = vec![Arc::new(HavokObjectType::new(
+            Arc::from("object"),
+            None,
+            Vec::new(),
+        ))];
         let remembered_objects = Vec::new();
         let objects = Vec::new();
 
@@ -87,7 +94,10 @@ impl<'a> HavokBinaryTagFileReader<'a> {
                     self.file_version = self.read_packed_int() as u8;
                     assert_eq!(self.file_version, 3, "Unimplemented version");
                     self.remembered_objects
-                        .push(Arc::new(RefCell::new(HavokObject::new(self.remembered_types[0].clone(), HashMap::new()))))
+                        .push(Arc::new(RefCell::new(HavokObject::new(
+                            self.remembered_types[0].clone(),
+                            HashMap::new(),
+                        ))))
                 }
                 HavokTagType::Type => {
                     let object_type = self.read_type();
@@ -152,7 +162,9 @@ impl<'a> HavokBinaryTagFileReader<'a> {
             HavokValueType::INT => HavokValue::Integer(self.read_packed_int()),
             HavokValueType::REAL => HavokValue::Real(self.reader.read_f32_le()),
             HavokValueType::STRING => HavokValue::String(self.read_string()),
-            HavokValueType::OBJECT => HavokValue::ObjectReference(self.read_packed_int() as usize),
+            HavokValueType::OBJECT => {
+                HavokValue::ObjectReference(self.read_packed_int() as usize)
+            }
             _ => panic!("unimplemented {}", member.type_.bits()),
         }
     }
@@ -161,14 +173,19 @@ impl<'a> HavokBinaryTagFileReader<'a> {
     fn read_array(&mut self, member: &HavokObjectTypeMember, array_len: usize) -> Vec<HavokValue> {
         let base_type = member.type_.base_type();
         match base_type {
-            HavokValueType::STRING => (0..array_len).map(|_| HavokValue::String(self.read_string())).collect::<Vec<_>>(),
+            HavokValueType::STRING => (0..array_len)
+                .map(|_| HavokValue::String(self.read_string()))
+                .collect::<Vec<_>>(),
             HavokValueType::STRUCT => {
                 let target_type = self.find_type(member.class_name.as_ref().unwrap());
                 let data_existence = self.read_bit_field(target_type.member_count());
 
                 let mut result_objects = Vec::new();
                 for _ in 0..array_len {
-                    let object = Arc::new(RefCell::new(HavokObject::new(target_type.clone(), HashMap::new())));
+                    let object = Arc::new(RefCell::new(HavokObject::new(
+                        target_type.clone(),
+                        HashMap::new(),
+                    )));
                     result_objects.push(object.clone());
                     self.objects.push(object);
 
@@ -188,7 +205,10 @@ impl<'a> HavokBinaryTagFileReader<'a> {
                     }
                 }
 
-                result_objects.into_iter().map(HavokValue::Object).collect::<Vec<_>>()
+                result_objects
+                    .into_iter()
+                    .map(HavokValue::Object)
+                    .collect::<Vec<_>>()
             }
             HavokValueType::OBJECT => (0..array_len)
                 .map(|_| {
@@ -204,16 +224,33 @@ impl<'a> HavokBinaryTagFileReader<'a> {
                 if self.file_version >= 3 {
                     self.read_packed_int(); // type?
                 }
-                (0..array_len).map(|_| HavokValue::Integer(self.read_packed_int())).collect::<Vec<_>>()
-            }
-            HavokValueType::REAL => (0..array_len).map(|_| HavokValue::Real(self.reader.read_f32_le())).collect::<Vec<_>>(),
-            HavokValueType::VEC4 | HavokValueType::VEC8 | HavokValueType::VEC12 | HavokValueType::VEC16 => {
-                let vec_size = member.type_.base_type().vec_size() as usize;
                 (0..array_len)
-                    .map(|_| HavokValue::Vec((0..vec_size).map(|_| self.reader.read_f32_le()).collect::<Vec<_>>()))
+                    .map(|_| HavokValue::Integer(self.read_packed_int()))
                     .collect::<Vec<_>>()
             }
-            _ => panic!("unimplemented {} {}", member.type_.bits(), member.type_.base_type().bits()),
+            HavokValueType::REAL => (0..array_len)
+                .map(|_| HavokValue::Real(self.reader.read_f32_le()))
+                .collect::<Vec<_>>(),
+            HavokValueType::VEC4
+            | HavokValueType::VEC8
+            | HavokValueType::VEC12
+            | HavokValueType::VEC16 => {
+                let vec_size = member.type_.base_type().vec_size() as usize;
+                (0..array_len)
+                    .map(|_| {
+                        HavokValue::Vec(
+                            (0..vec_size)
+                                .map(|_| self.reader.read_f32_le())
+                                .collect::<Vec<_>>(),
+                        )
+                    })
+                    .collect::<Vec<_>>()
+            }
+            _ => panic!(
+                "unimplemented {} {}",
+                member.type_.bits(),
+                member.type_.base_type().bits()
+            ),
         }
     }
 
@@ -229,8 +266,14 @@ impl<'a> HavokBinaryTagFileReader<'a> {
         let member_name = self.read_string();
         let type_ = HavokValueType::from_bits(self.read_packed_int() as u32).unwrap();
 
-        let tuple_size = if type_.is_tuple() { self.read_packed_int() } else { 0 };
-        let type_name = if type_.base_type() == HavokValueType::OBJECT || type_.base_type() == HavokValueType::STRUCT {
+        let tuple_size = if type_.is_tuple() {
+            self.read_packed_int()
+        } else {
+            0
+        };
+        let type_name = if type_.base_type() == HavokValueType::OBJECT
+            || type_.base_type() == HavokValueType::STRUCT
+        {
             Some(self.read_string())
         } else {
             None
@@ -249,7 +292,11 @@ impl<'a> HavokBinaryTagFileReader<'a> {
             return self.remembered_strings[-length as usize].clone();
         }
 
-        let result = Arc::from(std::str::from_utf8(self.reader.read_bytes(length as usize)).unwrap().to_owned());
+        let result = Arc::from(
+            std::str::from_utf8(self.reader.read_bytes(length as usize))
+                .unwrap()
+                .to_owned(),
+        );
         self.remembered_strings.push(Arc::clone(&result));
 
         result
@@ -296,7 +343,11 @@ impl<'a> HavokBinaryTagFileReader<'a> {
     }
 
     fn find_type(&self, type_name: &str) -> Arc<HavokObjectType> {
-        self.remembered_types.iter().find(|&x| &*x.name == type_name).unwrap().clone()
+        self.remembered_types
+            .iter()
+            .find(|&x| &*x.name == type_name)
+            .unwrap()
+            .clone()
     }
 
     fn fill_object_reference(&self, object: &mut HavokObject) {
@@ -327,7 +378,11 @@ impl<'a> HavokBinaryTagFileReader<'a> {
 
     fn default_value(type_: HavokValueType) -> HavokValue {
         if type_.is_vec() {
-            HavokValue::Array((0..type_.vec_size()).map(|_| Self::default_value(type_.base_type())).collect::<Vec<_>>())
+            HavokValue::Array(
+                (0..type_.vec_size())
+                    .map(|_| Self::default_value(type_.base_type()))
+                    .collect::<Vec<_>>(),
+            )
         } else if type_.is_array() || type_.is_tuple() {
             HavokValue::Array(Vec::new())
         } else {
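The reader above turns a Havok binary tag file into an object graph that the typed wrappers (`HavokAnimationContainer`, etc.) consume. A sketch of that hand-off; `read` and `find_object_by_type` are assumptions about the root-object API, so adjust to the actual exports:

```rust
use physis::havok::{HavokAnimationContainer, HavokBinaryTagFileReader};

fn load_animations(havok_data: &[u8]) -> HavokAnimationContainer {
    // Parse the tag file into its object graph (assumed entry point).
    let root = HavokBinaryTagFileReader::read(havok_data);

    // Locate the hkaAnimationContainer object and wrap it (assumed lookup).
    let raw = root.find_object_by_type("hkaAnimationContainer");
    HavokAnimationContainer::new(raw)
}
```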
diff --git a/src/havok/mod.rs b/src/havok/mod.rs
index 6caba37..bfdaf88 100644
--- a/src/havok/mod.rs
+++ b/src/havok/mod.rs
@@ -10,9 +10,9 @@ mod binary_tag_file_reader;
 mod byte_reader;
 mod object;
 mod skeleton;
+mod slice_ext;
 mod spline_compressed_animation;
 mod transform;
-mod slice_ext;
 
 pub use animation::HavokAnimation;
 pub use animation_container::HavokAnimationContainer;
diff --git a/src/havok/object.rs b/src/havok/object.rs
index 9ba1632..39268d0 100644
--- a/src/havok/object.rs
+++ b/src/havok/object.rs
@@ -170,7 +170,12 @@ pub struct HavokObjectTypeMember {
 }
 
 impl HavokObjectTypeMember {
-    pub fn new(name: Arc<str>, type_: HavokValueType, tuple_size: u32, type_name: Option<Arc<str>>) -> Self {
+    pub fn new(
+        name: Arc<str>,
+        type_: HavokValueType,
+        tuple_size: u32,
+        type_name: Option<Arc<str>>,
+    ) -> Self {
         Self {
             name,
             type_,
@@ -187,20 +192,35 @@ pub struct HavokObjectType {
 }
 
 impl HavokObjectType {
-    pub fn new(name: Arc<str>, parent: Option<Arc<HavokObjectType>>, members: Vec<HavokObjectTypeMember>) -> Self {
-        Self { name, parent, members }
+    pub fn new(
+        name: Arc<str>,
+        parent: Option<Arc<HavokObjectType>>,
+        members: Vec<HavokObjectTypeMember>,
+    ) -> Self {
+        Self {
+            name,
+            parent,
+            members,
+        }
     }
 
     pub fn members(&self) -> Vec<&HavokObjectTypeMember> {
         if let Some(x) = &self.parent {
-            x.members().into_iter().chain(self.members.iter()).collect::<Vec<_>>()
+            x.members()
+                .into_iter()
+                .chain(self.members.iter())
+                .collect::<Vec<_>>()
         } else {
             self.members.iter().collect::<Vec<_>>()
         }
     }
 
     pub fn member_count(&self) -> usize {
-        (if let Some(x) = &self.parent { x.members.len() } else { 0 }) + self.members.len()
+        (if let Some(x) = &self.parent {
+            x.members.len()
+        } else {
+            0
+        }) + self.members.len()
     }
 }
 
@@ -219,7 +239,12 @@ impl HavokObject {
     }
 
     pub fn get(&self, member_name: &str) -> &HavokValue {
-        let member_index = self.object_type.members().iter().position(|&x| &*x.name == member_name).unwrap();
+        let member_index = self
+            .object_type
+            .members()
+            .iter()
+            .position(|&x| &*x.name == member_name)
+            .unwrap();
 
         self.data.get(&member_index).unwrap()
     }
diff --git a/src/havok/skeleton.rs b/src/havok/skeleton.rs
index fc74a5c..2928123 100644
--- a/src/havok/skeleton.rs
+++ b/src/havok/skeleton.rs
@@ -1,10 +1,10 @@
 // SPDX-FileCopyrightText: 2020 Inseok Lee
 // SPDX-License-Identifier: MIT
 
-use core::cell::RefCell;
-use std::sync::Arc;
 use crate::havok::object::HavokObject;
 use crate::havok::transform::HavokTransform;
+use core::cell::RefCell;
+use std::sync::Arc;
 
 #[derive(Debug)]
 pub struct HavokSkeleton {
@@ -28,10 +28,16 @@ impl HavokSkeleton {
             .collect::<Vec<_>>();
 
         let raw_parent_indices = root.get("parentIndices").as_array();
-        let parent_indices = raw_parent_indices.iter().map(|x| x.as_int() as usize).collect::<Vec<_>>();
+        let parent_indices = raw_parent_indices
+            .iter()
+            .map(|x| x.as_int() as usize)
+            .collect::<Vec<_>>();
 
         let raw_reference_pose = root.get("referencePose").as_array();
-        let reference_pose = raw_reference_pose.iter().map(|x| HavokTransform::new(x.as_vec())).collect::<Vec<_>>();
+        let reference_pose = raw_reference_pose
+            .iter()
+            .map(|x| HavokTransform::new(x.as_vec()))
+            .collect::<Vec<_>>();
 
         Self {
             bone_names,
diff --git a/src/havok/slice_ext.rs b/src/havok/slice_ext.rs
index 0f0f052..e1b663c 100644
--- a/src/havok/slice_ext.rs
+++ b/src/havok/slice_ext.rs
@@ -5,22 +5,22 @@ use core::convert::TryInto;
 
 pub trait SliceByteOrderExt {
     fn to_int_be<T>(&self) -> T
-        where
-            T: Integer;
+    where
+        T: Integer;
 
     fn to_int_le<T>(&self) -> T
-        where
-            T: Integer;
+    where
+        T: Integer;
 
     fn to_float_be<T>(&self) -> T
-        where
-            T: Float;
+    where
+        T: Float;
 }
 
 impl SliceByteOrderExt for &[u8] {
     fn to_int_be<T>(&self) -> T
-        where
-            T: Integer,
+    where
+        T: Integer,
     {
         let sliced = &self[..core::mem::size_of::<T>()];
 
@@ -28,8 +28,8 @@ impl SliceByteOrderExt for &[u8] {
     }
 
     fn to_int_le<T>(&self) -> T
-        where
-            T: Integer,
+    where
+        T: Integer,
     {
         let sliced = &self[..core::mem::size_of::<T>()];
 
@@ -37,8 +37,8 @@ impl SliceByteOrderExt for &[u8] {
     }
 
     fn to_float_be<T>(&self) -> T
-        where
-            T: Float,
+    where
+        T: Float,
     {
         let sliced = &self[..core::mem::size_of::<T>()];
 
@@ -119,4 +119,4 @@ impl Float for f32 {
     fn from_be_bytes(bytes: &[u8]) -> Self {
         Self::from_be_bytes(bytes.try_into().unwrap())
     }
-}
\ No newline at end of file
+}
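The where-clause reflow above belongs to an extension trait that reads big/little-endian scalars straight off a byte slice. A condensed, self-contained illustration of the same pattern, without the crate's `Integer`/`Float` traits:

```rust
// Single-type version of the pattern; the real trait is generic.
trait SliceByteOrderExt {
    fn to_u32_be(&self) -> u32;
}

impl SliceByteOrderExt for &[u8] {
    fn to_u32_be(&self) -> u32 {
        // Take exactly size_of::<u32>() bytes and decode them big-endian.
        let sliced = &self[..core::mem::size_of::<u32>()];
        u32::from_be_bytes(sliced.try_into().unwrap())
    }
}

fn main() {
    let bytes: &[u8] = &[0x00, 0x00, 0x01, 0x00];
    assert_eq!(bytes.to_u32_be(), 256);
}
```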
diff --git a/src/havok/spline_compressed_animation.rs b/src/havok/spline_compressed_animation.rs
index d3e439e..2bc7929 100644
--- a/src/havok/spline_compressed_animation.rs
+++ b/src/havok/spline_compressed_animation.rs
@@ -1,13 +1,13 @@
 // SPDX-FileCopyrightText: 2020 Inseok Lee
 // SPDX-License-Identifier: MIT
 
+use crate::havok::byte_reader::ByteReader;
+use crate::havok::object::HavokObject;
+use crate::havok::transform::HavokTransform;
+use crate::havok::HavokAnimation;
 use core::{cell::RefCell, cmp};
 use std::f32;
 use std::sync::Arc;
-use crate::havok::byte_reader::ByteReader;
-use crate::havok::HavokAnimation;
-use crate::havok::object::HavokObject;
-use crate::havok::transform::HavokTransform;
 
 #[repr(u8)]
 #[allow(clippy::upper_case_acronyms)]
@@ -107,10 +107,16 @@ impl HavokSplineCompressedAnimation {
         let frame_duration = root.get("frameDuration").as_real();
 
         let raw_block_offsets = root.get("blockOffsets").as_array();
-        let block_offsets = raw_block_offsets.iter().map(|x| x.as_int() as u32).collect::<Vec<_>>();
+        let block_offsets = raw_block_offsets
+            .iter()
+            .map(|x| x.as_int() as u32)
+            .collect::<Vec<_>>();
 
         let raw_data = root.get("data").as_array();
-        let data = raw_data.iter().map(|x| x.as_int() as u8).collect::<Vec<_>>();
+        let data = raw_data
+            .iter()
+            .map(|x| x.as_int() as u8)
+            .collect::<Vec<_>>();
 
         Self {
             duration,
@@ -136,7 +142,8 @@ impl HavokSplineCompressedAnimation {
         let real_frame = (frame - first_frame_of_block) as f32 + delta;
         let block_time_out = real_frame * self.frame_duration;
 
-        let quantized_time_out = ((block_time_out * self.block_inverse_duration) * (self.max_frames_per_block as f32 - 1.)) as u8;
+        let quantized_time_out = ((block_time_out * self.block_inverse_duration)
+            * (self.max_frames_per_block as f32 - 1.)) as u8;
 
         (block_out, block_time_out, quantized_time_out)
     }
@@ -164,7 +171,11 @@ impl HavokSplineCompressedAnimation {
         mid
     }
 
-    fn read_knots(data: &mut ByteReader, u: u8, frame_duration: f32) -> (usize, usize, Vec<f32>, usize) {
+    fn read_knots(
+        data: &mut ByteReader,
+        u: u8,
+        frame_duration: f32,
+    ) -> (usize, usize, Vec<f32>, usize) {
         let n = data.read_u16_le() as usize;
         let p = data.read() as usize;
         let raw = data.raw();
@@ -199,7 +210,12 @@ impl HavokSplineCompressedAnimation {
         let phi = b * c * (f32::consts::PI / 2.);
 
         // spherical coordinate to cartesian coordinate
-        let mut result = [f32::sin(theta) * f32::cos(phi), f32::sin(theta) * f32::sin(phi), f32::cos(theta), 1.];
+        let mut result = [
+            f32::sin(theta) * f32::cos(phi),
+            f32::sin(theta) * f32::sin(phi),
+            f32::cos(theta),
+            1.,
+        ];
 
         for item in result.iter_mut() {
             *item *= f32::sqrt(1. - value * value);
         }
@@ -226,9 +242,18 @@ impl HavokSplineCompressedAnimation {
         let mut buf = [0u32; 4];
 
         unsafe {
-            let m = core::slice::from_raw_parts(permute.as_ptr() as *const u8, permute.len() * core::mem::size_of::<u32>());
-            let a = core::slice::from_raw_parts(data.as_ptr() as *const u8, data.len() * core::mem::size_of::<u32>());
-            let r = core::slice::from_raw_parts_mut(buf.as_mut_ptr() as *mut u8, buf.len() * core::mem::size_of::<u32>());
+            let m = core::slice::from_raw_parts(
+                permute.as_ptr() as *const u8,
+                permute.len() * core::mem::size_of::<u32>(),
+            );
+            let a = core::slice::from_raw_parts(
+                data.as_ptr() as *const u8,
+                data.len() * core::mem::size_of::<u32>(),
+            );
+            let r = core::slice::from_raw_parts_mut(
+                buf.as_mut_ptr() as *mut u8,
+                buf.len() * core::mem::size_of::<u32>(),
+            );
             for i in 0..16 {
                 r[i] = a[m[i] as usize];
             }
@@ -310,7 +335,13 @@ impl HavokSplineCompressedAnimation {
         }
     }
 
-    fn read_packed_quaternions(quantization: RotationQuantization, data: &mut ByteReader, n: usize, p: usize, span: usize) -> Vec<[f32; 4]> {
+    fn read_packed_quaternions(
+        quantization: RotationQuantization,
+        data: &mut ByteReader,
+        n: usize,
+        p: usize,
+        span: usize,
+    ) -> Vec<[f32; 4]> {
         data.align(quantization.align());
         let bytes_per_quaternion = quantization.bytes_per_quaternion();
 
@@ -409,7 +440,9 @@ impl HavokSplineCompressedAnimation {
         &base[offset..]
     }
 
-    fn unpack_quantization_types(packed_quantization_types: u8) -> (ScalarQuantization, RotationQuantization, ScalarQuantization) {
+    fn unpack_quantization_types(
+        packed_quantization_types: u8,
+    ) -> (ScalarQuantization, RotationQuantization, ScalarQuantization) {
         let translation = ScalarQuantization::from_raw(packed_quantization_types & 0x03);
         let rotation = RotationQuantization::from_raw((packed_quantization_types >> 2) & 0x0F);
         let scale = ScalarQuantization::from_raw((packed_quantization_types >> 6) & 0x03);
@@ -417,9 +450,24 @@ impl HavokSplineCompressedAnimation {
         (translation, rotation, scale)
     }
 
-    fn sample_translation(&self, quantization: ScalarQuantization, time: f32, quantized_time: u8, mask: u8, data: &mut ByteReader) -> [f32; 4] {
+    fn sample_translation(
+        &self,
+        quantization: ScalarQuantization,
+        time: f32,
+        quantized_time: u8,
+        mask: u8,
+        data: &mut ByteReader,
+    ) -> [f32; 4] {
         let result = if mask != 0 {
-            Self::read_nurbs_curve(quantization, data, quantized_time, self.frame_duration, time, mask, [0., 0., 0., 0.])
+            Self::read_nurbs_curve(
+                quantization,
+                data,
+                quantized_time,
+                self.frame_duration,
+                time,
+                mask,
+                [0., 0., 0., 0.],
+            )
         } else {
             [0., 0., 0., 0.]
         };
 
@@ -429,17 +477,46 @@ impl HavokSplineCompressedAnimation {
         result
     }
 
-    fn sample_rotation(&self, quantization: RotationQuantization, time: f32, quantized_time: u8, mask: u8, data: &mut ByteReader) -> [f32; 4] {
-        let result = Self::read_nurbs_quaternion(quantization, data, quantized_time, self.frame_duration, time, mask);
+    fn sample_rotation(
+        &self,
+        quantization: RotationQuantization,
+        time: f32,
+        quantized_time: u8,
+        mask: u8,
+        data: &mut ByteReader,
+    ) -> [f32; 4] {
+        let result = Self::read_nurbs_quaternion(
+            quantization,
+            data,
+            quantized_time,
+            self.frame_duration,
+            time,
+            mask,
+        );
 
         data.align(4);
 
         result
     }
 
-    fn sample_scale(&self, quantization: ScalarQuantization, time: f32, quantized_time: u8, mask: u8, data: &mut ByteReader) -> [f32; 4] {
+    fn sample_scale(
+        &self,
+        quantization: ScalarQuantization,
+        time: f32,
+        quantized_time: u8,
+        mask: u8,
+        data: &mut ByteReader,
+    ) -> [f32; 4] {
         let result = if mask != 0 {
-            Self::read_nurbs_curve(quantization, data, quantized_time, self.frame_duration, time, mask, [1., 1., 1., 1.])
+            Self::read_nurbs_curve(
+                quantization,
+                data,
+                quantized_time,
+                self.frame_duration,
+                time,
+                mask,
+                [1., 1., 1., 1.],
+            )
         } else {
             [1., 1., 1., 1.]
         };
@@ -575,17 +652,41 @@ impl HavokAnimation for HavokSplineCompressedAnimation {
             block,
             self.mask_and_quantization_size,
         ));
-        let mut mask = ByteReader::new(Self::compute_packed_nurbs_offsets(&self.data, &self.block_offsets, block, 0x8000_0000));
+        let mut mask = ByteReader::new(Self::compute_packed_nurbs_offsets(
+            &self.data,
+            &self.block_offsets,
+            block,
+            0x8000_0000,
+        ));
 
         let mut result = Vec::with_capacity(self.number_of_transform_tracks);
         for _ in 0..self.number_of_transform_tracks {
             let packed_quantization_types = mask.read();
 
-            let (translation_type, rotation_type, scale_type) = Self::unpack_quantization_types(packed_quantization_types);
+            let (translation_type, rotation_type, scale_type) =
+                Self::unpack_quantization_types(packed_quantization_types);
 
-            let translation = self.sample_translation(translation_type, block_time, quantized_time, mask.read(), &mut data);
-            let rotation = self.sample_rotation(rotation_type, block_time, quantized_time, mask.read(), &mut data);
-            let scale = self.sample_scale(scale_type, block_time, quantized_time, mask.read(), &mut data);
+            let translation = self.sample_translation(
+                translation_type,
+                block_time,
+                quantized_time,
+                mask.read(),
+                &mut data,
+            );
+            let rotation = self.sample_rotation(
+                rotation_type,
+                block_time,
+                quantized_time,
+                mask.read(),
+                &mut data,
+            );
+            let scale = self.sample_scale(
+                scale_type,
+                block_time,
+                quantized_time,
+                mask.read(),
+                &mut data,
+            );
 
             result.push(HavokTransform::from_trs(translation, rotation, scale));
         }
diff --git a/src/index.rs b/src/index.rs
index 13933db..f9c5061 100755
--- a/src/index.rs
+++ b/src/index.rs
@@ -5,11 +5,11 @@
 
 use std::io::SeekFrom;
 
-use binrw::BinRead;
-use binrw::binrw;
-use modular_bitfield::prelude::*;
 use crate::common::Platform;
 use crate::crc::Jamcrc;
+use binrw::binrw;
+use binrw::BinRead;
+use modular_bitfield::prelude::*;
 
 #[binrw]
 #[br(magic = b"SqPack")]
diff --git a/src/lgb.rs b/src/lgb.rs
index 1f152a4..50a2583 100644
--- a/src/lgb.rs
+++ b/src/lgb.rs
@@ -3,9 +3,9 @@
 
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::{BinRead, binread, BinReaderExt};
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::{binread, BinRead, BinReaderExt};
 
 // From https://github.com/NotAdam/Lumina/tree/40dab50183eb7ddc28344378baccc2d63ae71d35/src/Lumina/Data/Parsing/Layer
@@ -13,8 +13,7 @@ use crate::ByteSpan;
 #[binrw]
 #[repr(i32)]
 #[derive(Debug, PartialEq)]
-enum LayerEntryType
-{
+enum LayerEntryType {
     #[brw(magic = 0x0i32)]
     AssetNone,
     #[brw(magic = 0x1i32)]
@@ -29,7 +28,7 @@ enum LayerEntryType {
         render_shadow_enabled: u8,
         render_light_shadow_enabled: u8,
         padding: u8,
-        render_model_clip_range: f32
+        render_model_clip_range: f32,
     },
     #[brw(magic = 0x2i32)]
     Attribute,
@@ -41,16 +40,16 @@ enum LayerEntryType {
     PositionMarker,
     #[brw(magic = 0x6i32)]
     SharedGroup,
-    Sound = 0x7, // //
-    EventNPC = 0x8, // //
+    Sound = 0x7, // //
+    EventNPC = 0x8, // //
     BattleNPC = 0x9, // //
     RoutePath = 0xA,
     Character = 0xB,
-    Aetheryte = 0xC, // //
-    EnvSet = 0xD, // //
-    Gathering = 0xE, // //
+    Aetheryte = 0xC, // //
+    EnvSet = 0xD, // //
+    Gathering = 0xE, // //
     HelperObject = 0xF, //
-    Treasure = 0x10, // //
+    Treasure = 0x10, // //
     Clip = 0x11,
     ClipCtrlPoint = 0x12,
     ClipCamera = 0x13,
@@ -73,13 +72,13 @@ enum LayerEntryType {
     CutAssetOnlySelectable = 0x24,
     Player = 0x25,
     Monster = 0x26,
-    Weapon = 0x27, //
-    PopRange = 0x28, // //
+    Weapon = 0x27, //
+    PopRange = 0x28, // //
     ExitRange = 0x29, // //
     Lvb = 0x2A,
-    MapRange = 0x2B, // //
+    MapRange = 0x2B, // //
     NaviMeshRange = 0x2C, // //
-    EventObject = 0x2D, // //
+    EventObject = 0x2D, // //
     DemiHuman = 0x2E,
     EnvLocation = 0x2F, // //
     ControlPoint = 0x30,
@@ -92,21 +91,21 @@ enum LayerEntryType {
     ScenarioExd = 0x37,
     ScenarioText = 0x38,
     CollisionBox = 0x39, // //
-    DoorRange = 0x3A, //
-    LineVFX = 0x3B, // //
+    DoorRange = 0x3A, //
+    LineVFX = 0x3B, // //
     SoundEnvSet = 0x3C,
     CutActionTimeline = 0x3D,
     CharaScene = 0x3E,
     CutAction = 0x3F,
     EquipPreset = 0x40,
-    ClientPath = 0x41, // //
-    ServerPath = 0x42, // //
-    GimmickRange = 0x43, // //
-    TargetMarker = 0x44, // //
-    ChairMarker = 0x45, // //
+    ClientPath = 0x41, // //
+    ServerPath = 0x42, // //
+    GimmickRange = 0x43, // //
+    TargetMarker = 0x44, // //
+    ChairMarker = 0x45, // //
     ClickableRange = 0x46, //
-    PrefetchRange = 0x47, // //
-    FateRange = 0x48, // //
+    PrefetchRange = 0x47, // //
+    FateRange = 0x48, // //
     PartyMember = 0x49,
     KeepRange = 0x4A, //
     SphereCastRange = 0x4B,
@@ -120,8 +119,7 @@ enum LayerEntryType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum DoorState
-{
+enum DoorState {
     Auto = 0x1,
     Open = 0x2,
     Closed = 0x3,
@@ -130,8 +128,7 @@ enum DoorState {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum RotationState
-{
+enum RotationState {
     Rounding = 0x1,
     Stopped = 0x2,
 }
@@ -139,8 +136,7 @@ enum RotationState {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum TransformState
-{
+enum TransformState {
     Play = 0x0,
     Stop = 0x1,
     Replay = 0x2,
@@ -150,8 +146,7 @@ enum TransformState {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum ColourState
-{
+enum ColourState {
     Play = 0x0,
     Stop = 0x1,
     Replay = 0x2,
@@ -161,8 +156,7 @@ enum ColourState {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum TriggerBoxShape
-{
+enum TriggerBoxShape {
     Box = 0x1,
     Sphere = 0x2,
     Cylinder = 0x3,
@@ -174,8 +168,7 @@ enum TriggerBoxShape {
 #[binrw]
 #[brw(repr = i32)]
 #[derive(Debug, PartialEq)]
-enum ModelCollisionType
-{
+enum ModelCollisionType {
     None = 0x0,
     Replace = 0x1,
     Box = 0x2,
@@ -184,8 +177,7 @@ enum ModelCollisionType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum LightType
-{
+enum LightType {
     None = 0x0,
     Directional = 0x1,
     Point = 0x2,
@@ -198,8 +190,7 @@ enum LightType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum PointLightType
-{
+enum PointLightType {
     Sphere = 0x0,
     Hemisphere = 0x1,
 }
@@ -207,8 +198,7 @@ enum PointLightType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum PositionMarkerType
-{
+enum PositionMarkerType {
     DebugZonePop = 0x1,
     DebugJump = 0x2,
     NaviMesh = 0x3,
@@ -218,8 +208,7 @@ enum PositionMarkerType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum EnvSetShape
-{
+enum EnvSetShape {
     Ellipsoid = 0x1,
     Cuboid = 0x2,
     Cylinder = 0x3,
@@ -228,8 +217,7 @@ enum EnvSetShape {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum HelperObjectType
-{
+enum HelperObjectType {
     ProxyActor = 0x0,
     NullObject = 0x1,
 }
@@ -237,8 +225,7 @@ enum HelperObjectType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum TargetType
-{
+enum TargetType {
     None = 0x0,
     ENPCInstanceID = 0x1,
     Player = 0x2,
@@ -257,8 +244,7 @@ enum TargetType {
 
 #[binread]
 #[derive(Debug, PartialEq)]
-enum PopType
-{
+enum PopType {
     #[br(magic = 0x1u8)]
     PC = 0x1,
     #[br(magic = 0x2u8)]
@@ -270,16 +256,14 @@ enum PopType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum ExitType
-{
+enum ExitType {
     ZoneLine = 0x1,
 }
 
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum RangeType
-{
+enum RangeType {
     Type01 = 0x1,
     Type02 = 0x2,
     Type03 = 0x3,
@@ -292,8 +276,7 @@ enum RangeType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum LineStyle
-{
+enum LineStyle {
     Red = 0x1,
     Blue = 0x2,
 }
@@ -301,8 +284,7 @@ enum LineStyle {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum GimmickType
-{
+enum GimmickType {
     Fishing = 0x1,
     Content = 0x2,
     Room = 0x3,
@@ -311,8 +293,7 @@ enum GimmickType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum TargetMarkerType
-{
+enum TargetMarkerType {
     UiTarget = 0x0,
     UiNameplate = 0x1,
     LookAt = 0x2,
@@ -324,8 +305,7 @@ enum TargetMarkerType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum ObjectType
-{
+enum ObjectType {
     ObjectChair = 0x0,
     ObjectBed = 0x1,
 }
@@ -333,8 +313,7 @@ enum ObjectType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum CharacterSize
-{
+enum CharacterSize {
     DefaultSize = 0x0,
     VerySmall = 0x1,
     Small = 0x2,
@@ -346,8 +325,7 @@ enum CharacterSize {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum DrawHeadParts
-{
+enum DrawHeadParts {
     Default = 0x0,
     ForceOn = 0x1,
     ForceOff = 0x2,
@@ -356,8 +334,7 @@ enum DrawHeadParts {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum RotationType
-{
+enum RotationType {
     NoRotate = 0x0,
     AllAxis = 0x1,
     YAxisOnly = 0x2,
@@ -366,8 +343,7 @@ enum RotationType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum MovePathMode
-{
+enum MovePathMode {
     None = 0x0,
     SharedGroupAction = 0x1,
     Timeline = 0x2,
@@ -376,8 +352,7 @@ enum MovePathMode {
 #[binrw]
 #[brw(repr = i32)]
 #[derive(Debug, PartialEq)]
-enum LayerSetReferencedType
-{
+enum LayerSetReferencedType {
     All = 0x0,
     Include = 0x1,
     Exclude = 0x2,
@@ -387,8 +362,7 @@ enum LayerSetReferencedType {
 #[binrw]
 #[brw(repr = u8)]
 #[derive(Debug, PartialEq)]
-enum SoundEffectType
-{
+enum SoundEffectType {
     Point = 0x3,
     PointDir = 0x4,
     Line = 0x5,
@@ -424,7 +398,7 @@ struct LayerHeader {
     is_temporary: u8,
     is_housing: u8,
     version_mask: u16,
-
+
     #[br(pad_before = 4)]
     ob_set_referenced_list: i32,
     ob_set_referenced_list_count: i32,
@@ -439,7 +413,7 @@ struct LayerHeader {
 struct LayerSetReferencedList {
     referenced_type: LayerSetReferencedType,
     layer_sets: i32,
-    layer_set_count: i32
+    layer_set_count: i32,
 }
 
 #[binread]
@@ -450,7 +424,7 @@ struct LgbHeader {
     #[br(count = 4)]
     file_id: Vec<u8>,
     file_size: i32,
-    total_chunk_count: i32
+    total_chunk_count: i32,
 }
 
 #[binread]
@@ -464,7 +438,7 @@ struct LayerChunk {
     layer_group_id: i32,
     name_offset: u32,
     layer_offset: i32,
-    layer_count: i32
+    layer_count: i32,
 }
 
 #[binread]
@@ -474,13 +448,11 @@ struct LayerChunk {
 struct InstanceObject {
     asset_type: LayerEntryType,
     instance_id: u32,
-    name_offset: u32
+    name_offset: u32,
 }
 
 #[derive(Debug)]
-pub struct Layer {
-
-}
+pub struct Layer {}
 
 impl Layer {
     /// Reads an existing PBD file
@@ -491,35 +463,51 @@ impl Layer {
         if file_header.file_size < 0 || file_header.total_chunk_count < 0 {
             return None;
         }
-
+
         let chunk_header = LayerChunk::read(&mut cursor).unwrap();
-
+
         let old_pos = cursor.position();
 
         let mut layer_offsets = vec![0i32; chunk_header.layer_count as usize];
-        for i in 0.. chunk_header.layer_count {
+        for i in 0..chunk_header.layer_count {
             layer_offsets[i as usize] = cursor.read_le::<i32>().unwrap();
         }
 
-        for i in 0.. chunk_header.layer_count {
-            cursor.seek(SeekFrom::Start(old_pos + layer_offsets[i as usize] as u64)).unwrap();
+        for i in 0..chunk_header.layer_count {
+            cursor
+                .seek(SeekFrom::Start(old_pos + layer_offsets[i as usize] as u64))
+                .unwrap();
 
             let old_pos = cursor.position();
 
             let header = LayerHeader::read(&mut cursor).unwrap();
 
-            cursor.seek(SeekFrom::Start(old_pos + header.instance_object_offset as u64)).unwrap();
+            cursor
+                .seek(SeekFrom::Start(
+                    old_pos + header.instance_object_offset as u64,
+                ))
+                .unwrap();
 
             let mut instance_offsets = vec![0i32; header.instance_object_count as usize];
             for i in 0..header.instance_object_count {
                 instance_offsets[i as usize] = cursor.read_le::<i32>().unwrap();
             }
 
-            cursor.seek(SeekFrom::Start(old_pos + header.layer_set_referenced_list_offset as u64)).unwrap();
+            cursor
+                .seek(SeekFrom::Start(
+                    old_pos + header.layer_set_referenced_list_offset as u64,
+                ))
+                .unwrap();
             LayerSetReferencedList::read(&mut cursor).unwrap();
 
             for i in 0..header.instance_object_count {
-                cursor.seek(SeekFrom::Start(old_pos + header.instance_object_offset as u64 + instance_offsets[i as usize] as u64)).unwrap();
+                cursor
+                    .seek(SeekFrom::Start(
+                        old_pos
+                            + header.instance_object_offset as u64
+                            + instance_offsets[i as usize] as u64,
+                    ))
+                    .unwrap();
 
                 let instance_object = InstanceObject::read(&mut cursor).unwrap();
                 println!("{:#?}", instance_object);
@@ -549,4 +537,3 @@ mod tests {
         Layer::from_existing(&read(d).unwrap());
     }
 }
-
diff --git a/src/lib.rs b/src/lib.rs
index 46d3a10..1aa35aa 100755
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -6,7 +6,7 @@
 extern crate core;
 
 /// Represents a continuous block of memory which is not owned, and comes either from an in-memory location or from a file.
-pub type ByteSpan<'a> = &'a[u8];
+pub type ByteSpan<'a> = &'a [u8];
 
 /// Represents a continuous block of memory which is owned.
 pub type ByteBuffer = Vec<u8>;
diff --git a/src/log.rs b/src/log.rs
index aeefe59..ed213f0 100644
--- a/src/log.rs
+++ b/src/log.rs
@@ -3,9 +3,9 @@
 
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::BinRead;
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::BinRead;
 
 #[binrw]
 #[allow(dead_code)]
@@ -121,7 +121,7 @@ impl ChatLog {
         if header.content_size as usize > buffer.len() || header.file_size as usize > buffer.len() {
             return None;
         }
-
+
         let content_offset = (8 + header.file_size * 4) as u64; // beginning of content offset
 
@@ -143,10 +143,9 @@ impl ChatLog {
             };
 
             // TODO: handle the coloring properly, in some way
-            entry.message = String::from_utf8_lossy(
-                &buffer[cursor.position() as usize..next_offset],
-            )
-            .to_string();
+            entry.message =
+                String::from_utf8_lossy(&buffer[cursor.position() as usize..next_offset])
+                    .to_string();
 
             entries.push(entry);
         }
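`ChatLog` above walks an offset table and slices each message out of the content block. A usage sketch, assuming the usual `from_existing` constructor pattern of this crate and public `entries`/`message` fields (the filename is illustrative):

```rust
use physis::log::ChatLog;

fn main() {
    // A LOG file from the client's log folder.
    let buffer = std::fs::read("00000000.log").unwrap();

    let log = ChatLog::from_existing(&buffer).unwrap();
    for entry in &log.entries {
        println!("{}", entry.message);
    }
}
```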
diff --git a/src/model.rs b/src/model.rs
index 8822fdd..d000dea 100755
--- a/src/model.rs
+++ b/src/model.rs
@@ -6,13 +6,16 @@
 use std::io::{Cursor, Seek, SeekFrom};
 use std::mem::size_of;
 
-use binrw::{binrw, BinWrite, BinWriterExt};
 use binrw::BinRead;
 use binrw::BinReaderExt;
+use binrw::{binrw, BinWrite, BinWriterExt};
 
-use crate::{ByteBuffer, ByteSpan};
 use crate::common_file_operations::{read_bool_from, write_bool_as};
-use crate::model_vertex_declarations::{vertex_element_parser, VERTEX_ELEMENT_SIZE, vertex_element_writer, VertexDeclaration, VertexType, VertexUsage};
+use crate::model_vertex_declarations::{
+    vertex_element_parser, vertex_element_writer, VertexDeclaration, VertexType, VertexUsage,
+    VERTEX_ELEMENT_SIZE,
+};
+use crate::{ByteBuffer, ByteSpan};
 
 pub const NUM_VERTICES: u32 = 17;
 
@@ -46,7 +49,7 @@ pub struct ModelFileHeader {
 
 #[binrw]
 #[brw(repr = u8)]
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 enum ModelFlags1 {
     DustOcclusionEnabled = 0x80,
     SnowOcclusionEnabled = 0x40,
@@ -126,7 +129,7 @@ pub struct ModelHeader {
     unknown7: u16,
     unknown8: u16,
     #[brw(pad_after = 6)]
-    unknown9: u16
+    unknown9: u16,
 }
 
 #[binrw]
@@ -221,7 +224,7 @@ struct BoneTableV2 {
 
     // align to 4 bytes
     // TODO: use br align_to?
     #[br(if(bone_count % 2 == 0))]
-    padding: u16
+    padding: u16,
 }
 
 #[binrw]
@@ -243,7 +246,7 @@ struct TerrainShadowMesh {
     submesh_index: u16,
     submesh_count: u16,
     vertex_buffer_stride: u8,
-    padding: u8
+    padding: u8,
 }
 
 #[binrw]
@@ -253,7 +256,7 @@ struct TerrainShadowSubmesh {
     index_offset: u32,
     index_count: u32,
     unknown1: u16,
-    unknown2: u16
+    unknown2: u16,
 }
 
 #[binrw]
@@ -262,16 +265,16 @@ struct TerrainShadowSubmesh {
 struct ShapeStruct {
     string_offset: u32,
     shape_mesh_start_index: [u16; 3],
-    shape_mesh_count: [u16; 3]
+    shape_mesh_count: [u16; 3],
 }
 
 #[binrw]
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 #[allow(dead_code)]
 struct ShapeMesh {
     mesh_index_offset: u32,
     shape_value_count: u32,
-    shape_value_offset: u32
+    shape_value_offset: u32,
 }
 
 #[binrw]
@@ -279,7 +282,7 @@ struct ShapeMesh {
 #[allow(dead_code)]
 struct ShapeValue {
     base_indices_index: u16,
-    replacing_vertex_index: u16
+    replacing_vertex_index: u16,
 }
 
 #[binrw]
@@ -362,7 +365,7 @@ struct ModelData {
 }
 
 #[binrw]
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, PartialEq)]
 #[allow(dead_code)]
 struct ElementId {
     element_id: u32,
@@ -371,7 +374,7 @@ struct ElementId {
     rotate: [f32; 3],
 }
 
-#[derive(Clone, Copy, PartialEq)]
+#[derive(Clone, Copy, PartialEq)]
 #[repr(C)]
 pub struct Vertex {
     pub position: [f32; 3],
@@ -405,7 +408,7 @@ impl Default for Vertex {
 #[repr(C)]
 pub struct NewShapeValue {
     pub base_index: u32,
-    pub replacing_vertex: Vertex
+    pub replacing_vertex: Vertex,
 }
 
 #[derive(Clone, Copy)]
@@ -413,13 +416,13 @@ pub struct NewShapeValue {
 pub struct SubMesh {
     submesh_index: usize,
     pub index_count: u32,
-    pub index_offset: u32
+    pub index_offset: u32,
 }
 
 #[derive(Clone)]
 pub struct Shape {
     pub name: String,
-    pub morphed_vertices: Vec<Vertex>
+    pub morphed_vertices: Vec<Vertex>,
 }
 
 /// Corresponds to a "Mesh" in an LOD
@@ -430,7 +433,7 @@ pub struct Part {
     pub indices: Vec<u16>,
     pub material_index: u16,
     pub submeshes: Vec<SubMesh>,
-    pub shapes: Vec<Shape>
+    pub shapes: Vec<Shape>,
 }
 
 #[derive(Clone)]
@@ -453,8 +456,12 @@ impl MDL {
         let mut cursor = Cursor::new(buffer);
         let model_file_header = ModelFileHeader::read(&mut cursor).ok()?;
 
-        let model = ModelData::read_args(&mut cursor, binrw::args! { file_header: &model_file_header }).ok()?;
-
+        let model = ModelData::read_args(
+            &mut cursor,
+            binrw::args! { file_header: &model_file_header },
+        )
+        .ok()?;
+
         let mut affected_bone_names = vec![];
 
         for offset in &model.bone_name_offsets {
{ file_header: &model_file_header }, + ) + .ok()?; + let mut affected_bone_names = vec![]; for offset in &model.bone_name_offsets { @@ -507,151 +514,173 @@ impl MDL { .seek(SeekFrom::Start( (model.lods[i as usize].vertex_data_offset + model.meshes[j as usize].vertex_buffer_offsets - [element.stream as usize] + [element.stream as usize] + element.offset as u32 + model.meshes[j as usize].vertex_buffer_strides - [element.stream as usize] - as u32 - * k as u32) as u64, + [element.stream as usize] + as u32 + * k as u32) as u64, )) .ok()?; match element.vertex_usage { - VertexUsage::Position => { - match element.vertex_type { - VertexType::Single4 => { - vertices[k as usize].position.clone_from_slice(&MDL::read_single4(&mut cursor).unwrap()[0..3]); - } - VertexType::Half4 => { - vertices[k as usize].position.clone_from_slice(&MDL::read_half4(&mut cursor).unwrap()[0..3]); - } - VertexType::Single3 => { - vertices[k as usize].position = MDL::read_single3(&mut cursor).unwrap(); - } - _ => { - panic!("Unexpected vertex type for position: {:#?}", element.vertex_type); - } + VertexUsage::Position => match element.vertex_type { + VertexType::Single4 => { + vertices[k as usize].position.clone_from_slice( + &MDL::read_single4(&mut cursor).unwrap()[0..3], + ); } - } - VertexUsage::BlendWeights => { - match element.vertex_type { - VertexType::ByteFloat4 => { - vertices[k as usize].bone_weight = MDL::read_byte_float4(&mut cursor).unwrap(); - } - VertexType::Byte4 => { - let bytes = MDL::read_byte4(&mut cursor).unwrap(); - vertices[k as usize].bone_weight = [ - f32::from(bytes[0]), - f32::from(bytes[1]), - f32::from(bytes[2]), - f32::from(bytes[3]) - ]; - } - VertexType::UnsignedShort4 => { - let bytes = MDL::read_unsigned_short4(&mut cursor).unwrap(); - vertices[k as usize].bone_weight = [ - f32::from(bytes[0]), - f32::from(bytes[1]), - f32::from(bytes[2]), - f32::from(bytes[3]) - ]; - } - _ => { - panic!("Unexpected vertex type for blendweight: {:#?}", element.vertex_type); - } + VertexType::Half4 => { + vertices[k as usize].position.clone_from_slice( + &MDL::read_half4(&mut cursor).unwrap()[0..3], + ); } - } - VertexUsage::BlendIndices => { - match element.vertex_type { - VertexType::Byte4 => { - vertices[k as usize].bone_id = MDL::read_byte4(&mut cursor).unwrap(); - } - VertexType::UnsignedShort4 => { - let shorts = MDL::read_unsigned_short4(&mut cursor).unwrap(); - vertices[k as usize].bone_id = [ - shorts[0] as u8, - shorts[1] as u8, - shorts[2] as u8, - shorts[3] as u8 - ]; - } - _ => { - panic!("Unexpected vertex type for blendindice: {:#?}", element.vertex_type); - } + VertexType::Single3 => { + vertices[k as usize].position = + MDL::read_single3(&mut cursor).unwrap(); } - } - VertexUsage::Normal => { - match element.vertex_type { - VertexType::Half4 => { - vertices[k as usize].normal.clone_from_slice(&MDL::read_half4(&mut cursor).unwrap()[0..3]); - } - VertexType::Single3 => { - vertices[k as usize].normal = MDL::read_single3(&mut cursor).unwrap(); - } - _ => { - panic!("Unexpected vertex type for normal: {:#?}", element.vertex_type); - } + _ => { + panic!( + "Unexpected vertex type for position: {:#?}", + element.vertex_type + ); } - } - VertexUsage::UV => { - match element.vertex_type { - VertexType::ByteFloat4 => { - let combined = MDL::read_byte_float4(&mut cursor).unwrap(); + }, + VertexUsage::BlendWeights => match element.vertex_type { + VertexType::ByteFloat4 => { + vertices[k as usize].bone_weight = + MDL::read_byte_float4(&mut cursor).unwrap(); + } + VertexType::Byte4 => { + let bytes = 
MDL::read_byte4(&mut cursor).unwrap(); + vertices[k as usize].bone_weight = [ + f32::from(bytes[0]), + f32::from(bytes[1]), + f32::from(bytes[2]), + f32::from(bytes[3]), + ]; + } + VertexType::UnsignedShort4 => { + let bytes = MDL::read_unsigned_short4(&mut cursor).unwrap(); + vertices[k as usize].bone_weight = [ + f32::from(bytes[0]), + f32::from(bytes[1]), + f32::from(bytes[2]), + f32::from(bytes[3]), + ]; + } + _ => { + panic!( + "Unexpected vertex type for blendweight: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::BlendIndices => match element.vertex_type { + VertexType::Byte4 => { + vertices[k as usize].bone_id = + MDL::read_byte4(&mut cursor).unwrap(); + } + VertexType::UnsignedShort4 => { + let shorts = MDL::read_unsigned_short4(&mut cursor).unwrap(); + vertices[k as usize].bone_id = [ + shorts[0] as u8, + shorts[1] as u8, + shorts[2] as u8, + shorts[3] as u8, + ]; + } + _ => { + panic!( + "Unexpected vertex type for blendindice: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::Normal => match element.vertex_type { + VertexType::Half4 => { + vertices[k as usize].normal.clone_from_slice( + &MDL::read_half4(&mut cursor).unwrap()[0..3], + ); + } + VertexType::Single3 => { + vertices[k as usize].normal = + MDL::read_single3(&mut cursor).unwrap(); + } + _ => { + panic!( + "Unexpected vertex type for normal: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::UV => match element.vertex_type { + VertexType::ByteFloat4 => { + let combined = MDL::read_byte_float4(&mut cursor).unwrap(); - vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); - vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); - } - VertexType::Half4 => { - let combined = MDL::read_half4(&mut cursor).unwrap(); - - vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); - vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); - } - VertexType::Single4 => { - let combined = MDL::read_single4(&mut cursor).unwrap(); - - vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); - vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); - } - VertexType::Half2 => { - let combined = MDL::read_half2(&mut cursor).unwrap(); - - vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); - } - _ => { - panic!("Unexpected vertex type for uv: {:#?}", element.vertex_type); - } + vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); + vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); } - } - VertexUsage::BiTangent => { - match element.vertex_type { - VertexType::ByteFloat4 => { - vertices[k as usize].bitangent = MDL::read_tangent(&mut cursor).unwrap(); - } - _ => { - panic!("Unexpected vertex type for bitangent: {:#?}", element.vertex_type); - } + VertexType::Half4 => { + let combined = MDL::read_half4(&mut cursor).unwrap(); + + vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); + vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); } - } + VertexType::Single4 => { + let combined = MDL::read_single4(&mut cursor).unwrap(); + + vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); + vertices[k as usize].uv1.clone_from_slice(&combined[2..4]); + } + VertexType::Half2 => { + let combined = MDL::read_half2(&mut cursor).unwrap(); + + vertices[k as usize].uv0.clone_from_slice(&combined[0..2]); + } + _ => { + panic!( + "Unexpected vertex type for uv: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::BiTangent => match element.vertex_type { + VertexType::ByteFloat4 => { + vertices[k as usize].bitangent = + MDL::read_tangent(&mut cursor).unwrap(); 
+ } + _ => { + panic!( + "Unexpected vertex type for bitangent: {:#?}", + element.vertex_type + ); + } + }, VertexUsage::Tangent => { match element.vertex_type { // Used for... terrain..? VertexType::ByteFloat4 => {} _ => { - panic!("Unexpected vertex type for tangent: {:#?}", element.vertex_type); + panic!( + "Unexpected vertex type for tangent: {:#?}", + element.vertex_type + ); } } } - VertexUsage::Color => { - match element.vertex_type { - VertexType::ByteFloat4 => { - vertices[k as usize].color = MDL::read_byte_float4(&mut cursor).unwrap(); - } - _ => { - panic!("Unexpected vertex type for color: {:#?}", element.vertex_type); - } + VertexUsage::Color => match element.vertex_type { + VertexType::ByteFloat4 => { + vertices[k as usize].color = + MDL::read_byte_float4(&mut cursor).unwrap(); } - } + _ => { + panic!( + "Unexpected vertex type for color: {:#?}", + element.vertex_type + ); + } + }, } } } @@ -671,12 +700,17 @@ impl MDL { indices.push(cursor.read_le::().ok()?); } - let mut submeshes: Vec = Vec::with_capacity(model.meshes[j as usize].submesh_count as usize); + let mut submeshes: Vec = + Vec::with_capacity(model.meshes[j as usize].submesh_count as usize); for i in 0..model.meshes[j as usize].submesh_count { submeshes.push(SubMesh { submesh_index: model.meshes[j as usize].submesh_index as usize + i as usize, - index_count: model.submeshes[model.meshes[j as usize].submesh_index as usize + i as usize].index_count, - index_offset: model.submeshes[model.meshes[j as usize].submesh_index as usize + i as usize].index_offset, + index_count: model.submeshes + [model.meshes[j as usize].submesh_index as usize + i as usize] + .index_count, + index_offset: model.submeshes + [model.meshes[j as usize].submesh_index as usize + i as usize] + .index_offset, }); } @@ -684,23 +718,45 @@ impl MDL { for shape in &model.shapes { // Adapted from https://github.com/xivdev/Penumbra/blob/master/Penumbra/Import/Models/Export/MeshExporter.cs - let affected_shape_mesh: Vec<&ShapeMesh> = model.shape_meshes.iter() + let affected_shape_mesh: Vec<&ShapeMesh> = model + .shape_meshes + .iter() .skip(shape.shape_mesh_start_index[i as usize] as usize) .take(shape.shape_mesh_count[i as usize] as usize) - .filter(|shape_mesh| shape_mesh.mesh_index_offset == model.meshes[j as usize].start_index).collect(); + .filter(|shape_mesh| { + shape_mesh.mesh_index_offset == model.meshes[j as usize].start_index + }) + .collect(); - let shape_values: Vec<&ShapeValue> = affected_shape_mesh.iter() - .flat_map(|shape_mesh| model.shape_values.iter().skip(shape_mesh.shape_value_offset as usize).take(shape_mesh.shape_value_count as usize)) - .filter(|shape_value| shape_value.base_indices_index >= model.meshes[j as usize].start_index as u16 && shape_value.base_indices_index < (model.meshes[j as usize].start_index + model.meshes[j as usize].index_count) as u16) + let shape_values: Vec<&ShapeValue> = affected_shape_mesh + .iter() + .flat_map(|shape_mesh| { + model + .shape_values + .iter() + .skip(shape_mesh.shape_value_offset as usize) + .take(shape_mesh.shape_value_count as usize) + }) + .filter(|shape_value| { + shape_value.base_indices_index + >= model.meshes[j as usize].start_index as u16 + && shape_value.base_indices_index + < (model.meshes[j as usize].start_index + + model.meshes[j as usize].index_count) + as u16 + }) .collect(); let mut morphed_vertices = vec![Vertex::default(); vertices.len()]; if !shape_values.is_empty() { for shape_value in shape_values { - let old_vertex = vertices[indices[shape_value.base_indices_index as usize] 
as usize]; - let new_vertex = vertices[shape_value.replacing_vertex_index as usize - model.meshes[j as usize].start_index as usize]; - let vertex = &mut morphed_vertices[indices[shape_value.base_indices_index as usize] as usize]; + let old_vertex = + vertices[indices[shape_value.base_indices_index as usize] as usize]; + let new_vertex = vertices[shape_value.replacing_vertex_index as usize + - model.meshes[j as usize].start_index as usize]; + let vertex = &mut morphed_vertices + [indices[shape_value.base_indices_index as usize] as usize]; vertex.position[0] = new_vertex.position[0] - old_vertex.position[0]; vertex.position[1] = new_vertex.position[1] - old_vertex.position[1]; @@ -719,12 +775,19 @@ impl MDL { shapes.push(Shape { name: string, - morphed_vertices + morphed_vertices, }); } } - parts.push(Part { mesh_index: j, vertices, indices, material_index, submeshes, shapes }); + parts.push(Part { + mesh_index: j, + vertices, + indices, + material_index, + submeshes, + shapes, + }); } lods.push(Lod { parts }); @@ -735,11 +798,18 @@ impl MDL { model_data: model, lods, affected_bone_names, - material_names + material_names, }) } - pub fn replace_vertices(&mut self, lod_index: usize, part_index: usize, vertices: &[Vertex], indices: &[u16], submeshes: &[SubMesh]) { + pub fn replace_vertices( + &mut self, + lod_index: usize, + part_index: usize, + vertices: &[Vertex], + indices: &[u16], + submeshes: &[SubMesh], + ) { let part = &mut self.lods[lod_index].parts[part_index]; part.vertices = Vec::from(vertices); @@ -747,8 +817,10 @@ impl MDL { for (i, submesh) in part.submeshes.iter().enumerate() { if i < submeshes.len() { - self.model_data.submeshes[submesh.submesh_index].index_offset = submeshes[i].index_offset; - self.model_data.submeshes[submesh.submesh_index].index_count = submeshes[i].index_count; + self.model_data.submeshes[submesh.submesh_index].index_offset = + submeshes[i].index_offset; + self.model_data.submeshes[submesh.submesh_index].index_count = + submeshes[i].index_count; } } @@ -773,26 +845,38 @@ impl MDL { self.update_headers(); } - pub fn add_shape_mesh(&mut self, lod_index: usize, shape_index: usize, shape_mesh_index: usize, part_index: usize, shape_values: &[NewShapeValue]) { + pub fn add_shape_mesh( + &mut self, + lod_index: usize, + shape_index: usize, + shape_mesh_index: usize, + part_index: usize, + shape_values: &[NewShapeValue], + ) { let part = &mut self.lods[lod_index].parts[part_index]; // TODO: this is assuming they are added in order if shape_mesh_index == 0 { - self.model_data.shapes[shape_index].shape_mesh_start_index[lod_index] = self.model_data.shape_meshes.len() as u16; + self.model_data.shapes[shape_index].shape_mesh_start_index[lod_index] = + self.model_data.shape_meshes.len() as u16; } self.model_data.shape_meshes.push(ShapeMesh { mesh_index_offset: self.model_data.meshes[part.mesh_index as usize].start_index, shape_value_count: shape_values.len() as u32, - shape_value_offset: self.model_data.shape_values.len() as u32 + shape_value_offset: self.model_data.shape_values.len() as u32, }); for shape_value in shape_values { part.vertices.push(shape_value.replacing_vertex); self.model_data.shape_values.push(ShapeValue { - base_indices_index: self.model_data.meshes[part.mesh_index as usize].start_index as u16 + shape_value.base_index as u16, - replacing_vertex_index: self.model_data.meshes[part.mesh_index as usize].start_index as u16 + (part.vertices.len() - 1) as u16 + base_indices_index: self.model_data.meshes[part.mesh_index as usize].start_index + as u16 + + 
shape_value.base_index as u16, + replacing_vertex_index: self.model_data.meshes[part.mesh_index as usize].start_index + as u16 + + (part.vertices.len() - 1) as u16, }) } @@ -807,15 +891,18 @@ impl MDL { let mut vertex_offset = 0; for j in self.model_data.lods[i as usize].mesh_index - ..self.model_data.lods[i as usize].mesh_index + self.model_data.lods[i as usize].mesh_count + ..self.model_data.lods[i as usize].mesh_index + + self.model_data.lods[i as usize].mesh_count { let mesh = &mut self.model_data.meshes[j as usize]; - mesh.start_index = self.model_data.submeshes[mesh.submesh_index as usize].index_offset; + mesh.start_index = + self.model_data.submeshes[mesh.submesh_index as usize].index_offset; for i in 0..mesh.vertex_stream_count as usize { mesh.vertex_buffer_offsets[i] = vertex_offset; - vertex_offset += mesh.vertex_count as u32 * mesh.vertex_buffer_strides[i] as u32; + vertex_offset += + mesh.vertex_count as u32 * mesh.vertex_buffer_strides[i] as u32; } } } @@ -825,15 +912,14 @@ impl MDL { let mut total_index_buffer_size = 0; // still slightly off? - for j in lod.mesh_index - ..lod.mesh_index + lod.mesh_count - { + for j in lod.mesh_index..lod.mesh_index + lod.mesh_count { let vertex_count = self.model_data.meshes[j as usize].vertex_count; let index_count = self.model_data.meshes[j as usize].index_count; let mut total_vertex_stride: u32 = 0; for i in 0..self.model_data.meshes[j as usize].vertex_stream_count as usize { - total_vertex_stride += self.model_data.meshes[j as usize].vertex_buffer_strides[i] as u32; + total_vertex_stride += + self.model_data.meshes[j as usize].vertex_buffer_strides[i] as u32; } total_vertex_buffer_size += vertex_count as u32 * total_vertex_stride; @@ -912,97 +998,117 @@ impl MDL { for (l, lod) in self.lods.iter().enumerate() { for part in lod.parts.iter() { - let declaration = &self.model_data.header.vertex_declarations[part.mesh_index as usize]; + let declaration = + &self.model_data.header.vertex_declarations[part.mesh_index as usize]; for (k, vert) in part.vertices.iter().enumerate() { for element in &declaration.elements { cursor .seek(SeekFrom::Start( (self.model_data.lods[l].vertex_data_offset - + self.model_data.meshes[part.mesh_index as usize].vertex_buffer_offsets - [element.stream as usize] + + self.model_data.meshes[part.mesh_index as usize] + .vertex_buffer_offsets + [element.stream as usize] + element.offset as u32 - + self.model_data.meshes[part.mesh_index as usize].vertex_buffer_strides - [element.stream as usize] - as u32 - * k as u32) as u64, + + self.model_data.meshes[part.mesh_index as usize] + .vertex_buffer_strides + [element.stream as usize] + as u32 + * k as u32) as u64, )) .ok()?; match element.vertex_usage { - VertexUsage::Position => { - match element.vertex_type { - VertexType::Half4 => { - MDL::write_half4(&mut cursor, &MDL::pad_slice(&vert.position, 1.0)).ok()?; - } - VertexType::Single3 => { - MDL::write_single3(&mut cursor, &vert.position).ok()?; - } - _ => { - panic!("Unexpected vertex type for position: {:#?}", element.vertex_type); - } + VertexUsage::Position => match element.vertex_type { + VertexType::Half4 => { + MDL::write_half4( + &mut cursor, + &MDL::pad_slice(&vert.position, 1.0), + ) + .ok()?; } - } - VertexUsage::BlendWeights => { - match element.vertex_type { - VertexType::ByteFloat4 => { - MDL::write_byte_float4(&mut cursor, &vert.bone_weight).ok()?; - } - _ => { - panic!("Unexpected vertex type for blendweight: {:#?}", element.vertex_type); - } + VertexType::Single3 => { + MDL::write_single3(&mut cursor, 
&vert.position).ok()?; } - } - VertexUsage::BlendIndices => { - match element.vertex_type { - VertexType::Byte4 => { - MDL::write_byte4(&mut cursor, &vert.bone_id).ok()?; - } - _ => { - panic!("Unexpected vertex type for blendindice: {:#?}", element.vertex_type); - } + _ => { + panic!( + "Unexpected vertex type for position: {:#?}", + element.vertex_type + ); } - } - VertexUsage::Normal => { - match element.vertex_type { - VertexType::Half4 => { - MDL::write_half4(&mut cursor, &MDL::pad_slice(&vert.normal, 0.0)).ok()?; - } - VertexType::Single3 => { - MDL::write_single3(&mut cursor, &vert.normal).ok()?; - } - _ => { - panic!("Unexpected vertex type for normal: {:#?}", element.vertex_type); - } + }, + VertexUsage::BlendWeights => match element.vertex_type { + VertexType::ByteFloat4 => { + MDL::write_byte_float4(&mut cursor, &vert.bone_weight) + .ok()?; } - } - VertexUsage::UV => { - match element.vertex_type { - VertexType::Half4 => { - let combined = [vert.uv0[0], vert.uv0[1], vert.uv1[0], vert.uv1[1]]; + _ => { + panic!( + "Unexpected vertex type for blendweight: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::BlendIndices => match element.vertex_type { + VertexType::Byte4 => { + MDL::write_byte4(&mut cursor, &vert.bone_id).ok()?; + } + _ => { + panic!( + "Unexpected vertex type for blendindice: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::Normal => match element.vertex_type { + VertexType::Half4 => { + MDL::write_half4( + &mut cursor, + &MDL::pad_slice(&vert.normal, 0.0), + ) + .ok()?; + } + VertexType::Single3 => { + MDL::write_single3(&mut cursor, &vert.normal).ok()?; + } + _ => { + panic!( + "Unexpected vertex type for normal: {:#?}", + element.vertex_type + ); + } + }, + VertexUsage::UV => match element.vertex_type { + VertexType::Half4 => { + let combined = + [vert.uv0[0], vert.uv0[1], vert.uv1[0], vert.uv1[1]]; - MDL::write_half4(&mut cursor, &combined).ok()?; - } - VertexType::Single4 => { - let combined = [vert.uv0[0], vert.uv0[1], vert.uv1[0], vert.uv1[1]]; + MDL::write_half4(&mut cursor, &combined).ok()?; + } + VertexType::Single4 => { + let combined = + [vert.uv0[0], vert.uv0[1], vert.uv1[0], vert.uv1[1]]; - MDL::write_single4(&mut cursor, &combined).ok()?; - } - _ => { - panic!("Unexpected vertex type for uv: {:#?}", element.vertex_type); - } + MDL::write_single4(&mut cursor, &combined).ok()?; } - } - VertexUsage::BiTangent => { - match element.vertex_type { - VertexType::ByteFloat4 => { - MDL::write_tangent(&mut cursor, &vert.bitangent).ok()?; - } - _ => { - panic!("Unexpected vertex type for bitangent: {:#?}", element.vertex_type); - } + _ => { + panic!( + "Unexpected vertex type for uv: {:#?}", + element.vertex_type + ); } - } + }, + VertexUsage::BiTangent => match element.vertex_type { + VertexType::ByteFloat4 => { + MDL::write_tangent(&mut cursor, &vert.bitangent).ok()?; + } + _ => { + panic!( + "Unexpected vertex type for bitangent: {:#?}", + element.vertex_type + ); + } + }, VertexUsage::Tangent => { #[allow(clippy::match_single_binding)] // TODO match element.vertex_type { @@ -1010,20 +1116,24 @@ impl MDL { MDL::write_tangent(&mut cursor, &vert.binormal).ok()?; }*/ _ => { - panic!("Unexpected vertex type for tangent: {:#?}", element.vertex_type); + panic!( + "Unexpected vertex type for tangent: {:#?}", + element.vertex_type + ); } } } - VertexUsage::Color => { - match element.vertex_type { - VertexType::ByteFloat4 => { - MDL::write_byte_float4(&mut cursor, &vert.color).ok()?; - } - _ => { - panic!("Unexpected vertex type for color: {:#?}", 
element.vertex_type); - } + VertexUsage::Color => match element.vertex_type { + VertexType::ByteFloat4 => { + MDL::write_byte_float4(&mut cursor, &vert.color).ok()?; } - } + _ => { + panic!( + "Unexpected vertex type for color: {:#?}", + element.vertex_type + ); + } + }, } } } @@ -1031,8 +1141,8 @@ impl MDL { cursor .seek(SeekFrom::Start( (self.file_header.index_offsets[l] - + (self.model_data.meshes[part.mesh_index as usize].start_index * size_of::() as u32)) - as u64, + + (self.model_data.meshes[part.mesh_index as usize].start_index + * size_of::() as u32)) as u64, )) .ok()?; @@ -1166,7 +1276,13 @@ mod tests { for l in 0..old_mdl.lods.len() { for p in 0..old_mdl.lods[l].parts.len() { - mdl.replace_vertices(l, p, &old_mdl.lods[l].parts[p].vertices, &old_mdl.lods[l].parts[p].indices, &old_mdl.lods[l].parts[p].submeshes); + mdl.replace_vertices( + l, + p, + &old_mdl.lods[l].parts[p].vertices, + &old_mdl.lods[l].parts[p].indices, + &old_mdl.lods[l].parts[p].submeshes, + ); } } @@ -1186,9 +1302,15 @@ mod tests { // file header assert_eq!(mdl.file_header.version, 16777221); assert_eq!(mdl.file_header.stack_size, 816); - assert_eq!(mdl.file_header.stack_size, mdl.file_header.calculate_stack_size()); + assert_eq!( + mdl.file_header.stack_size, + mdl.file_header.calculate_stack_size() + ); assert_eq!(mdl.file_header.runtime_size, 12544); - assert_eq!(mdl.file_header.runtime_size, mdl.model_data.calculate_runtime_size()); + assert_eq!( + mdl.file_header.runtime_size, + mdl.model_data.calculate_runtime_size() + ); assert_eq!(mdl.file_header.vertex_declaration_count, 6); assert_eq!(mdl.file_header.material_count, 2); assert_eq!(mdl.file_header.lod_count, 3); @@ -1208,4 +1330,4 @@ mod tests { // Feeding it invalid data should not panic MDL::from_existing(&read(d).unwrap()); } -} \ No newline at end of file +} diff --git a/src/model_file_operations.rs b/src/model_file_operations.rs index 73cf56b..f4f1161 100644 --- a/src/model_file_operations.rs +++ b/src/model_file_operations.rs @@ -1,11 +1,11 @@ // SPDX-FileCopyrightText: 2023 Joshua Goins // SPDX-License-Identifier: GPL-3.0-or-later -use std::io::Cursor; +use crate::model::MDL; +use crate::ByteSpan; use binrw::{BinReaderExt, BinResult, BinWriterExt}; use half::f16; -use crate::ByteSpan; -use crate::model::MDL; +use std::io::Cursor; /// Maximum value of byte, used to divide and multiply floats in that space [0.0..1.0] to [0..255] const MAX_BYTE_FLOAT: f32 = u8::MAX as f32; @@ -16,16 +16,20 @@ impl MDL { (f32::from(cursor.read_le::().ok()?) / MAX_BYTE_FLOAT), (f32::from(cursor.read_le::().ok()?) / MAX_BYTE_FLOAT), (f32::from(cursor.read_le::().ok()?) / MAX_BYTE_FLOAT), - (f32::from(cursor.read_le::().ok()?) / MAX_BYTE_FLOAT) + (f32::from(cursor.read_le::().ok()?) / MAX_BYTE_FLOAT), ]) } - pub(crate) fn write_byte_float4(cursor: &mut T, vec: &[f32; 4]) -> BinResult<()> { + pub(crate) fn write_byte_float4( + cursor: &mut T, + vec: &[f32; 4], + ) -> BinResult<()> { cursor.write_le::<[u8; 4]>(&[ (vec[0] * MAX_BYTE_FLOAT).round() as u8, (vec[1] * MAX_BYTE_FLOAT).round() as u8, (vec[2] * MAX_BYTE_FLOAT).round() as u8, - (vec[3] * MAX_BYTE_FLOAT).round() as u8]) + (vec[3] * MAX_BYTE_FLOAT).round() as u8, + ]) } pub(crate) fn read_tangent(cursor: &mut Cursor) -> Option<[f32; 4]> { @@ -33,7 +37,11 @@ impl MDL { (f32::from(cursor.read_le::().ok()?) * 2.0 / MAX_BYTE_FLOAT - 1.0), (f32::from(cursor.read_le::().ok()?) * 2.0 / MAX_BYTE_FLOAT - 1.0), (f32::from(cursor.read_le::().ok()?) * 2.0 / MAX_BYTE_FLOAT - 1.0), - if (f32::from(cursor.read_le::().ok()?) 
* 2.0 / MAX_BYTE_FLOAT - 1.0) == 1.0 { 1.0 } else { -1.0 } + if (f32::from(cursor.read_le::().ok()?) * 2.0 / MAX_BYTE_FLOAT - 1.0) == 1.0 { + 1.0 + } else { + -1.0 + }, ]) } @@ -42,7 +50,8 @@ impl MDL { ((vec[0] + 1.0) * (MAX_BYTE_FLOAT / 2.0)).round() as u8, ((vec[1] + 1.0) * (MAX_BYTE_FLOAT / 2.0)).round() as u8, ((vec[2] + 1.0) * (MAX_BYTE_FLOAT / 2.0)).round() as u8, - if vec[3] > 0.0 { 255 } else { 0 }]) // SqEx uses 0 as -1, not 1 + if vec[3] > 0.0 { 255 } else { 0 }, + ]) // SqEx uses 0 as -1, not 1 } pub(crate) fn read_half4(cursor: &mut Cursor) -> Option<[f32; 4]> { @@ -50,7 +59,7 @@ impl MDL { f16::from_bits(cursor.read_le::().ok()?).to_f32(), f16::from_bits(cursor.read_le::().ok()?).to_f32(), f16::from_bits(cursor.read_le::().ok()?).to_f32(), - f16::from_bits(cursor.read_le::().ok()?).to_f32() + f16::from_bits(cursor.read_le::().ok()?).to_f32(), ]) } @@ -59,13 +68,14 @@ impl MDL { f16::from_f32(vec[0]).to_bits(), f16::from_f32(vec[1]).to_bits(), f16::from_f32(vec[2]).to_bits(), - f16::from_f32(vec[3]).to_bits()]) + f16::from_f32(vec[3]).to_bits(), + ]) } pub(crate) fn read_half2(cursor: &mut Cursor) -> Option<[f32; 2]> { Some([ f16::from_bits(cursor.read_le::().ok()?).to_f32(), - f16::from_bits(cursor.read_le::().ok()?).to_f32() + f16::from_bits(cursor.read_le::().ok()?).to_f32(), ]) } @@ -73,7 +83,8 @@ impl MDL { pub(crate) fn write_half2(cursor: &mut T, vec: &[f32; 2]) -> BinResult<()> { cursor.write_le::<[u16; 2]>(&[ f16::from_f32(vec[0]).to_bits(), - f16::from_f32(vec[1]).to_bits()]) + f16::from_f32(vec[1]).to_bits(), + ]) } pub(crate) fn read_byte4(cursor: &mut Cursor) -> BinResult<[u8; 4]> { @@ -113,15 +124,17 @@ impl MDL { #[cfg(test)] mod tests { - use std::io::Cursor; use crate::model::MDL; + use std::io::Cursor; macro_rules! assert_delta { ($x:expr, $y:expr, $d:expr) => { for i in 0..4 { - if !($x[i] - $y[i] < $d || $y[i] - $x[i] < $d) { panic!(); } + if !($x[i] - $y[i] < $d || $y[i] - $x[i] < $d) { + panic!(); + } } - } + }; } #[test] diff --git a/src/model_vertex_declarations.rs b/src/model_vertex_declarations.rs index ad940ed..e83f1c7 100644 --- a/src/model_vertex_declarations.rs +++ b/src/model_vertex_declarations.rs @@ -1,9 +1,9 @@ // SPDX-FileCopyrightText: 2023 Joshua Goins // SPDX-License-Identifier: GPL-3.0-or-later -use std::io::SeekFrom; -use binrw::{BinRead, BinResult, binrw, BinWrite}; use crate::model::NUM_VERTICES; +use binrw::{binrw, BinRead, BinResult, BinWrite}; +use std::io::SeekFrom; /// Marker for end of stream (0xFF) const END_OF_STREAM: u8 = 0xFF; @@ -46,7 +46,7 @@ pub enum VertexType { /// 2 16-bit unsigned integers UnsignedShort2 = 16, /// 4 16-bit unsigned integers - UnsignedShort4 = 17 + UnsignedShort4 = 17, } /// What the vertex stream is used for. @@ -78,7 +78,7 @@ pub struct VertexElement { pub usage_index: u8, } -/// Represents the true size of VertexElement. Always use this value instead of std::mem::size_of. +/// Represents the true size of VertexElement. Always use this value instead of std::mem::size_of. 
 // 3 extra bytes to account for the padding that doesn't appear in the struct itself
 pub const VERTEX_ELEMENT_SIZE: usize = std::mem::size_of::<VertexElement>() + 3;
@@ -90,10 +90,7 @@ pub struct VertexDeclaration {
 #[binrw::parser(reader, endian)]
 pub(crate) fn vertex_element_parser(count: u16) -> BinResult<Vec<VertexDeclaration>> {
     let mut vertex_declarations: Vec<VertexDeclaration> =
-        vec![
-            VertexDeclaration { elements: vec![] };
-            count.into()
-        ];
+        vec![VertexDeclaration { elements: vec![] }; count.into()];
     for declaration in &mut vertex_declarations {
         let mut element = VertexElement::read_options(reader, endian, ())?;
@@ -115,9 +112,7 @@
 #[binrw::writer(writer, endian)]
-pub(crate) fn vertex_element_writer(
-    declarations: &Vec<VertexDeclaration>,
-) -> BinResult<()> {
+pub(crate) fn vertex_element_writer(declarations: &Vec<VertexDeclaration>) -> BinResult<()> {
     // write vertex declarations
     for declaration in declarations {
         for element in &declaration.elements {
@@ -129,8 +124,9 @@
             offset: 0,
             vertex_type: VertexType::Single1,
             vertex_usage: VertexUsage::Position,
-            usage_index: 0
-        }.write_options(writer, endian, ())?;
+            usage_index: 0,
+        }
+        .write_options(writer, endian, ())?;
 
         let to_seek = (NUM_VERTICES as usize - 1 - declaration.elements.len()) * 8;
         writer.seek(SeekFrom::Current(to_seek as i64))?;
@@ -138,4 +134,3 @@
 
     Ok(())
 }
-
diff --git a/src/mtrl.rs b/src/mtrl.rs
index 0934dd2..f8ed459 100644
--- a/src/mtrl.rs
+++ b/src/mtrl.rs
@@ -5,8 +5,8 @@
 
 use std::io::Cursor;
 
-use binrw::{BinRead, binrw};
 use crate::ByteSpan;
+use binrw::{binrw, BinRead};
 
 #[binrw]
 #[derive(Debug)]
@@ -79,8 +79,7 @@ struct Constant {
 // from https://github.com/NotAdam/Lumina/blob/master/src/Lumina/Data/Parsing/MtrlStructs.cs
 #[binrw]
 #[derive(Debug)]
-enum TextureUsage
-{
+enum TextureUsage {
     #[brw(magic = 0x88408C04u32)]
     Sampler,
     #[brw(magic = 0x213CB439u32)]
@@ -123,7 +122,7 @@ enum TextureUsage
     SamplerWhitecapMap,
 
     #[brw(magic = 0x565f8fd8u32)]
-    UnknownDawntrail1
+    UnknownDawntrail1,
 }
 
 #[binrw]
@@ -183,14 +182,14 @@ struct MaterialData {
 pub struct Material {
     pub shader_package_name: String,
     pub texture_paths: Vec<String>,
-    pub shader_keys: Vec<ShaderKey>
+    pub shader_keys: Vec<ShaderKey>,
 }
 
 impl Material {
     pub fn from_existing(buffer: ByteSpan) -> Option<Material> {
         let mut cursor = Cursor::new(buffer);
         let mat_data = MaterialData::read(&mut cursor).ok()?;
-        
+
         let mut texture_paths = vec![];
 
         let mut offset = 0;
@@ -224,7 +223,7 @@ impl Material {
         Some(Material {
             shader_package_name,
             texture_paths,
-            shader_keys: mat_data.shader_keys
+            shader_keys: mat_data.shader_keys,
         })
     }
 }
diff --git a/src/patch.rs b/src/patch.rs
index b98bb44..ded3b09 100755
--- a/src/patch.rs
+++ b/src/patch.rs
@@ -12,8 +12,8 @@
 use binrw::BinRead;
 use tracing::{debug, warn};
 
 use crate::common::{get_platform_string, Platform, Region};
-use crate::sqpack::read_data_block_patch;
 use crate::common_file_operations::read_bool_from;
+use crate::sqpack::read_data_block_patch;
 
 #[binread]
 #[derive(Debug)]
@@ -148,7 +148,7 @@ enum SqpkOperation {
     #[br(magic = b'T')]
     TargetInfo(SqpkTargetInfo),
     #[br(magic = b'I')]
-    Index(SqpkIndex)
+    Index(SqpkIndex),
 }
 
 #[derive(BinRead, PartialEq, Debug)]
@@ -301,7 +301,7 @@ struct SqpkIndex {
     block_offset: u32,
 
     #[br(pad_after = 8)] // data?
-    block_number: u32
+    block_number: u32,
 }
 
 #[derive(BinRead, PartialEq, Debug)]
@@ -463,8 +463,11 @@ pub fn apply_patch(data_dir: &str, patch_path: &str) -> Result<(), PatchError> {
                             let (left, _) = filename.rsplit_once('/').unwrap();
                             fs::create_dir_all(left)?;
 
-                            let mut new_file =
-                                OpenOptions::new().write(true).create(true).truncate(false).open(filename)?;
+                            let mut new_file = OpenOptions::new()
+                                .write(true)
+                                .create(true)
+                                .truncate(false)
+                                .open(filename)?;
 
                             new_file.seek(SeekFrom::Start(add.block_offset as u64))?;
@@ -480,8 +483,11 @@ pub fn apply_patch(data_dir: &str, patch_path: &str) -> Result<(), PatchError> {
                                 delete.file_id,
                             );
 
-                            let new_file =
-                                OpenOptions::new().write(true).create(true).truncate(false).open(filename)?;
+                            let new_file = OpenOptions::new()
+                                .write(true)
+                                .create(true)
+                                .truncate(false)
+                                .open(filename)?;
 
                             write_empty_file_block_at(
                                 &new_file,
@@ -500,8 +506,11 @@ pub fn apply_patch(data_dir: &str, patch_path: &str) -> Result<(), PatchError> {
                             let (left, _) = filename.rsplit_once('/').unwrap();
                             fs::create_dir_all(left)?;
 
-                            let new_file =
-                                OpenOptions::new().write(true).create(true).truncate(false).open(filename)?;
+                            let new_file = OpenOptions::new()
+                                .write(true)
+                                .create(true)
+                                .truncate(false)
+                                .open(filename)?;
 
                             write_empty_file_block_at(
                                 &new_file,
diff --git a/src/pbd.rs b/src/pbd.rs
index 797f7a8..58bc334 100644
--- a/src/pbd.rs
+++ b/src/pbd.rs
@@ -3,9 +3,9 @@
 
 use std::io::{Cursor, Seek, SeekFrom};
 
-use binrw::{BinRead, BinReaderExt};
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::{BinRead, BinReaderExt};
 
 #[binrw]
 #[derive(Debug)]
@@ -14,7 +14,7 @@ struct PreBoneDeformerItem {
     body_id: u16,
     link_index: u16,
     #[br(pad_after = 4)]
-    data_offset: u32
+    data_offset: u32,
 }
 
 #[binrw]
@@ -39,7 +39,7 @@ struct PreBoneDeformerHeader {
     links: Vec<PreBoneDeformerLink>,
 
     #[br(ignore)]
-    raw_data: Vec<u8>
+    raw_data: Vec<u8>,
 }
 
 pub struct PreBoneDeformer {
@@ -51,13 +51,13 @@ pub struct PreBoneDeformBone {
     /// Name of the affected bone
     pub name: String,
     /// The deform matrix
-    pub deform: [f32; 12]
+    pub deform: [f32; 12],
 }
 
 #[derive(Debug)]
 pub struct PreBoneDeformMatrices {
     /// The prebone deform bones
-    pub bones: Vec<PreBoneDeformBone>
+    pub bones: Vec<PreBoneDeformBone>,
 }
 
 impl PreBoneDeformer {
@@ -68,18 +68,24 @@ impl PreBoneDeformer {
 
         header.raw_data = buffer.to_vec();
 
-        Some(PreBoneDeformer {
-            header
-        })
+        Some(PreBoneDeformer { header })
     }
 
     /// Calculates the deform matrices between two races
-    pub fn get_deform_matrices(&self, from_body_id: u16, to_body_id: u16) -> Option<PreBoneDeformMatrices> {
+    pub fn get_deform_matrices(
+        &self,
+        from_body_id: u16,
+        to_body_id: u16,
+    ) -> Option<PreBoneDeformMatrices> {
         if from_body_id == to_body_id {
             return None;
         }
 
-        let mut item = self.header.items.iter().find(|x| x.body_id == from_body_id)?;
+        let mut item = self
+            .header
+            .items
+            .iter()
+            .find(|x| x.body_id == from_body_id)?;
         let mut next = &self.header.links[item.link_index as usize];
 
         if next.next_index == -1 {
@@ -96,7 +102,9 @@ impl PreBoneDeformer {
             let string_offsets_base = item.data_offset as usize + core::mem::size_of::<PreBoneDeformerItem>();
 
-            cursor.seek(SeekFrom::Start(string_offsets_base as u64)).ok()?;
+            cursor
+                .seek(SeekFrom::Start(string_offsets_base as u64))
+                .ok()?;
             let mut strings_offset = vec![];
             for _ in 0..bone_name_count {
                 strings_offset.push(cursor.read_le::<u16>().unwrap());
@@ -125,7 +133,7 @@ impl PreBoneDeformer {
                 let matrix = matrices[i];
                 bones.push(PreBoneDeformBone {
                     name: string,
-                    deform: matrix
+                    deform: matrix,
                 });
             }
 
@@ -137,9 +145,7 @@ impl PreBoneDeformer {
             }
         }
 
-        Some(PreBoneDeformMatrices {
-            bones
-        })
+        Some(PreBoneDeformMatrices { bones })
     }
 }
 
@@ -160,4 +166,3 @@ mod tests {
         PreBoneDeformer::from_existing(&read(d).unwrap());
     }
 }
-
diff --git a/src/race.rs b/src/race.rs
index 82c2922..3936770 100755
--- a/src/race.rs
+++ b/src/race.rs
@@ -51,65 +51,47 @@ pub enum Race {
 pub fn get_race_id(race: Race, subrace: Subrace, gender: Gender) -> Option<i32> {
     // TODO: should we check for invalid subraces like the Hyur branch does?
     match race {
-        Race::Hyur => {
-            match subrace {
-                Subrace::Midlander => {
-                    match gender {
-                        Gender::Male => Some(101),
-                        Gender::Female => Some(201)
-                    }
-                }
-                Subrace::Highlander => {
-                    match gender {
-                        Gender::Male => Some(301),
-                        Gender::Female => Some(401)
-                    }
-                }
-                _ => None
-            }
-        }
-        Race::Elezen => {
-            match gender {
-                Gender::Male => Some(501),
-                Gender::Female => Some(601)
-            }
-        }
-        Race::Lalafell => {
-            match gender {
-                Gender::Male => Some(501),
-                Gender::Female => Some(601)
-            }
-        }
-        Race::Miqote => {
-            match gender {
-                Gender::Male => Some(701),
-                Gender::Female => Some(801)
-            }
-        }
-        Race::Roegadyn => {
-            match gender {
-                Gender::Male => Some(901),
-                Gender::Female => Some(1001)
-            }
-        }
-        Race::AuRa => {
-            match gender {
-                Gender::Male => Some(1301),
-                Gender::Female => Some(1401)
-            }
-        }
+        Race::Hyur => match subrace {
+            Subrace::Midlander => match gender {
+                Gender::Male => Some(101),
+                Gender::Female => Some(201),
+            },
+            Subrace::Highlander => match gender {
+                Gender::Male => Some(301),
+                Gender::Female => Some(401),
+            },
+            _ => None,
+        },
+        Race::Elezen => match gender {
+            Gender::Male => Some(501),
+            Gender::Female => Some(601),
+        },
+        Race::Lalafell => match gender {
+            Gender::Male => Some(501),
+            Gender::Female => Some(601),
+        },
+        Race::Miqote => match gender {
+            Gender::Male => Some(701),
+            Gender::Female => Some(801),
+        },
+        Race::Roegadyn => match gender {
+            Gender::Male => Some(901),
+            Gender::Female => Some(1001),
+        },
+        Race::AuRa => match gender {
+            Gender::Male => Some(1301),
+            Gender::Female => Some(1401),
+        },
         Race::Hrothgar => {
             match gender {
                 Gender::Male => Some(1501),
-                Gender::Female => Some(1601) // TODO: is this accurate as of dawntrail?
-            }
-        }
-        Race::Viera => {
-            match gender {
-                Gender::Male => Some(1701),
-                Gender::Female => Some(1801)
+                Gender::Female => Some(1601), // TODO: is this accurate as of dawntrail?
             }
         }
+        Race::Viera => match gender {
+            Gender::Male => Some(1701),
+            Gender::Female => Some(1801),
+        },
     }
 }
 
@@ -131,7 +113,7 @@ pub fn get_supported_subraces(race: Race) -> [Subrace; 2] {
         Race::Roegadyn => [Subrace::SeaWolf, Subrace::Hellsguard],
         Race::AuRa => [Subrace::Raen, Subrace::Xaela],
         Race::Hrothgar => [Subrace::Hellion, Subrace::Lost],
-        Race::Viera => [Subrace::Raen, Subrace::Veena]
+        Race::Viera => [Subrace::Raen, Subrace::Veena],
     }
 }
diff --git a/src/repository.rs b/src/repository.rs
index 3e659d1..58da0a9 100755
--- a/src/repository.rs
+++ b/src/repository.rs
@@ -5,7 +5,7 @@
 use std::cmp::Ordering;
 use std::cmp::Ordering::{Greater, Less};
 use std::path::{Path, PathBuf};
 
-use crate::common::{get_platform_string, Platform, read_version};
+use crate::common::{get_platform_string, read_version, Platform};
 use crate::repository::RepositoryType::{Base, Expansion};
 
 /// The type of repository, discerning game data from expansion data.
@@ -179,13 +179,10 @@ impl Repository {
             get_platform_string(&self.platform)
         )
     }
-    
+
    /// Calculate an index2 filename for a specific category, like _"0a0000.win32.index2"_.
     pub fn index2_filename(&self, category: Category) -> String {
-        format!(
-            "{}2",
-            self.index_filename(category)
-        )
+        format!("{}2", self.index_filename(category))
     }
 
     /// Calculate a dat filename given a category and a data file id, returns something like _"0a0000.win32.dat0"_.
@@ -210,8 +207,8 @@ impl Repository {
 
 #[cfg(test)]
 mod tests {
-    use std::path::PathBuf;
     use crate::common::Platform;
+    use std::path::PathBuf;
 
     use super::*;
 
@@ -248,7 +245,10 @@ mod tests {
         assert_eq!(repo.index_filename(Category::Music), "0c0000.win32.index");
         assert_eq!(repo.index2_filename(Category::Music), "0c0000.win32.index2");
-        assert_eq!(repo.dat_filename(Category::GameScript, 1), "0b0000.win32.dat1");
+        assert_eq!(
+            repo.dat_filename(Category::GameScript, 1),
+            "0b0000.win32.dat1"
+        );
     }
 
     // TODO: We need to check if these console filenames are actually correct
@@ -263,7 +263,10 @@ mod tests {
         assert_eq!(repo.index_filename(Category::Music), "0c0000.ps3.index");
         assert_eq!(repo.index2_filename(Category::Music), "0c0000.ps3.index2");
-        assert_eq!(repo.dat_filename(Category::GameScript, 1), "0b0000.ps3.dat1");
+        assert_eq!(
+            repo.dat_filename(Category::GameScript, 1),
+            "0b0000.ps3.dat1"
+        );
     }
 
     #[test]
@@ -277,6 +280,9 @@ mod tests {
         assert_eq!(repo.index_filename(Category::Music), "0c0000.ps4.index");
         assert_eq!(repo.index2_filename(Category::Music), "0c0000.ps4.index2");
-        assert_eq!(repo.dat_filename(Category::GameScript, 1), "0b0000.ps4.dat1");
+        assert_eq!(
+            repo.dat_filename(Category::GameScript, 1),
+            "0b0000.ps4.dat1"
+        );
     }
-}
\ No newline at end of file
+}
diff --git a/src/shpk.rs b/src/shpk.rs
index eef98fb..1b4e09a 100644
--- a/src/shpk.rs
+++ b/src/shpk.rs
@@ -3,8 +3,8 @@
 
 use std::io::{Cursor, SeekFrom};
 
-use binrw::{BinRead, binread};
 use crate::ByteSpan;
+use binrw::{binread, BinRead};
 
 #[binread]
 #[br(little, import {
@@ -24,7 +24,7 @@ pub struct ResourceParameter {
     #[br(seek_before = SeekFrom::Start(strings_offset as u64 + local_string_offset as u64))]
     #[br(count = string_length, map = | x: Vec<u8> | String::from_utf8(x).unwrap().trim_matches(char::from(0)).to_string())]
     #[br(restore_position)]
-    pub name: String
+    pub name: String,
 }
 
 #[binread]
@@ -55,7 +55,7 @@ pub struct Shader {
     #[br(seek_before = SeekFrom::Start(shader_data_offset as u64 + data_offset as u64))]
     #[br(count = data_size)]
     #[br(restore_position)]
-    pub bytecode: Vec<u8>
+    pub bytecode: Vec<u8>,
 }
 
 #[binread]
@@ -64,7 +64,7 @@ pub struct Shader {
 pub struct MaterialParameter {
     id: u32,
     byte_offset: u16,
-    byte_size: u16
+    byte_size: u16,
 }
 
 #[binread]
@@ -72,7 +72,7 @@ pub struct MaterialParameter {
 #[allow(unused)]
 pub struct Key {
     id: u32,
-    default_value: u32
+    default_value: u32,
 }
 
 #[binread]
@@ -82,7 +82,7 @@ pub struct Key {
 pub struct Pass {
     id: u32,
     vertex_shader: u32,
-    pixel_shader: u32
+    pixel_shader: u32,
 }
 
 #[binread]
@@ -90,7 +90,7 @@ pub struct Pass {
 #[allow(unused)]
 pub struct NodeAlias {
     selector: u32,
-    node: u32
+    node: u32,
 }
 
 #[binread]
@@ -115,7 +115,7 @@ pub struct Node {
     #[br(count = subview_key_count)]
     pub subview_keys: Vec<u32>,
     #[br(count = pass_count, err_context("system_key_count = {}", material_key_count))]
-    pub passes: Vec<Pass>
+    pub passes: Vec<Pass>,
 }
 
 #[binread]
@@ -183,7 +183,7 @@ pub struct ShaderPackage {
     node_selectors: Vec<(u32, u32)>,
 
     #[br(count = node_alias_count)]
-    node_aliases: Vec<NodeAlias>
+    node_aliases: Vec<NodeAlias>,
 }
 
 impl ShaderPackage {
@@ -215,11 +215,26 @@ impl ShaderPackage {
         None
     }
 
-    pub fn build_selector_from_all_keys(system_keys: &[u32], scene_keys: &[u32], material_keys: &[u32], subview_keys: &[u32]) -> u32 {
-        Self::build_selector_from_keys(Self::build_selector(system_keys), Self::build_selector(scene_keys), Self::build_selector(material_keys), Self::build_selector(subview_keys))
+    pub fn build_selector_from_all_keys(
+        system_keys: &[u32],
+        scene_keys: &[u32],
+        material_keys: &[u32],
+        subview_keys: &[u32],
+    ) -> u32 {
+        Self::build_selector_from_keys(
+            Self::build_selector(system_keys),
+            Self::build_selector(scene_keys),
+            Self::build_selector(material_keys),
+            Self::build_selector(subview_keys),
+        )
     }
 
-    pub fn build_selector_from_keys(system_key: u32, scene_key: u32, material_key: u32, subview_key: u32) -> u32 {
+    pub fn build_selector_from_keys(
+        system_key: u32,
+        scene_key: u32,
+        material_key: u32,
+        subview_key: u32,
+    ) -> u32 {
         Self::build_selector(&[system_key, scene_key, material_key, subview_key])
     }
diff --git a/src/skeleton.rs b/src/skeleton.rs
index 36aa786..8816fa8 100644
--- a/src/skeleton.rs
+++ b/src/skeleton.rs
@@ -5,9 +5,9 @@
 #![allow(clippy::needless_late_init)]
 #![allow(clippy::upper_case_acronyms)]
 
-use std::io::{Cursor, SeekFrom};
-use binrw::{binread, BinRead};
 use binrw::helpers::until_eof;
+use binrw::{binread, BinRead};
+use std::io::{Cursor, SeekFrom};
 
 use crate::havok::{HavokAnimationContainer, HavokBinaryTagFileReader};
 use crate::ByteSpan;
@@ -32,7 +32,7 @@ struct SklbV2 {
     body_id: u32,
     mapper_body_id1: u32,
     mapper_body_id2: u32,
-    mapper_body_id3: u32
+    mapper_body_id3: u32,
 }
 
 #[binread]
@@ -49,7 +49,7 @@ struct SKLB {
     #[br(seek_before(SeekFrom::Start(if (version == 0x3132_3030u32) { sklb_v1.as_ref().unwrap().havok_offset as u64 } else { sklb_v2.as_ref().unwrap().havok_offset as u64 })))]
     #[br(parse_with = until_eof)]
-    raw_data: Vec<u8>
+    raw_data: Vec<u8>,
 }
 
 #[derive(Debug)]
@@ -92,9 +92,17 @@ impl Skeleton {
             skeleton.bones.push(Bone {
                 name: bone.clone(),
                 parent_index: havok_skeleton.parent_indices[index] as i32,
-                position: [havok_skeleton.reference_pose[index].translation[0], havok_skeleton.reference_pose[index].translation[1], havok_skeleton.reference_pose[index].translation[2]],
+                position: [
+                    havok_skeleton.reference_pose[index].translation[0],
+                    havok_skeleton.reference_pose[index].translation[1],
+                    havok_skeleton.reference_pose[index].translation[2],
+                ],
                 rotation: havok_skeleton.reference_pose[index].rotation,
-                scale: [havok_skeleton.reference_pose[index].scale[0], havok_skeleton.reference_pose[index].scale[1], havok_skeleton.reference_pose[index].scale[2]],
+                scale: [
+                    havok_skeleton.reference_pose[index].scale[0],
+                    havok_skeleton.reference_pose[index].scale[1],
+                    havok_skeleton.reference_pose[index].scale[2],
+                ],
             });
         }
diff --git a/src/tera.rs b/src/tera.rs
index e80e3f5..c17633b 100644
--- a/src/tera.rs
+++ b/src/tera.rs
@@ -3,16 +3,16 @@
 
 use std::io::Cursor;
 
-use binrw::BinRead;
-use binrw::binrw;
 use crate::ByteSpan;
+use binrw::binrw;
+use binrw::BinRead;
 
 #[binrw]
 #[derive(Debug, Clone, Copy)]
 #[brw(little)]
 struct PlatePosition {
     x: i16,
-    y: i16
+    y: i16,
 }
 
 #[binrw]
@@ -30,18 +30,18 @@ struct TerrainHeader {
     padding: Vec<u8>,
 
     #[br(count = plate_count)]
-    positions: Vec<PlatePosition>
+    positions: Vec<PlatePosition>,
 }
 
 #[derive(Debug)]
 pub struct PlateModel {
     pub position: (f32, f32),
-    pub filename: String
+    pub filename: String,
 }
 
 #[derive(Debug)]
 pub struct Terrain {
-    pub plates: Vec<PlateModel>
+    pub plates: Vec<PlateModel>,
 }
 
 impl Terrain {
@@ -54,15 +54,15 @@ impl Terrain {
 
         for i in 0..header.plate_count {
             plates.push(PlateModel {
-                position: (header.plate_size as f32 * (header.positions[i as usize].x as f32 + 0.5),
-                           header.plate_size as f32 * (header.positions[i as usize].y as f32 + 0.5)),
-                filename: format!("{:04}.mdl", i)
+                position: (
+                    header.plate_size as f32 * (header.positions[i as usize].x as f32 + 0.5),
+                    header.plate_size as f32 * (header.positions[i as usize].y as f32 + 0.5),
+                ),
+                filename: format!("{:04}.mdl", i),
             })
         }
 
-        Some(Terrain {
-            plates
-        })
+        Some(Terrain { plates })
     }
 }
diff --git a/src/tex.rs b/src/tex.rs
index 052083c..37874e6 100644
--- a/src/tex.rs
+++ b/src/tex.rs
@@ -5,11 +5,11 @@
 
 use std::io::{Cursor, Read, Seek, SeekFrom};
 
-use binrw::BinRead;
+use crate::ByteSpan;
 use binrw::binrw;
+use binrw::BinRead;
 use bitflags::bitflags;
 use texture2ddecoder::{decode_bc1, decode_bc3, decode_bc5};
-use crate::ByteSpan;
 
 // Attributes and Format are adapted from Lumina (https://github.com/NotAdam/Lumina/blob/master/src/Lumina/Data/Files/TexFile.cs)
 bitflags! {
@@ -94,7 +94,7 @@ impl Texture {
         let mut src = vec![0u8; buffer.len() - std::mem::size_of::<TexHeader>()];
         cursor.read_exact(src.as_mut_slice()).ok()?;
 
-        let mut dst : Vec<u8>;
+        let mut dst: Vec<u8>;
 
         match header.format {
             TextureFormat::B4G4R4A4 => {
@@ -107,7 +107,7 @@ impl Texture {
                     let short: u16 = ((src[offset] as u16) << 8) | src[offset + 1] as u16;
 
                     let src_b = short & 0xF;
-                    let src_g= (short >> 4) & 0xF;
+                    let src_g = (short >> 4) & 0xF;
                     let src_r = (short >> 8) & 0xF;
                     let src_a = (short >> 12) & 0xF;
@@ -124,13 +124,28 @@ impl Texture {
                 dst = src; // TODO: not correct, of course
             }
             TextureFormat::BC1 => {
-                dst = Texture::decode(&src, header.width as usize, header.height as usize, decode_bc1);
+                dst = Texture::decode(
+                    &src,
+                    header.width as usize,
+                    header.height as usize,
+                    decode_bc1,
+                );
             }
             TextureFormat::BC3 => {
-                dst = Texture::decode(&src, header.width as usize, header.height as usize, decode_bc3);
+                dst = Texture::decode(
+                    &src,
+                    header.width as usize,
+                    header.height as usize,
+                    decode_bc3,
+                );
             }
             TextureFormat::BC5 => {
-                dst = Texture::decode(&src, header.width as usize, header.height as usize, decode_bc5);
+                dst = Texture::decode(
+                    &src,
+                    header.width as usize,
+                    header.height as usize,
+                    decode_bc5,
+                );
             }
         }
@@ -143,13 +158,7 @@ impl Texture {
     fn decode(src: &[u8], width: usize, height: usize, decode_func: DecodeFunction) -> Vec<u8> {
         let mut image: Vec<u32> = vec![0; width * height];
-        decode_func(
-            src,
-            width,
-            height,
-            &mut image,
-        )
-        .unwrap();
+        decode_func(src, width, height, &mut image).unwrap();
 
         image
             .iter()
diff --git a/tests/integration_test.rs b/tests/integration_test.rs
index be082be..e8dca3d 100755
--- a/tests/integration_test.rs
+++ b/tests/integration_test.rs
@@ -1,17 +1,17 @@
 // SPDX-FileCopyrightText: 2023 Joshua Goins
 // SPDX-License-Identifier: GPL-3.0-or-later
 
+use hmac_sha512::Hash;
+use physis::patch::apply_patch;
 use std::env;
 use std::fs::{read, read_dir};
 use std::process::Command;
 
-use hmac_sha512::Hash;
-use physis::patch::apply_patch;
-use std::collections::HashMap;
-use std::path::{Path, PathBuf};
 use physis::common::Platform;
 use physis::fiin::FileInfo;
 use physis::index;
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
 
 #[test]
 #[cfg_attr(not(feature = "retail_game_testing"), ignore)]
@@ -28,9 +28,12 @@ fn test_index_read() {
 fn test_gamedata_extract() {
     let game_dir = env::var("FFXIV_GAME_DIR").unwrap();
 
-    let mut gamedata =
-        physis::gamedata::GameData::from_existing(Platform::Win32, format!("{}/game", game_dir).as_str()).unwrap();
-    
+    let mut gamedata = physis::gamedata::GameData::from_existing(
+        Platform::Win32,
+        format!("{}/game", game_dir).as_str(),
+    )
+    .unwrap();
+
     assert!(gamedata.extract("exd/root.exl").is_some());
 }
@@ -70,34 +73,43 @@ fn make_temp_install_dir(name: &str) -> String {
 // Shamelessly taken from https://stackoverflow.com/a/76820878
 fn recurse(path: impl AsRef<Path>) -> Vec<PathBuf> {
-    let Ok(entries) = read_dir(path) else { return vec![] };
-    entries.flatten().flat_map(|entry| {
-        let Ok(meta) = entry.metadata() else { return vec![] };
-        if meta.is_dir() { return recurse(entry.path()); }
-        if meta.is_file() { return vec![entry.path()]; }
-        vec![]
-    }).collect()
+    let Ok(entries) = read_dir(path) else {
+        return vec![];
+    };
+    entries
+        .flatten()
+        .flat_map(|entry| {
+            let Ok(meta) = entry.metadata() else {
+                return vec![];
+            };
+            if meta.is_dir() {
+                return recurse(entry.path());
+            }
+            if meta.is_file() {
+                return vec![entry.path()];
+            }
+            vec![]
+        })
+        .collect()
 }
 
 #[cfg(feature = "patch_testing")]
 fn fill_dir_hash(game_dir: &str) -> HashMap<String, [u8; 64]> {
     let mut file_hashes: HashMap<String, [u8; 64]> = HashMap::new();
 
-    recurse(game_dir)
-        .into_iter()
-        .for_each(|x| {
-            let path = x.as_path();
-            let file = std::fs::read(path).unwrap();
+    recurse(game_dir).into_iter().for_each(|x| {
+        let path = x.as_path();
+        let file = std::fs::read(path).unwrap();
 
-            let mut hash = Hash::new();
-            hash.update(&file);
-            let sha = hash.finalize();
+        let mut hash = Hash::new();
+        hash.update(&file);
+        let sha = hash.finalize();
 
-            let mut rel_path = path;
-            rel_path = rel_path.strip_prefix(game_dir).unwrap();
+        let mut rel_path = path;
+        rel_path = rel_path.strip_prefix(game_dir).unwrap();
 
-            file_hashes.insert(rel_path.to_str().unwrap().to_string(), sha);
-        });
+        file_hashes.insert(rel_path.to_str().unwrap().to_string(), sha);
+    });
 
     file_hashes
}
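A note on the MAX_BYTE_FLOAT conversions reformatted in src/model_file_operations.rs above: vertex attributes such as bone weights and colors are stored as single bytes, so the read/write helpers quantize floats in [0.0, 1.0] to integers in [0, 255] and back. The following is a minimal standalone sketch of that round trip; the encode/decode helper names are illustrative only and are not part of the physis API.

    // Sketch of the 8-bit quantization behind read_byte_float4/write_byte_float4.
    // Floats in [0.0, 1.0] map to bytes in [0, 255], so decode(encode(v)) is
    // only exact to within one quantization step (1/255).
    const MAX_BYTE_FLOAT: f32 = u8::MAX as f32;

    fn encode(v: f32) -> u8 {
        (v * MAX_BYTE_FLOAT).round() as u8
    }

    fn decode(b: u8) -> f32 {
        f32::from(b) / MAX_BYTE_FLOAT
    }

    fn main() {
        for v in [0.0_f32, 0.25, 0.5, 1.0] {
            let roundtrip = decode(encode(v));
            // error stays well under one quantization step
            assert!((v - roundtrip).abs() < 1.0 / MAX_BYTE_FLOAT);
        }
    }

This bounded quantization error is also why the crate's model_file_operations tests compare values through a small-delta macro rather than exact equality.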