From c489d095dfb712bcc9071b7a48779cac44c69061 Mon Sep 17 00:00:00 2001
From: Avi Weinstock
Date: Thu, 18 Feb 2021 17:22:15 -0500
Subject: [PATCH] Implement persistence for modular weapons.

This stores the components as children of the item that contains them via the
DB's `parent_container_item_id` feature, and ensures that items are loaded in
dependency order (parents before their components) with breadth-first search.

Squashed fixes:
- Fix some constraint violations that occurred when swapping inventory items.
- Comment out recipes for modular weapons.
- Make update_item_at_slot_using_persistence_key and is_modular more idiomatic.
- Add changelog entry.
- Document `defer_foreign_keys` usage.
---
 CHANGELOG.md                                   |   1 +
 common/src/comp/inventory/item/mod.rs          |  54 +--
 common/src/comp/inventory/loadout.rs           |  20 +
 common/src/comp/inventory/mod.rs               |   2 +-
 common/src/recipe.rs                           |   8 +-
 server/src/persistence/character.rs            |  87 +++--
 .../src/persistence/character/conversions.rs   | 348 +++++++++++-------
 7 files changed, 335 insertions(+), 185 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 019789c0c2..b5ab250558 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -31,6 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Basic NPC interaction
 - Lights in dungeons
 - Trading system (bound to the `R` key by default, currently only works with players)
+- Support for modular weapons.
 
 ### Changed
 
diff --git a/common/src/comp/inventory/item/mod.rs b/common/src/comp/inventory/item/mod.rs
index 700b56767a..bc045dae59 100644
--- a/common/src/comp/inventory/item/mod.rs
+++ b/common/src/comp/inventory/item/mod.rs
@@ -240,13 +240,13 @@ impl ItemDef {
     }
 
     pub fn is_modular(&self) -> bool {
-        match &self.kind {
-            ItemKind::Tool(tool) => match tool.stats {
-                tool::StatKind::Direct { .. } => false,
-                tool::StatKind::Modular => true,
-            },
-            _ => false,
-        }
+        matches!(
+            &self.kind,
+            ItemKind::Tool(tool::Tool {
+                stats: tool::StatKind::Modular,
+                ..
+            })
+        )
     }
 
     #[cfg(test)]
@@ -372,25 +372,16 @@ impl Item {
             components.extend(input_components.iter().map(|comp| comp.duplicate()));
         }
 
-        let kind = inner_item.kind();
-        let item_config = if let ItemKind::Tool(_) = kind {
-            Some(Box::new(ItemConfig::from((
-                kind,
-                &*components,
-                &inner_item.ability_map,
-            ))))
-        } else {
-            None
-        };
-
-        Item {
+        let mut item = Item {
             item_id: Arc::new(AtomicCell::new(None)),
             amount: NonZeroU32::new(1).unwrap(),
             components,
             slots: vec![None; inner_item.slots as usize],
             item_def: inner_item,
-            item_config,
-        }
+            item_config: None,
+        };
+        item.update_item_config();
+        item
     }
 
     /// Creates a new instance of an `Item` from the provided asset identifier
@@ -478,6 +469,27 @@ impl Item {
         }
     }
 
+    pub fn add_component(&mut self, component: Item) {
+        // TODO: hook for typechecking (not needed atm if this is only used by DB
+        // persistence, but will definitely be needed once enhancement slots are
+        // added to prevent putting a sword into another sword)
+        self.components.push(component);
+        // adding a component changes the stats, so recalculate the ItemConfig
+        self.update_item_config();
+    }
+
+    fn update_item_config(&mut self) {
+        self.item_config = if let ItemKind::Tool(_) = self.kind() {
+            Some(Box::new(ItemConfig::from((
+                self.kind(),
+                self.components(),
+                &self.item_def.ability_map,
+            ))))
+        } else {
+            None
+        };
+    }
+
     /// Returns an iterator that drains items contained within the item's slots
     pub fn drain(&mut self) -> impl Iterator<Item = Item> + '_ {
         self.slots.iter_mut().filter_map(|x| mem::take(x))
diff --git a/common/src/comp/inventory/loadout.rs b/common/src/comp/inventory/loadout.rs
index c79f8c4d21..aaf8581e78 100644
--- a/common/src/comp/inventory/loadout.rs
+++ b/common/src/comp/inventory/loadout.rs
@@ -46,6 +46,7 @@ pub(super) struct LoadoutSlotId {
 
 pub enum LoadoutError {
     InvalidPersistenceKey,
+    NoParentAtSlot,
 }
 
 impl Loadout {
@@ -134,6 +135,25 @@ impl Loadout {
         }
     }
 
+    pub fn update_item_at_slot_using_persistence_key<F: FnOnce(&mut Item)>(
+        &mut self,
+        persistence_key: &str,
+        f: F,
+    ) -> Result<(), LoadoutError> {
+        self.slots
+            .iter_mut()
+            .find(|loadout_slot| loadout_slot.persistence_key == persistence_key)
+            .map_or(Err(LoadoutError::InvalidPersistenceKey), |loadout_slot| {
+                loadout_slot
+                    .slot
+                    .as_mut()
+                    .map_or(Err(LoadoutError::NoParentAtSlot), |item| {
+                        f(item);
+                        Ok(())
+                    })
+            })
+    }
+
     /// Swaps the contents of two loadout slots
     pub(super) fn swap_slots(&mut self, equip_slot_a: EquipSlot, equip_slot_b: EquipSlot) {
         if self.slot(equip_slot_b).is_none() || self.slot(equip_slot_b).is_none() {
diff --git a/common/src/comp/inventory/mod.rs b/common/src/comp/inventory/mod.rs
index fd9f8af8be..2a7cf3713f 100644
--- a/common/src/comp/inventory/mod.rs
+++ b/common/src/comp/inventory/mod.rs
@@ -350,7 +350,7 @@ impl Inventory {
         }
     }
 
-    fn slot_mut(&mut self, inv_slot_id: InvSlotId) -> Option<&mut InvSlot> {
+    pub fn slot_mut(&mut self, inv_slot_id: InvSlotId) -> Option<&mut InvSlot> {
         match SlotId::from(inv_slot_id) {
             SlotId::Inventory(slot_idx) => self.slots.get_mut(slot_idx),
             SlotId::Loadout(loadout_slot_id) => self.loadout.inv_slot_mut(loadout_slot_id),
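Aside: the nested `map_or` chain in `update_item_at_slot_using_persistence_key` folds the two failure modes (unknown persistence key, empty slot) into distinct `LoadoutError` variants without an early-return ladder. A self-contained sketch of the same shape — `Slot`, `SlotError`, and `update_at` are made-up stand-ins for the loadout internals, not Veloren API:

```rust
#[derive(Debug, PartialEq)]
enum SlotError {
    InvalidKey,   // no slot with that persistence key
    NoItemAtSlot, // the slot exists but holds no item
}

struct Slot {
    key: &'static str,
    item: Option<String>,
}

/// Apply `f` to the item stored under `key`, mirroring the nested `map_or`
/// structure used by the loadout code above.
fn update_at<F: FnOnce(&mut String)>(
    slots: &mut [Slot],
    key: &str,
    f: F,
) -> Result<(), SlotError> {
    slots
        .iter_mut()
        .find(|s| s.key == key)
        .map_or(Err(SlotError::InvalidKey), |s| {
            s.item.as_mut().map_or(Err(SlotError::NoItemAtSlot), |item| {
                f(item);
                Ok(())
            })
        })
}

fn main() {
    let mut slots = vec![
        Slot { key: "active_item", item: Some("sword".to_string()) },
        Slot { key: "second_item", item: None },
    ];
    assert_eq!(update_at(&mut slots, "active_item", |i| i.push_str("+gem")), Ok(()));
    assert_eq!(slots[0].item.as_deref(), Some("sword+gem"));
    assert_eq!(update_at(&mut slots, "second_item", |_| ()), Err(SlotError::NoItemAtSlot));
    assert_eq!(update_at(&mut slots, "hat", |_| ()), Err(SlotError::InvalidKey));
}
```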
diff --git a/common/src/recipe.rs b/common/src/recipe.rs
index b998b443fd..f11b464d79 100644
--- a/common/src/recipe.rs
+++ b/common/src/recipe.rs
@@ -119,7 +119,13 @@ impl assets::Compound for RecipeBook {
         }
 
         let mut raw = cache.load::<RawRecipeBook>(specifier)?.read().clone();
-        modular::append_modular_recipes(&mut raw);
+
+        // Avoid showing purple-question-box recipes until the assets are added
+        // (the `if false` is needed because commenting out the call would add a
+        // warning that there are no other uses of append_modular_recipes)
+        if false {
+            modular::append_modular_recipes(&mut raw);
+        }
 
         let recipes = raw
             .0
diff --git a/server/src/persistence/character.rs b/server/src/persistence/character.rs
index be5b993dd3..24cc6c85de 100644
--- a/server/src/persistence/character.rs
+++ b/server/src/persistence/character.rs
@@ -27,7 +27,7 @@ use crate::{
 use common::character::{CharacterId, CharacterItem, MAX_CHARACTERS_PER_PLAYER};
 use core::ops::Range;
 use diesel::{prelude::*, sql_query, sql_types::BigInt};
-use std::sync::Arc;
+use std::{collections::VecDeque, sync::Arc};
 use tracing::{error, trace, warn};
 
 /// Private module for very tightly coupled database conversion methods.  In
@@ -50,6 +50,26 @@ struct CharacterContainers {
     loadout_container_id: EntityId,
 }
 
+/// BFS the inventory/loadout to ensure that each is topologically sorted in the
+/// sense required by convert_inventory_from_database_items to support recursive
+/// items
+pub fn load_items_bfs(connection: VelorenTransaction, root: i64) -> Result<Vec<Item>, Error> {
+    use schema::item::dsl::*;
+    let mut items = Vec::new();
+    let mut queue = VecDeque::new();
+    queue.push_front(root);
+    while let Some(id) = queue.pop_front() {
+        let frontier = item
+            .filter(parent_container_item_id.eq(id))
+            .load::<Item>(&*connection)?;
+        for i in frontier.iter() {
+            queue.push_back(i.item_id);
+        }
+        items.extend(frontier);
+    }
+    Ok(items)
+}
+
 /// Load stored data for a character.
 ///
 /// After first logging in, and after a character is selected, we fetch this
@@ -59,19 +79,12 @@ pub fn load_character_data(
     char_id: CharacterId,
     connection: VelorenTransaction,
 ) -> CharacterDataResult {
-    use schema::{body::dsl::*, character::dsl::*, item::dsl::*, skill_group::dsl::*};
+    use schema::{body::dsl::*, character::dsl::*, skill_group::dsl::*};
 
     let character_containers = get_pseudo_containers(connection, char_id)?;
 
-    // TODO: Make inventory and loadout item loading work with recursive items when
-    // container items are supported
-    let inventory_items = item
-        .filter(parent_container_item_id.eq(character_containers.inventory_container_id))
-        .load::<Item>(&*connection)?;
-
-    let loadout_items = item
-        .filter(parent_container_item_id.eq(character_containers.loadout_container_id))
-        .load::<Item>(&*connection)?;
+    let inventory_items = load_items_bfs(connection, character_containers.inventory_container_id)?;
+    let loadout_items = load_items_bfs(connection, character_containers.loadout_container_id)?;
 
     let character_data = character
         .filter(
@@ -109,7 +122,12 @@ pub fn load_character_data(
     Ok((
         convert_body_from_database(&char_body)?,
         convert_stats_from_database(character_data.alias, &skill_data, &skill_group_data),
-        convert_inventory_from_database_items(&inventory_items, &loadout_items)?,
+        convert_inventory_from_database_items(
+            character_containers.inventory_container_id,
+            &inventory_items,
+            character_containers.loadout_container_id,
+            &loadout_items,
+        )?,
         char_waypoint,
     ))
 }
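For intuition, the parents-before-children guarantee that `load_items_bfs` relies on can be reproduced on a plain parent-child table. The `rows`/`children_of` shapes below are illustrative stand-ins for the `item` table and the Diesel query, not Veloren code:

```rust
use std::collections::VecDeque;

/// (item_id, parent_container_item_id) rows, in arbitrary storage order.
fn children_of(rows: &[(i64, i64)], parent: i64) -> Vec<(i64, i64)> {
    rows.iter().copied().filter(|&(_, p)| p == parent).collect()
}

/// Breadth-first walk from `root`: every row is emitted after its parent,
/// which is exactly the topological ordering the conversion code needs.
fn load_items_bfs(rows: &[(i64, i64)], root: i64) -> Vec<(i64, i64)> {
    let mut items = Vec::new();
    let mut queue = VecDeque::from([root]);
    while let Some(id) = queue.pop_front() {
        let frontier = children_of(rows, id);
        queue.extend(frontier.iter().map(|&(item_id, _)| item_id));
        items.extend(frontier);
    }
    items
}

fn main() {
    // A sword (2) in the inventory pseudo-container (1), with components 3 and 4,
    // deliberately stored out of order.
    let rows = [(4, 2), (2, 1), (3, 2)];
    let order: Vec<i64> = load_items_bfs(&rows, 1).iter().map(|&(id, _)| id).collect();
    // The sword precedes both of its components (the order among siblings is
    // not significant).
    assert_eq!(order, vec![2, 4, 3]);
}
```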
@@ -125,7 +143,7 @@ pub fn load_character_list(
     player_uuid_: &str,
     connection: VelorenTransaction,
 ) -> CharacterListResult {
-    use schema::{body::dsl::*, character::dsl::*, item::dsl::*};
+    use schema::{body::dsl::*, character::dsl::*};
 
     let result = character
         .filter(player_uuid.eq(player_uuid_))
@@ -149,13 +167,10 @@ pub fn load_character_list(
             LOADOUT_PSEUDO_CONTAINER_POSITION,
         )?;
 
-        // TODO: Make work with recursive items if containers are ever supported as part
-        // of a loadout
-        let loadout_items = item
-            .filter(parent_container_item_id.eq(loadout_container_id))
-            .load::<Item>(&*connection)?;
+        let loadout_items = load_items_bfs(connection, loadout_container_id)?;
 
-        let loadout = convert_loadout_from_database_items(&loadout_items)?;
+        let loadout =
+            convert_loadout_from_database_items(loadout_container_id, &loadout_items)?;
 
         Ok(CharacterItem {
             character: char,
@@ -276,7 +291,7 @@ pub fn create_character(
     let mut inserts = Vec::new();
 
     get_new_entity_ids(connection, |mut next_id| {
-        let (inserts_, _deletes) = convert_items_to_database_items(
+        let inserts_ = convert_items_to_database_items(
             loadout_container_id,
             &inventory,
             inventory_container_id,
@@ -541,7 +556,7 @@ pub fn update(
     // First, get all the entity IDs for any new items, and identify which slots to
     // upsert and which ones to delete.
     get_new_entity_ids(connection, |mut next_id| {
-        let (upserts_, _deletes) = convert_items_to_database_items(
+        let upserts_ = convert_items_to_database_items(
             pseudo_containers.loadout_container_id,
             &inventory,
             pseudo_containers.inventory_container_id,
@@ -553,9 +568,17 @@ pub fn update(
 
     // Next, delete any slots we aren't upserting.
     trace!("Deleting items for character_id {}", char_id);
-    let existing_items = parent_container_item_id
-        .eq(pseudo_containers.inventory_container_id)
-        .or(parent_container_item_id.eq(pseudo_containers.loadout_container_id));
+    let mut existing_item_ids: Vec<i64> = vec![
+        pseudo_containers.inventory_container_id,
+        pseudo_containers.loadout_container_id,
+    ];
+    for it in load_items_bfs(connection, pseudo_containers.inventory_container_id)? {
+        existing_item_ids.push(it.item_id);
+    }
+    for it in load_items_bfs(connection, pseudo_containers.loadout_container_id)? {
+        existing_item_ids.push(it.item_id);
+    }
+    let existing_items = parent_container_item_id.eq_any(existing_item_ids);
     let non_upserted_items = item_id.ne_all(
         upserts
             .iter()
@@ -573,7 +596,13 @@ pub fn update(
     if expected_upsert_count > 0 {
         let (upserted_items, upserted_comps_): (Vec<_>, Vec<_>) = upserts
             .into_iter()
-            .map(|model_pair| (model_pair.model, model_pair.comp))
+            .map(|model_pair| {
+                debug_assert_eq!(
+                    model_pair.model.item_id,
+                    model_pair.comp.load().unwrap().get() as i64
+                );
+                (model_pair.model, model_pair.comp)
+            })
             .unzip();
         upserted_comps = upserted_comps_;
         trace!(
@@ -582,9 +611,17 @@ pub fn update(
             char_id
         );
 
+        // When moving inventory items around, foreign key constraints on
+        // `parent_container_item_id` can be temporarily violated by one upsert, but
+        // restored by another upsert. Deferred constraints allow SQLite to check this
+        // when committing the transaction. The `defer_foreign_keys` pragma treats the
+        // foreign key constraints as deferred for the next transaction (it turns itself
+        // off at the commit boundary).
+        // See https://sqlite.org/foreignkeys.html#fk_deferred
+        connection.execute("PRAGMA defer_foreign_keys = ON;")?;
         let upsert_count = diesel::replace_into(item)
             .values(&upserted_items)
            .execute(&*connection)?;
+        trace!("upsert_count: {}", upsert_count);
         if upsert_count != expected_upsert_count {
             return Err(Error::OtherError(format!(
                 "Expected upsertions={}, actual={}, for char_id {}--unsafe to continue \
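To see what the pragma buys, the scenario from the comment above can be reproduced outside Diesel. A minimal sketch, assuming `rusqlite` purely for brevity (the server issues the pragma through its own connection wrapper): a mid-transaction statement may leave `parent_container_item_id` dangling, and `defer_foreign_keys` postpones the check until COMMIT, by which point a later statement has repaired it.

```rust
// Cargo.toml (assumed for this sketch): rusqlite with the "bundled" feature.
use rusqlite::Connection;

fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "PRAGMA foreign_keys = ON;
         CREATE TABLE item (
             item_id INTEGER PRIMARY KEY,
             parent_container_item_id INTEGER REFERENCES item (item_id)
         );
         INSERT INTO item VALUES (1, NULL); -- inventory pseudo-container
         INSERT INTO item VALUES (2, 1);    -- a modular weapon
         INSERT INTO item VALUES (3, 2);    -- a component stored in the weapon
        ",
    )?;

    // Re-parenting a row to a not-yet-written parent fails under immediate
    // (statement-by-statement) enforcement:
    assert!(conn
        .execute_batch("UPDATE item SET parent_container_item_id = 9 WHERE item_id = 3;")
        .is_err());

    // With defer_foreign_keys the same intermediate state is legal, as long as
    // a later statement in the transaction repairs it before COMMIT:
    conn.execute_batch(
        "BEGIN;
         PRAGMA defer_foreign_keys = ON;
         UPDATE item SET parent_container_item_id = 9 WHERE item_id = 3;
         INSERT INTO item VALUES (9, 1);
         COMMIT;",
    )?;
    Ok(())
}
```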
diff --git a/server/src/persistence/character/conversions.rs b/server/src/persistence/character/conversions.rs
index 77f8b67007..2cf8fa8932 100644
--- a/server/src/persistence/character/conversions.rs
+++ b/server/src/persistence/character/conversions.rs
@@ -21,16 +21,17 @@ use common::{
 };
 use core::{convert::TryFrom, num::NonZeroU64};
 use hashbrown::HashMap;
-use itertools::{Either, Itertools};
-use std::sync::Arc;
+use std::{collections::VecDeque, sync::Arc};
 
+#[derive(Debug)]
 pub struct ItemModelPair {
     pub comp: Arc<ItemId>,
     pub model: Item,
 }
 
-/// The left vector contains all item rows to upsert; the right-hand vector
-/// contains all item rows to delete (by parent ID and position).
+/// Returns a vector that contains all item rows to upsert; the caller is
+/// responsible for deleting items from the same owner that aren't affirmatively
+/// kept by this.
 ///
 /// NOTE: This method does not yet handle persisting nested items within
 /// inventories. Although loadout items do store items inside them this does
@@ -41,7 +42,7 @@ pub fn convert_items_to_database_items(
     inventory: &Inventory,
     inventory_container_id: EntityId,
     next_id: &mut i64,
-) -> (Vec<ItemModelPair>, Vec<(EntityId, String)>) {
+) -> Vec<ItemModelPair> {
     let loadout = inventory
         .loadout_items_with_persistence_key()
         .map(|(slot, item)| (slot.to_string(), item, loadout_container_id));
@@ -55,103 +56,125 @@ pub fn convert_items_to_database_items(
         )
     });
 
-    // Construct new items.
-    inventory.chain(loadout)
-        .partition_map(|(position, item, parent_container_item_id)| {
-            if let Some(item) = item {
-                // Try using the next available id in the sequence as the default for new items.
-                let new_item_id = NonZeroU64::new(u64::try_from(*next_id)
-                    .expect("We are willing to crash if the next entity id overflows \
-                    (or is otherwise negative).")).expect("next_id should not be zero, either");
+    // Use breadth-first search to recurse into containers/modular weapons to store
+    // their parts
+    let mut bfs_queue: VecDeque<_> = inventory.chain(loadout).collect();
+    let mut upserts = Vec::new();
+    let mut depth = HashMap::new();
+    depth.insert(inventory_container_id, 0);
+    depth.insert(loadout_container_id, 0);
+    while let Some((position, item, parent_container_item_id)) = bfs_queue.pop_front() {
+        // Construct new items.
+        if let Some(item) = item {
+            // Try using the next available id in the sequence as the default for new items.
+            let new_item_id = NonZeroU64::new(u64::try_from(*next_id).expect(
+                "We are willing to crash if the next entity id overflows (or is otherwise \
+                 negative).",
+            ))
+            .expect("next_id should not be zero, either");
 
-                let comp = item.get_item_id_for_database();
-                Either::Left(ItemModelPair {
-                    model: Item {
-                        item_definition_id: item.item_definition_id().to_owned(),
-                        position,
-                        parent_container_item_id,
-                        // Fast (kinda) path: acquire read for the common case where an id has
-                        // already been assigned.
-                        item_id: comp.load()
-                            // First, we filter out "impossible" entity IDs--IDs that are larger
-                            // than the maximum sequence value (next_id). This is important
-                            // because we update the item ID atomically, *before* we know whether
-                            // this transaction has completed successfully, and we don't abort the
-                            // process on a failed transaction. In such cases, new IDs from
-                            // aborted transactions will show up as having a higher value than the
-                            // current max sequence number. Because the only place that modifies
-                            // the item_id through a shared reference is (supposed to be) this
-                            // function, which is part of the batch update transaction, we can
-                            // assume that any rollback during the update would fail to insert
-                            // *any* new items for the current character; this means that any items
-                            // inserted between the failure and now (i.e. values less than next_id)
-                            // would either not be items at all, or items belonging to other
-                            // characters, leading to an easily detectable SQLite failure that we
-                            // can use to atomically set the id back to None (if it was still the
-                            // same bad value).
-                            //
-                            // Note that this logic only requires that all the character's items be
-                            // updated within the same serializable transaction; the argument does
-                            // not depend on SQLite-specific details (like locking) or on the fact
-                            // that a user's transactions are always serialized on their own
-                            // session. Also note that since these IDs are in-memory, we don't
-                            // have to worry about their values during, e.g., a process crash;
-                            // serializability will take care of us in those cases. Finally, note
-                            // that while we have not yet implemented the "liveness" part of the
-                            // algorithm (resetting ids back to None if we detect errors), this is
-                            // not needed for soundness, and this part can be deferred until we
-                            // switch to an execution model where such races are actually possible
-                            // during normal gameplay.
-                            .and_then(|item_id| Some(if item_id >= new_item_id {
-                                // Try to atomically exchange with our own, "correct" next id.
-                                match comp.compare_exchange(Some(item_id), Some(new_item_id)) {
-                                    Ok(_) => {
-                                        let item_id = *next_id;
-                                        // We won the race, use next_id and increment it.
-                                        *next_id += 1;
-                                        item_id
-                                    },
-                                    Err(item_id) => {
-                                        // We raced with someone, and they won the race, so we know
-                                        // this transaction must abort unless they finish first. So,
-                                        // just assume they will finish first, and use their assigned
-                                        // item_id.
-                                        EntityId::try_from(item_id?.get())
-                                            .expect("We always choose legal EntityIds as item ids")
-                                    },
-                                }
-                            } else { EntityId::try_from(item_id.get()).expect("We always choose legal EntityIds as item ids") }))
-                            // Finally, we're in the case where no entity was assigned yet (either
-                            // ever, or due to corrections after a rollback). This proceeds
-                            // identically to the "impossible ID" case.
-                            .unwrap_or_else(|| {
-                                // Try to atomically compare with the empty id.
-                                match comp.compare_exchange(None, Some(new_item_id)) {
-                                    Ok(_) => {
-                                        let item_id = *next_id;
-                                        *next_id += 1;
-                                        item_id
-                                    },
-                                    Err(item_id) => {
-                                        EntityId::try_from(item_id.expect("TODO: Fix handling of reset to None when we have concurrent writers.").get())
-                                            .expect("We always choose legal EntityIds as item ids")
-                                    },
-                                }
-                            }),
-                        stack_size: if item.is_stackable() {
-                            item.amount() as i32
-                        } else {
-                            1
-                        },
-                    },
-                    // Continue to remember the atomic, in case we detect an error later and want
-                    // to roll back to preserve liveness.
-                    comp,
-                })
-            } else {
-                Either::Right((parent_container_item_id, position))
-            }
-        })
+            // Fast (kinda) path: acquire read for the common case where an id has
+            // already been assigned.
+            let comp = item.get_item_id_for_database();
+            let item_id = comp.load()
+                // First, we filter out "impossible" entity IDs--IDs that are larger
+                // than the maximum sequence value (next_id). This is important
+                // because we update the item ID atomically, *before* we know whether
+                // this transaction has completed successfully, and we don't abort the
+                // process on a failed transaction. In such cases, new IDs from
+                // aborted transactions will show up as having a higher value than the
+                // current max sequence number. Because the only place that modifies
+                // the item_id through a shared reference is (supposed to be) this
+                // function, which is part of the batch update transaction, we can
+                // assume that any rollback during the update would fail to insert
+                // *any* new items for the current character; this means that any items
+                // inserted between the failure and now (i.e. values less than next_id)
+                // would either not be items at all, or items belonging to other
+                // characters, leading to an easily detectable SQLite failure that we
+                // can use to atomically set the id back to None (if it was still the
+                // same bad value).
+                //
+                // Note that this logic only requires that all the character's items be
+                // updated within the same serializable transaction; the argument does
+                // not depend on SQLite-specific details (like locking) or on the fact
+                // that a user's transactions are always serialized on their own
+                // session. Also note that since these IDs are in-memory, we don't
+                // have to worry about their values during, e.g., a process crash;
+                // serializability will take care of us in those cases. Finally, note
+                // that while we have not yet implemented the "liveness" part of the
+                // algorithm (resetting ids back to None if we detect errors), this is
+                // not needed for soundness, and this part can be deferred until we
+                // switch to an execution model where such races are actually possible
+                // during normal gameplay.
+                .and_then(|item_id| Some(if item_id >= new_item_id {
+                    // Try to atomically exchange with our own, "correct" next id.
+                    match comp.compare_exchange(Some(item_id), Some(new_item_id)) {
+                        Ok(_) => {
+                            let item_id = *next_id;
+                            // We won the race, use next_id and increment it.
+                            *next_id += 1;
+                            item_id
+                        },
+                        Err(item_id) => {
+                            // We raced with someone, and they won the race, so we know
+                            // this transaction must abort unless they finish first. So,
+                            // just assume they will finish first, and use their assigned
+                            // item_id.
+                            EntityId::try_from(item_id?.get())
+                                .expect("We always choose legal EntityIds as item ids")
+                        },
+                    }
+                } else { EntityId::try_from(item_id.get()).expect("We always choose legal EntityIds as item ids") }))
+                // Finally, we're in the case where no entity was assigned yet (either
+                // ever, or due to corrections after a rollback). This proceeds
+                // identically to the "impossible ID" case.
+                .unwrap_or_else(|| {
+                    // Try to atomically compare with the empty id.
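+                    // (On success we claim new_item_id and advance the sequence; on
+                    // failure another writer has already stored an id, so we adopt
+                    // theirs rather than allocating a fresh one.)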
+                    match comp.compare_exchange(None, Some(new_item_id)) {
+                        Ok(_) => {
+                            let item_id = *next_id;
+                            *next_id += 1;
+                            item_id
+                        },
+                        Err(item_id) => {
+                            EntityId::try_from(item_id.expect("TODO: Fix handling of reset to None when we have concurrent writers.").get())
+                                .expect("We always choose legal EntityIds as item ids")
+                        },
+                    }
+                });
+
+            depth.insert(item_id, depth[&parent_container_item_id] + 1);
+
+            for (i, component) in item.components().iter().enumerate() {
+                // Recursive items' children have the same position as their parents, and since
+                // they occur afterwards in the topological sort of the parent graph (which
+                // should still always be a tree, even with recursive items), we
+                // have enough information to put them back into their parents on load
+                bfs_queue.push_back((format!("component_{}", i), Some(component), item_id));
+            }
+
+            let upsert = ItemModelPair {
+                model: Item {
+                    item_definition_id: item.item_definition_id().to_owned(),
+                    position,
+                    parent_container_item_id,
+                    item_id,
+                    stack_size: if item.is_stackable() {
+                        item.amount() as i32
+                    } else {
+                        1
+                    },
+                },
+                // Continue to remember the atomic, in case we detect an error later and want
+                // to roll back to preserve liveness.
+                comp,
+            };
+            upserts.push(upsert);
+        }
+    }
+    upserts.sort_by_key(|pair| (depth[&pair.model.item_id], pair.model.item_id));
+    tracing::debug!("upserts: {:#?}", upserts);
+    upserts
 }
 
 pub fn convert_body_to_database_json(body: &CompBody) -> Result<String, Error> {
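Why sort by `(depth, item_id)` before upserting: rows then reach SQLite parents-first, so each row's `parent_container_item_id` already exists when the row is inserted (except for the swap case handled by the deferred-constraints pragma above). A toy illustration of the depth bookkeeping, with made-up ids:

```rust
use std::collections::HashMap;

fn main() {
    // parent_container_item_id relation: 1 is the pseudo-container,
    // 2 is a weapon in it, 3 and 4 are the weapon's components.
    let parent: HashMap<i64, i64> = HashMap::from([(2, 1), (3, 2), (4, 2)]);

    // Depth as recorded during the BFS in convert_items_to_database_items;
    // the pseudo-container is seeded at depth 0, and ids must be visited
    // parents-first (which the BFS guarantees).
    let mut depth: HashMap<i64, i64> = HashMap::from([(1, 0)]);
    for &id in &[2, 3, 4] {
        depth.insert(id, depth[&parent[&id]] + 1);
    }

    // Rows discovered in some arbitrary order...
    let mut upserts: Vec<i64> = vec![4, 2, 3];
    // ...are reordered so that every parent row precedes its children.
    upserts.sort_by_key(|id| (depth[id], *id));
    assert_eq!(upserts, vec![2, 3, 4]);
}
```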
@@ -192,23 +215,30 @@ pub fn convert_waypoint_from_database_json(position: &str) -> Result<Waypoint, Error>
 
 pub fn convert_inventory_from_database_items(
+    inventory_container_id: i64,
     inventory_items: &[Item],
+    loadout_container_id: i64,
     loadout_items: &[Item],
 ) -> Result<Inventory, Error> {
     // Loadout items must be loaded before inventory items since loadout items
     // provide inventory slots. Since items stored inside loadout items actually
     // have their parent_container_item_id as the loadout pseudo-container we rely
     // on populating the loadout items first, and then inserting the items into the
-    // inventory at the correct position. When we want to support items inside the
-    // player's inventory containing other items (such as "right click to
-    // unwrap" gifts perhaps) then we will need to refactor inventory/loadout
-    // persistence to traverse the tree of items and load them from the root
-    // down.
-    let loadout = convert_loadout_from_database_items(loadout_items)?;
+    // inventory at the correct position.
+    let loadout = convert_loadout_from_database_items(loadout_container_id, loadout_items)?;
     let mut inventory = Inventory::new_with_loadout(loadout);
+    let mut item_indices = HashMap::new();
+
+    for (i, db_item) in inventory_items.iter().enumerate() {
+        item_indices.insert(db_item.item_id, i);
 
-    for db_item in inventory_items.iter() {
         let mut item = get_item_from_asset(db_item.item_definition_id.as_str())?;
 
         // NOTE: Since this is freshly loaded, the atomic is *unique.*
@@ -234,55 +264,99 @@ pub fn convert_inventory_from_database_items(
         // Insert item into inventory
 
         // Slot position
-        let slot: InvSlotId = serde_json::from_str(&db_item.position).map_err(|_| {
-            Error::ConversionError(format!(
-                "Failed to parse item position: {:?}",
-                &db_item.position
-            ))
-        })?;
+        let slot = |s: &str| {
+            serde_json::from_str::<InvSlotId>(s).map_err(|_| {
+                Error::ConversionError(format!(
+                    "Failed to parse item position: {:?}",
+                    &db_item.position
+                ))
+            })
+        };
 
-        let insert_res = inventory.insert_at(slot, item).map_err(|_| {
-            // If this happens there were too many items in the database for the current
-            // inventory size
-            Error::ConversionError(format!(
-                "Error inserting item into inventory, position: {:?}",
-                slot
-            ))
-        })?;
+        if db_item.parent_container_item_id == inventory_container_id {
+            let slot = slot(&db_item.position)?;
+            let insert_res = inventory.insert_at(slot, item).map_err(|_| {
+                // If this happens there were too many items in the database for the current
+                // inventory size
+                Error::ConversionError(format!(
+                    "Error inserting item into inventory, position: {:?}",
+                    slot
+                ))
+            })?;
 
-        if insert_res.is_some() {
-            // If inventory.insert returns an item, it means it was swapped for an item that
-            // already occupied the slot. Multiple items being stored in the database for
-            // the same slot is an error.
-            return Err(Error::ConversionError(
-                "Inserted an item into the same slot twice".to_string(),
-            ));
+            if insert_res.is_some() {
+                // If inventory.insert returns an item, it means it was swapped for an item that
+                // already occupied the slot. Multiple items being stored in the database for
+                // the same slot is an error.
+                return Err(Error::ConversionError(
+                    "Inserted an item into the same slot twice".to_string(),
+                ));
+            }
+        } else if let Some(&j) = item_indices.get(&db_item.parent_container_item_id) {
+            if let Some(Some(parent)) = inventory.slot_mut(slot(&inventory_items[j].position)?) {
+                parent.add_component(item);
+            } else {
+                return Err(Error::ConversionError(format!(
+                    "Parent slot {} for component {} was empty even though it occurred earlier in \
+                     the loop?",
+                    db_item.parent_container_item_id, db_item.item_id
+                )));
+            }
+        } else {
+            return Err(Error::ConversionError(format!(
+                "Couldn't find parent item {} before item {} in inventory",
+                db_item.parent_container_item_id, db_item.item_id
+            )));
         }
     }
 
     Ok(inventory)
 }
 
-pub fn convert_loadout_from_database_items(database_items: &[Item]) -> Result<Loadout, Error> {
+pub fn convert_loadout_from_database_items(
+    loadout_container_id: i64,
+    database_items: &[Item],
+) -> Result<Loadout, Error> {
     let loadout_builder = LoadoutBuilder::new();
     let mut loadout = loadout_builder.build();
+    let mut item_indices = HashMap::new();
+
+    for (i, db_item) in database_items.iter().enumerate() {
+        item_indices.insert(db_item.item_id, i);
 
-    for db_item in database_items.iter() {
         let item = get_item_from_asset(db_item.item_definition_id.as_str())?;
 
+        // NOTE: item id is currently *unique*, so we can store the ID safely.
let comp = item.get_item_id_for_database(); comp.store(Some(NonZeroU64::try_from(db_item.item_id as u64).map_err( |_| Error::ConversionError("Item with zero item_id".to_owned()), )?)); - loadout - .set_item_at_slot_using_persistence_key(&db_item.position, item) - .map_err(|err| match err { - LoadoutError::InvalidPersistenceKey => Error::ConversionError(format!( - "Invalid persistence key: {}", - &db_item.position - )), - })?; + let convert_error = |err| match err { + LoadoutError::InvalidPersistenceKey => { + Error::ConversionError(format!("Invalid persistence key: {}", &db_item.position)) + }, + LoadoutError::NoParentAtSlot => { + Error::ConversionError(format!("No parent item at slot: {}", &db_item.position)) + }, + }; + + if db_item.parent_container_item_id == loadout_container_id { + loadout + .set_item_at_slot_using_persistence_key(&db_item.position, item) + .map_err(convert_error)?; + } else if let Some(&j) = item_indices.get(&db_item.parent_container_item_id) { + loadout + .update_item_at_slot_using_persistence_key(&database_items[j].position, |parent| { + parent.add_component(item); + }) + .map_err(convert_error)?; + } else { + return Err(Error::ConversionError(format!( + "Couldn't find parent item {} before item {} in loadout", + db_item.parent_container_item_id, db_item.item_id + ))); + } } Ok(loadout)
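Taken together, the two sides form a round trip: components are stored with position `component_{i}` and their parent's id, and reattached on load by looking the parent up in `item_indices` (which works because the BFS ordering guarantees the parent was processed first). A condensed, engine-free sketch of the reattachment loop — `DbItem`/`LoadedItem`/`rebuild` and the asset ids are stand-ins invented for this example, and only one nesting level is handled, which is all modular weapons currently need:

```rust
use std::collections::HashMap;

struct DbItem {
    item_id: i64,
    parent_container_item_id: i64,
    item_definition_id: String,
}

struct LoadedItem {
    definition: String,
    components: Vec<LoadedItem>,
}

/// Rebuild nested items from rows that arrive parents-first
/// (the order load_items_bfs produces).
fn rebuild(rows: &[DbItem], container_id: i64) -> Result<Vec<LoadedItem>, String> {
    let mut item_indices: HashMap<i64, usize> = HashMap::new(); // db id -> index in `roots`
    let mut roots: Vec<LoadedItem> = Vec::new();
    for row in rows {
        let item = LoadedItem {
            definition: row.item_definition_id.clone(),
            components: Vec::new(),
        };
        if row.parent_container_item_id == container_id {
            // Top-level item: goes straight into the container.
            item_indices.insert(row.item_id, roots.len());
            roots.push(item);
        } else if let Some(&j) = item_indices.get(&row.parent_container_item_id) {
            // Component: attach to the already-loaded parent.
            roots[j].components.push(item);
        } else {
            return Err(format!(
                "parent {} not loaded before item {}",
                row.parent_container_item_id, row.item_id
            ));
        }
    }
    Ok(roots)
}

fn main() {
    let rows = vec![
        DbItem { item_id: 2, parent_container_item_id: 1, item_definition_id: "modular.sword".into() },
        DbItem { item_id: 3, parent_container_item_id: 2, item_definition_id: "modular.sword.blade".into() },
    ];
    let roots = rebuild(&rows, 1).unwrap();
    assert_eq!(roots.len(), 1);
    assert_eq!(roots[0].components.len(), 1);
}
```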