Mirror of https://gitlab.com/veloren/veloren.git (synced 2024-08-30 18:12:32 +00:00)
Implement persistence for modular weapons.
This stores the components as children of the item that contains them via the DB's `parent_container_item_id` feature, and ensures that things are loaded in a good order with breadth-first search.

Squashed fixes:
- Fix some constraint violations that occurred when swapping inventory items.
- Comment out recipes for modular weapons.
- Make update_item_at_slot_using_persistence_key and is_modular more idiomatic.
- Add changelog entry.
- Document `defer_foreign_keys` usage.
parent 8bdbf4f7c9
commit c489d095df
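The scheme stores each component row with its containing item's id in `parent_container_item_id`, so a breadth-first traversal from a root container visits parents strictly before their children. A minimal sketch of that ordering, assuming a simplified `Row` type in place of the Diesel model (the real loader below issues one query per BFS frontier):

    use std::collections::VecDeque;

    #[derive(Clone)]
    struct Row {
        item_id: i64,
        parent_container_item_id: i64,
    }

    // Returns rows in an order where every parent appears before its
    // children, the invariant the conversion code relies on when loading.
    fn load_items_bfs(rows: &[Row], root: i64) -> Vec<Row> {
        let mut out = Vec::new();
        let mut queue = VecDeque::from([root]);
        while let Some(id) = queue.pop_front() {
            for row in rows.iter().filter(|r| r.parent_container_item_id == id) {
                queue.push_back(row.item_id);
                out.push(row.clone());
            }
        }
        out
    }

    fn main() {
        // A sword (id 2) in the inventory container (id 1) with two components (ids 3, 4).
        let rows = vec![
            Row { item_id: 2, parent_container_item_id: 1 },
            Row { item_id: 3, parent_container_item_id: 2 },
            Row { item_id: 4, parent_container_item_id: 2 },
        ];
        let order: Vec<i64> = load_items_bfs(&rows, 1).iter().map(|r| r.item_id).collect();
        assert_eq!(order, vec![2, 3, 4]); // parents strictly before children
    }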
@@ -31,6 +31,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Basic NPC interaction
 - Lights in dungeons
 - Trading system (bound to the `R` key by default, currently only works with players)
+- Support for modular weapons.
 
 ### Changed
 
@@ -240,13 +240,13 @@ impl ItemDef {
     }
 
     pub fn is_modular(&self) -> bool {
-        match &self.kind {
-            ItemKind::Tool(tool) => match tool.stats {
-                tool::StatKind::Direct { .. } => false,
-                tool::StatKind::Modular => true,
-            },
-            _ => false,
-        }
+        matches!(
+            &self.kind,
+            ItemKind::Tool(tool::Tool {
+                stats: tool::StatKind::Modular,
+                ..
+            })
+        )
     }
 
 #[cfg(test)]
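The rewritten `is_modular` leans on `matches!` accepting the same nested struct patterns as a `match` arm, collapsing the two-level match into one expression. A standalone illustration with hypothetical stand-in types (not the real `ItemKind`/`Tool` definitions):

    enum StatKind { Direct, Modular }
    struct Tool { stats: StatKind, hands: u8 }
    enum ItemKind { Tool(Tool), Lantern }

    fn is_modular(kind: &ItemKind) -> bool {
        // true only for tools whose stats are Modular; everything else falls through to false
        matches!(kind, ItemKind::Tool(Tool { stats: StatKind::Modular, .. }))
    }

    fn main() {
        assert!(is_modular(&ItemKind::Tool(Tool { stats: StatKind::Modular, hands: 2 })));
        assert!(!is_modular(&ItemKind::Tool(Tool { stats: StatKind::Direct, hands: 1 })));
        assert!(!is_modular(&ItemKind::Lantern));
    }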
@@ -372,25 +372,16 @@ impl Item {
             components.extend(input_components.iter().map(|comp| comp.duplicate()));
         }
 
-        let kind = inner_item.kind();
-        let item_config = if let ItemKind::Tool(_) = kind {
-            Some(Box::new(ItemConfig::from((
-                kind,
-                &*components,
-                &inner_item.ability_map,
-            ))))
-        } else {
-            None
-        };
-
-        Item {
+        let mut item = Item {
             item_id: Arc::new(AtomicCell::new(None)),
             amount: NonZeroU32::new(1).unwrap(),
             components,
             slots: vec![None; inner_item.slots as usize],
             item_def: inner_item,
-            item_config,
-        }
+            item_config: None,
+        };
+        item.update_item_config();
+        item
     }
 
     /// Creates a new instance of an `Item` from the provided asset identifier
@@ -478,6 +469,27 @@ impl Item {
         }
     }
 
+    pub fn add_component(&mut self, component: Item) {
+        // TODO: hook for typechecking (not needed atm if this is only used by DB
+        // persistence, but will definitely be needed once enhancement slots are
+        // added to prevent putting a sword into another sword)
+        self.components.push(component);
+        // adding a component changes the stats, so recalculate the ItemConfig
+        self.update_item_config();
+    }
+
+    fn update_item_config(&mut self) {
+        self.item_config = if let ItemKind::Tool(_) = self.kind() {
+            Some(Box::new(ItemConfig::from((
+                self.kind(),
+                self.components(),
+                &self.item_def.ability_map,
+            ))))
+        } else {
+            None
+        };
+    }
+
     /// Returns an iterator that drains items contained within the item's slots
     pub fn drain(&mut self) -> impl Iterator<Item = Item> + '_ {
         self.slots.iter_mut().filter_map(|x| mem::take(x))
@@ -46,6 +46,7 @@ pub(super) struct LoadoutSlotId {
 
 pub enum LoadoutError {
     InvalidPersistenceKey,
+    NoParentAtSlot,
 }
 
 impl Loadout {
@@ -134,6 +135,25 @@ impl Loadout {
         }
     }
 
+    pub fn update_item_at_slot_using_persistence_key<F: FnOnce(&mut Item)>(
+        &mut self,
+        persistence_key: &str,
+        f: F,
+    ) -> Result<(), LoadoutError> {
+        self.slots
+            .iter_mut()
+            .find(|loadout_slot| loadout_slot.persistence_key == persistence_key)
+            .map_or(Err(LoadoutError::InvalidPersistenceKey), |loadout_slot| {
+                loadout_slot
+                    .slot
+                    .as_mut()
+                    .map_or(Err(LoadoutError::NoParentAtSlot), |item| {
+                        f(item);
+                        Ok(())
+                    })
+            })
+    }
+
     /// Swaps the contents of two loadout slots
     pub(super) fn swap_slots(&mut self, equip_slot_a: EquipSlot, equip_slot_b: EquipSlot) {
         if self.slot(equip_slot_b).is_none() || self.slot(equip_slot_b).is_none() {
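The nested `map_or` chain in `update_item_at_slot_using_persistence_key` separates the two failure modes: no slot matches the persistence key, versus a matching slot that is empty. The same shape on plain `Option`s, with string errors standing in for `LoadoutError` (a sketch, not the real types):

    fn update<F: FnOnce(&mut i32)>(
        slot: Option<&mut Option<i32>>,
        f: F,
    ) -> Result<(), &'static str> {
        // outer map_or: was a slot with that key found at all?
        slot.map_or(Err("InvalidPersistenceKey"), |slot| {
            // inner map_or: does the slot actually hold an item?
            slot.as_mut().map_or(Err("NoParentAtSlot"), |item| {
                f(item);
                Ok(())
            })
        })
    }

    fn main() {
        let mut occupied = Some(1);
        assert_eq!(update(Some(&mut occupied), |x| *x += 1), Ok(()));
        assert_eq!(occupied, Some(2));

        let mut empty: Option<i32> = None;
        assert_eq!(update(Some(&mut empty), |_| ()), Err("NoParentAtSlot"));
        assert_eq!(update(None, |_| ()), Err("InvalidPersistenceKey"));
    }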
@@ -350,7 +350,7 @@ impl Inventory {
         }
     }
 
-    fn slot_mut(&mut self, inv_slot_id: InvSlotId) -> Option<&mut InvSlot> {
+    pub fn slot_mut(&mut self, inv_slot_id: InvSlotId) -> Option<&mut InvSlot> {
         match SlotId::from(inv_slot_id) {
             SlotId::Inventory(slot_idx) => self.slots.get_mut(slot_idx),
             SlotId::Loadout(loadout_slot_id) => self.loadout.inv_slot_mut(loadout_slot_id),
@@ -119,7 +119,13 @@ impl assets::Compound for RecipeBook {
         }
 
         let mut raw = cache.load::<RawRecipeBook>(specifier)?.read().clone();
-        modular::append_modular_recipes(&mut raw);
+
+        // Avoid showing purple-question-box recipes until the assets are added
+        // (the `if false` is needed because commenting out the call will add a warning
+        // that there are no other uses of append_modular_recipes)
+        if false {
+            modular::append_modular_recipes(&mut raw);
+        }
 
         let recipes = raw
             .0
@@ -27,7 +27,7 @@ use crate::{
 use common::character::{CharacterId, CharacterItem, MAX_CHARACTERS_PER_PLAYER};
 use core::ops::Range;
 use diesel::{prelude::*, sql_query, sql_types::BigInt};
-use std::sync::Arc;
+use std::{collections::VecDeque, sync::Arc};
 use tracing::{error, trace, warn};
 
 /// Private module for very tightly coupled database conversion methods. In
|
|||||||
loadout_container_id: EntityId,
|
loadout_container_id: EntityId,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// BFS the inventory/loadout to ensure that each is topologically sorted in the
|
||||||
|
/// sense required by convert_inventory_from_database_items to support recursive
|
||||||
|
/// items
|
||||||
|
pub fn load_items_bfs(connection: VelorenTransaction, root: i64) -> Result<Vec<Item>, Error> {
|
||||||
|
use schema::item::dsl::*;
|
||||||
|
let mut items = Vec::new();
|
||||||
|
let mut queue = VecDeque::new();
|
||||||
|
queue.push_front(root);
|
||||||
|
while let Some(id) = queue.pop_front() {
|
||||||
|
let frontier = item
|
||||||
|
.filter(parent_container_item_id.eq(id))
|
||||||
|
.load::<Item>(&*connection)?;
|
||||||
|
for i in frontier.iter() {
|
||||||
|
queue.push_back(i.item_id);
|
||||||
|
}
|
||||||
|
items.extend(frontier);
|
||||||
|
}
|
||||||
|
Ok(items)
|
||||||
|
}
|
||||||
|
|
||||||
/// Load stored data for a character.
|
/// Load stored data for a character.
|
||||||
///
|
///
|
||||||
/// After first logging in, and after a character is selected, we fetch this
|
/// After first logging in, and after a character is selected, we fetch this
|
||||||
@ -59,19 +79,12 @@ pub fn load_character_data(
|
|||||||
char_id: CharacterId,
|
char_id: CharacterId,
|
||||||
connection: VelorenTransaction,
|
connection: VelorenTransaction,
|
||||||
) -> CharacterDataResult {
|
) -> CharacterDataResult {
|
||||||
use schema::{body::dsl::*, character::dsl::*, item::dsl::*, skill_group::dsl::*};
|
use schema::{body::dsl::*, character::dsl::*, skill_group::dsl::*};
|
||||||
|
|
||||||
let character_containers = get_pseudo_containers(connection, char_id)?;
|
let character_containers = get_pseudo_containers(connection, char_id)?;
|
||||||
|
|
||||||
// TODO: Make inventory and loadout item loading work with recursive items when
|
let inventory_items = load_items_bfs(connection, character_containers.inventory_container_id)?;
|
||||||
// container items are supported
|
let loadout_items = load_items_bfs(connection, character_containers.loadout_container_id)?;
|
||||||
let inventory_items = item
|
|
||||||
.filter(parent_container_item_id.eq(character_containers.inventory_container_id))
|
|
||||||
.load::<Item>(&*connection)?;
|
|
||||||
|
|
||||||
let loadout_items = item
|
|
||||||
.filter(parent_container_item_id.eq(character_containers.loadout_container_id))
|
|
||||||
.load::<Item>(&*connection)?;
|
|
||||||
|
|
||||||
let character_data = character
|
let character_data = character
|
||||||
.filter(
|
.filter(
|
||||||
@ -109,7 +122,12 @@ pub fn load_character_data(
|
|||||||
Ok((
|
Ok((
|
||||||
convert_body_from_database(&char_body)?,
|
convert_body_from_database(&char_body)?,
|
||||||
convert_stats_from_database(character_data.alias, &skill_data, &skill_group_data),
|
convert_stats_from_database(character_data.alias, &skill_data, &skill_group_data),
|
||||||
convert_inventory_from_database_items(&inventory_items, &loadout_items)?,
|
convert_inventory_from_database_items(
|
||||||
|
character_containers.inventory_container_id,
|
||||||
|
&inventory_items,
|
||||||
|
character_containers.loadout_container_id,
|
||||||
|
&loadout_items,
|
||||||
|
)?,
|
||||||
char_waypoint,
|
char_waypoint,
|
||||||
))
|
))
|
||||||
}
|
}
|
||||||
@ -125,7 +143,7 @@ pub fn load_character_list(
|
|||||||
player_uuid_: &str,
|
player_uuid_: &str,
|
||||||
connection: VelorenTransaction,
|
connection: VelorenTransaction,
|
||||||
) -> CharacterListResult {
|
) -> CharacterListResult {
|
||||||
use schema::{body::dsl::*, character::dsl::*, item::dsl::*};
|
use schema::{body::dsl::*, character::dsl::*};
|
||||||
|
|
||||||
let result = character
|
let result = character
|
||||||
.filter(player_uuid.eq(player_uuid_))
|
.filter(player_uuid.eq(player_uuid_))
|
||||||
@ -149,13 +167,10 @@ pub fn load_character_list(
|
|||||||
LOADOUT_PSEUDO_CONTAINER_POSITION,
|
LOADOUT_PSEUDO_CONTAINER_POSITION,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// TODO: Make work with recursive items if containers are ever supported as part
|
let loadout_items = load_items_bfs(connection, loadout_container_id)?;
|
||||||
// of a loadout
|
|
||||||
let loadout_items = item
|
|
||||||
.filter(parent_container_item_id.eq(loadout_container_id))
|
|
||||||
.load::<Item>(&*connection)?;
|
|
||||||
|
|
||||||
let loadout = convert_loadout_from_database_items(&loadout_items)?;
|
let loadout =
|
||||||
|
convert_loadout_from_database_items(loadout_container_id, &loadout_items)?;
|
||||||
|
|
||||||
Ok(CharacterItem {
|
Ok(CharacterItem {
|
||||||
character: char,
|
character: char,
|
||||||
@ -276,7 +291,7 @@ pub fn create_character(
|
|||||||
let mut inserts = Vec::new();
|
let mut inserts = Vec::new();
|
||||||
|
|
||||||
get_new_entity_ids(connection, |mut next_id| {
|
get_new_entity_ids(connection, |mut next_id| {
|
||||||
let (inserts_, _deletes) = convert_items_to_database_items(
|
let inserts_ = convert_items_to_database_items(
|
||||||
loadout_container_id,
|
loadout_container_id,
|
||||||
&inventory,
|
&inventory,
|
||||||
inventory_container_id,
|
inventory_container_id,
|
||||||
@ -541,7 +556,7 @@ pub fn update(
|
|||||||
// First, get all the entity IDs for any new items, and identify which slots to
|
// First, get all the entity IDs for any new items, and identify which slots to
|
||||||
// upsert and which ones to delete.
|
// upsert and which ones to delete.
|
||||||
get_new_entity_ids(connection, |mut next_id| {
|
get_new_entity_ids(connection, |mut next_id| {
|
||||||
let (upserts_, _deletes) = convert_items_to_database_items(
|
let upserts_ = convert_items_to_database_items(
|
||||||
pseudo_containers.loadout_container_id,
|
pseudo_containers.loadout_container_id,
|
||||||
&inventory,
|
&inventory,
|
||||||
pseudo_containers.inventory_container_id,
|
pseudo_containers.inventory_container_id,
|
||||||
@ -553,9 +568,17 @@ pub fn update(
|
|||||||
|
|
||||||
// Next, delete any slots we aren't upserting.
|
// Next, delete any slots we aren't upserting.
|
||||||
trace!("Deleting items for character_id {}", char_id);
|
trace!("Deleting items for character_id {}", char_id);
|
||||||
let existing_items = parent_container_item_id
|
let mut existing_item_ids: Vec<i64> = vec![
|
||||||
.eq(pseudo_containers.inventory_container_id)
|
pseudo_containers.inventory_container_id,
|
||||||
.or(parent_container_item_id.eq(pseudo_containers.loadout_container_id));
|
pseudo_containers.loadout_container_id,
|
||||||
|
];
|
||||||
|
for it in load_items_bfs(connection, pseudo_containers.inventory_container_id)? {
|
||||||
|
existing_item_ids.push(it.item_id);
|
||||||
|
}
|
||||||
|
for it in load_items_bfs(connection, pseudo_containers.loadout_container_id)? {
|
||||||
|
existing_item_ids.push(it.item_id);
|
||||||
|
}
|
||||||
|
let existing_items = parent_container_item_id.eq_any(existing_item_ids);
|
||||||
let non_upserted_items = item_id.ne_all(
|
let non_upserted_items = item_id.ne_all(
|
||||||
upserts
|
upserts
|
||||||
.iter()
|
.iter()
|
||||||
@ -573,7 +596,13 @@ pub fn update(
|
|||||||
if expected_upsert_count > 0 {
|
if expected_upsert_count > 0 {
|
||||||
let (upserted_items, upserted_comps_): (Vec<_>, Vec<_>) = upserts
|
let (upserted_items, upserted_comps_): (Vec<_>, Vec<_>) = upserts
|
||||||
.into_iter()
|
.into_iter()
|
||||||
.map(|model_pair| (model_pair.model, model_pair.comp))
|
.map(|model_pair| {
|
||||||
|
debug_assert_eq!(
|
||||||
|
model_pair.model.item_id,
|
||||||
|
model_pair.comp.load().unwrap().get() as i64
|
||||||
|
);
|
||||||
|
(model_pair.model, model_pair.comp)
|
||||||
|
})
|
||||||
.unzip();
|
.unzip();
|
||||||
upserted_comps = upserted_comps_;
|
upserted_comps = upserted_comps_;
|
||||||
trace!(
|
trace!(
|
||||||
@ -582,9 +611,17 @@ pub fn update(
|
|||||||
char_id
|
char_id
|
||||||
);
|
);
|
||||||
|
|
||||||
|
// When moving inventory items around, foreign key constraints on
|
||||||
|
// `parent_container_item_id` can be temporarily violated by one upsert, but
|
||||||
|
// restored by another upsert. Deferred constraints allow SQLite to check this
|
||||||
|
// when committing the transaction. The `defer_foreign_keys` pragma treats the
|
||||||
|
// foreign key constraints as deferred for the next transaction (it turns itself
|
||||||
|
// off at the commit boundary). https://sqlite.org/foreignkeys.html#fk_deferred
|
||||||
|
connection.execute("PRAGMA defer_foreign_keys = ON;")?;
|
||||||
let upsert_count = diesel::replace_into(item)
|
let upsert_count = diesel::replace_into(item)
|
||||||
.values(&upserted_items)
|
.values(&upserted_items)
|
||||||
.execute(&*connection)?;
|
.execute(&*connection)?;
|
||||||
|
trace!("upsert_count: {}", upsert_count);
|
||||||
if upsert_count != expected_upsert_count {
|
if upsert_count != expected_upsert_count {
|
||||||
return Err(Error::OtherError(format!(
|
return Err(Error::OtherError(format!(
|
||||||
"Expected upsertions={}, actual={}, for char_id {}--unsafe to continue \
|
"Expected upsertions={}, actual={}, for char_id {}--unsafe to continue \
|
||||||
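The pragma's effect can be reproduced in isolation. A sketch using rusqlite rather than Diesel (an assumption purely for illustration; the commit itself goes through Diesel): the `UPDATE` points item 2 at a parent row that does not exist yet, which deferred checking tolerates because the parent is inserted before `COMMIT`:

    use rusqlite::Connection;

    fn main() -> rusqlite::Result<()> {
        let conn = Connection::open_in_memory()?;
        conn.execute_batch(
            "PRAGMA foreign_keys = ON;
             CREATE TABLE item (
                 item_id INTEGER PRIMARY KEY,
                 parent_container_item_id INTEGER REFERENCES item(item_id)
             );
             INSERT INTO item VALUES (1, NULL), (2, 1);
             BEGIN;
             PRAGMA defer_foreign_keys = ON; -- turns itself off at the commit boundary
             UPDATE item SET parent_container_item_id = 3 WHERE item_id = 2;
             INSERT INTO item VALUES (3, 1);
             COMMIT;",
        )?;
        Ok(())
    }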
@@ -21,16 +21,17 @@ use common::{
 };
 use core::{convert::TryFrom, num::NonZeroU64};
 use hashbrown::HashMap;
-use itertools::{Either, Itertools};
-use std::sync::Arc;
+use std::{collections::VecDeque, sync::Arc};
 
+#[derive(Debug)]
 pub struct ItemModelPair {
     pub comp: Arc<common::comp::item::ItemId>,
     pub model: Item,
 }
 
-/// The left vector contains all item rows to upsert; the right-hand vector
-/// contains all item rows to delete (by parent ID and position).
+/// Returns a vector that contains all item rows to upsert; parent is
+/// responsible for deleting items from the same owner that aren't affirmatively
+/// kept by this.
 ///
 /// NOTE: This method does not yet handle persisting nested items within
 /// inventories. Although loadout items do store items inside them this does
|
|||||||
inventory: &Inventory,
|
inventory: &Inventory,
|
||||||
inventory_container_id: EntityId,
|
inventory_container_id: EntityId,
|
||||||
next_id: &mut i64,
|
next_id: &mut i64,
|
||||||
) -> (Vec<ItemModelPair>, Vec<(EntityId, String)>) {
|
) -> Vec<ItemModelPair> {
|
||||||
let loadout = inventory
|
let loadout = inventory
|
||||||
.loadout_items_with_persistence_key()
|
.loadout_items_with_persistence_key()
|
||||||
.map(|(slot, item)| (slot.to_string(), item, loadout_container_id));
|
.map(|(slot, item)| (slot.to_string(), item, loadout_container_id));
|
||||||
@ -55,103 +56,125 @@ pub fn convert_items_to_database_items(
|
|||||||
)
|
)
|
||||||
});
|
});
|
||||||
|
|
||||||
// Construct new items.
|
// Use Breadth-first search to recurse into containers/modular weapons to store
|
||||||
inventory.chain(loadout)
|
// their parts
|
||||||
.partition_map(|(position, item, parent_container_item_id)| {
|
let mut bfs_queue: VecDeque<_> = inventory.chain(loadout).collect();
|
||||||
if let Some(item) = item {
|
let mut upserts = Vec::new();
|
||||||
// Try using the next available id in the sequence as the default for new items.
|
let mut depth = HashMap::new();
|
||||||
let new_item_id = NonZeroU64::new(u64::try_from(*next_id)
|
depth.insert(inventory_container_id, 0);
|
||||||
.expect("We are willing to crash if the next entity id overflows \
|
depth.insert(loadout_container_id, 0);
|
||||||
(or is otherwise negative).")).expect("next_id should not be zero, either");
|
while let Some((position, item, parent_container_item_id)) = bfs_queue.pop_front() {
|
||||||
|
// Construct new items.
|
||||||
|
if let Some(item) = item {
|
||||||
|
// Try using the next available id in the sequence as the default for new items.
|
||||||
|
let new_item_id = NonZeroU64::new(u64::try_from(*next_id).expect(
|
||||||
|
"We are willing to crash if the next entity id overflows (or is otherwise \
|
||||||
|
negative).",
|
||||||
|
))
|
||||||
|
.expect("next_id should not be zero, either");
|
||||||
|
|
||||||
let comp = item.get_item_id_for_database();
|
// Fast (kinda) path: acquire read for the common case where an id has
|
||||||
Either::Left(ItemModelPair {
|
// already been assigned.
|
||||||
model: Item {
|
let comp = item.get_item_id_for_database();
|
||||||
item_definition_id: item.item_definition_id().to_owned(),
|
let item_id = comp.load()
|
||||||
position,
|
// First, we filter out "impossible" entity IDs--IDs that are larger
|
||||||
parent_container_item_id,
|
// than the maximum sequence value (next_id). This is important
|
||||||
// Fast (kinda) path: acquire read for the common case where an id has
|
// because we update the item ID atomically, *before* we know whether
|
||||||
// already been assigned.
|
// this transaction has completed successfully, and we don't abort the
|
||||||
item_id: comp.load()
|
// process on a failed transaction. In such cases, new IDs from
|
||||||
// First, we filter out "impossible" entity IDs--IDs that are larger
|
// aborted transactions will show up as having a higher value than the
|
||||||
// than the maximum sequence value (next_id). This is important
|
// current max sequence number. Because the only place that modifies
|
||||||
// because we update the item ID atomically, *before* we know whether
|
// the item_id through a shared reference is (supposed to be) this
|
||||||
// this transaction has completed successfully, and we don't abort the
|
// function, which is part of the batch update transaction, we can
|
||||||
// process on a failed transaction. In such cases, new IDs from
|
// assume that any rollback during the update would fail to insert
|
||||||
// aborted transactions will show up as having a higher value than the
|
// *any* new items for the current character; this means that any items
|
||||||
// current max sequence number. Because the only place that modifies
|
// inserted between the failure and now (i.e. values less than next_id)
|
||||||
// the item_id through a shared reference is (supposed to be) this
|
// would either not be items at all, or items belonging to other
|
||||||
// function, which is part of the batch update transaction, we can
|
// characters, leading to an easily detectable SQLite failure that we
|
||||||
// assume that any rollback during the update would fail to insert
|
// can use to atomically set the id back to None (if it was still the
|
||||||
// *any* new items for the current character; this means that any items
|
// same bad value).
|
||||||
// inserted between the failure and now (i.e. values less than next_id)
|
//
|
||||||
// would either not be items at all, or items belonging to other
|
// Note that this logic only requires that all the character's items be
|
||||||
// characters, leading to an easily detectable SQLite failure that we
|
// updated within the same serializable transaction; the argument does
|
||||||
// can use to atomically set the id back to None (if it was still the
|
// not depend on SQLite-specific details (like locking) or on the fact
|
||||||
// same bad value).
|
// that a user's transactions are always serialized on their own
|
||||||
//
|
// session. Also note that since these IDs are in-memory, we don't
|
||||||
// Note that this logic only requires that all the character's items be
|
// have to worry about their values during, e.g., a process crash;
|
||||||
// updated within the same serializable transaction; the argument does
|
// serializability will take care of us in those cases. Finally, note
|
||||||
// not depend on SQLite-specific details (like locking) or on the fact
|
// that while we have not yet implemented the "liveness" part of the
|
||||||
// that a user's transactions are always serialized on their own
|
// algorithm (resetting ids back to None if we detect errors), this is
|
||||||
// session. Also note that since these IDs are in-memory, we don't
|
// not needed for soundness, and this part can be deferred until we
|
||||||
// have to worry about their values during, e.g., a process crash;
|
// switch to an execution model where such races are actually possible
|
||||||
// serializability will take care of us in those cases. Finally, note
|
// during normal gameplay.
|
||||||
// that while we have not yet implemented the "liveness" part of the
|
.and_then(|item_id| Some(if item_id >= new_item_id {
|
||||||
// algorithm (resetting ids back to None if we detect errors), this is
|
// Try to atomically exchange with our own, "correct" next id.
|
||||||
// not needed for soundness, and this part can be deferred until we
|
match comp.compare_exchange(Some(item_id), Some(new_item_id)) {
|
||||||
// switch to an execution model where such races are actually possible
|
Ok(_) => {
|
||||||
// during normal gameplay.
|
let item_id = *next_id;
|
||||||
.and_then(|item_id| Some(if item_id >= new_item_id {
|
// We won the race, use next_id and increment it.
|
||||||
// Try to atomically exchange with our own, "correct" next id.
|
*next_id += 1;
|
||||||
match comp.compare_exchange(Some(item_id), Some(new_item_id)) {
|
item_id
|
||||||
Ok(_) => {
|
|
||||||
let item_id = *next_id;
|
|
||||||
// We won the race, use next_id and increment it.
|
|
||||||
*next_id += 1;
|
|
||||||
item_id
|
|
||||||
},
|
|
||||||
Err(item_id) => {
|
|
||||||
// We raced with someone, and they won the race, so we know
|
|
||||||
// this transaction must abort unless they finish first. So,
|
|
||||||
// just assume they will finish first, and use their assigned
|
|
||||||
// item_id.
|
|
||||||
EntityId::try_from(item_id?.get())
|
|
||||||
.expect("We always choose legal EntityIds as item ids")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
} else { EntityId::try_from(item_id.get()).expect("We always choose legal EntityIds as item ids") }))
|
|
||||||
// Finally, we're in the case where no entity was assigned yet (either
|
|
||||||
// ever, or due to corrections after a rollback). This proceeds
|
|
||||||
// identically to the "impossible ID" case.
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
// Try to atomically compare with the empty id.
|
|
||||||
match comp.compare_exchange(None, Some(new_item_id)) {
|
|
||||||
Ok(_) => {
|
|
||||||
let item_id = *next_id;
|
|
||||||
*next_id += 1;
|
|
||||||
item_id
|
|
||||||
},
|
|
||||||
Err(item_id) => {
|
|
||||||
EntityId::try_from(item_id.expect("TODO: Fix handling of reset to None when we have concurrent writers.").get())
|
|
||||||
.expect("We always choose legal EntityIds as item ids")
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}),
|
|
||||||
stack_size: if item.is_stackable() {
|
|
||||||
item.amount() as i32
|
|
||||||
} else {
|
|
||||||
1
|
|
||||||
},
|
},
|
||||||
},
|
Err(item_id) => {
|
||||||
// Continue to remember the atomic, in case we detect an error later and want
|
// We raced with someone, and they won the race, so we know
|
||||||
// to roll back to preserve liveness.
|
// this transaction must abort unless they finish first. So,
|
||||||
comp,
|
// just assume they will finish first, and use their assigned
|
||||||
})
|
// item_id.
|
||||||
} else {
|
EntityId::try_from(item_id?.get())
|
||||||
Either::Right((parent_container_item_id, position))
|
.expect("We always choose legal EntityIds as item ids")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
} else { EntityId::try_from(item_id.get()).expect("We always choose legal EntityIds as item ids") }))
|
||||||
|
// Finally, we're in the case where no entity was assigned yet (either
|
||||||
|
// ever, or due to corrections after a rollback). This proceeds
|
||||||
|
// identically to the "impossible ID" case.
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
// Try to atomically compare with the empty id.
|
||||||
|
match comp.compare_exchange(None, Some(new_item_id)) {
|
||||||
|
Ok(_) => {
|
||||||
|
let item_id = *next_id;
|
||||||
|
*next_id += 1;
|
||||||
|
item_id
|
||||||
|
},
|
||||||
|
Err(item_id) => {
|
||||||
|
EntityId::try_from(item_id.expect("TODO: Fix handling of reset to None when we have concurrent writers.").get())
|
||||||
|
.expect("We always choose legal EntityIds as item ids")
|
||||||
|
},
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
depth.insert(item_id, depth[&parent_container_item_id] + 1);
|
||||||
|
|
||||||
|
for (i, component) in item.components().iter().enumerate() {
|
||||||
|
// recursive items' children have the same position as their parents, and since
|
||||||
|
// they occur afterwards in the topological sort of the parent graph (which
|
||||||
|
// should still always be a tree, even with recursive items), we
|
||||||
|
// have enough information to put them back into their parents on load
|
||||||
|
bfs_queue.push_back((format!("component_{}", i), Some(component), item_id));
|
||||||
}
|
}
|
||||||
})
|
|
||||||
|
let upsert = ItemModelPair {
|
||||||
|
model: Item {
|
||||||
|
item_definition_id: item.item_definition_id().to_owned(),
|
||||||
|
position,
|
||||||
|
parent_container_item_id,
|
||||||
|
item_id,
|
||||||
|
stack_size: if item.is_stackable() {
|
||||||
|
item.amount() as i32
|
||||||
|
} else {
|
||||||
|
1
|
||||||
|
},
|
||||||
|
},
|
||||||
|
// Continue to remember the atomic, in case we detect an error later and want
|
||||||
|
// to roll back to preserve liveness.
|
||||||
|
comp,
|
||||||
|
};
|
||||||
|
upserts.push(upsert);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
upserts.sort_by_key(|pair| (depth[&pair.model.item_id], pair.model.item_id));
|
||||||
|
tracing::debug!("upserts: {:#?}", upserts);
|
||||||
|
upserts
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn convert_body_to_database_json(body: &CompBody) -> Result<String, Error> {
|
pub fn convert_body_to_database_json(body: &CompBody) -> Result<String, Error> {
|
||||||
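Sorting the upserts by `(depth, item_id)` is what restores the parents-before-children order that the load side depends on; the depth map assigns the pseudo-containers depth 0 and each item one more than its parent. In miniature:

    fn main() {
        // (item_id, depth): a weapon (id 2) at depth 1, its components (ids 3, 4)
        // at depth 2, deliberately listed out of order.
        let mut upserts = vec![(4_i64, 2_i64), (2, 1), (3, 2)];
        upserts.sort_by_key(|&(item_id, depth)| (depth, item_id));
        assert_eq!(upserts, vec![(2, 1), (3, 2), (4, 2)]);
    }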
@@ -192,23 +215,30 @@ pub fn convert_waypoint_from_database_json(position: &str) -> Result<Waypoint, E
     Ok(Waypoint::new(character_position.waypoint, Time(0.0)))
 }
 
+/// Properly-recursive items (currently modular weapons) occupy the same
+/// inventory slot as their parent. The caller is responsible for ensuring that
+/// inventory_items and loadout_items are topologically sorted (i.e. forall i,
+/// `items[i].parent_container_item_id == x` implies exists j < i satisfying
+/// `items[j].item_id == x`)
 pub fn convert_inventory_from_database_items(
+    inventory_container_id: i64,
     inventory_items: &[Item],
+    loadout_container_id: i64,
     loadout_items: &[Item],
 ) -> Result<Inventory, Error> {
     // Loadout items must be loaded before inventory items since loadout items
     // provide inventory slots. Since items stored inside loadout items actually
     // have their parent_container_item_id as the loadout pseudo-container we rely
     // on populating the loadout items first, and then inserting the items into the
-    // inventory at the correct position. When we want to support items inside the
-    // player's inventory containing other items (such as "right click to
-    // unwrap" gifts perhaps) then we will need to refactor inventory/loadout
-    // persistence to traverse the tree of items and load them from the root
-    // down.
-    let loadout = convert_loadout_from_database_items(loadout_items)?;
+    // inventory at the correct position.
+    //
+    let loadout = convert_loadout_from_database_items(loadout_container_id, loadout_items)?;
     let mut inventory = Inventory::new_with_loadout(loadout);
+    let mut item_indices = HashMap::new();
 
-    for db_item in inventory_items.iter() {
+    for (i, db_item) in inventory_items.iter().enumerate() {
+        item_indices.insert(db_item.item_id, i);
+
         let mut item = get_item_from_asset(db_item.item_definition_id.as_str())?;
 
         // NOTE: Since this is freshly loaded, the atomic is *unique.*
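The precondition documented above can be phrased as a small checker, treating items as `(item_id, parent_container_item_id)` pairs (a hypothetical helper for illustration, not part of the commit):

    fn is_topologically_sorted(items: &[(i64, i64)], root: i64) -> bool {
        // every item's parent must be the root container or appear earlier in the slice
        items.iter().enumerate().all(|(i, &(_, parent))| {
            parent == root || items[..i].iter().any(|&(id, _)| id == parent)
        })
    }

    fn main() {
        assert!(is_topologically_sorted(&[(2, 1), (3, 2)], 1));
        assert!(!is_topologically_sorted(&[(3, 2), (2, 1)], 1)); // child before parent
    }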
@@ -234,55 +264,99 @@ pub fn convert_inventory_from_database_items(
         // Insert item into inventory
 
         // Slot position
-        let slot: InvSlotId = serde_json::from_str(&db_item.position).map_err(|_| {
-            Error::ConversionError(format!(
-                "Failed to parse item position: {:?}",
-                &db_item.position
-            ))
-        })?;
+        let slot = |s: &str| {
+            serde_json::from_str::<InvSlotId>(s).map_err(|_| {
+                Error::ConversionError(format!(
+                    "Failed to parse item position: {:?}",
+                    &db_item.position
+                ))
+            })
+        };
 
-        let insert_res = inventory.insert_at(slot, item).map_err(|_| {
-            // If this happens there were too many items in the database for the current
-            // inventory size
-            Error::ConversionError(format!(
-                "Error inserting item into inventory, position: {:?}",
-                slot
-            ))
-        })?;
+        if db_item.parent_container_item_id == inventory_container_id {
+            let slot = slot(&db_item.position)?;
+            let insert_res = inventory.insert_at(slot, item).map_err(|_| {
+                // If this happens there were too many items in the database for the current
+                // inventory size
+                Error::ConversionError(format!(
+                    "Error inserting item into inventory, position: {:?}",
+                    slot
+                ))
+            })?;
 
-        if insert_res.is_some() {
-            // If inventory.insert returns an item, it means it was swapped for an item that
-            // already occupied the slot. Multiple items being stored in the database for
-            // the same slot is an error.
-            return Err(Error::ConversionError(
-                "Inserted an item into the same slot twice".to_string(),
-            ));
+            if insert_res.is_some() {
+                // If inventory.insert returns an item, it means it was swapped for an item that
+                // already occupied the slot. Multiple items being stored in the database for
+                // the same slot is an error.
+                return Err(Error::ConversionError(
+                    "Inserted an item into the same slot twice".to_string(),
+                ));
+            }
+        } else if let Some(&j) = item_indices.get(&db_item.parent_container_item_id) {
+            if let Some(Some(parent)) = inventory.slot_mut(slot(&inventory_items[j].position)?) {
+                parent.add_component(item);
+            } else {
+                return Err(Error::ConversionError(format!(
+                    "Parent slot {} for component {} was empty even though it occurred earlier in \
+                     the loop?",
+                    db_item.parent_container_item_id, db_item.item_id
+                )));
+            }
+        } else {
+            return Err(Error::ConversionError(format!(
+                "Couldn't find parent item {} before item {} in inventory",
+                db_item.parent_container_item_id, db_item.item_id
+            )));
         }
     }
 
     Ok(inventory)
 }
 
-pub fn convert_loadout_from_database_items(database_items: &[Item]) -> Result<Loadout, Error> {
+pub fn convert_loadout_from_database_items(
+    loadout_container_id: i64,
+    database_items: &[Item],
+) -> Result<Loadout, Error> {
     let loadout_builder = LoadoutBuilder::new();
     let mut loadout = loadout_builder.build();
+    let mut item_indices = HashMap::new();
 
-    for db_item in database_items.iter() {
+    for (i, db_item) in database_items.iter().enumerate() {
+        item_indices.insert(db_item.item_id, i);
+
         let item = get_item_from_asset(db_item.item_definition_id.as_str())?;
 
         // NOTE: item id is currently *unique*, so we can store the ID safely.
         let comp = item.get_item_id_for_database();
         comp.store(Some(NonZeroU64::try_from(db_item.item_id as u64).map_err(
             |_| Error::ConversionError("Item with zero item_id".to_owned()),
         )?));
 
-        loadout
-            .set_item_at_slot_using_persistence_key(&db_item.position, item)
-            .map_err(|err| match err {
-                LoadoutError::InvalidPersistenceKey => Error::ConversionError(format!(
-                    "Invalid persistence key: {}",
-                    &db_item.position
-                )),
-            })?;
+        let convert_error = |err| match err {
+            LoadoutError::InvalidPersistenceKey => {
+                Error::ConversionError(format!("Invalid persistence key: {}", &db_item.position))
+            },
+            LoadoutError::NoParentAtSlot => {
+                Error::ConversionError(format!("No parent item at slot: {}", &db_item.position))
+            },
+        };
+
+        if db_item.parent_container_item_id == loadout_container_id {
+            loadout
+                .set_item_at_slot_using_persistence_key(&db_item.position, item)
+                .map_err(convert_error)?;
+        } else if let Some(&j) = item_indices.get(&db_item.parent_container_item_id) {
+            loadout
+                .update_item_at_slot_using_persistence_key(&database_items[j].position, |parent| {
+                    parent.add_component(item);
+                })
+                .map_err(convert_error)?;
+        } else {
+            return Err(Error::ConversionError(format!(
+                "Couldn't find parent item {} before item {} in loadout",
+                db_item.parent_container_item_id, db_item.item_id
+            )));
+        }
     }
 
     Ok(loadout)