Merge branch 'desttinghim/gen-store' into 'master'

Use older version of Store and introduce Depot

See merge request veloren/veloren!1993
Joshua Barretto 2021-03-25 15:33:31 +00:00
commit 6a49c1e767
4 changed files with 254 additions and 149 deletions

CHANGELOG.md

@@ -16,6 +16,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
+- Restored old version of Store and renamed the new and modified version to Depot
 ### Removed
 ### Fixed

common/src/depot.rs (new file, 221 additions)

@@ -0,0 +1,221 @@
use std::{
cmp::{Eq, Ord, Ordering, PartialEq, PartialOrd},
fmt, hash,
marker::PhantomData,
ops::{Index, IndexMut},
};
/// Type safe index into Depot
pub struct Id<T> {
idx: u32,
gen: u32,
phantom: PhantomData<T>,
}
impl<T> Id<T> {
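    /// Pack the id into a single `u64`: the slot index occupies the low 32
    /// bits and the generation the high 32 bits.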
pub fn id(&self) -> u64 { self.idx as u64 | ((self.gen as u64) << 32) }
}
impl<T> Copy for Id<T> {}
impl<T> Clone for Id<T> {
fn clone(&self) -> Self {
Self {
idx: self.idx,
gen: self.gen,
phantom: PhantomData,
}
}
}
impl<T> Eq for Id<T> {}
impl<T> PartialEq for Id<T> {
fn eq(&self, other: &Self) -> bool { self.idx == other.idx && self.gen == other.gen }
}
impl<T> Ord for Id<T> {
fn cmp(&self, other: &Self) -> Ordering { (self.idx, self.gen).cmp(&(other.idx, other.gen)) }
}
impl<T> PartialOrd for Id<T> {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
}
impl<T> fmt::Debug for Id<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(
f,
"Id<{}>({}, {})",
std::any::type_name::<T>(),
self.idx,
self.gen
)
}
}
impl<T> hash::Hash for Id<T> {
fn hash<H: hash::Hasher>(&self, h: &mut H) {
self.idx.hash(h);
self.gen.hash(h);
}
}
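/// A single depot slot: the generation it is currently on, plus the item
/// itself (`None` once the slot has been freed by `remove`).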
struct Entry<T> {
gen: u32,
item: Option<T>,
}
/// A general-purpose, high performance generational arena: effectively a
/// `Vec` with type-safe, generation-checked indices (`Id`) and slot reuse
/// on removal
pub struct Depot<T> {
entries: Vec<Entry<T>>,
len: usize,
}
impl<T> Default for Depot<T> {
fn default() -> Self {
Self {
entries: Vec::new(),
len: 0,
}
}
}
impl<T> Depot<T> {
pub fn is_empty(&self) -> bool { self.len == 0 }
pub fn len(&self) -> usize { self.len }
pub fn contains(&self, id: Id<T>) -> bool {
self.entries
.get(id.idx as usize)
.map(|e| e.gen == id.gen && e.item.is_some())
.unwrap_or(false)
}
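    /// Returns `None` if the id's slot is out of range or currently empty;
    /// panics if the generation mismatches (i.e. the id is stale and its
    /// slot has since been reused).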
pub fn get(&self, id: Id<T>) -> Option<&T> {
if let Some(entry) = self.entries.get(id.idx as usize) {
if entry.gen == id.gen {
entry.item.as_ref()
} else {
panic!("Stale ID used to access depot entry");
}
} else {
None
}
}
pub fn get_mut(&mut self, id: Id<T>) -> Option<&mut T> {
if let Some(entry) = self.entries.get_mut(id.idx as usize) {
if entry.gen == id.gen {
entry.item.as_mut()
} else {
panic!("Stale ID used to access depot entry");
}
} else {
None
}
}
pub fn ids(&self) -> impl Iterator<Item = Id<T>> + '_ { self.iter().map(|(id, _)| id) }
pub fn values(&self) -> impl Iterator<Item = &T> + '_ { self.iter().map(|(_, item)| item) }
pub fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
self.iter_mut().map(|(_, item)| item)
}
pub fn iter(&self) -> impl Iterator<Item = (Id<T>, &T)> + '_ {
self.entries
.iter()
.enumerate()
.filter_map(move |(idx, entry)| {
Some(Id {
idx: idx as u32,
gen: entry.gen,
phantom: PhantomData,
})
.zip(entry.item.as_ref())
})
}
pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id<T>, &mut T)> + '_ {
self.entries
.iter_mut()
.enumerate()
.filter_map(move |(idx, entry)| {
Some(Id {
idx: idx as u32,
gen: entry.gen,
phantom: PhantomData,
})
.zip(entry.item.as_mut())
})
}
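    /// Insert an item, reusing the first free slot if one exists and bumping
    /// its generation so that stale ids cannot alias the new item.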
pub fn insert(&mut self, item: T) -> Id<T> {
if self.len < self.entries.len() {
// TODO: Make this more efficient with a lookahead system
let (idx, entry) = self
.entries
.iter_mut()
.enumerate()
.find(|(_, e)| e.item.is_none())
.unwrap();
            entry.item = Some(item);
            assert!(entry.gen < u32::MAX);
            entry.gen += 1;
            // Reoccupying a freed slot must also bump the live count, or
            // `len` undercounts after a remove followed by an insert.
            self.len += 1;
Id {
idx: idx as u32,
gen: entry.gen,
phantom: PhantomData,
}
} else {
assert!(self.entries.len() < (u32::MAX - 1) as usize);
let id = Id {
idx: self.entries.len() as u32,
gen: 0,
phantom: PhantomData,
};
self.entries.push(Entry {
gen: 0,
item: Some(item),
});
self.len += 1;
id
}
}
pub fn remove(&mut self, id: Id<T>) -> Option<T> {
if let Some(item) = self
.entries
.get_mut(id.idx as usize)
.and_then(|e| if e.gen == id.gen { e.item.take() } else { None })
{
self.len -= 1;
Some(item)
} else {
None
}
}
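    /// Rebuild an `Id` from a raw slot index, adopting whatever generation
    /// that slot currently holds; returns `None` if the index is out of range.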
pub fn recreate_id(&self, i: u64) -> Option<Id<T>> {
if i as usize >= self.entries.len() {
None
} else {
Some(Id {
idx: i as u32,
gen: self
.entries
.get(i as usize)
.map(|e| e.gen)
.unwrap_or_default(),
phantom: PhantomData,
})
}
}
}
impl<T> Index<Id<T>> for Depot<T> {
type Output = T;
fn index(&self, id: Id<T>) -> &Self::Output { self.get(id).unwrap() }
}
impl<T> IndexMut<Id<T>> for Depot<T> {
fn index_mut(&mut self, id: Id<T>) -> &mut Self::Output { self.get_mut(id).unwrap() }
}
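
For orientation, a minimal usage sketch of the Depot API above (not part of the commit; the veloren_common::depot import path is an assumption):

// Sketch only: exercises Depot's generational behaviour.
use veloren_common::depot::{Depot, Id};

fn main() {
    let mut depot: Depot<&str> = Depot::default();
    let apple: Id<&str> = depot.insert("apple");
    assert!(depot.contains(apple));
    assert_eq!(depot.get(apple), Some(&"apple"));

    // Removing empties the slot; the old id now reports the item as gone.
    assert_eq!(depot.remove(apple), Some("apple"));
    assert!(!depot.contains(apple));

    // Re-inserting reuses slot 0 but bumps its generation, so the stale id
    // is distinguishable from the fresh one (and `get` with it would panic).
    let banana = depot.insert("banana");
    assert_ne!(apple.id(), banana.id());
}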

common/src/lib.rs

@@ -33,6 +33,7 @@ pub mod combat;
 pub mod comp;
 #[cfg(not(target_arch = "wasm32"))]
 pub mod consts;
+#[cfg(not(target_arch = "wasm32"))] pub mod depot;
 #[cfg(not(target_arch = "wasm32"))]
 pub mod effect;
 #[cfg(not(target_arch = "wasm32"))] pub mod event;

common/src/store.rs

@@ -5,201 +5,82 @@ use std::{
     ops::{Index, IndexMut},
 };
 /// Type safe index into Store
-pub struct Id<T> {
-    idx: u32,
-    gen: u32,
-    phantom: PhantomData<T>,
-}
+// NOTE: We use u64 to make sure we are consistent across all machines. We
+// assume that usize fits into 8 bytes.
+pub struct Id<T>(u64, PhantomData<T>);
 impl<T> Id<T> {
-    pub fn id(&self) -> u64 { self.idx as u64 | ((self.gen as u64) << 32) }
+    pub fn id(&self) -> u64 { self.0 }
 }
 impl<T> Copy for Id<T> {}
 impl<T> Clone for Id<T> {
-    fn clone(&self) -> Self {
-        Self {
-            idx: self.idx,
-            gen: self.gen,
-            phantom: PhantomData,
-        }
-    }
+    fn clone(&self) -> Self { Self(self.0, PhantomData) }
 }
 impl<T> Eq for Id<T> {}
 impl<T> PartialEq for Id<T> {
-    fn eq(&self, other: &Self) -> bool { self.idx == other.idx && self.gen == other.gen }
+    fn eq(&self, other: &Self) -> bool { self.0 == other.0 }
 }
 impl<T> Ord for Id<T> {
-    fn cmp(&self, other: &Self) -> Ordering { (self.idx, self.gen).cmp(&(other.idx, other.gen)) }
+    fn cmp(&self, other: &Self) -> Ordering { self.0.cmp(&(other.0)) }
 }
 impl<T> PartialOrd for Id<T> {
     fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) }
 }
 impl<T> fmt::Debug for Id<T> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(
-            f,
-            "Id<{}>({}, {})",
-            std::any::type_name::<T>(),
-            self.idx,
-            self.gen
-        )
+        write!(f, "Id<{}>({})", std::any::type_name::<T>(), self.0)
     }
 }
 impl<T> hash::Hash for Id<T> {
-    fn hash<H: hash::Hasher>(&self, h: &mut H) {
-        self.idx.hash(h);
-        self.gen.hash(h);
-    }
+    fn hash<H: hash::Hasher>(&self, h: &mut H) { self.0.hash(h); }
 }
-struct Entry<T> {
-    gen: u32,
-    item: Option<T>,
-}
 /// A general-purpose high performance allocator, basically Vec with type safe
 /// indices (Id)
 pub struct Store<T> {
-    entries: Vec<Entry<T>>,
-    len: usize,
+    items: Vec<T>,
 }
 impl<T> Default for Store<T> {
-    fn default() -> Self {
-        Self {
-            entries: Vec::new(),
-            len: 0,
-        }
-    }
+    fn default() -> Self { Self { items: Vec::new() } }
 }
 impl<T> Store<T> {
-    pub fn is_empty(&self) -> bool { self.len == 0 }
-    pub fn len(&self) -> usize { self.len }
-    pub fn contains(&self, id: Id<T>) -> bool {
-        self.entries
-            .get(id.idx as usize)
-            .map(|e| e.gen == id.gen)
-            .unwrap_or(false)
-    }
     pub fn get(&self, id: Id<T>) -> &T {
-        let entry = self.entries.get(id.idx as usize).unwrap();
-        if entry.gen == id.gen {
-            entry.item.as_ref().unwrap()
-        } else {
-            panic!("Stale ID used to access store entry");
-        }
+        // NOTE: Safe conversion, because it came from usize.
+        self.items.get(id.0 as usize).unwrap()
     }
     pub fn get_mut(&mut self, id: Id<T>) -> &mut T {
-        let entry = self.entries.get_mut(id.idx as usize).unwrap();
-        if entry.gen == id.gen {
-            entry.item.as_mut().unwrap()
-        } else {
-            panic!("Stale ID used to access store entry");
-        }
+        // NOTE: Safe conversion, because it came from usize.
+        self.items.get_mut(id.0 as usize).unwrap()
     }
-    pub fn ids(&self) -> impl Iterator<Item = Id<T>> + '_ { self.iter().map(|(id, _)| id) }
-    pub fn values(&self) -> impl Iterator<Item = &T> + '_ { self.iter().map(|(_, item)| item) }
-    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut T> + '_ {
-        self.iter_mut().map(|(_, item)| item)
+    pub fn ids(&self) -> impl Iterator<Item = Id<T>> {
+        (0..self.items.len()).map(|i| Id(i as u64, PhantomData))
     }
-    pub fn iter(&self) -> impl Iterator<Item = (Id<T>, &T)> + '_ {
-        self.entries
-            .iter()
-            .enumerate()
-            .filter_map(move |(idx, entry)| {
-                Some(Id {
-                    idx: idx as u32,
-                    gen: entry.gen,
-                    phantom: PhantomData,
-                })
-                .zip(entry.item.as_ref())
-            })
-    }
+    pub fn values(&self) -> impl Iterator<Item = &T> { self.items.iter() }
-    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id<T>, &mut T)> + '_ {
-        self.entries
-            .iter_mut()
-            .enumerate()
-            .filter_map(move |(idx, entry)| {
-                Some(Id {
-                    idx: idx as u32,
-                    gen: entry.gen,
-                    phantom: PhantomData,
-                })
-                .zip(entry.item.as_mut())
-            })
+    pub fn values_mut(&mut self) -> impl Iterator<Item = &mut T> { self.items.iter_mut() }
+    pub fn iter(&self) -> impl Iterator<Item = (Id<T>, &T)> { self.ids().zip(self.values()) }
+    pub fn iter_mut(&mut self) -> impl Iterator<Item = (Id<T>, &mut T)> {
+        self.ids().zip(self.values_mut())
     }
     pub fn insert(&mut self, item: T) -> Id<T> {
-        if self.len < self.entries.len() {
-            // TODO: Make this more efficient with a lookahead system
-            let (idx, entry) = self
-                .entries
-                .iter_mut()
-                .enumerate()
-                .find(|(_, e)| e.item.is_none())
-                .unwrap();
-            entry.item = Some(item);
-            assert!(entry.gen < u32::MAX);
-            entry.gen += 1;
-            Id {
-                idx: idx as u32,
-                gen: entry.gen,
-                phantom: PhantomData,
-            }
-        } else {
-            assert!(self.entries.len() < (u32::MAX - 1) as usize);
-            let id = Id {
-                idx: self.entries.len() as u32,
-                gen: 0,
-                phantom: PhantomData,
-            };
-            self.entries.push(Entry {
-                gen: 0,
-                item: Some(item),
-            });
-            self.len += 1;
-            id
-        }
-    }
-    pub fn remove(&mut self, id: Id<T>) -> Option<T> {
-        if let Some(item) = self
-            .entries
-            .get_mut(id.idx as usize)
-            .and_then(|e| if e.gen == id.gen { e.item.take() } else { None })
-        {
-            self.len -= 1;
-            Some(item)
-        } else {
-            None
-        }
+        // NOTE: Assumes usize fits into 8 bytes.
+        let id = Id(self.items.len() as u64, PhantomData);
+        self.items.push(item);
+        id
     }
     pub fn recreate_id(&self, i: u64) -> Option<Id<T>> {
-        if i as usize >= self.entries.len() {
+        if i as usize >= self.items.len() {
             None
         } else {
-            Some(Id {
-                idx: i as u32,
-                gen: self
-                    .entries
-                    .get(i as usize)
-                    .map(|e| e.gen)
-                    .unwrap_or_default(),
-                phantom: PhantomData,
-            })
+            Some(Id::<T>(i, PhantomData))
         }
     }
 }
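
By contrast, a minimal sketch for the restored Store (again, the veloren_common::store import path is an assumption): ids are bare u64 indices, there is no remove, and recreate_id simply bounds-checks a raw index.

// Sketch only: the restored Store is append-only, so ids stay valid forever.
use veloren_common::store::Store;

fn main() {
    let mut store: Store<&str> = Store::default();
    let id = store.insert("apple");
    // `get` returns `&T` directly and panics on an out-of-range id.
    assert_eq!(*store.get(id), "apple");

    // Ids round-trip losslessly through their raw u64 form.
    let same = store.recreate_id(id.id()).expect("index in range");
    assert_eq!(*store.get(same), "apple");
    assert!(store.recreate_id(99).is_none());
}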