From 96d44163fce21e8371c4d07c465c9e29995fef6c Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Fri, 2 May 2025 01:10:17 +0200 Subject: [PATCH 1/7] feat(ecs): add basic component storage for ECS --- src/collections/sparse_set.rs | 47 ++++++++++-- src/ecs/mod.rs | 137 ++++++++++++++++++++++++++++++++++ src/main.rs | 1 + 3 files changed, 179 insertions(+), 6 deletions(-) create mode 100644 src/ecs/mod.rs diff --git a/src/collections/sparse_set.rs b/src/collections/sparse_set.rs index 5e0ec84..75b04ea 100644 --- a/src/collections/sparse_set.rs +++ b/src/collections/sparse_set.rs @@ -20,6 +20,19 @@ where dense_to_id: Vec, } +impl core::fmt::Debug for SparseSet +where + T: core::fmt::Debug, + PackedAlloc: Allocator, + SparseAlloc: Allocator, +{ + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_map() + .entries(self.dense_to_id.iter().zip(self.dense.iter())) + .finish() + } +} + impl SparseSet { pub const fn new() -> Self { Self { @@ -236,7 +249,10 @@ mod tests { assert_eq!(sparse_set.remove(SPARSE_PAGESIZE + 2).unwrap(), 3); assert_eq!(sparse_set.sparse[1].as_ref().unwrap().1, 2); - assert_eq!(sparse_set.keys(), [10, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1]); + assert_eq!( + sparse_set.keys(), + [10, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1] + ); assert_eq!(sparse_set.values(), [1, 2, 2, 1, 2]); assert_eq!(sparse_set.remove(SPARSE_PAGESIZE + 1).unwrap(), 2); @@ -249,25 +265,44 @@ mod tests { assert_eq!(sparse_set.keys(), [10, 11, 12]); assert_eq!(sparse_set.values(), [1, 2, 2]); - sparse_set.insert(SPARSE_PAGESIZE, 1); sparse_set.insert(SPARSE_PAGESIZE + 1, 2); sparse_set.insert(SPARSE_PAGESIZE + 2, 3); assert_eq!(sparse_set.remove(10).unwrap(), 1); assert_eq!(sparse_set.sparse[0].as_ref().unwrap().1, 2); - // swap-remove - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1]); + // swap-remove + assert_eq!( + sparse_set.keys(), + [ + SPARSE_PAGESIZE + 2, + 11, + 12, + SPARSE_PAGESIZE, + SPARSE_PAGESIZE + 1 + ] + ); assert_eq!(sparse_set.values(), [3, 2, 2, 1, 2]); assert_eq!(sparse_set.remove(11).unwrap(), 2); assert_eq!(sparse_set.sparse[0].as_ref().unwrap().1, 1); - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, 12, SPARSE_PAGESIZE]); + assert_eq!( + sparse_set.keys(), + [ + SPARSE_PAGESIZE + 2, + SPARSE_PAGESIZE + 1, + 12, + SPARSE_PAGESIZE + ] + ); assert_eq!(sparse_set.values(), [3, 2, 2, 1]); assert_eq!(sparse_set.remove(12).unwrap(), 2); assert!(sparse_set.sparse[0].is_none()); - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, SPARSE_PAGESIZE]); + assert_eq!( + sparse_set.keys(), + [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, SPARSE_PAGESIZE] + ); assert_eq!(sparse_set.values(), [3, 2, 1]); } } diff --git a/src/ecs/mod.rs b/src/ecs/mod.rs new file mode 100644 index 0000000..bb2500a --- /dev/null +++ b/src/ecs/mod.rs @@ -0,0 +1,137 @@ +use core::{any::TypeId, num::NonZeroU64, sync::atomic::AtomicU64}; +use std::{collections::BTreeMap, sync::RwLock}; + +use allocator_api2::alloc::Allocator; +use bytemuck::Contiguous; + +use crate::collections::SparseSet; + +pub type ECS = EntityComponentSystem; + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Entity(u64); + +static COMPONENT_ID_CREATOR: AtomicU64 = AtomicU64::new(1); +static COMPONENT_IDS: RwLock> = RwLock::new(BTreeMap::new()); + +fn create_component_id() -> ComponentId +where + T: 'static + Sized, +{ + let type_id = core::any::TypeId::of::(); + { + 
// unwrap-justification: this only errors if RwLock is poisoned + let component_read = COMPONENT_IDS.read().unwrap(); + if let Some(id) = component_read.get(&type_id) { + return *id; + } + } + let new_id = ComponentId( + NonZeroU64::new(COMPONENT_ID_CREATOR.fetch_add(1, core::sync::atomic::Ordering::Relaxed)) + .unwrap(), + ); + { + // unwrap-justification: see above + let mut component_write = COMPONENT_IDS.write().unwrap(); + component_write.insert(type_id, new_id); + } + new_id +} + +pub trait ComponentAllocator: Allocator { + fn new() -> Self; +} + +pub trait Component: core::fmt::Debug + Send + Sized + 'static { + type Allocator: ComponentAllocator; + + fn id() -> ComponentId { + static COMPONENT_ID: AtomicU64 = AtomicU64::new(0); + // TODO: reevaluate ordering later + let mut current_id = COMPONENT_ID.load(core::sync::atomic::Ordering::SeqCst); + if current_id == 0 { + current_id = create_component_id::().0.into_integer(); + COMPONENT_ID.store(current_id, core::sync::atomic::Ordering::SeqCst); + } + ComponentId(NonZeroU64::new(current_id).unwrap()) + } +} + +pub trait ComponentStorage: core::fmt::Debug + 'static {} + +impl ComponentStorage for SparseSet +where + T: Component, + SparseAlloc: Allocator + Clone + 'static, +{ +} + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ComponentId(NonZeroU64); + +#[derive(Debug)] +pub struct ComponentSet { + sets: SparseSet>, + cold_alloc: A, +} + +pub struct EntityComponentSystem { + components: ComponentSet, +} + +impl ComponentSet { + fn new() -> Self { + Self { + sets: SparseSet::new(), + cold_alloc: allocator_api2::alloc::Global, + } + } +} + +impl ComponentSet +where + A: Allocator + Clone + 'static, +{ + fn new_in(alloc: A) -> Self { + Self { + sets: SparseSet::new(), + cold_alloc: alloc, + } + } + + fn get_component_set(&self) -> Option<&SparseSet> { + let set = self.sets.get(T::id().0.into_integer() as usize)?; + (set as &dyn core::any::Any).downcast_ref() + } + + fn get_component_set_mut( + &mut self, + ) -> Option<&mut SparseSet> { + let set = self.sets.get_mut(T::id().0.into_integer() as usize)?; + (set as &mut dyn core::any::Any).downcast_mut() + } + + fn insert_component_set(&mut self) -> &mut SparseSet { + if self.sets.contains(T::id().0.into_integer() as usize) { + self.get_component_set_mut::().unwrap() + } else { + let set = SparseSet::::new_in(T::Allocator::new(), self.cold_alloc.clone()); + self.sets + .insert(T::id().0.into_integer() as usize, Box::new(set) as Box<_>); + self.get_component_set_mut::().unwrap() + } + } + + fn add_to_entity(&mut self, entity: Entity, data: T) -> Option { + let set = self.insert_component_set::(); + set.insert(entity.0 as usize, data) + } +} + +impl Default for ComponentSet { + fn default() -> Self { + Self::new() + } +} diff --git a/src/main.rs b/src/main.rs index 0105e37..6786044 100644 --- a/src/main.rs +++ b/src/main.rs @@ -19,6 +19,7 @@ use zlog::config::LoggerConfig; pub mod camera; pub mod collections; +pub mod ecs; pub mod model; pub mod texture; From 1b89120b735be202f608ebf4fec3a158f93ca88e Mon Sep 17 00:00:00 2001 From: Chance Date: Thu, 1 May 2025 20:00:06 -0400 Subject: [PATCH 2/7] feat(ecs): entity spawning --- src/ecs/mod.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/src/ecs/mod.rs b/src/ecs/mod.rs index bb2500a..4069d90 100644 --- a/src/ecs/mod.rs +++ b/src/ecs/mod.rs @@ -27,6 +27,7 @@ where return *id; } } + let new_id = ComponentId( NonZeroU64::new(COMPONENT_ID_CREATOR.fetch_add(1, 
core::sync::atomic::Ordering::Relaxed)) .unwrap(), @@ -48,11 +49,10 @@ pub trait Component: core::fmt::Debug + Send + Sized + 'static { fn id() -> ComponentId { static COMPONENT_ID: AtomicU64 = AtomicU64::new(0); - // TODO: reevaluate ordering later - let mut current_id = COMPONENT_ID.load(core::sync::atomic::Ordering::SeqCst); + let mut current_id = COMPONENT_ID.load(core::sync::atomic::Ordering::Relaxed); if current_id == 0 { current_id = create_component_id::().0.into_integer(); - COMPONENT_ID.store(current_id, core::sync::atomic::Ordering::SeqCst); + COMPONENT_ID.store(current_id, core::sync::atomic::Ordering::Relaxed); } ComponentId(NonZeroU64::new(current_id).unwrap()) } @@ -79,6 +79,16 @@ pub struct ComponentSet { pub struct EntityComponentSystem { components: ComponentSet, + next_id: AtomicU64, +} + +impl EntityComponentSystem { + fn spawn(&mut self) -> Entity { + let entity_id = self + .next_id + .fetch_add(1, core::sync::atomic::Ordering::Relaxed); + Entity(entity_id) + } } impl ComponentSet { From 0b98e94421e8c1b108e6eab73cd1a5274aa761a7 Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Sun, 4 May 2025 21:09:40 +0200 Subject: [PATCH 3/7] refactor: redistribute ECS into multiple files --- src/ecs/component.rs | 73 ++++++++++++++++++++++ src/ecs/entity.rs | 26 ++++++++ src/ecs/mod.rs | 142 +++---------------------------------------- src/ecs/storage.rs | 75 +++++++++++++++++++++++ 4 files changed, 181 insertions(+), 135 deletions(-) create mode 100644 src/ecs/component.rs create mode 100644 src/ecs/entity.rs create mode 100644 src/ecs/storage.rs diff --git a/src/ecs/component.rs b/src/ecs/component.rs new file mode 100644 index 0000000..0fce562 --- /dev/null +++ b/src/ecs/component.rs @@ -0,0 +1,73 @@ +use core::{any::TypeId, marker::PhantomData, num::NonZeroU64, sync::atomic::AtomicU64}; +use std::{collections::BTreeMap, sync::RwLock}; + +use allocator_api2::alloc::{Allocator, Global}; +use bytemuck::Contiguous; + +static COMPONENT_ID_CREATOR: AtomicU64 = AtomicU64::new(1); +static COMPONENT_IDS: RwLock> = RwLock::new(BTreeMap::new()); + +pub fn create_component_id() -> ComponentId +where + T: 'static + Sized, +{ + let type_id = core::any::TypeId::of::(); + { + // unwrap-justification: this only errors if RwLock is poisoned + let component_read = COMPONENT_IDS.read().unwrap(); + if let Some(id) = component_read.get(&type_id) { + return *id; + } + } + + let new_id = ComponentId( + NonZeroU64::new(COMPONENT_ID_CREATOR.fetch_add(1, core::sync::atomic::Ordering::Relaxed)) + .unwrap(), + ); + { + // unwrap-justification: see above + let mut component_write = COMPONENT_IDS.write().unwrap(); + component_write.insert(type_id, new_id); + } + new_id +} + +pub trait ComponentAllocator: Allocator { + fn new() -> Self; +} + +impl ComponentAllocator for Global { + fn new() -> Self { + Self + } +} + +pub trait Component: core::fmt::Debug + Send + Sized + 'static { + type Allocator: ComponentAllocator; + + fn id() -> ComponentId { + static COMPONENT_ID: AtomicU64 = AtomicU64::new(0); + let mut current_id = COMPONENT_ID.load(core::sync::atomic::Ordering::Relaxed); + if current_id == 0 { + current_id = create_component_id::().to_int(); + COMPONENT_ID.store(current_id, core::sync::atomic::Ordering::Relaxed); + } + ComponentId(NonZeroU64::new(current_id).unwrap()) + } +} + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct ComponentId(NonZeroU64); + +impl ComponentId { + pub fn to_int(self) -> u64 { + self.0.into_integer() + } +} + +impl From for 
u64 { + fn from(value: ComponentId) -> Self { + value.to_int() + } +} diff --git a/src/ecs/entity.rs b/src/ecs/entity.rs new file mode 100644 index 0000000..ce67bcd --- /dev/null +++ b/src/ecs/entity.rs @@ -0,0 +1,26 @@ +use core::sync::atomic::{AtomicU64, Ordering}; + +#[repr(transparent)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Entity(u64); + +fn create_entity() -> Entity { + static ENTITY_ID: AtomicU64 = AtomicU64::new(0); + Entity(ENTITY_ID.fetch_add(1, Ordering::Relaxed)) +} + +impl Entity { + pub fn new() -> Self { + create_entity() + } + + pub fn to_int(self) -> u64 { + self.0 + } +} + +impl From for u64 { + fn from(value: Entity) -> Self { + value.to_int() + } +} diff --git a/src/ecs/mod.rs b/src/ecs/mod.rs index 4069d90..a3e2248 100644 --- a/src/ecs/mod.rs +++ b/src/ecs/mod.rs @@ -1,147 +1,19 @@ -use core::{any::TypeId, num::NonZeroU64, sync::atomic::AtomicU64}; -use std::{collections::BTreeMap, sync::RwLock}; +mod component; +pub use component::{Component, ComponentAllocator, ComponentId}; +mod entity; +pub use entity::Entity; +mod storage; -use allocator_api2::alloc::Allocator; -use bytemuck::Contiguous; - -use crate::collections::SparseSet; +use self::storage::ComponentSet; pub type ECS = EntityComponentSystem; -#[repr(transparent)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct Entity(u64); - -static COMPONENT_ID_CREATOR: AtomicU64 = AtomicU64::new(1); -static COMPONENT_IDS: RwLock> = RwLock::new(BTreeMap::new()); - -fn create_component_id() -> ComponentId -where - T: 'static + Sized, -{ - let type_id = core::any::TypeId::of::(); - { - // unwrap-justification: this only errors if RwLock is poisoned - let component_read = COMPONENT_IDS.read().unwrap(); - if let Some(id) = component_read.get(&type_id) { - return *id; - } - } - - let new_id = ComponentId( - NonZeroU64::new(COMPONENT_ID_CREATOR.fetch_add(1, core::sync::atomic::Ordering::Relaxed)) - .unwrap(), - ); - { - // unwrap-justification: see above - let mut component_write = COMPONENT_IDS.write().unwrap(); - component_write.insert(type_id, new_id); - } - new_id -} - -pub trait ComponentAllocator: Allocator { - fn new() -> Self; -} - -pub trait Component: core::fmt::Debug + Send + Sized + 'static { - type Allocator: ComponentAllocator; - - fn id() -> ComponentId { - static COMPONENT_ID: AtomicU64 = AtomicU64::new(0); - let mut current_id = COMPONENT_ID.load(core::sync::atomic::Ordering::Relaxed); - if current_id == 0 { - current_id = create_component_id::().0.into_integer(); - COMPONENT_ID.store(current_id, core::sync::atomic::Ordering::Relaxed); - } - ComponentId(NonZeroU64::new(current_id).unwrap()) - } -} - -pub trait ComponentStorage: core::fmt::Debug + 'static {} - -impl ComponentStorage for SparseSet -where - T: Component, - SparseAlloc: Allocator + Clone + 'static, -{ -} - -#[repr(transparent)] -#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub struct ComponentId(NonZeroU64); - -#[derive(Debug)] -pub struct ComponentSet { - sets: SparseSet>, - cold_alloc: A, -} - pub struct EntityComponentSystem { components: ComponentSet, - next_id: AtomicU64, } impl EntityComponentSystem { fn spawn(&mut self) -> Entity { - let entity_id = self - .next_id - .fetch_add(1, core::sync::atomic::Ordering::Relaxed); - Entity(entity_id) - } -} - -impl ComponentSet { - fn new() -> Self { - Self { - sets: SparseSet::new(), - cold_alloc: allocator_api2::alloc::Global, - } - } -} - -impl ComponentSet -where - A: Allocator + Clone + 'static, -{ - fn 
new_in(alloc: A) -> Self { - Self { - sets: SparseSet::new(), - cold_alloc: alloc, - } - } - - fn get_component_set(&self) -> Option<&SparseSet> { - let set = self.sets.get(T::id().0.into_integer() as usize)?; - (set as &dyn core::any::Any).downcast_ref() - } - - fn get_component_set_mut( - &mut self, - ) -> Option<&mut SparseSet> { - let set = self.sets.get_mut(T::id().0.into_integer() as usize)?; - (set as &mut dyn core::any::Any).downcast_mut() - } - - fn insert_component_set(&mut self) -> &mut SparseSet { - if self.sets.contains(T::id().0.into_integer() as usize) { - self.get_component_set_mut::().unwrap() - } else { - let set = SparseSet::::new_in(T::Allocator::new(), self.cold_alloc.clone()); - self.sets - .insert(T::id().0.into_integer() as usize, Box::new(set) as Box<_>); - self.get_component_set_mut::().unwrap() - } - } - - fn add_to_entity(&mut self, entity: Entity, data: T) -> Option { - let set = self.insert_component_set::(); - set.insert(entity.0 as usize, data) - } -} - -impl Default for ComponentSet { - fn default() -> Self { - Self::new() + Entity::new() } } diff --git a/src/ecs/storage.rs b/src/ecs/storage.rs new file mode 100644 index 0000000..19d9d8e --- /dev/null +++ b/src/ecs/storage.rs @@ -0,0 +1,75 @@ +use allocator_api2::alloc::Allocator; + +use crate::collections::SparseSet; + +use super::{Component, ComponentAllocator, Entity}; + +pub trait ComponentStorage: core::fmt::Debug + 'static {} + +impl ComponentStorage for SparseSet +where + T: Component, + SparseAlloc: Allocator + Clone + 'static, +{ +} + +#[derive(Debug)] +pub struct ComponentSet { + sets: SparseSet>, + cold_alloc: A, +} + +impl ComponentSet { + fn new() -> Self { + Self { + sets: SparseSet::new(), + cold_alloc: allocator_api2::alloc::Global, + } + } +} + +impl ComponentSet +where + A: Allocator + Clone + 'static, +{ + fn new_in(alloc: A) -> Self { + Self { + sets: SparseSet::new(), + cold_alloc: alloc, + } + } + + fn get_component_set(&self) -> Option<&SparseSet> { + let set = self.sets.get(T::id().to_int() as usize)?; + (set as &dyn core::any::Any).downcast_ref() + } + + fn get_component_set_mut( + &mut self, + ) -> Option<&mut SparseSet> { + let set = self.sets.get_mut(T::id().to_int() as usize)?; + (set as &mut dyn core::any::Any).downcast_mut() + } + + fn insert_component_set(&mut self) -> &mut SparseSet { + if self.sets.contains(T::id().to_int() as usize) { + self.get_component_set_mut::().unwrap() + } else { + let set = SparseSet::::new_in(T::Allocator::new(), self.cold_alloc.clone()); + self.sets + .insert(T::id().to_int() as usize, Box::new(set) as Box<_>); + self.get_component_set_mut::().unwrap() + } + } + + fn add_to_entity(&mut self, entity: Entity, data: T) -> Option { + let set = self.insert_component_set::(); + set.insert(entity.to_int() as usize, data) + } +} + +impl Default for ComponentSet { + fn default() -> Self { + Self::new() + } +} From 5a07fae912c4249697aa901198e00b5e937d244b Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Sun, 4 May 2025 23:27:24 +0200 Subject: [PATCH 4/7] feat: impl Debug fmt for sparse set --- src/collections/sparse_set.rs | 47 ++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/src/collections/sparse_set.rs b/src/collections/sparse_set.rs index 5e0ec84..d808acb 100644 --- a/src/collections/sparse_set.rs +++ b/src/collections/sparse_set.rs @@ -30,6 +30,19 @@ impl SparseSet { } } +impl core::fmt::Debug for SparseSet +where + T: core::fmt::Debug, + PackedAlloc: Allocator, + SparseAlloc: Allocator, +{ + fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_map() + .entries(self.dense_to_id.iter().zip(self.dense.iter())) + .finish() + } +} + impl SparseSet where PackedAlloc: Allocator, @@ -236,7 +249,10 @@ mod tests { assert_eq!(sparse_set.remove(SPARSE_PAGESIZE + 2).unwrap(), 3); assert_eq!(sparse_set.sparse[1].as_ref().unwrap().1, 2); - assert_eq!(sparse_set.keys(), [10, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1]); + assert_eq!( + sparse_set.keys(), + [10, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1] + ); assert_eq!(sparse_set.values(), [1, 2, 2, 1, 2]); assert_eq!(sparse_set.remove(SPARSE_PAGESIZE + 1).unwrap(), 2); @@ -249,25 +265,44 @@ mod tests { assert_eq!(sparse_set.keys(), [10, 11, 12]); assert_eq!(sparse_set.values(), [1, 2, 2]); - sparse_set.insert(SPARSE_PAGESIZE, 1); sparse_set.insert(SPARSE_PAGESIZE + 1, 2); sparse_set.insert(SPARSE_PAGESIZE + 2, 3); assert_eq!(sparse_set.remove(10).unwrap(), 1); assert_eq!(sparse_set.sparse[0].as_ref().unwrap().1, 2); - // swap-remove - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, 11, 12, SPARSE_PAGESIZE, SPARSE_PAGESIZE + 1]); + // swap-remove + assert_eq!( + sparse_set.keys(), + [ + SPARSE_PAGESIZE + 2, + 11, + 12, + SPARSE_PAGESIZE, + SPARSE_PAGESIZE + 1 + ] + ); assert_eq!(sparse_set.values(), [3, 2, 2, 1, 2]); assert_eq!(sparse_set.remove(11).unwrap(), 2); assert_eq!(sparse_set.sparse[0].as_ref().unwrap().1, 1); - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, 12, SPARSE_PAGESIZE]); + assert_eq!( + sparse_set.keys(), + [ + SPARSE_PAGESIZE + 2, + SPARSE_PAGESIZE + 1, + 12, + SPARSE_PAGESIZE + ] + ); assert_eq!(sparse_set.values(), [3, 2, 2, 1]); assert_eq!(sparse_set.remove(12).unwrap(), 2); assert!(sparse_set.sparse[0].is_none()); - assert_eq!(sparse_set.keys(), [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, SPARSE_PAGESIZE]); + assert_eq!( + sparse_set.keys(), + [SPARSE_PAGESIZE + 2, SPARSE_PAGESIZE + 1, SPARSE_PAGESIZE] + ); assert_eq!(sparse_set.values(), [3, 2, 1]); } } From c32f8126e95c40293617f3cedbe699798e9bcbdf Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Mon, 5 May 2025 00:32:45 +0200 Subject: [PATCH 5/7] docs: write documentation for sparse set --- src/collections/mod.rs | 3 + src/collections/sparse_set.rs | 198 ++++++++++++++++++++++++++++++++++ 2 files changed, 201 insertions(+) diff --git a/src/collections/mod.rs b/src/collections/mod.rs index fa842ed..0802f59 100644 --- a/src/collections/mod.rs +++ b/src/collections/mod.rs @@ -1,3 +1,6 @@ +/// Collections types for Zenyx +/// +/// - [`SparseSet`] mod sparse_set; pub use sparse_set::SparseSet; diff --git a/src/collections/sparse_set.rs b/src/collections/sparse_set.rs index d808acb..fd31b88 100644 --- a/src/collections/sparse_set.rs +++ b/src/collections/sparse_set.rs @@ -10,17 +10,36 @@ use bytemuck::Contiguous; const SPARSE_PAGESIZE: usize = (1 << 10) * 4; type SparsePage = Option<(Box<[Option; SPARSE_PAGESIZE], A>, usize)>; +/// A sparse set for fast lookup of large indices +/// +/// The sparse allocator is mainly used for bulk allocations in the system's page size +/// for the lookup array. It will also be used for the array of pointers into those +/// bulk allocations. Additionally it will be used for the reverse map that generates keys +/// from the value in the internal packed array +/// +/// The packed allocator will exclusively be used to store the values of type `T`. +/// +/// All operations on this datastructure, meaning insertion, lookup, and deletion, are `O(1)`. 
+/// +/// This datastructure does not in any way guarantee ordering of the values on +/// its own. +#[derive(Hash)] pub struct SparseSet where PackedAlloc: Allocator, SparseAlloc: Allocator, { + /// The paginated array of keys. The value at the key is an index into the dense array minus + /// one where the value corresponding to that key is stored. sparse: Vec, SparseAlloc>, + /// The dense array where the values corresponding to the keys are stored dense: Vec, + /// The reverse map to get the index in the sparse array from the index in the dense array dense_to_id: Vec, } impl SparseSet { + /// Creates a new [`SparseSet`] with the global allocator. pub const fn new() -> Self { Self { sparse: Vec::new(), @@ -30,6 +49,12 @@ impl SparseSet { } } +impl Default for SparseSet { + fn default() -> Self { + Self::new() + } +} + impl core::fmt::Debug for SparseSet where T: core::fmt::Debug, @@ -48,6 +73,21 @@ where PackedAlloc: Allocator, SparseAlloc: Allocator + Clone, { + /// Inserts an element into the sparse set with the key `id`. This will + /// return the previous value if it already exists. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// + /// sparse_set.insert(10, 123); + /// assert_eq!(sparse_set.get(10), Some(&123)); + /// + /// let prev = sparse_set.insert(10, 9); + /// assert_eq!(prev, Some(123)); + /// assert_eq!(sparse_set.get(10), Some(&9)); + /// ``` pub fn insert(&mut self, id: usize, value: T) -> Option { match self.get_dense_idx(id) { Some(idx) => { @@ -65,15 +105,39 @@ where } } + /// Gets the value with the key `id` + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// + /// sparse_set.insert(10, 123); + /// assert_eq!(sparse_set.get(10), Some(&123)); + /// ``` pub fn get(&self, id: usize) -> Option<&T> { self.dense.get(self.get_dense_idx(id)?) } + /// Gets the value with the key `id` mutably. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// + /// sparse_set.insert(10, 123); + /// let value = sparse_set.get_mut(10).unwrap(); + /// *value = 0; + /// assert_eq!(sparse_set.get(10), Some(&0)); + /// ``` pub fn get_mut(&mut self, id: usize) -> Option<&mut T> { let idx = self.get_dense_idx(id)?; self.dense.get_mut(idx) } + /// Sets the dense index of an `key` to `idx`. This will remove said index + /// if it is [`None`]. 
fn set_dense_idx(&mut self, id: usize, idx: Option) { let page = id / SPARSE_PAGESIZE; let sparse_index = id % SPARSE_PAGESIZE; @@ -97,6 +161,15 @@ where } } + /// Gets the index in the dense array for a key `id` + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// sparse_set.insert(10, 123); + /// assert_eq!(sparse_set.values()[sparse_set.get_dense_idx(10).unwrap()], 123); + /// ``` pub fn get_dense_idx(&self, id: usize) -> Option { let page = id / SPARSE_PAGESIZE; let sparse_index = id % SPARSE_PAGESIZE; @@ -104,6 +177,8 @@ where page.0[sparse_index].map(|idx| idx.into_integer() - 1) } + /// This reduces the usage count for a page in the sparse array, deallocating + /// it if it is not used anymore fn reduce_page_usage_count(&mut self, id: usize) { let page = id / SPARSE_PAGESIZE; let Some(usage) = &mut self.sparse[page] else { @@ -116,6 +191,7 @@ where } } + /// Increase the page usage count for a page in the sparse array fn increase_page_usage_count(&mut self, id: usize) { let page = id / SPARSE_PAGESIZE; if page >= self.sparse.len() { @@ -127,6 +203,18 @@ where usage.1 += 1; } + /// Removes the value with the key `id` from the sparse set, returning the + /// value if it existed. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// + /// sparse_set.insert(10, 123); + /// assert_eq!(sparse_set.remove(10), Some(123)); + /// assert_eq!(sparse_set.remove(10), None); + /// ``` pub fn remove(&mut self, id: usize) -> Option { let index = self.get_dense_idx(id)?; if self.dense.is_empty() { @@ -142,26 +230,126 @@ where Some(previous) } + /// Returns if there are values in this sparse set. + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// assert!(sparse_set.is_empty()); + /// sparse_set.insert(10, 123); + /// assert!(!sparse_set.is_empty()); + /// ``` pub fn is_empty(&self) -> bool { self.len() == 0 } + /// Returns the number of values in this sparse set + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// sparse_set.insert(10, 123); + /// sparse_set.insert(10, 9); + /// sparse_set.insert(11, 10); + /// assert_eq!(sparse_set.len(), 2); + /// ``` pub fn len(&self) -> usize { self.dense.len() } + /// Checks if the sparse set contains a value with key `id` + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// assert!(!sparse_set.contains(10)); + /// sparse_set.insert(10, 123); + /// assert!(sparse_set.contains(10)); + /// ``` pub fn contains(&self, id: usize) -> bool { self.get_dense_idx(id).is_some() } + /// Gets the keys of all values in the sparse set. This method does not provide + /// any ordering guarantees other than the keys contained corresponding to + /// the values with the same index returned by [`Self::values`]. 
+ /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// assert!(sparse_set.keys().is_empty()); + /// sparse_set.insert(10, 10); + /// sparse_set.insert(9, 10); + /// sparse_set.insert(11, 10); + /// + /// assert_eq!(sparse_set.keys(), &[10, 9, 11]); + /// sparse_set.remove(10); + /// assert_eq!(sparse_set.keys(), &[11, 9]); + /// ``` pub fn keys(&self) -> &[usize] { &self.dense_to_id } + /// Gets all values in the sparse set, the corresponding `key` is at the same + /// position in the slice returned by [`Self::keys`]. + /// + /// Otherwise there are no ordering guarantees. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// assert!(sparse_set.values().is_empty()); + /// sparse_set.insert(10, 10); + /// sparse_set.insert(9, 9); + /// sparse_set.insert(11, 11); + /// + /// assert_eq!(sparse_set.values(), &[10, 9, 11]); + /// sparse_set.remove(10); + /// assert_eq!(sparse_set.values(), &[11, 9]); + /// ``` pub fn values(&self) -> &[T] { &self.dense } + /// Mutable version of [`Self::keys`] + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// + /// let mut sparse_set: SparseSet = SparseSet::new(); + /// assert!(sparse_set.values().is_empty()); + /// sparse_set.insert(10, 10); + /// sparse_set.insert(9, 9); + /// sparse_set.insert(11, 11); + /// + /// let dense_of_9 = sparse_set.get_dense_idx(9).unwrap(); + /// let dense_of_10 = sparse_set.get_dense_idx(10).unwrap(); + /// + /// let values = sparse_set.values_mut(); + /// values[dense_of_10] = 9; + /// values[dense_of_9] = 10; + /// + /// assert_eq!(sparse_set.get(10), Some(&9)); + /// assert_eq!(sparse_set.get(9), Some(&10)); + /// ``` + pub fn values_mut(&mut self) -> &mut [T] { + &mut self.dense + } + + /// Creates a new [`SparseSet`] with the values allocated by the `packed_alloc` + /// and everything else, as described in the top level documentation for [`SparseSet`] + /// in the `sparse_alloc`. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// use allocator_api2::alloc::Global; + /// + /// let sparse_set = SparseSet::::new_in(Global, Global); + /// ``` pub fn new_in(packed_alloc: PackedAlloc, sparse_alloc: SparseAlloc) -> Self { Self { dense: Vec::new_in(packed_alloc), @@ -175,6 +363,16 @@ impl SparseSet where PackedAlloc: Allocator, { + /// Creates a new [`SparseSet`] with the values allocated by the `packed_alloc` + /// Everything else, as described in the top level documentation for [`SparseSet`] + /// is allocated using the global allocator. + /// + /// ``` + /// use zenyx::collections::SparseSet; + /// use allocator_api2::alloc::Global; + /// + /// let sparse_set = SparseSet::::new_in_packed(Global); + /// ``` pub const fn new_in_packed(packed_alloc: PackedAlloc) -> Self { Self { sparse: Vec::new(), From 2704efbd9be285c5513f4831654d8fe6d0ab1db7 Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Mon, 5 May 2025 00:45:10 +0200 Subject: [PATCH 6/7] chore: fix missing periods --- src/collections/sparse_set.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/src/collections/sparse_set.rs b/src/collections/sparse_set.rs index fd31b88..2bbcc7e 100644 --- a/src/collections/sparse_set.rs +++ b/src/collections/sparse_set.rs @@ -15,13 +15,13 @@ type SparsePage = Option<(Box<[Option; SPARSE_PAGESIZE], A>, us /// The sparse allocator is mainly used for bulk allocations in the system's page size /// for the lookup array. 
It will also be used for the array of pointers into those /// bulk allocations. Additionally it will be used for the reverse map that generates keys -/// from the value in the internal packed array +/// from the value in the internal packed array. /// /// The packed allocator will exclusively be used to store the values of type `T`. /// /// All operations on this datastructure, meaning insertion, lookup, and deletion, are `O(1)`. /// -/// This datastructure does not in any way guarantee ordering of the values on +/// This data structure does not in any way guarantee ordering of the values on /// its own. #[derive(Hash)] pub struct SparseSet @@ -32,9 +32,9 @@ where /// The paginated array of keys. The value at the key is an index into the dense array minus /// one where the value corresponding to that key is stored. sparse: Vec, SparseAlloc>, - /// The dense array where the values corresponding to the keys are stored + /// The dense array where the values corresponding to the keys are stored. dense: Vec, - /// The reverse map to get the index in the sparse array from the index in the dense array + /// The reverse map to get the index in the sparse array from the index in the dense array. dense_to_id: Vec, } @@ -105,7 +105,7 @@ where } } - /// Gets the value with the key `id` + /// Gets the value with the key `id`. /// /// ``` /// use zenyx::collections::SparseSet; @@ -161,7 +161,7 @@ where } } - /// Gets the index in the dense array for a key `id` + /// Gets the index in the dense array for a key `id`. /// /// ``` /// use zenyx::collections::SparseSet; @@ -178,7 +178,7 @@ where } /// This reduces the usage count for a page in the sparse array, deallocating - /// it if it is not used anymore + /// it if it is not used anymore. fn reduce_page_usage_count(&mut self, id: usize) { let page = id / SPARSE_PAGESIZE; let Some(usage) = &mut self.sparse[page] else { @@ -191,7 +191,7 @@ where } } - /// Increase the page usage count for a page in the sparse array + /// Increase the page usage count for a page in the sparse array. fn increase_page_usage_count(&mut self, id: usize) { let page = id / SPARSE_PAGESIZE; if page >= self.sparse.len() { @@ -243,7 +243,7 @@ where self.len() == 0 } - /// Returns the number of values in this sparse set + /// Returns the number of values in this sparse set. /// /// ``` /// use zenyx::collections::SparseSet; @@ -258,7 +258,7 @@ where self.dense.len() } - /// Checks if the sparse set contains a value with key `id` + /// Checks if the sparse set contains a value with key `id`. /// /// ``` /// use zenyx::collections::SparseSet; @@ -315,7 +315,7 @@ where &self.dense } - /// Mutable version of [`Self::keys`] + /// Mutable version of [`Self::keys`]. /// /// ``` /// use zenyx::collections::SparseSet; From 1dd5b549c4eb4431cf3e09a52ba62f01574a5c1c Mon Sep 17 00:00:00 2001 From: BitSyndicate Date: Mon, 5 May 2025 00:55:55 +0200 Subject: [PATCH 7/7] chore: fix another missing period --- src/collections/sparse_set.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/collections/sparse_set.rs b/src/collections/sparse_set.rs index 2bbcc7e..a23117c 100644 --- a/src/collections/sparse_set.rs +++ b/src/collections/sparse_set.rs @@ -10,7 +10,7 @@ use bytemuck::Contiguous; const SPARSE_PAGESIZE: usize = (1 << 10) * 4; type SparsePage = Option<(Box<[Option; SPARSE_PAGESIZE], A>, usize)>; -/// A sparse set for fast lookup of large indices +/// A sparse set for fast lookup of large indices. 
 ///
 /// The sparse allocator is mainly used for bulk allocations in the system's page size
 /// for the lookup array. It will also be used for the array of pointers into those
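
The series above introduces the ECS plumbing but never shows it in use. The sketch below is not part of any patch: it assumes the `zenyx::ecs` re-exports from patch 3 (`Component`, `Entity`, and the `ComponentAllocator` impl for `Global`) plus a made-up `Position` component. The storage side (`ComponentSet::add_to_entity`, `EntityComponentSystem::spawn`) is still crate-private at the end of patch 7, so only the component-id and entity plumbing is exercised here.

```rust
use allocator_api2::alloc::Global;
use zenyx::ecs::{Component, Entity};

/// Hypothetical component type for illustration; `Debug + Send + 'static`
/// satisfies the `Component` bounds, and `Global` implements `ComponentAllocator`.
#[derive(Debug)]
struct Position {
    x: f32,
    y: f32,
}

impl Component for Position {
    type Allocator = Global;
}

fn main() {
    let _origin = Position { x: 0.0, y: 0.0 };

    // The first call registers `Position` in the global `TypeId -> ComponentId`
    // map; subsequent calls return the same cached, non-zero id.
    let id = Position::id();
    assert_eq!(id, Position::id());

    // Entities are plain monotonically increasing `u64` handles.
    let a = Entity::new();
    let b = Entity::new();
    assert_ne!(u64::from(a), u64::from(b));
}
```

Because `COMPONENT_ID_CREATOR` starts at 1, every `ComponentId` wraps a `NonZeroU64`, which keeps `Option<ComponentId>` niche-optimized to eight bytes.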