我正在用 Rust 编写一个基本的固定大小池分配器,用于 Vulkan 的内存管理,它将是一个更大的内存管理系统的子模块。到目前为止,它至少在非异步上下文中似乎工作正常。最后剩下的问题是析构函数:当前的实现无法知道块中的哪些槽位是“已占用”的,因此我不知道应该在数组的哪些位置就地运行析构函数。
我目前的想法是对空闲链表的节点进行排序,然后利用相邻空闲节点之间的区间来确定应该在哪些位置运行析构函数。有没有人能想到更好的方法?对 Vulkan 来说,清理从池中取出的所有句柄尤其重要。由于这样的池在应用程序中只会被销毁一次,即使这一步开销稍大一些也可以接受,只是好奇是否有更优雅的做法。
use std::ops::{Deref, DerefMut};
use std::ptr::NonNull;
use std::mem::{size_of, align_of};
const BLOCK_SIZE_POWER: usize = 6;
// NOTE(review): in Rust, `-` binds tighter than `<<`, so the original
// `1 << BLOCK_SIZE_POWER - 1` parsed as `1 << (BLOCK_SIZE_POWER - 1)` == 32.
// The parentheses below make that parse explicit without changing the value.
// If a 64-slot block (`1 << BLOCK_SIZE_POWER`) or a 63 mask
// (`(1 << BLOCK_SIZE_POWER) - 1`) was intended, adjust accordingly — confirm intent.
const BLOCK_SIZE: usize = 1 << (BLOCK_SIZE_POWER - 1);
/// Intrusive free-list node overlaid on an unoccupied pool slot.
/// A slot holds either a live `T` or, while free, one of these.
struct BlockNode {
    // Next free slot, or `None` at the tail of the free list.
    // NOTE(review): raw pointers have no niche, so `Option<*mut _>` is
    // twice the pointer size; every slot must be large enough to hold it.
    next: Option<*mut BlockNode>
}
/// One contiguous heap allocation carved into equally-sized slots for `T`.
struct Block<T> {
    // Base address of the allocation (also the first slot).
    pub data: *mut T,
    // Total allocation size in bytes; reused to rebuild the `Layout`
    // when the block is deallocated in `Drop`.
    pub byte_size: usize
}
/// Pool allocator with fixed-size slots: hands out `FixedSizeHandle`s to
/// pooled `T` values and recycles slots through an intrusive free list.
#[allow(unused)]
pub struct FixedSizeAllocator<T: Sized> {
    // Every block ever allocated; blocks are only released when the
    // whole pool is dropped.
    blocks: Vec<Block<T>>,
    // Head of the free list threaded through unoccupied slots, or
    // `None` when every slot is in use.
    head: Option<*mut BlockNode>,
    // Slot count used for the next block allocation (grows on demand).
    capacity: usize
}
impl<T: Sized> FixedSizeAllocator<T> {
    /// Alignment every slot must satisfy: a slot holds either a live `T`
    /// or a free-list `BlockNode`, so we need the stricter of the two.
    const ITEM_ALIGN: usize = if align_of::<T>() >= align_of::<BlockNode>() {
        align_of::<T>()
    } else {
        align_of::<BlockNode>()
    };

    /// Size of one slot: big enough for both `T` and `BlockNode`, rounded
    /// up to `ITEM_ALIGN` so successive slots stay aligned.
    /// (The original returned `size_of::<T>()` in BOTH branches, so when
    /// `T` was smaller than `BlockNode` the free-list writes ran past the
    /// end of each slot.)
    const ITEM_SIZE: usize = {
        let raw = if size_of::<T>() >= size_of::<BlockNode>() {
            size_of::<T>()
        } else {
            size_of::<BlockNode>()
        };
        // Round up to a multiple of ITEM_ALIGN (align is a power of two).
        (raw + Self::ITEM_ALIGN - 1) & !(Self::ITEM_ALIGN - 1)
    };

    /// Allocate one raw block of `capacity` slots and thread the free
    /// list through every slot. `capacity` must be non-zero: zero-size
    /// allocations are undefined behavior with the global allocator.
    fn allocate_block(capacity: usize) -> Block<T> {
        assert!(capacity > 0, "block capacity must be non-zero");
        let layout = std::alloc::Layout::from_size_align(
            Self::ITEM_SIZE * capacity,
            Self::ITEM_ALIGN,
        )
        .expect("invalid layout for pool block");
        // SAFETY: layout has non-zero size; every write below stays inside
        // the allocation and is aligned for `BlockNode` (ITEM_SIZE is a
        // multiple of ITEM_ALIGN >= align_of::<BlockNode>()).
        unsafe {
            let memory: *mut u8 = std::alloc::alloc(layout);
            if memory.is_null() {
                // The original never checked for allocation failure.
                std::alloc::handle_alloc_error(layout);
            }
            // Link slot i -> slot i+1; the final slot terminates the list.
            let mut slot = memory;
            for _ in 0..capacity - 1 {
                let next = slot.add(Self::ITEM_SIZE);
                slot.cast::<BlockNode>().write(BlockNode { next: Some(next.cast()) });
                slot = next;
            }
            slot.cast::<BlockNode>().write(BlockNode { next: None });
            Block { data: memory.cast(), byte_size: layout.size() }
        }
    }

    /// Create a pool with room for `capacity` items.
    /// `capacity == 0` defers the first allocation to the first `alloc`.
    pub fn new(capacity: usize) -> Self {
        if capacity == 0 {
            return FixedSizeAllocator {
                blocks: Vec::new(),
                head: None,
                capacity: 0,
            };
        }
        let block = Self::allocate_block(capacity);
        let head: Option<*mut BlockNode> = Some(block.data.cast());
        let mut blocks: Vec<Block<T>> = Vec::with_capacity(4);
        blocks.push(block);
        FixedSizeAllocator { blocks, head, capacity }
    }

    /// Hand out one slot initialized to `value`. When the free list is
    /// exhausted a new block is allocated and `capacity` doubles.
    pub fn alloc(&mut self, value: T) -> Option<FixedSizeHandle<T>> {
        if self.head.is_none() {
            // A pool created with `new(0)` starts growing from one slot;
            // the original called `allocate_block(0)` here, which both
            // underflowed `capacity - 1` and made a zero-size allocation.
            if self.capacity == 0 {
                self.capacity = 1;
            }
            let block = Self::allocate_block(self.capacity);
            self.head = Some(block.data.cast());
            self.blocks.push(block);
            self.capacity *= 2;
        }
        // SAFETY: head is non-None here and points at a free slot valid
        // for writes of both `BlockNode` and `T`.
        unsafe {
            let node = self.head.unwrap_unchecked();
            self.head = (*node).next;
            node.cast::<T>().write(value);
            Some(FixedSizeHandle {
                ptr: NonNull::new_unchecked(node.cast()),
                // NOTE(review): the handle keeps a raw pointer to this
                // allocator; moving the allocator while handles are live
                // leaves them dangling — consider pinning/boxing the pool.
                pool: NonNull::new_unchecked(self),
            })
        }
    }

    /// Drop the value in `ptr`'s slot and push the slot back on the
    /// free list. `ptr` must have come from `alloc` on this pool.
    pub fn dealloc(&mut self, ptr: NonNull<T>) {
        if std::mem::needs_drop::<T>() {
            // SAFETY: the slot holds a live `T` produced by `alloc`.
            unsafe { std::ptr::drop_in_place(ptr.as_ptr()) };
        }
        let node: *mut BlockNode = ptr.as_ptr().cast();
        // SAFETY: the slot is dead and is sized/aligned for `BlockNode`.
        // `write` is used (not assignment) because the slot does not hold
        // a valid `BlockNode` yet. This single path also fixes the
        // original's empty-list branch, which set `head` without writing
        // `next`, so a later `alloc` chased stale `T` bytes as a pointer.
        unsafe { node.write(BlockNode { next: self.head }) };
        self.head = Some(node);
    }
}

impl<T> Drop for FixedSizeAllocator<T> {
    fn drop(&mut self) {
        unsafe {
            if std::mem::needs_drop::<T>() {
                // Answering the TODO: a slot is live iff it is NOT on the
                // free list. Collect every free-list node into a set, then
                // walk each block slot-by-slot and drop the occupied ones.
                let mut free: std::collections::HashSet<*mut u8> =
                    std::collections::HashSet::new();
                let mut cur = self.head;
                while let Some(node) = cur {
                    free.insert(node.cast::<u8>());
                    cur = (*node).next;
                }
                for block in &self.blocks {
                    let base = block.data.cast::<u8>();
                    let slots = block.byte_size / Self::ITEM_SIZE;
                    for i in 0..slots {
                        let slot = base.add(i * Self::ITEM_SIZE);
                        if !free.contains(&slot) {
                            std::ptr::drop_in_place(slot.cast::<T>());
                        }
                    }
                }
            }
            // Release every block with the same layout it was allocated
            // with (size recorded per block, alignment is ITEM_ALIGN).
            for block in &self.blocks {
                let layout = std::alloc::Layout::from_size_align(
                    block.byte_size,
                    Self::ITEM_ALIGN,
                )
                .expect("layout was valid at allocation time");
                std::alloc::dealloc(block.data.cast(), layout);
            }
        }
    }
}
/// Owning handle to one pool slot; the slot is returned to the pool
/// when the handle is dropped.
pub struct FixedSizeHandle<T> {
    // Pointer to the live `T` inside a pool block.
    ptr: NonNull<T>,
    // Back-pointer to the owning pool, used by `drop` to call `dealloc`.
    // NOTE(review): this dangles if the allocator moves or is dropped
    // while the handle is alive — confirm the pool is address-stable
    // and outlives every handle.
    pool: NonNull<FixedSizeAllocator<T>>
}
impl<T> Drop for FixedSizeHandle<T> {
    #[inline(always)]
    fn drop(&mut self) {
        // Return the slot (running T's destructor) via the owning pool.
        // SAFETY relies on the pool still being alive at its original
        // address here — TODO confirm handles cannot outlive the pool.
        unsafe { self.pool.as_mut().dealloc(self.ptr) };
    }
}
impl<T> Deref for FixedSizeHandle<T> {
    type Target = T;

    /// Borrow the pooled value.
    #[inline(always)]
    fn deref(&self) -> &Self::Target {
        // SAFETY: `ptr` points at a slot initialized by `alloc`.
        unsafe { self.ptr.as_ref() }
    }
}
impl<T> DerefMut for FixedSizeHandle<T> {
    /// Mutably borrow the pooled value.
    #[inline(always)]
    fn deref_mut(&mut self) -> &mut Self::Target {
        // SAFETY: `ptr` points at a slot initialized by `alloc`, and the
        // exclusive borrow of the handle guarantees unique access.
        unsafe { self.ptr.as_mut() }
    }
}
fn main() {
let mut allocator: FixedSizeAllocator<f64> = FixedSizeAllocator::new(16);
{
let mut value = allocator.alloc(3.0).unwrap();
println!("Value is {}", *value);
*value = 25.0;
println!("Value is {}", *value);
}
}