//! This module implements a freeing-bump allocator for a Wasm linear memory.
//!
//! Blocks are handed out in power-of-two sizes between 8 bytes and 16 MiB. Fresh blocks are
//! carved off a bump pointer; freed blocks are kept in one intrusive linked list per size and
//! reused by later allocations of the same size. Every block is preceded by an 8-byte header
//! that stores either the block's order (while occupied) or a link to the next free block
//! (while free).
use crate::Error;
use sp_std::{convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}};
use sp_wasm_interface::{Pointer, WordSize};
/// The minimal alignment guaranteed by this allocator.
///
/// An alignment of 8 is chosen because it is the maximum alignment guaranteed by wasm32.
const ALIGNMENT: u32 = 8;
/// The number of supported block orders: sizes 8, 16, ..., 2^24 bytes.
const N: usize = 22;
const MAX_POSSIBLE_ALLOCATION: u32 = 16777216; // 2^24 bytes, 16 MiB
const MIN_POSSIBLE_ALLOCATION: u32 = 8;
/// Every allocation is prefixed by a header of this many bytes (see `Header`).
const HEADER_SIZE: u32 = 8;
fn error(msg: &'static str) -> Error {
	Error::Other(msg)
}
// A trace macro that forwards to `log::trace!` when compiled with the std feature.
macro_rules! trace {
	( $( $args:expr ),+ ) => {
		sp_std::if_std! {
			log::trace!(target: "wasm-heap", $( $args ),+);
		}
	}
}
/// The exponent for the power-of-two block size, adjusted to the minimum size.
///
/// With `MIN_POSSIBLE_ALLOCATION == 8`, `Order(0)` represents a block of size 8, `Order(1)`
/// a block of size 16, and so on.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct Order(u32);
impl Order {
	/// Creates an `Order` from a raw order value.
	///
	/// Returns `Err` if the value is greater than or equal to the total number of orders (`N`).
	fn from_raw(order: u32) -> Result<Self, Error> {
		if order < N as u32 {
			Ok(Self(order))
		} else {
			Err(error("invalid order"))
		}
	}
	/// Computes the order for the given allocation size.
	///
	/// The size is clamped so that the following holds:
	///
	/// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION`
	fn from_size(size: u32) -> Result<Self, Error> {
		let clamped_size = if size > MAX_POSSIBLE_ALLOCATION {
			return Err(Error::RequestedAllocationTooLarge);
		} else if size < MIN_POSSIBLE_ALLOCATION {
			MIN_POSSIBLE_ALLOCATION
		} else {
			size
		};
		// Round the clamped size up to the nearest power of two. That is the smallest block
		// that can hold the requested allocation.
		let power_of_two_size = clamped_size.next_power_of_two();

		// The order is the base-2 logarithm of the block size, shifted so that a block of
		// `MIN_POSSIBLE_ALLOCATION` bytes maps to order 0.
		let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros();
		Ok(Self(order))
	}
	/// Returns the corresponding size in bytes for this order.
	///
	/// Note that it is always a power of two.
	fn size(&self) -> u32 {
		MIN_POSSIBLE_ALLOCATION << self.0
	}
	/// Extracts the raw order value.
	fn into_raw(self) -> u32 {
		self.0
	}
}
/// A special magic value for a pointer in a link that denotes the end of the linked list.
const EMPTY_MARKER: u32 = u32::max_value();
/// A link between headers in a free list.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Link {
	/// Nil, denotes that there is no next element.
	Null,
	/// Link to the next element, represented as a pointer to its header.
	Ptr(u32),
}
impl Link {
	/// Creates a link from a raw value.
	fn from_raw(raw: u32) -> Self {
		if raw != EMPTY_MARKER {
			Self::Ptr(raw)
		} else {
			Self::Null
		}
	}
	/// Converts this link into its raw `u32` representation.
	fn into_raw(self) -> u32 {
		match self {
			Self::Null => EMPTY_MARKER,
			Self::Ptr(ptr) => ptr,
		}
	}
}
/// A header of an allocation.
///
/// In memory the header is encoded as an 8-byte little-endian value: the low 32 bits hold
/// the payload (a link for a free header, an order for an occupied one) and bit 32 is set
/// if and only if the allocation is occupied.
#[derive(Clone, Debug, PartialEq, Eq)]
enum Header {
	/// A free header contains a link to the next free header.
	Free(Link),
	/// An occupied header contains the order of the allocation it belongs to.
	Occupied(Order),
}
impl Header {
	/// Reads a header from memory at the given pointer.
	///
	/// Returns an error if the pointer is out of the linear memory bounds.
	fn read_from<M: Memory + ?Sized>(memory: &M, header_ptr: u32) -> Result<Self, Error> {
		let raw_header = memory.read_le_u64(header_ptr)?;

		// Check whether the header represents an occupied or free allocation, then extract
		// the header data by discarding the high bits.
		let occupied = raw_header & 0x00000001_00000000 != 0;
		let header_data = raw_header as u32;
		Ok(if occupied {
			Self::Occupied(Order::from_raw(header_data)?)
		} else {
			Self::Free(Link::from_raw(header_data))
		})
	}
	/// Writes this header into memory at the given pointer.
	fn write_into<M: Memory + ?Sized>(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> {
		let (header_data, occupied_mask) = match *self {
			Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000),
			Self::Free(link) => (link.into_raw(), 0x00000000_00000000),
		};
		let raw_header = header_data as u64 | occupied_mask;
		memory.write_le_u64(header_ptr, raw_header)?;
		Ok(())
	}
	/// Returns the order of the allocation if this is an occupied header.
	fn into_occupied(self) -> Option<Order> {
		match self {
			Self::Occupied(order) => Some(order),
			_ => None,
		}
	}
	/// Returns the link to the next free header if this is a free header.
	fn into_free(self) -> Option<Link> {
		match self {
			Self::Free(link) => Some(link),
			_ => None,
		}
	}
}
/// A collection of intrusive linked lists, one per order, tracking free blocks.
struct FreeLists {
	heads: [Link; N],
}
impl FreeLists {
	/// Creates the free lists, all of them empty.
	fn new() -> Self {
		Self {
			heads: [Link::Null; N],
		}
	}
	/// Replaces the head of the free list for the given order with the given link and
	/// returns the previous head.
	fn replace(&mut self, order: Order, new: Link) -> Link {
		let prev = self[order];
		self[order] = new;
		prev
	}
}
impl Index<Order> for FreeLists {
	type Output = Link;
	fn index(&self, index: Order) -> &Link {
		&self.heads[index.0 as usize]
	}
}
impl IndexMut<Order> for FreeLists {
	fn index_mut(&mut self, index: Order) -> &mut Link {
		&mut self.heads[index.0 as usize]
	}
}
/// An implementation of a freeing-bump allocator.
///
/// Fresh allocations are carved off the `bumper`; freed blocks are kept in per-order free
/// lists and reused before the bumper is advanced again.
pub struct FreeingBumpHeapAllocator {
	bumper: u32,
	free_lists: FreeLists,
	total_size: u32,
}
impl FreeingBumpHeapAllocator {
	/// Creates a new allocation heap which follows a freeing-bump strategy.
	///
	/// # Arguments
	///
	/// - `heap_base` - the offset from the beginning of the linear memory where the heap starts.
	pub fn new(heap_base: u32) -> Self {
		// Align the heap base up to the next multiple of `ALIGNMENT` so that every returned
		// pointer is properly aligned.
		let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT;
		FreeingBumpHeapAllocator {
			bumper: aligned_heap_base,
			free_lists: FreeLists::new(),
			total_size: 0,
		}
	}
	/// Gets the requested number of bytes to allocate and returns a pointer to the allocation.
	///
	/// The maximum size which can be allocated at once is 16 MiB. There is no minimum size:
	/// whatever size is passed in is rounded up to the next power of two, and requests below
	/// 8 bytes are rounded up to 8 bytes.
	///
	/// # Arguments
	///
	/// - `mem` - the linear memory on which this allocator operates.
	/// - `size` - the size in bytes of the allocation request.
	pub fn allocate<M: Memory + ?Sized>(
		&mut self,
		mem: &mut M,
		size: WordSize,
	) -> Result<Pointer<u8>, Error> {
		let order = Order::from_size(size)?;
		let header_ptr: u32 = match self.free_lists[order] {
			Link::Ptr(header_ptr) => {
				assert!(
					header_ptr + order.size() + HEADER_SIZE <= mem.size(),
					"Pointer is looked up in list of free entries, into which
					only valid values are inserted; qed"
				);
				// Remove this header from the free list by replacing the head with the
				// next free element.
				let next_free = Header::read_from(mem, header_ptr)?
					.into_free()
					.ok_or_else(|| error("free list points to an occupied header"))?;
				self.free_lists[order] = next_free;
				header_ptr
			}
			Link::Null => {
				// The corresponding free list is empty; carve a new block off the bump pointer.
				self.bump(order.size() + HEADER_SIZE, mem.size())?
			}
		};
		// Write the order into the now-occupied header.
		Header::Occupied(order).write_into(mem, header_ptr)?;
		self.total_size += order.size() + HEADER_SIZE;
		trace!("Heap size is {} bytes after allocation", self.total_size);
		Ok(Pointer::new(header_ptr + HEADER_SIZE))
	}
	/// Deallocates the space which was allocated for a pointer.
	///
	/// # Arguments
	///
	/// - `mem` - the linear memory on which this allocator operates.
	/// - `ptr` - a pointer to the allocated chunk, as returned by `allocate`.
	pub fn deallocate<M: Memory + ?Sized>(&mut self, mem: &mut M, ptr: Pointer<u8>) -> Result<(), Error> {
		let header_ptr = u32::from(ptr)
			.checked_sub(HEADER_SIZE)
			.ok_or_else(|| error("Invalid pointer for deallocation"))?;
		let order = Header::read_from(mem, header_ptr)?
			.into_occupied()
			.ok_or_else(|| error("the allocation points to a free header"))?;

		// Update the just-freed header and knit it back into the free list.
		let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr));
		Header::Free(prev_head).write_into(mem, header_ptr)?;

		// Do the total size bookkeeping.
		self.total_size = self
			.total_size
			.checked_sub(order.size() + HEADER_SIZE)
			.ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?;
		trace!("Heap size is {} bytes after deallocation", self.total_size);
		Ok(())
	}
	/// Increases the `bumper` by `size` and returns the `bumper` value from before the
	/// increase.
	///
	/// Returns `Error::AllocatorOutOfSpace` if the operation would exhaust the heap.
	fn bump(&mut self, size: u32, heap_end: u32) -> Result<u32, Error> {
		if self.bumper + size > heap_end {
			return Err(Error::AllocatorOutOfSpace);
		}
		let res = self.bumper;
		self.bumper += size;
		Ok(res)
	}
}
/// An abstraction over the accesses to the linear memory the allocator operates on.
pub trait Memory {
	/// Reads a u64 from the heap in little-endian byte order.
	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error>;
	/// Writes a u64 to the heap in little-endian byte order.
	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>;
	/// Returns the size of the memory in bytes.
	fn size(&self) -> u32;
}
impl Memory for [u8] {
	fn read_le_u64(&self, ptr: u32) -> Result<u64, Error> {
		let range =
			heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?;
		let bytes = self[range]
			.try_into()
			.expect("[u8] slice of length 8 must be convertible to [u8; 8]");
		Ok(u64::from_le_bytes(bytes))
	}
	fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> {
		let range =
			heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?;
		let bytes = val.to_le_bytes();
		self[range].copy_from_slice(&bytes[..]);
		Ok(())
	}
	fn size(&self) -> u32 {
		u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed")
	}
}
/// Returns the `start..end` range within the heap for the given `offset` and `length`, or
/// `None` if the range would overflow or fall out of bounds.
fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option<Range<usize>> {
	let start = offset as usize;
	let end = offset.checked_add(length)? as usize;
	if end <= heap_len {
		Some(start..end)
	} else {
		None
	}
}
#[cfg(test)]
mod tests {
	use super::*;
	const PAGE_SIZE: u32 = 65536;
	/// Makes a pointer out of the given address.
	fn to_pointer(address: u32) -> Pointer<u8> {
		Pointer::new(address)
	}
	#[test]
	fn should_allocate_properly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		
		let ptr = heap.allocate(&mut mem[..], 1).unwrap();

		// The returned pointer must start right after the `HEADER_SIZE` prefix.
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}
	#[test]
	fn should_always_align_pointers_to_multiples_of_8() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);
		
		let ptr = heap.allocate(&mut mem[..], 1).unwrap();

		// The pointer must start at the next multiple of 8 after the heap base of 13,
		// i.e. 16, plus the 8-byte header prefix, hence 24.
		assert_eq!(ptr, to_pointer(24));
	}
	#[test]
	fn should_increment_pointers_properly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		
		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 1).unwrap();

		// An 8-byte header is prepended to each allocation.
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// ptr1's 8-byte header + its content padded to the minimum block size of 8 bytes
		// + ptr2's own 8-byte header.
		assert_eq!(ptr2, to_pointer(24));

		// ptr2 + its 16-byte block (9 bytes rounded up) + the 8-byte header of ptr3.
		assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE));
	}
	#[test]
	fn should_free_properly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
		let ptr2 = heap.allocate(&mut mem[..], 1).unwrap();
		
		assert_eq!(ptr2, to_pointer(24));
		
		heap.deallocate(&mut mem[..], ptr2).unwrap();

		// The order-0 free list head should now point at the header of ptr2.
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}
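	// An added illustrative test (not part of the original suite): `FreeLists::replace`
	// swaps in a new head and hands back the previous one, which is exactly the splice
	// `deallocate` relies on when it knits a freed block into the list.
	#[test]
	fn free_lists_replace_should_return_previous_head() {
		let mut lists = FreeLists::new();
		let order = Order::from_raw(0).unwrap();

		// The list starts empty, so the first replace returns `Null`...
		assert_eq!(lists.replace(order, Link::Ptr(8)), Link::Null);
		// ...and a second replace returns the head installed above.
		assert_eq!(lists.replace(order, Link::Ptr(24)), Link::Ptr(8));
		assert_eq!(lists[order], Link::Ptr(24));
	}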
	#[test]
	fn should_deallocate_and_reallocate_properly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let padded_offset = 16;
		let mut heap = FreeingBumpHeapAllocator::new(13);
		let ptr1 = heap.allocate(&mut mem[..], 1).unwrap();
		
		assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE));
		let ptr2 = heap.allocate(&mut mem[..], 9).unwrap();

		// ptr1's 8-byte header + its 8-byte block, followed by ptr2's own 8-byte header.
		assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE));
		
		heap.deallocate(&mut mem[..], ptr2).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 9).unwrap();

		// ptr3 should reuse the slot that was freed by deallocating ptr2.
		assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE));
		assert_eq!(heap.free_lists.heads, [Link::Null; N]);
	}
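	// An added sketch of the bucketing behaviour: a freed block only satisfies later
	// requests of the same order; there is no splitting or merging between orders.
	#[test]
	fn should_not_reuse_blocks_of_a_different_order() {
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], 8).unwrap();
		heap.deallocate(&mut mem[..], ptr1).unwrap();

		// A 16-byte request (order 1) cannot use the freed 8-byte block and bumps instead.
		let ptr2 = heap.allocate(&mut mem[..], 16).unwrap();
		assert_ne!(ptr1, ptr2);

		// A subsequent 8-byte request does reuse it.
		let ptr3 = heap.allocate(&mut mem[..], 8).unwrap();
		assert_eq!(ptr1, ptr3);
	}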
	#[test]
	fn should_build_linked_list_of_free_areas_properly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], 8).unwrap();
		let ptr2 = heap.allocate(&mut mem[..], 8).unwrap();
		let ptr3 = heap.allocate(&mut mem[..], 8).unwrap();
		
		heap.deallocate(&mut mem[..], ptr1).unwrap();
		heap.deallocate(&mut mem[..], ptr2).unwrap();
		heap.deallocate(&mut mem[..], ptr3).unwrap();

		// The free list head should point at the most recently freed block, ptr3.
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE));
		let ptr4 = heap.allocate(&mut mem[..], 8).unwrap();
		assert_eq!(ptr4, ptr3);
		assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE));
	}
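	// An added negative test: deallocation must fail when the header just before the
	// pointer is not marked occupied. Zeroed memory decodes as a free header, and a
	// pointer below `HEADER_SIZE` has no room for a header at all.
	#[test]
	fn should_reject_deallocation_of_unallocated_pointer() {
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		assert!(heap.deallocate(&mut mem[..], to_pointer(HEADER_SIZE)).is_err());
		assert!(heap.deallocate(&mut mem[..], to_pointer(0)).is_err());
	}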
	#[test]
	fn should_not_allocate_if_too_large() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// The request is rounded up to the next power of two (a full page), which together
		// with its header does not fit into the single page of linear memory.
		let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13);
		
		match ptr.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}
	#[test]
	fn should_not_allocate_if_full() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));

		// The remaining half page cannot also fit the 8-byte header of a second allocation.
		let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2);
		
		
		match ptr2.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}
	#[test]
	fn should_allocate_max_possible_allocation_size() {
		
		let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		
		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION).unwrap();
		
		assert_eq!(ptr, to_pointer(HEADER_SIZE));
	}
	#[test]
	fn should_not_allocate_if_requested_size_too_large() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		
		let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1);
		
		match ptr.unwrap_err() {
			Error::RequestedAllocationTooLarge => {},
			e => panic!("Expected allocation size too large error, got: {:?}", e),
		}
	}
	#[test]
	fn should_return_error_when_bumper_greater_than_heap_size() {
		
		let mut mem = [0u8; 64];
		let mut heap = FreeingBumpHeapAllocator::new(0);
		let ptr1 = heap.allocate(&mut mem[..], 32).unwrap();
		assert_eq!(ptr1, to_pointer(HEADER_SIZE));
		heap.deallocate(&mut mem[..], ptr1).expect("failed freeing ptr1");
		assert_eq!(heap.total_size, 0);
		assert_eq!(heap.bumper, 40);
		let ptr2 = heap.allocate(&mut mem[..], 16).unwrap();
		assert_eq!(ptr2, to_pointer(48));
		heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2");
		assert_eq!(heap.total_size, 0);
		assert_eq!(heap.bumper, 64);

		// The `bumper` now equals the heap size, so any further allocation that needs to
		// advance it must fail. An 8-byte request does bump (rather than reuse a free list
		// entry) because no 8-byte block has been allocated and freed before.
		let ptr = heap.allocate(&mut mem[..], 8);
		
		match ptr.unwrap_err() {
			Error::AllocatorOutOfSpace => {},
			e => panic!("Expected allocator out of space error, got: {:?}", e),
		}
	}
	#[test]
	fn should_include_prefixes_in_total_heap_size() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(1);

		// A request of 9 bytes is rounded up to a 16-byte block.
		heap.allocate(&mut mem[..], 9).unwrap();

		// The total size accounts for the 8-byte header as well as the block itself.
		assert_eq!(heap.total_size, HEADER_SIZE + 16);
	}
	#[test]
	fn should_calculate_total_heap_size_to_zero() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(13);

		// A request of 42 bytes is rounded up to a 64-byte block; the heap base of 13 is
		// aligned up to 16.
		let ptr = heap.allocate(&mut mem[..], 42).unwrap();
		assert_eq!(ptr, to_pointer(16 + HEADER_SIZE));
		heap.deallocate(&mut mem[..], ptr).unwrap();
		
		assert_eq!(heap.total_size, 0);
	}
	#[test]
	fn should_calculate_total_size_of_zero() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		let mut heap = FreeingBumpHeapAllocator::new(19);

		// Repeated allocate/deallocate cycles must leave the accounted size at zero.
		for _ in 1..10 {
			let ptr = heap.allocate(&mut mem[..], 42).unwrap();
			heap.deallocate(&mut mem[..], ptr).unwrap();
		}
		
		assert_eq!(heap.total_size, 0);
	}
	#[test]
	fn should_read_and_write_u64_correctly() {
		
		let mut mem = [0u8; PAGE_SIZE as usize];
		
		Memory::write_le_u64(mem.as_mut(), 40, 4480113).unwrap();
		
		let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap();
		assert_eq!(value, 4480113);
	}
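	// An added bounds check: a read or write that would run past the end of the backing
	// slice must return an error instead of touching memory out of range.
	#[test]
	fn should_reject_out_of_bounds_access() {
		let mut mem = [0u8; 16];

		// The last valid u64 access starts at offset 8; offset 9 would need byte 16.
		assert!(Memory::read_le_u64(mem.as_mut(), 8).is_ok());
		assert!(Memory::read_le_u64(mem.as_mut(), 9).is_err());
		assert!(Memory::write_le_u64(mem.as_mut(), 9, 0).is_err());
	}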
	#[test]
	fn should_get_item_size_from_order() {
		
		let raw_order = 0;
		
		let item_size = Order::from_raw(raw_order).unwrap().size();
		
		assert_eq!(item_size, 8);
	}
	#[test]
	fn should_get_max_item_size_from_index() {
		
		let raw_order = 21;
		
		let item_size = Order::from_raw(raw_order).unwrap().size();
		
		assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION);
	}
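	// An added illustration of `Order::from_size`: requests are clamped to at least
	// `MIN_POSSIBLE_ALLOCATION` and rounded up to the next power of two, so these
	// expected block sizes follow directly from that rule.
	#[test]
	fn should_round_sizes_up_to_power_of_two_blocks() {
		assert_eq!(Order::from_size(1).unwrap().size(), 8);
		assert_eq!(Order::from_size(8).unwrap().size(), 8);
		assert_eq!(Order::from_size(9).unwrap().size(), 16);
		assert_eq!(Order::from_size(17).unwrap().size(), 32);
		assert_eq!(Order::from_size(MAX_POSSIBLE_ALLOCATION).unwrap().size(), MAX_POSSIBLE_ALLOCATION);
		assert!(Order::from_size(MAX_POSSIBLE_ALLOCATION + 1).is_err());
	}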
	#[test]
	fn deallocate_needs_to_maintain_linked_list() {
		let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize];
		let mut heap = FreeingBumpHeapAllocator::new(0);

		// Allocate and free some pointers.
		let ptrs = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
		ptrs.into_iter().for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap());

		// The second time round we should be able to allocate all of them again.
		let _ = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::<Vec<_>>();
	}
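	// An added encoding check: `Link` uses `EMPTY_MARKER` (`u32::max_value()`) as the
	// sentinel for the end of a free list, and every other raw value is a pointer.
	#[test]
	fn link_encoding_should_round_trip() {
		assert_eq!(Link::from_raw(EMPTY_MARKER), Link::Null);
		assert_eq!(Link::Null.into_raw(), EMPTY_MARKER);
		assert_eq!(Link::from_raw(0), Link::Ptr(0));
		assert_eq!(Link::Ptr(40).into_raw(), 40);
	}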
	#[test]
	fn header_read_write() {
		let roundtrip = |header: Header| {
			let mut memory = [0u8; 32];
			header.write_into(memory.as_mut(), 0).unwrap();
			let read_header = Header::read_from(memory.as_mut(), 0).unwrap();
			assert_eq!(header, read_header);
		};
		roundtrip(Header::Occupied(Order(0)));
		roundtrip(Header::Occupied(Order(1)));
		roundtrip(Header::Free(Link::Null));
		roundtrip(Header::Free(Link::Ptr(0)));
		roundtrip(Header::Free(Link::Ptr(4)));
	}
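	// An added sketch of the assumed on-heap header layout: the low 32 bits carry the
	// payload (order or link) and bit 32 is the occupied flag, stored little-endian.
	#[test]
	fn header_should_set_occupied_bit_in_high_word() {
		let mut memory = [0u8; 8];

		Header::Occupied(Order(3)).write_into(memory.as_mut(), 0).unwrap();
		assert_eq!(Memory::read_le_u64(memory.as_mut(), 0).unwrap(), 0x00000001_00000003);

		Header::Free(Link::Ptr(40)).write_into(memory.as_mut(), 0).unwrap();
		assert_eq!(Memory::read_le_u64(memory.as_mut(), 0).unwrap(), 40);
	}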
}