
Commit

Remove memory statistics
srebhan committed Sep 29, 2023
1 parent acf4b23 · commit 457af26
Showing 3 changed files with 1 addition and 48 deletions.
15 changes: 0 additions & 15 deletions core/memallocator.go
```diff
@@ -2,7 +2,6 @@ package core
 
 import (
 	"errors"
-	"sync/atomic"
 )
 
 // Define a memory allocator
@@ -11,20 +10,6 @@ type MemAllocator interface {
 	Inner(buf []byte) []byte
 	Protect(buf []byte, readonly bool) error
 	Free(buf []byte) error
-	Stats() *MemStats
 }
 
-// AllocatorStatistics statistics about memory allocations and errors
-type MemStats struct {
-	PageAllocs        atomic.Uint64
-	PageAllocErrors   atomic.Uint64
-	PageFrees         atomic.Uint64
-	PageFreeErrors    atomic.Uint64
-	ObjectAllocs      atomic.Uint64
-	ObjectAllocErrors atomic.Uint64
-	ObjectFrees       atomic.Uint64
-	ObjectFreeErrors  atomic.Uint64
-	Slabs             atomic.Uint64
-}
-
 var (
```
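
For orientation, a minimal sketch of how the MemAllocator interface reads once this commit is applied, reconstructed from the context lines above; the Alloc signature is an assumption taken from the pageAllocator call sites below, not something this hunk shows.

```go
package core

// Sketch of the trimmed interface; Alloc's signature is assumed from
// pageAllocator.Alloc in memallocator_page.go.
type MemAllocator interface {
	Alloc(size int) ([]byte, error)
	Inner(buf []byte) []byte
	Protect(buf []byte, readonly bool) error
	Free(buf []byte) error
	// Stats() *MemStats and the MemStats counters are gone after this change.
}
```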
17 changes: 1 addition & 16 deletions core/memallocator_page.go
```diff
@@ -8,27 +8,23 @@ import (
 )
 
 type pageAllocator struct {
-	stats   *MemStats
 	objects map[int]*pageObject
 	sync.Mutex
 }
 
 func NewPageAllocator() MemAllocator {
 	a := &pageAllocator{
 		objects: make(map[int]*pageObject),
-		stats:   &MemStats{},
 	}
 	return a
 }
 
 func (a *pageAllocator) Alloc(size int) ([]byte, error) {
-	a.stats.ObjectAllocs.Add(1)
 	if size < 1 {
 		return nil, ErrNullAlloc
 	}
 	o, err := a.newPageObject(size)
 	if err != nil {
-		a.stats.ObjectAllocErrors.Add(1)
 		return nil, err
 	}
 
@@ -79,12 +75,9 @@ func (a *pageAllocator) Inner(buf []byte) []byte {
 }
 
 func (a *pageAllocator) Free(buf []byte) error {
-	a.stats.ObjectFrees.Add(1)
-
 	// Determine the address of the buffer we should free
 	o, found := a.pop(buf)
 	if !found {
-		a.stats.ObjectFreeErrors.Add(1)
 		return ErrBufferNotOwnedByAllocator
 	}
 
@@ -94,19 +87,13 @@ func (a *pageAllocator) Free(buf []byte) error {
 	}
 
 	// Free the related memory
-	a.stats.PageFrees.Add(uint64(len(o.memory) / pageSize))
 	if err := memcall.Free(o.memory); err != nil {
-		a.stats.PageFreeErrors.Add(1)
 		return err
 	}
 
 	return nil
 }
 
-func (a *pageAllocator) Stats() *MemStats {
-	return a.stats
-}
-
 // *** INTERNAL FUNCTIONS *** //
 func (a *pageAllocator) lookup(buf []byte) (*pageObject, bool) {
 	if len(buf) == 0 {
@@ -158,10 +145,8 @@ func (a *pageAllocator) newPageObject(size int) (*pageObject, error) {
 	innerLen := roundToPageSize(size)
 
 	// Allocate the total needed memory
-	a.stats.PageAllocs.Add(uint64(2 + innerLen/pageSize))
-	memory, err := memcall.Alloc((2 * pageSize) + innerLen)
+	memory, err := memcall.Alloc(2*pageSize + innerLen)
 	if err != nil {
-		a.stats.PageAllocErrors.Add(1)
 		return nil, err
 	}
 
```
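
The only functional change in this file is the simplified memcall.Alloc call in newPageObject, so here is a hedged sketch of the page arithmetic it encodes: a payload region rounded up to whole pages plus one guard page on each side. pageSize and roundToPageSize are illustrative stand-ins; their real definitions are not part of this diff.

```go
package main

import "fmt"

// Illustrative stand-ins: the real code takes the page size from the OS and
// defines roundToPageSize elsewhere in the package.
const pageSize = 4096

func roundToPageSize(n int) int {
	return ((n + pageSize - 1) / pageSize) * pageSize
}

func main() {
	size := 100                       // requested buffer size
	innerLen := roundToPageSize(size) // usable region, whole pages
	total := 2*pageSize + innerLen    // plus a guard page on each side

	// The removed PageAllocs counter tracked exactly this page count:
	// 2 guard pages + innerLen/pageSize payload pages.
	fmt.Println(innerLen, total, 2+innerLen/pageSize) // 4096 12288 3
}
```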
17 changes: 0 additions & 17 deletions core/memallocator_slab.go
```diff
@@ -38,7 +38,6 @@ func WithMinCanarySize(size int) SlabOption {
 // Memory allocator implementation
 type slabAllocator struct {
 	maxSlabSize int
-	stats       *MemStats
 	cfg         *SlabAllocatorConfig
 	allocator   *pageAllocator
 	slabs       []*slab
@@ -61,18 +60,15 @@ func NewSlabAllocator(options ...SlabOption) MemAllocator {
 	// Setup the allocator and initialize the slabs
 	a := &slabAllocator{
 		maxSlabSize: cfg.Sizes[len(cfg.Sizes)-1],
-		stats:       &MemStats{},
 		cfg:         cfg,
 		slabs:       make([]*slab, 0, len(cfg.Sizes)),
 		allocator: &pageAllocator{
 			objects: make(map[int]*pageObject),
-			stats:   &MemStats{},
 		},
 	}
 	for _, size := range cfg.Sizes {
 		s := &slab{
 			objSize:   size,
-			stats:     a.stats,
 			allocator: a.allocator,
 		}
 		a.slabs = append(a.slabs, s)
@@ -177,10 +173,6 @@ func (a *slabAllocator) Free(buf []byte) error {
 	return s.free(buf)
 }
 
-func (a *slabAllocator) Stats() *MemStats {
-	return a.stats
-}
-
 // *** INTERNAL FUNCTIONS *** //
 
 // Page implementation
@@ -232,7 +224,6 @@ func newPage(page []byte, size int) *slabPage {
 // Slab is a container for all Pages serving the same size
 type slab struct {
 	objSize   int
-	stats     *MemStats
 	allocator *pageAllocator
 	pages     []*slabPage
 	sync.Mutex
@@ -255,10 +246,8 @@ func (s *slab) alloc(size int) ([]byte, error) {
 		// Use the page allocator to get a new guarded memory page
 		page, err := s.allocator.Alloc(pageSize - s.objSize)
 		if err != nil {
-			s.stats.PageAllocErrors.Add(1)
 			return nil, err
 		}
-		s.stats.PageAllocs.Store(s.allocator.stats.PageAllocs.Load())
 		c = newPage(page, s.objSize)
 		s.pages = append(s.pages, c)
 	}
@@ -268,7 +257,6 @@ func (s *slab) alloc(size int) ([]byte, error) {
 	c.head = c.head.next
 	c.used++
 
-	s.stats.ObjectAllocs.Add(1)
 	data := getBufferPart(c.buffer, obj.offset, size)
 	canary := getBufferPart(c.buffer, obj.offset+size, s.objSize-size)
 
@@ -309,11 +297,8 @@ func (s *slab) free(buf []byte) error {
 		return ErrBufferNotOwnedByAllocator
 	}
 
-	s.stats.ObjectFrees.Add(1)
-
 	// Wipe the buffer including the canary check
 	if err := s.wipe(c, offset, len(buf)); err != nil {
-		s.stats.ObjectFreeErrors.Add(1)
 		return err
 	}
 	obj := &slabObject{
@@ -327,9 +312,7 @@ func (s *slab) free(buf []byte) error {
 	// free the underlying memory
 	if c.used == 0 {
 		err := s.allocator.Free(c.buffer)
-		s.stats.PageFrees.Store(s.allocator.stats.PageFrees.Load())
 		if err != nil {
-			s.stats.PageFreeErrors.Add(1)
 			return err
 		}
 
```
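
The alloc and free hunks above only show fragments of the slab bookkeeping, so below is a hedged, self-contained sketch of the free-list pattern they follow: pop the head on alloc, push an entry back on free, and release the backing page once used drops to zero. Field and type names mirror the diff, but everything else is assumed for illustration.

```go
package main

import "fmt"

// Illustrative stand-ins for the real slabObject/slabPage types; only head,
// next, offset and used appear in the diff, the rest is assumed.
type slabObject struct {
	offset int
	next   *slabObject
}

type slabPage struct {
	head *slabObject
	used int
}

// alloc pops the first free object, mirroring slab.alloc above.
func (c *slabPage) alloc() *slabObject {
	obj := c.head
	c.head = c.head.next
	c.used++
	return obj
}

// free pushes an entry back onto the free list, mirroring slab.free above;
// it reports whether the page is now empty and could be handed back to the
// page allocator.
func (c *slabPage) free(obj *slabObject) bool {
	obj.next = c.head
	c.head = obj
	c.used--
	return c.used == 0
}

func main() {
	c := &slabPage{head: &slabObject{offset: 0, next: &slabObject{offset: 32}}}
	o := c.alloc()
	fmt.Println(o.offset, c.used)  // 0 1
	fmt.Println(c.free(o), c.used) // true 0
}
```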
