diff --git a/butils/md5hash/md5.go b/butils/md5hash/md5.go index 7bab9bd..1de71a4 100644 --- a/butils/md5hash/md5.go +++ b/butils/md5hash/md5.go @@ -1,3 +1,17 @@ +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package md5hash import ( @@ -6,10 +20,10 @@ import ( "hash" ) -// Size The size of an MD5 checksum in bytes. +// The size of an MD5 checksum in bytes. const Size = 16 -// BlockSize The blocksize of MD5 in bytes. +// The blocksize of MD5 in bytes. const BlockSize = 64 const ( @@ -184,10 +198,12 @@ func MD5Hash(data []byte) (h []byte, hi, lo uint64) { } //go:inline -func MD5Sum(p []byte) (h []byte, hi, lo uint64) { - var d = digest{s: [4]uint32{init0, init1, init2, init3}} +func MD5Sum(p []byte, h []byte) (hi, lo uint64) { + var d = digest{ + s: [4]uint32{init0, init1, init2, init3}, + len: uint64(len(p)), + } // Write - d.len += uint64(len(p)) if d.nx > 0 { n := copy(d.x[d.nx:], p) d.nx += n @@ -251,7 +267,6 @@ func MD5Sum(p []byte) (h []byte, hi, lo uint64) { panic("d.nx != 0") } - h = make([]byte, Size) binary.LittleEndian.PutUint32(h[0:], d.s[0]) binary.LittleEndian.PutUint32(h[4:], d.s[1]) binary.LittleEndian.PutUint32(h[8:], d.s[2]) diff --git a/butils/md5hash/md5_test.go b/butils/md5hash/md5_test.go index a997fdd..2d5aa69 100644 --- a/butils/md5hash/md5_test.go +++ b/butils/md5hash/md5_test.go @@ -1,6 +1,16 @@ -// Copyright 2009 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. package md5hash @@ -228,9 +238,10 @@ func TestAllocations(t *testing.T) { func TestMD5Hash(t *testing.T) { in := []byte("hello, world!") h, hi, lo := MD5Hash(in) - h2, hi2, lo2 := MD5Sum(in) + var h2 [Size]byte + hi2, lo2 := MD5Sum(in, h2[:]) - if !bytes.Equal(h, h2) { + if !bytes.Equal(h, h2[:]) { t.Errorf("MD5Hash(in) = %x, want %x", h, h2) } if hi != hi2 { diff --git a/butils/md5hash/md5block_arm.s b/butils/md5hash/md5block_arm.s index e02de26..84e6343 100644 --- a/butils/md5hash/md5block_arm.s +++ b/butils/md5hash/md5block_arm.s @@ -12,8 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -// ARM version of md5block.go - #include "textflag.h" // Register definitions diff --git a/butils/md5hash/md5block_arm64.s b/butils/md5hash/md5block_arm64.s index ce1b0f0..e683a0e 100644 --- a/butils/md5hash/md5block_arm64.s +++ b/butils/md5hash/md5block_arm64.s @@ -12,9 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// ARM64 version of md5block.go -// derived from crypto/md5/md5block_amd64.s - #include "textflag.h" TEXT ·block(SB),NOSPLIT,$0-32 diff --git a/butils/vectormap/kvholder.go b/butils/vectormap/kvholder.go index 87c8e26..df2d615 100644 --- a/butils/vectormap/kvholder.go +++ b/butils/vectormap/kvholder.go @@ -234,8 +234,5 @@ func LoadUint32(buf []byte) (dest uint32) { //go:inline func Cap4Size(vSize uint32) uint32 { - if vSize&3 != 0 { - return (vSize>>2 + 1) << 2 - } - return vSize + return (vSize + 3) &^ 3 } diff --git a/butils/vectormap/lfumap.go b/butils/vectormap/lfumap.go index 991c555..96f258e 100644 --- a/butils/vectormap/lfumap.go +++ b/butils/vectormap/lfumap.go @@ -53,8 +53,8 @@ func newInnerLFUMap(owner *VectorMap, sz uint32) (m *LFUMap) { limit: groups * maxAvgGroupLoad, } memMax := owner.memCap / Byte(owner.buckets) - if memMax > 64*MB || memMax <= 0 { - memMax = 64 * MB + if memMax > maxShardMemSize || memMax <= 0 { + memMax = maxShardMemSize } for i := range m.ctrl { m.ctrl[i] = newEmptyMetadata() @@ -850,6 +850,8 @@ func (m *LFUMap) Delete(l uint64, key []byte) (ok bool) { } func (m *LFUMap) Clear() { + m.putLock.Lock() + m.rehashLock.Lock() for i, c := range m.ctrl { for j := range c { m.ctrl[i][j] = empty @@ -867,8 +869,27 @@ func (m *LFUMap) Clear() { } m.resident, m.dead = 0, 0 + kvholder := newKVHolder(Byte(m.kvHolder.cap)) + m.kvHolder.cap = 0 + m.kvHolder.buffer.release() + m.kvHolder = kvholder + m.rehashLock.Unlock() + m.putLock.Unlock() +} + +func (m *LFUMap) Close() { + m.putLock.Lock() + m.rehashLock.Lock() + m.ctrl = nil + m.counters = nil + m.groups = nil + m.resident, m.dead = 0, 0 m.kvHolder.cap = 0 - m.kvHolder.data = nil + m.kvHolder.buffer.release() + m.kvHolder = nil + m.owner = nil + m.rehashLock.Unlock() + m.putLock.Unlock() } func (m *LFUMap) QueryCount() (count uint64) { @@ -1000,7 +1021,7 @@ func (m *LFUMap) Eliminate() (delCount int, skipReason int) { return } -func (m *LFUMap) GCCopy() (deadCount int, gcMem int, subSince 
bool, skipReason int) { +func (m *LFUMap) GCCopy() (deadCount int, gcMem int, skipReason int) { if m.garbageUsage() < garbageRate { skipReason = skipReason1 return diff --git a/butils/vectormap/lrumap.go b/butils/vectormap/lrumap.go index d04ea1c..7b618f6 100644 --- a/butils/vectormap/lrumap.go +++ b/butils/vectormap/lrumap.go @@ -22,14 +22,14 @@ import ( "time" "unsafe" - "github.com/zuoyebang/bitalostored/butils/vectormap/simd" - "github.com/zuoyebang/bitalostored/butils/md5hash" + "github.com/zuoyebang/bitalostored/butils/vectormap/simd" ) var UnitTime = 30 * time.Second const LRUSubDuration = 24 * time.Hour +const LRUMaxDuration = 20 * 24 * time.Hour type LRUMap struct { owner *VectorMap @@ -64,8 +64,8 @@ func newInnerLRUMap(owner *VectorMap, sz uint32) (m *LRUMap) { limit: groups * maxAvgGroupLoad, } memMax := owner.memCap / Byte(owner.buckets) - if memMax > 64*MB || memMax <= 0 { - memMax = 64 * MB + if !owner.skipCheck && (memMax > maxShardMemSize || memMax <= 0) { + memMax = maxShardMemSize } for i := range m.ctrl { m.ctrl[i] = newEmptyMetadata() @@ -878,6 +878,8 @@ func (m *LRUMap) Delete(l uint64, key []byte) (ok bool) { } func (m *LRUMap) Clear() { + m.putLock.Lock() + m.rehashLock.Lock() for i, c := range m.ctrl { for j := range c { m.ctrl[i][j] = empty @@ -895,8 +897,27 @@ func (m *LRUMap) Clear() { } m.resident, m.dead = 0, 0 + kvholder := newKVHolder(Byte(m.kvHolder.cap)) + m.kvHolder.cap = 0 + m.kvHolder.buffer.release() + m.kvHolder = kvholder + m.rehashLock.Unlock() + m.putLock.Unlock() +} + +func (m *LRUMap) Close() { + m.putLock.Lock() + m.rehashLock.Lock() + m.ctrl = nil + m.sinces = nil + m.groups = nil + m.resident, m.dead = 0, 0 m.kvHolder.cap = 0 - m.kvHolder.data = nil + m.kvHolder.buffer.release() + m.kvHolder = nil + m.owner = nil + m.rehashLock.Unlock() + m.putLock.Unlock() } func (m *LRUMap) QueryCount() (count uint64) { @@ -972,6 +993,9 @@ func (m *LRUMap) rehash() { m.limit = n * maxAvgGroupLoad m.resident, m.dead = resident, 0 
m.rehashLock.Unlock() + if m.owner.logger != nil { + m.owner.logger.Infof("rehash done, new size: %d", n) + } } func (m *LRUMap) loadFactor() float32 { @@ -1021,7 +1045,27 @@ func (m *LRUMap) Eliminate() (delCount int, skipReason int) { return } -func (m *LRUMap) GCCopy() (deadCount int, gcMem int, subSince bool, skipReason int) { +func (m *LRUMap) AdaptStartTime() (subSince bool) { + if time.Since(m.lastSubTime) > LRUSubDuration { + if time.Since(m.startTime.Add(time.Duration(m.minTopSince)*UnitTime)) > LRUMaxDuration { + m.minTopSince = uint16(LRUSubDuration / UnitTime) + } + m.lastSubTime = time.Now() + var level [16]uint16 + for i := 0; i < 16; i++ { + level[i] = m.minTopSince + } + ctrLen := len(m.ctrl) + for i := 0; i < ctrLen; i++ { + simd.MSubs256epu16(unsafe.Pointer(&(m.sinces[i])), unsafe.Pointer(&level), unsafe.Pointer(&(m.sinces[i]))) + } + m.startTime = m.startTime.Add(time.Duration(m.minTopSince) * UnitTime) + subSince = true + } + return +} + +func (m *LRUMap) GCCopy() (deadCount int, gcMem int, skipReason int) { if m.garbageUsage() < garbageRate { skipReason = skipReason1 return @@ -1074,20 +1118,6 @@ func (m *LRUMap) GCCopy() (deadCount int, gcMem int, subSince bool, skipReason i } } - if time.Since(m.lastSubTime) > LRUSubDuration && m.minTopSince > 0 { - m.lastSubTime = time.Now() - var level [16]uint16 - for i := 0; i < 16; i++ { - level[i] = m.minTopSince - } - ctrLen := len(m.ctrl) - for i := 0; i < ctrLen; i++ { - simd.MSubs256epu16(unsafe.Pointer(&(sinces[i])), unsafe.Pointer(&level), unsafe.Pointer(&(sinces[i]))) - } - m.startTime = m.startTime.Add(time.Duration(m.minTopSince) * UnitTime) - subSince = true - } - m.rehashLock.Lock() m.groups = groups m.ctrl = ctrl diff --git a/butils/vectormap/vectormap.go b/butils/vectormap/vectormap.go index 8760368..ffd77ad 100644 --- a/butils/vectormap/vectormap.go +++ b/butils/vectormap/vectormap.go @@ -17,6 +17,7 @@ package vectormap import ( "fmt" "math" + "sync" "time" 
"github.com/zuoyebang/bitalostored/butils/md5hash" @@ -42,6 +43,7 @@ const ( minBuckets int = 1024 maxMemSize Byte = 128 << 30 minMemSize Byte = 1 << 30 + maxShardMemSize Byte = 64 << 20 overShortSize uint32 = 1 << 7 overLongSize uint32 = (1 << 15) - 1 overLongStoreH uint32 = overLongSize >> 8 @@ -52,7 +54,8 @@ const ( limitSize uint32 = 4 << 20 storeUintBytes uint32 = 4 - MinEliminateDuration = 60 * time.Second + MinEliminateGoroutines = 1 + MinEliminateDuration = 180 * time.Second ) const ( @@ -88,9 +91,9 @@ type ILogger interface { type Option func(vm *VectorMap) -func WithDebug() Option { +func WithSkipCheck() Option { return func(vm *VectorMap) { - vm.debug = true + vm.skipCheck = true } } @@ -116,22 +119,21 @@ func WithEliminate(memCap Byte, goroutines int, duration time.Duration) Option { return func(vm *VectorMap) { vm.memCap = memCap - if vm.debug { + if vm.skipCheck { if goroutines == 0 { return } } if goroutines <= 0 { - goroutines = 1 + goroutines = MinEliminateGoroutines } - if !vm.debug && duration < MinEliminateDuration { + if !vm.skipCheck && duration < MinEliminateDuration { duration = MinEliminateDuration } vm.eliminateHandler = &eliminateHandler{ - goroutines: goroutines, - circleDuration: time.Duration(float64(duration) * 0.15), - stepDuration: duration / 1000, + goroutines: goroutines, + stepDuration: duration, } } } @@ -157,7 +159,9 @@ type VectorMap struct { memCap Byte eliminateHandler *eliminateHandler logger ILogger - debug bool + skipCheck bool + stop bool + wg sync.WaitGroup mtype MapType } @@ -167,7 +171,7 @@ func NewVectorMap(sz uint32, ops ...Option) (vm *VectorMap) { op(vm) } - if !vm.debug { + if !vm.skipCheck { if vm.memCap < minMemSize { vm.memCap = minMemSize } else if vm.memCap > maxMemSize { @@ -201,7 +205,6 @@ func NewVectorMap(sz uint32, ops ...Option) (vm *VectorMap) { } if vm.eliminateHandler != nil { - vm.eliminateHandler.stepDuration = time.Duration(int(vm.eliminateHandler.stepDuration) * (vm.buckets / 1000)) 
vm.eliminateHandler.Handle(vm) } return vm @@ -213,13 +216,15 @@ func (vm *VectorMap) slotAt(hi uint64) Map { } func (vm *VectorMap) Put(k []byte, v []byte) bool { - h, hi, lo := md5hash.MD5Sum(k) - return vm.slotAt(hi).Put(lo, h, v) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + return vm.slotAt(hi).Put(lo, h[:], v) } func (vm *VectorMap) PutMultiValue(k []byte, vlen int, vals ...[]byte) bool { - h, hi, lo := md5hash.MD5Sum(k) - return vm.slotAt(hi).PutMultiValue(lo, h, uint32(vlen), vals) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + return vm.slotAt(hi).PutMultiValue(lo, h[:], uint32(vlen), vals) } func (vm *VectorMap) RePutFails() uint64 { @@ -236,24 +241,28 @@ func (vm *VectorMap) RePut(k []byte, v []byte) (res bool) { res = false return } - h, hi, lo := md5hash.MD5Sum(k) - res = vm.slotAt(hi).RePut(lo, h, v) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + res = vm.slotAt(hi).RePut(lo, h[:], v) return } func (vm *VectorMap) Get(k []byte) (v []byte, closer func(), ok bool) { - h, hi, lo := md5hash.MD5Sum(k) - return vm.slotAt(hi).Get(lo, h) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + return vm.slotAt(hi).Get(lo, h[:]) } func (vm *VectorMap) Delete(k []byte) { - h, hi, lo := md5hash.MD5Sum(k) - vm.slotAt(hi).Delete(lo, h) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + vm.slotAt(hi).Delete(lo, h[:]) } func (vm *VectorMap) Has(k []byte) (ok bool) { - h, hi, lo := md5hash.MD5Sum(k) - return vm.slotAt(hi).Has(lo, h) + var h [16]byte + hi, lo := md5hash.MD5Sum(k, h[:]) + return vm.slotAt(hi).Has(lo, h[:]) } func (vm *VectorMap) Clear() { @@ -262,6 +271,14 @@ func (vm *VectorMap) Clear() { } } +func (vm *VectorMap) Close() { + vm.stop = true + vm.wg.Wait() + for _, m := range vm.shards { + m.Close() + } +} + func (vm *VectorMap) Count() int { var sum int for _, m := range vm.shards { @@ -278,6 +295,10 @@ func (vm *VectorMap) Items() uint32 { return sum } +func (vm *VectorMap) Shards() int { + return vm.buckets +} + func (vm 
*VectorMap) Capacity() int { var sum int for _, m := range vm.shards { @@ -331,12 +352,13 @@ type Map interface { itemsMemUsage() float32 memUsage() float32 Clear() + Close() Count() int Capacity() int QueryCount() uint64 MissCount() uint64 Eliminate() (delCount int, skipReason int) - GCCopy() (deadCount int, gcMem int, subSince bool, skipReason int) + GCCopy() (deadCount int, gcMem int, skipReason int) kvholder() *kvHolder Groups() []group Resident() uint32 @@ -367,46 +389,52 @@ const ( ) type eliminateHandler struct { - goroutines int - circleDuration time.Duration - stepDuration time.Duration + goroutines int + stepDuration time.Duration } func (h *eliminateHandler) Handle(vm *VectorMap) { + d := h.stepDuration / time.Duration(vm.buckets) switch vm.mtype { case MapTypeLFU: + for i := 0; i < h.goroutines; i++ { + vm.wg.Add(1) go func(idx int) { for { start := time.Now() var eliMaps, eliItems, gcMaps, gcItems, gcMem, eliSkipReason, gcSkipReason int for j := idx; j < vm.buckets; j += h.goroutines { + if vm.stop { + vm.wg.Done() + return + } ec, reason := vm.shards[j].Eliminate() if ec > 0 { eliMaps++ eliItems += ec } eliSkipReason |= reason - gcI, gcM, _, rs := vm.shards[j].GCCopy() + gcI, gcM, rs := vm.shards[j].GCCopy() if gcI > 0 { gcMaps++ gcItems += gcI gcMem += gcM } gcSkipReason |= rs - time.Sleep(h.stepDuration) + time.Sleep(d) } cost := time.Since(start) if vm.logger != nil { vm.logger.Infof("eliminate index %d cost: %v, eliMaps: %d, eliItems: %d, gcMaps: %d, gcItems: %d, gcMem: %d", idx, cost, eliMaps, eliItems, gcMaps, gcItems, gcMem) } - time.Sleep(h.circleDuration) } }(i) } case MapTypeLRU: for i := 0; i < h.goroutines; i++ { + vm.wg.Add(1) go func(idx int) { for { start := time.Now() @@ -414,13 +442,19 @@ func (h *eliminateHandler) Handle(vm *VectorMap) { var minStartTime = time.Now() var topSince uint16 for j := idx; j < vm.buckets; j += h.goroutines { + if vm.stop { + vm.wg.Done() + return + } ec, reason := vm.shards[j].Eliminate() if ec > 0 { 
eliMaps++ eliItems += ec } eliSkipReason |= reason - gcI, gcM, subSince, rs := vm.shards[j].GCCopy() + gcI, gcM, rs := vm.shards[j].GCCopy() + lruMap := vm.shards[j].(*LRUMap) + subSince := lruMap.AdaptStartTime() if gcI > 0 { gcMaps++ gcItems += gcI @@ -429,22 +463,20 @@ func (h *eliminateHandler) Handle(vm *VectorMap) { subTimes++ } } - lruMap := vm.shards[j].(*LRUMap) - if lruMap.startTime.Before(start) { + if lruMap.startTime.Before(minStartTime) { minStartTime = lruMap.startTime } if lruMap.minTopSince > topSince { topSince = lruMap.minTopSince } gcSkipReason |= rs - time.Sleep(h.stepDuration) + time.Sleep(d) } cost := time.Since(start) if vm.logger != nil { vm.logger.Infof("eliminate index %d cost: %v, eliMaps: %d, eliItems: %d, gcMaps: %d, gcItems: %d, gcMem: %d, minStartTime: %s, subTimes: %d, topSince: %d, eliSkipReason: %d, gcSkipReason: %d", idx, cost, eliMaps, eliItems, gcMaps, gcItems, gcMem, minStartTime.Format(time.DateTime), subTimes, topSince, eliSkipReason, gcSkipReason) } - time.Sleep(h.circleDuration) } }(i) } diff --git a/butils/vectormap/vectormap_test.go b/butils/vectormap/vectormap_test.go index 3a56712..b01564f 100644 --- a/butils/vectormap/vectormap_test.go +++ b/butils/vectormap/vectormap_test.go @@ -15,8 +15,10 @@ package vectormap import ( + "bytes" "fmt" "strconv" + "sync" "testing" "time" @@ -24,31 +26,67 @@ import ( ) func TestVectorGet(t *testing.T) { - values := genBytesData(100, 10000) - m := NewVectorMap(10000, WithBuckets(16)) - for i := 0; i < 10000; i++ { + n := 10000 + values := genBytesData(100, n) + m := NewVectorMap(uint32(n), WithBuckets(16)) + for i := 0; i < n; i++ { key := []byte(strconv.Itoa(i)) value := values[i] m.RePut(key, value) } + var wg sync.WaitGroup + var closeCh = make(chan struct{}) for i := 0; i < 16; i++ { + wg.Add(1) go func() { for j := 0; ; j++ { - if j == 10000 { - j = 0 - } - key := []byte(strconv.Itoa(j)) - value, closer, ok := m.Get(key) - assert.Equal(t, true, ok) - assert.Equal(t, values[j], 
value, "ex: %s \nac: %s\n", string(values[j]), string(value)) - if closer != nil { - closer() + select { + case <-closeCh: + wg.Done() + return + default: + if j == 10000 { + j = 0 + } + key := []byte(strconv.Itoa(j)) + value, closer, ok := m.Get(key) + assert.Equal(t, true, ok) + assert.Equal(t, values[j], value, "ex: %s \nac: %s\n", string(values[j]), string(value)) + if closer != nil { + closer() + } } } }() } + time.Sleep(10 * time.Second) + close(closeCh) + wg.Wait() + m.Clear() +} - time.Sleep(time.Second * 10) +func TestVectorMap1m(t *testing.T) { + value := bytes.Repeat([]byte("a"), 1*1024*1024) + keyNum := 10 + m := NewVectorMap(uint32(20), WithType(MapTypeLRU), WithBuckets(8), WithSkipCheck(), WithEliminate(Byte(5<<30), 0, time.Duration(10)*time.Second)) + for k := 1; k <= keyNum; k++ { + newKey := []byte("performance_test_key_prefix_" + strconv.Itoa(k)) + ok := m.RePut(newKey, value) + assert.Equal(t, true, ok) + } + fmt.Println(m.Items()) + for k := 1; k <= keyNum; k++ { + newKey := []byte("performance_test_key_prefix_" + strconv.Itoa(k)) + v, closer, _ := m.Get(newKey) + if len(v) <= 0 { + fmt.Println("error vlen=0") + } + if closer != nil { + closer() + } + } + fmt.Println(m.Items()) + m.Close() } func TestVectorMapPut(t *testing.T) { @@ -79,6 +117,7 @@ func TestVectorMapPut(t *testing.T) { if closer != nil { closer() } + m.Clear() } func TestVectorMapPutMulti(t *testing.T) { @@ -116,7 +155,7 @@ func TestVectorMap_Base(t *testing.T) { keys := genStringData(16, 100) // insert - m := NewVectorMap(2, WithDebug(), WithBuckets(1), WithEliminate(1*GB, 0, 1*time.Second)) + m := NewVectorMap(2, WithSkipCheck(), WithBuckets(1), WithEliminate(1*GB, 0, 1*time.Second)) m.RePut([]byte(keys[0]), []byte(keys[1])) v, closer, ok := m.Get([]byte(keys[0])) assert.Equal(t, true, ok) @@ -192,7 +231,7 @@ func TestVectorMap_Base(t *testing.T) { func TestVectorMap_BaseLRU(t *testing.T) { keys := genStringData(16, 100) - m := NewVectorMap(2, WithDebug(), WithType(MapTypeLRU), 
WithBuckets(1), WithEliminate(1*GB, 0, 1*time.Second)) + m := NewVectorMap(2, WithSkipCheck(), WithType(MapTypeLRU), WithBuckets(1), WithEliminate(1*GB, 0, 1*time.Second)) m.RePut([]byte(keys[0]), []byte(keys[1])) v, closer, ok := m.Get([]byte(keys[0])) assert.Equal(t, true, ok) @@ -265,7 +304,7 @@ func TestVectorMap_BaseLRU(t *testing.T) { } func TestVectorMap_GC_Release(t *testing.T) { - m := NewVectorMap(4, WithDebug(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) + m := NewVectorMap(4, WithSkipCheck(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) { m.RePut([]byte("a"), []byte("b")) m.RePut([]byte("c"), make([]byte, 1024)) @@ -284,7 +323,7 @@ func TestVectorMap_GC_Release(t *testing.T) { } func TestVectorMap_GC(t *testing.T) { - m := NewVectorMap(4, WithDebug(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) + m := NewVectorMap(4, WithSkipCheck(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) { m.RePut([]byte("a"), []byte("b")) m.RePut([]byte("c"), []byte("d")) @@ -306,17 +345,19 @@ func TestVectorMap_GC(t *testing.T) { } func TestVectorMap_EliminateAndGC(t *testing.T) { - m := NewVectorMap(4, WithDebug(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) + m := NewVectorMap(4, WithSkipCheck(), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) { m.shards[0].Eliminate() m.shards[0].GCCopy() } + m.Get([]byte("b")) m.Get([]byte("c")) vlen := 992 m.RePut([]byte("a"), make([]byte, vlen)) + m.RePut([]byte("b"), make([]byte, vlen)) m.shards[0].Eliminate() assert.Equal(t, float32(32+20+vlen+20+vlen)/(3*1024), m.shards[0].itemsMemUsage()) @@ -350,7 +391,7 @@ func TestVectorMap_EliminateAndGC(t *testing.T) { } func TestVectorMap_EliminateAndGC_LRU(t *testing.T) { - m := NewVectorMap(4, WithDebug(), WithType(MapTypeLRU), WithBuckets(1), WithEliminate(3*KB, 0, 100*time.Millisecond)) + m := NewVectorMap(4, WithSkipCheck(), WithType(MapTypeLRU), WithBuckets(1), WithEliminate(3*KB, 0, 
100*time.Millisecond)) { m.shards[0].Eliminate() @@ -675,7 +716,7 @@ func TestVectorMapLRU_BigValue(t *testing.T) { vs2 := genBytesData(1<<5, 4) ca := 1 << 19 m := NewVectorMap(4, - WithDebug(), + WithSkipCheck(), WithLogger(logger), WithLRUUnitTime(time.Second), WithType(MapTypeLRU), @@ -733,7 +774,6 @@ func TestVectorMapLRU_BigValue(t *testing.T) { } m.shards[0].Eliminate() m.shards[0].GCCopy() - time.Sleep(time.Second) } if ok = m.RePut(k1, vs[1]); ok { @@ -760,7 +800,7 @@ func TestVectorMapLFU_BigValue(t *testing.T) { vs2 := genBytesData(1<<5, 4) ca := 1 << 19 m := NewVectorMap(4, - WithDebug(), + WithSkipCheck(), WithLogger(logger), WithType(MapTypeLFU), WithBuckets(1), @@ -835,7 +875,7 @@ func TestVectorMapLFU_BigValue(t *testing.T) { func TestGCTime(t *testing.T) { vs := genBytesData(128, 1) - m := NewVectorMap(4, WithDebug(), WithBuckets(1), WithEliminate(64*MB, 0, 100*time.Millisecond)) + m := NewVectorMap(4, WithSkipCheck(), WithBuckets(1), WithEliminate(64*MB, 0, 100*time.Millisecond)) for i := 0; i < 460000; i++ { m.RePut([]byte(strconv.Itoa(i)), vs[0]) } @@ -856,6 +896,47 @@ func TestGCTime(t *testing.T) { m.Clear() } +func TestVectorMap_LRU_AdaptStartTime(t *testing.T) { + vs := genBytesData(128, 4) + m := NewVectorMap(4, + WithSkipCheck(), + WithType(MapTypeLRU), + WithBuckets(1), + WithEliminate(10*MB, 0, 5*time.Second)) + + lm0 := m.shards[0].(*LRUMap) + + lm0.startTime = time.Now().Add(-time.Hour * 21 * 24) + lm0.lastSubTime = time.Now().Add(-time.Hour * 1 * 24) + + for i := 0; i < 8; i++ { + m.RePut([]byte(strconv.Itoa(i)), vs[0]) + } + origin := time.Since(lm0.startTime) / UnitTime + + subSince := lm0.AdaptStartTime() + assert.Equal(t, true, subSince) + for _, s := range lm0.sinces[0] { + if s > 0 { + assert.Equal(t, uint16(origin-(LRUSubDuration/UnitTime)), s) + } + } +} + +func TestVectorMap_Logger(t *testing.T) { + logger := &defaultLogger{} + m := NewVectorMap(4, + WithSkipCheck(), + WithLogger(logger), + WithLRUUnitTime(time.Second), + 
WithType(MapTypeLRU), + WithBuckets(512), + WithEliminate(10*MB, 1, 5*time.Second)) + + time.Sleep(7 * time.Second) + m.Close() +} + func genBytesData(size, count int) (keys [][]byte) { letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") r := make([]byte, size*count) diff --git a/conf/bitalostored.toml b/conf/bitalostored.toml index 7e9e458..8496100 100644 --- a/conf/bitalostored.toml +++ b/conf/bitalostored.toml @@ -1,9 +1,7 @@ [server] product_name = "bitalostored-demo" address = ":19091" -max_client = 15000 -keep_alive = "600s" -max_procs = 12 +net_event_loop_num = 8 db_path = "bitalostored-data" slow_time = "30ms" slow_key_window_time = "2000ms" @@ -44,7 +42,7 @@ enable_clock_cache = false # default cache_size = 0 # default [raft_queue] -workers = 60 +workers = 32 length = 10000 [raft_cluster] diff --git a/go.mod b/go.mod index 20d9675..38db990 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,6 @@ require ( github.com/RoaringBitmap/roaring v1.9.0 github.com/VictoriaMetrics/metrics v1.6.2 github.com/cockroachdb/errors v1.11.1 - github.com/cockroachdb/pebble v0.0.0-20210406181039-e3809b89b488 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 github.com/emirpasic/gods v1.12.0 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab @@ -25,18 +24,19 @@ require ( github.com/martini-contrib/gzip v0.0.0-20151124214156-6c035326b43f github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 github.com/martini-contrib/sessions v0.0.0-20140630231722-fa13114fbcf0 - github.com/panjf2000/ants/v2 v2.9.0 + github.com/panjf2000/ants/v2 v2.10.0 + github.com/panjf2000/gnet/v2 v2.5.7 github.com/shopspring/decimal v1.3.1 github.com/sony/gobreaker v0.5.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.9.0 github.com/yuin/gopher-lua v1.1.1 - github.com/zuoyebang/bitalosdb v1.1.4 - github.com/zuoyebang/bitalostable v1.0.0 + github.com/zuoyebang/bitalosdb v1.2.0 + github.com/zuoyebang/bitalostable v1.0.1 go.uber.org/atomic 
v1.7.0 - go.uber.org/zap v1.26.0 + go.uber.org/zap v1.27.0 golang.org/x/net v0.20.0 - golang.org/x/sys v0.18.0 + golang.org/x/sys v0.22.0 google.golang.org/protobuf v1.33.0 gorm.io/driver/mysql v1.5.2 gorm.io/driver/sqlite v1.5.4 @@ -83,12 +83,14 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fastrand v1.0.0 // indirect github.com/valyala/histogram v1.0.1 // indirect - go.uber.org/multierr v1.10.0 // indirect + go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.18.0 // indirect golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect - golang.org/x/sync v0.3.0 // indirect + golang.org/x/sync v0.7.0 // indirect golang.org/x/text v0.14.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 9e778b2..ecc3776 100644 --- a/go.sum +++ b/go.sum @@ -36,21 +36,17 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.7.5/go.mod h1:m/IWRCPXYZ6TvLLDuC0kfLR1pp/+BiZ0h16WHaBMRMM= -github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= github.com/cockroachdb/errors v1.8.2/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b 
h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20210406181039-e3809b89b488 h1:9Ydk2DZyu/YEL1W7Kha7Ax78R2rUvbTgS/U2nW3O8iw= -github.com/cockroachdb/pebble v0.0.0-20210406181039-e3809b89b488/go.mod h1:1XpB4cLQcF189RAcWi4gUc110zJgtOfT7SVNGY8sOe0= github.com/cockroachdb/redact v1.0.6/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -83,14 +79,12 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0/go.mod 
h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/3rZdM= github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= @@ -120,7 +114,6 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2-0.20190904063534-ff6b7dc882cf/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= @@ -200,7 +193,6 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.0/go.mod 
h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.15.11 h1:Lcadnb3RKGin4FYM/orgq0qde+nc15E5Cbqg4B9Sx9c= github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -267,8 +259,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c h1:rp5dCmg/yLR3mgFuSOe4oEnDDmGLROTvMragMUXpTQw= github.com/oxtoacart/bpool v0.0.0-20190530202638-03653db5a59c/go.mod h1:X07ZCGwUbLaax7L0S3Tw4hpejzu63ZrrQiUe6W0hcy0= -github.com/panjf2000/ants/v2 v2.9.0 h1:SztCLkVxBRigbg+vt0S5QvF5vxAbxbKt09/YfAJ0tEo= -github.com/panjf2000/ants/v2 v2.9.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= +github.com/panjf2000/ants/v2 v2.10.0 h1:zhRg1pQUtkyRiOFo2Sbqwjp0GfBNo9cUY2/Grpx1p+8= +github.com/panjf2000/ants/v2 v2.10.0/go.mod h1:7ZxyxsqE4vvW0M7LSD8aI3cKwgFhBHbxnlN8mDqHa1I= +github.com/panjf2000/gnet/v2 v2.5.7 h1:EGGIfLYEVAp2l5WSYT2XddSjpQ642PjwphbWhcJ0WBY= +github.com/panjf2000/gnet/v2 v2.5.7/go.mod h1:ppopMJ8VrDbJu8kDsqFQTgNmpMS8Le5CmPxISf+Sauk= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -311,7 +305,6 @@ github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpE github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify 
v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= @@ -321,6 +314,7 @@ github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI= @@ -341,17 +335,17 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M= github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw= -github.com/zuoyebang/bitalosdb v1.1.4 h1:sbJDw7qxHYeaE93+mCaWrXPAX+VdKlS6dGGkPQh96yQ= -github.com/zuoyebang/bitalosdb v1.1.4/go.mod h1:WYQtj3xs02t/wfzolEaEsW7Y8oq4q1DhTHIQO8fXE3c= -github.com/zuoyebang/bitalostable v1.0.0 h1:iwhbyAH9CJr2ZwpbdDM/6gv3hLp713r+vTNDWK40W3M= -github.com/zuoyebang/bitalostable v1.0.0/go.mod h1:ftM42fXkDkyucCFTiysCHd6g2wforH15S5RIATuSBfs= +github.com/zuoyebang/bitalosdb v1.2.0 h1:6AfFLNWCCLDAGfEj4XNF16cYt/8LyEIcnZQIqHr9emY= 
+github.com/zuoyebang/bitalosdb v1.2.0/go.mod h1:r4yTXkL40dNH7WENggsxWSSH8mJsDkTLAx/4q8BSVro= +github.com/zuoyebang/bitalostable v1.0.1 h1:8OpcswyL9AN6e7VoxovV8Ut6VS6WZJh8URf/MTR1CH4= +github.com/zuoyebang/bitalostable v1.0.1/go.mod h1:ftM42fXkDkyucCFTiysCHd6g2wforH15S5RIATuSBfs= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.10.0 h1:S0h4aNzvfcFsC3dRF1jLoaov7oRaKqRGC/pUEJ2yvPQ= -go.uber.org/multierr v1.10.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= -go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -367,7 +361,6 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp 
v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= @@ -378,8 +371,6 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -406,8 +397,9 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod 
h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -420,15 +412,14 @@ golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod 
h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -448,7 +439,6 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -486,6 +476,8 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/raft/config/config.go b/raft/config/config.go index ae17de3..cbc9f05 100644 --- a/raft/config/config.go +++ b/raft/config/config.go @@ 
-26,18 +26,15 @@ import ( "strconv" "time" - "github.com/zuoyebang/bitalostored/raft/logger" - - "github.com/zuoyebang/bitalostored/raft/raftio" - "github.com/cockroachdb/errors" "github.com/lni/goutils/netutil" "github.com/lni/goutils/stringutil" - "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/id" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + "github.com/zuoyebang/bitalostored/raft/logger" + "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) @@ -832,21 +829,21 @@ func getDefaultLogDBConfig() LogDBConfig { KVMaxBackgroundFlushes: 2, KVLRUCacheSize: 0, KVKeepLogFileNum: 16, - KVWriteBufferSize: 128 * 1024 * 1024, - KVMaxWriteBufferNumber: 6, + KVWriteBufferSize: 128 << 20, + KVMaxWriteBufferNumber: 8, KVLevel0FileNumCompactionTrigger: 16, KVLevel0SlowdownWritesTrigger: 24, - KVLevel0StopWritesTrigger: 32, - KVMaxBytesForLevelBase: 4 * 1024 * 1024 * 1024, + KVLevel0StopWritesTrigger: 64, + KVMaxBytesForLevelBase: 4 << 30, KVMaxBytesForLevelMultiplier: 2, - KVTargetFileSizeBase: 128 * 1024 * 1024, + KVTargetFileSizeBase: 128 << 20, KVTargetFileSizeMultiplier: 2, KVLevelCompactionDynamicLevelBytes: 0, KVRecycleLogFileNum: 0, KVNumOfLevels: 7, - KVBlockSize: 128 * 1024, - SaveBufferSize: 32 * 1024, - MaxSaveBufferSize: 64 * 1024 * 1024, + KVBlockSize: 128 << 10, + SaveBufferSize: 32 << 10, + MaxSaveBufferSize: 64 << 20, } } @@ -923,7 +920,7 @@ func GetDefaultExpertConfig() ExpertConfig { // unless it is absoloutely necessary. type ExpertConfig struct { // LogDBFactory is the factory function used for creating the LogDB instance - // used by NodeHost. When not set, the default built-in Pebble based LogDB + // used by NodeHost. When not set, the default built-in Bitable based LogDB // implementation is used. 
LogDBFactory LogDBFactory // TransportFactory is an optional factory type used for creating the custom diff --git a/raft/internal/fileutil/go114.go b/raft/internal/fileutil/go114.go index a1340b1..a3548f4 100644 --- a/raft/internal/fileutil/go114.go +++ b/raft/internal/fileutil/go114.go @@ -23,12 +23,6 @@ import ( "os" ) -// TODO: -// io/iotuil has been deprecated in go1.16 -// ioutil.Discard, ioutil.TempFile and other functions have been moved to the -// other stdlib packages (io and os) in go1.16. -// remove this file when we require go1.16 for dragonboat - // Discard ... var Discard = ioutil.Discard diff --git a/raft/internal/fileutil/go116.go b/raft/internal/fileutil/go116.go index 70e13e3..b215b81 100644 --- a/raft/internal/fileutil/go116.go +++ b/raft/internal/fileutil/go116.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build go1.16 -// +build go1.16 package fileutil diff --git a/raft/internal/fileutil/utils.go b/raft/internal/fileutil/utils.go index 1028871..e69611f 100644 --- a/raft/internal/fileutil/utils.go +++ b/raft/internal/fileutil/utils.go @@ -37,9 +37,6 @@ import ( ) const ( - // DefaultFileMode is the default file mode for files generated by - // Dragonboat. - DefaultFileMode = 0640 // SnapshotFlagFilename defines the filename of the snapshot flag file. SnapshotFlagFilename = "dragonboat.snapshot.message" defaultDirFileMode = 0750 diff --git a/raft/internal/invariants/nomemfs.go b/raft/internal/invariants/nomemfs.go index 069ba3e..0890951 100644 --- a/raft/internal/invariants/nomemfs.go +++ b/raft/internal/invariants/nomemfs.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !dragonboat_memfs_test -// +build !dragonboat_memfs_test package invariants diff --git a/raft/internal/invariants/nomonkey.go b/raft/internal/invariants/nomonkey.go index 19deb36..8d4a2b8 100644 --- a/raft/internal/invariants/nomonkey.go +++ b/raft/internal/invariants/nomonkey.go @@ -13,7 +13,6 @@ // limitations under the License. 
//go:build !dragonboat_monkeytest -// +build !dragonboat_monkeytest package invariants diff --git a/raft/internal/invariants/norace.go b/raft/internal/invariants/norace.go index 21eb0ea..6eaa675 100644 --- a/raft/internal/invariants/norace.go +++ b/raft/internal/invariants/norace.go @@ -13,7 +13,6 @@ // limitations under the License. //go:build !race -// +build !race package invariants diff --git a/raft/internal/invariants/norocksdb.go b/raft/internal/invariants/norocksdb.go deleted file mode 100644 index 4f1670c..0000000 --- a/raft/internal/invariants/norocksdb.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017-2020 Lei Ni (nilei81@gmail.com) and other contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !dragonboat_rocksdb_test -// +build !dragonboat_rocksdb_test - -package invariants - -// DragonboatRocksDBTest is the flag indicating whether it is for rocksdb test -const DragonboatRocksDBTest = false diff --git a/raft/internal/invariants/rocksdb.go b/raft/internal/invariants/rocksdb.go deleted file mode 100644 index bfc3a8d..0000000 --- a/raft/internal/invariants/rocksdb.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2017-2020 Lei Ni (nilei81@gmail.com) and other contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build dragonboat_rocksdb_test -// +build dragonboat_rocksdb_test - -package invariants - -// DragonboatRocksDBTest is the flag indicating whether it is for rocksdb test -const DragonboatRocksDBTest = true diff --git a/raft/internal/logdb/batch.go b/raft/internal/logdb/batch.go index 2362b10..14aeaa5 100644 --- a/raft/internal/logdb/batch.go +++ b/raft/internal/logdb/batch.go @@ -17,11 +17,11 @@ package logdb import ( "math" + "github.com/cockroachdb/errors" + "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" ) // diff --git a/raft/internal/logdb/cache_test.go b/raft/internal/logdb/cache_test.go index 421b922..3940750 100644 --- a/raft/internal/logdb/cache_test.go +++ b/raft/internal/logdb/cache_test.go @@ -18,10 +18,9 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/require" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/stretchr/testify/require" ) func TestTrySaveSnapshot(t *testing.T) { diff --git a/raft/internal/logdb/compaction_test.go b/raft/internal/logdb/compaction_test.go index 91362f3..317a88d 100644 --- a/raft/internal/logdb/compaction_test.go +++ b/raft/internal/logdb/compaction_test.go @@ -18,9 +18,8 @@ import ( "reflect" "testing" - "github.com/zuoyebang/bitalostored/raft/raftio" - "github.com/lni/goutils/leaktest" + "github.com/zuoyebang/bitalostored/raft/raftio" ) func TestCompactionTaskCanBeCreated(t 
*testing.T) { diff --git a/raft/internal/logdb/db.go b/raft/internal/logdb/db.go index a7ef745..346b9bf 100644 --- a/raft/internal/logdb/db.go +++ b/raft/internal/logdb/db.go @@ -18,14 +18,14 @@ import ( "encoding/binary" "math" + "github.com/cockroachdb/errors" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" ) var ( diff --git a/raft/internal/logdb/db_test.go b/raft/internal/logdb/db_test.go index f25457f..dcf4a6f 100644 --- a/raft/internal/logdb/db_test.go +++ b/raft/internal/logdb/db_test.go @@ -25,8 +25,8 @@ import ( "github.com/cockroachdb/errors" "github.com/lni/goutils/leaktest" "github.com/stretchr/testify/require" - "github.com/zuoyebang/bitalostored/raft/config" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/vfs" "github.com/zuoyebang/bitalostored/raft/raftio" diff --git a/raft/internal/logdb/kv/pebble/kv_pebble.go b/raft/internal/logdb/kv/bitable/kv_bitable.go similarity index 63% rename from raft/internal/logdb/kv/pebble/kv_pebble.go rename to raft/internal/logdb/kv/bitable/kv_bitable.go index f6c80d2..1772613 100644 --- a/raft/internal/logdb/kv/pebble/kv_pebble.go +++ b/raft/internal/logdb/kv/bitable/kv_bitable.go @@ -12,34 +12,27 @@ // See the License for the specific language governing permissions and // limitations under the License. -package pebble - -// WARNING: pebble support is expermental, DO NOT USE IT IN PRODUCTION. 
+package bitable import ( "bytes" "fmt" "sync" + "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/logger" - - "github.com/cockroachdb/pebble" "github.com/lni/goutils/syncutil" - + bitable "github.com/zuoyebang/bitalostable" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" "github.com/zuoyebang/bitalostored/raft/internal/utils" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + "github.com/zuoyebang/bitalostored/raft/logger" ) -var ( - plog = logger.GetLogger("pebblekv") -) +const bitableLogTag = "[bitable/raftlog]" -const ( - maxLogFileSize = 1024 * 1024 * 128 -) +var plog = logger.GetLogger("bitablekv") var firstError = utils.FirstError @@ -70,82 +63,120 @@ func (l *eventListener) notify() { }) } -func (l *eventListener) onCompactionEnd(pebble.CompactionInfo) { +func (l *eventListener) onCompactionEnd(info bitable.CompactionInfo) { + plog.Infof("%s %s", bitableLogTag, info) l.notify() } -func (l *eventListener) onFlushEnd(pebble.FlushInfo) { +func (l *eventListener) onFlushEnd(info bitable.FlushInfo) { + plog.Infof("%s %s", bitableLogTag, info) l.notify() } -func (l *eventListener) onWALCreated(pebble.WALCreateInfo) { +func (l *eventListener) onWALCreated(bitable.WALCreateInfo) { l.notify() } -type pebbleWriteBatch struct { - wb *pebble.Batch - db *pebble.DB - wo *pebble.WriteOptions +type bitableWriteBatch struct { + wb *bitable.Batch + db *bitable.DB + wo *bitable.WriteOptions } -func (w *pebbleWriteBatch) Destroy() { +func (w *bitableWriteBatch) Destroy() { if err := w.wb.Close(); err != nil { panic(err) } } -func (w *pebbleWriteBatch) Put(key []byte, val []byte) { +func (w *bitableWriteBatch) Put(key []byte, val []byte) { if err := w.wb.Set(key, val, w.wo); err != nil { panic(err) } } -func (w *pebbleWriteBatch) Delete(key []byte) { +func (w *bitableWriteBatch) Delete(key []byte) { if err 
:= w.wb.Delete(key, w.wo); err != nil { panic(err) } } -func (w *pebbleWriteBatch) Clear() { +func (w *bitableWriteBatch) Clear() { if err := w.wb.Close(); err != nil { panic(err) } w.wb = w.db.NewBatch() } -func (w *pebbleWriteBatch) Count() int { +func (w *bitableWriteBatch) Count() int { return int(w.wb.Count()) } -type pebbleLogger struct{} +var _ bitable.Logger = (*bitableLogger)(nil) + +type bitableLogger struct{} + +func (l bitableLogger) Info(args ...interface{}) { + plog.Infof(fmt.Sprint(args...)) +} + +func (l bitableLogger) Warn(args ...interface{}) { + plog.Warningf(fmt.Sprint(args...)) +} -var _ pebble.Logger = (*pebbleLogger)(nil) +func (l bitableLogger) Error(args ...interface{}) { + plog.Errorf(fmt.Sprint(args...)) +} -// PebbleLogger is the logger used by pebble -var PebbleLogger pebbleLogger +func (l bitableLogger) Cost(args ...interface{}) func() { + begin := time.Now() + return func() { + plog.Infof(fmt.Sprint(fmt.Sprint(args...), " ", fmtDuration(time.Now().Sub(begin)))) + } +} -func (pebbleLogger) Infof(format string, args ...interface{}) { - pebble.DefaultLogger.Infof(format, args...) +func (l bitableLogger) Warnf(format string, args ...interface{}) { + plog.Warningf(format, args...) } -func (pebbleLogger) Fatalf(format string, args ...interface{}) { - pebble.DefaultLogger.Infof(format, args...) - panic(fmt.Errorf(format, args...)) +func (l bitableLogger) Errorf(format string, args ...interface{}) { + plog.Errorf(format, args...) +} + +func (bitableLogger) Infof(format string, args ...interface{}) { + plog.Infof(format, args...) +} + +func (bitableLogger) Fatalf(format string, args ...interface{}) { + plog.Warningf(format, args...) 
+} + +func fmtDuration(d time.Duration) string { + if d > time.Second { + return fmt.Sprintf("cost:%d.%03ds", d/time.Second, d/time.Millisecond%1000) + } + if d > time.Millisecond { + return fmt.Sprintf("cost:%d.%03dms", d/time.Millisecond, d/time.Microsecond%1000) + } + if d > time.Microsecond { + return fmt.Sprintf("cost:%d.%03dus", d/time.Microsecond, d%1000) + } + return fmt.Sprintf("cost:%dns", d) } -// NewKVStore returns a pebble based IKVStore instance. +// NewKVStore returns a bitable based IKVStore instance. func NewKVStore(config config.LogDBConfig, callback kv.LogDBCallback, dir string, wal string, fs vfs.IFS) (kv.IKVStore, error) { - return openPebbleDB(config, callback, dir, wal, fs) + return openBitableDB(config, callback, dir, wal, fs) } -// KV is a pebble based IKVStore type. +// KV is a bitable based IKVStore type. type KV struct { - db *pebble.DB + db *bitable.DB dbSet chan struct{} - opts *pebble.Options - ro *pebble.IterOptions - wo *pebble.WriteOptions + opts *bitable.Options + ro *bitable.IterOptions + wo *bitable.WriteOptions event *eventListener callback kv.LogDBCallback config config.LogDBConfig @@ -153,20 +184,20 @@ type KV struct { var _ kv.IKVStore = (*KV)(nil) -var pebbleWarning sync.Once +var bitableWarning sync.Once -func openPebbleDB(config config.LogDBConfig, callback kv.LogDBCallback, +func openBitableDB(config config.LogDBConfig, callback kv.LogDBCallback, dir string, walDir string, fs vfs.IFS) (kv.IKVStore, error) { if config.IsEmpty() { panic("invalid LogDBConfig") } - pebbleWarning.Do(func() { + bitableWarning.Do(func() { if fs == vfs.MemStrictFS { - plog.Warningf("running in pebble memfs test mode") + plog.Warningf("running in bitable memfs test mode") } }) //blockSize := int(config.KVBlockSize) - blockSize := 128 * 1024 + blockSize := 128 << 10 writeBufferSize := 128 << 20 targetFileSizeBase := int64(128 << 20) //cacheSize := int64(config.KVLRUCacheSize) @@ -175,11 +206,11 @@ func openPebbleDB(config config.LogDBConfig, 
callback kv.LogDBCallback, levelSizeMultiplier := int64(2) //numOfLevels := int64(config.KVNumOfLevels) numOfLevels := int64(7) - lopts := make([]pebble.LevelOptions, 0) + lopts := make([]bitable.LevelOptions, 0) sz := targetFileSizeBase for l := int64(0); l < numOfLevels; l++ { - opt := pebble.LevelOptions{ - Compression: pebble.SnappyCompression, + opt := bitable.LevelOptions{ + Compression: bitable.SnappyCompression, BlockSize: blockSize, TargetFileSize: sz, } @@ -187,25 +218,26 @@ func openPebbleDB(config config.LogDBConfig, callback kv.LogDBCallback, lopts = append(lopts, opt) } if inMonkeyTesting { - writeBufferSize = 1024 * 1024 * 4 + writeBufferSize = 4 << 20 } - cache := pebble.NewCache(cacheSize) - ro := &pebble.IterOptions{} - wo := &pebble.WriteOptions{Sync: false} - opts := &pebble.Options{ + cache := bitable.NewCache(cacheSize) + ro := &bitable.IterOptions{} + wo := &bitable.WriteOptions{Sync: false} + opts := &bitable.Options{ Levels: lopts, - MaxManifestFileSize: maxLogFileSize, + MaxManifestFileSize: 128 << 20, MemTableSize: writeBufferSize, MemTableStopWritesThreshold: 8, LBaseMaxBytes: 1 << 30, - L0CompactionThreshold: 32, - L0StopWritesThreshold: 64, + L0CompactionThreshold: 48, + L0StopWritesThreshold: 96, Cache: cache, - Logger: PebbleLogger, + Logger: bitableLogger{}, + LogTag: bitableLogTag, MaxOpenFiles: 8000, } if fs != vfs.DefaultFS { - opts.FS = vfs.NewPebbleFS(fs) + opts.FS = vfs.NewBitableFS(fs) } kv := &KV{ ro: ro, @@ -219,7 +251,7 @@ func openPebbleDB(config config.LogDBConfig, callback kv.LogDBCallback, kv: kv, stopper: syncutil.NewStopper(), } - opts.EventListener = pebble.EventListener{ + opts.EventListener = bitable.EventListener{ WALCreated: event.onWALCreated, FlushEnd: event.onFlushEnd, CompactionEnd: event.onCompactionEnd, @@ -233,13 +265,18 @@ func openPebbleDB(config config.LogDBConfig, callback kv.LogDBCallback, if err := fileutil.MkdirAll(dir, fs); err != nil { return nil, err } - pdb, err := pebble.Open(dir, opts) + pdb, 
err := bitable.Open(dir, opts) if err != nil { return nil, err } cache.Unref() kv.db = pdb kv.setEventListener(event) + plog.Infof("bitable open success MemTableSize:%d MemTableStopWritesThreshold:%d MaxManifestFileSize:%d L0StopWritesThreshold:%d", + opts.MemTableSize, + opts.MemTableStopWritesThreshold, + opts.MaxManifestFileSize, + opts.L0StopWritesThreshold) return kv, nil } @@ -251,7 +288,7 @@ func (r *KV) setEventListener(event *eventListener) { close(r.dbSet) // force a WALCreated event as the one issued when opening the DB didn't get // handled - event.onWALCreated(pebble.WALCreateInfo{}) + event.onWALCreated(bitable.WALCreateInfo{}) } // Name returns the IKVStore type name. @@ -268,7 +305,7 @@ func (r *KV) Close() error { return nil } -func iteratorIsValid(iter *pebble.Iterator) bool { +func iteratorIsValid(iter *bitable.Iterator) bool { v := iter.Valid() if err := iter.Error(); err != nil { plog.Panicf("%+v", err) @@ -309,7 +346,7 @@ func (r *KV) IterateValue(fk []byte, lk []byte, inc bool, // GetValue ... func (r *KV) GetValue(key []byte, op func([]byte) error) (err error) { val, closer, err := r.db.Get(key) - if err != nil && err != pebble.ErrNotFound { + if err != nil && err != bitable.ErrNotFound { return err } defer func() { @@ -332,7 +369,7 @@ func (r *KV) DeleteValue(key []byte) error { // GetWriteBatch ... func (r *KV) GetWriteBatch() kv.IWriteBatch { - return &pebbleWriteBatch{ + return &bitableWriteBatch{ wb: r.db.NewBatch(), db: r.db, wo: r.wo, @@ -341,7 +378,7 @@ func (r *KV) GetWriteBatch() kv.IWriteBatch { // CommitWriteBatch ... func (r *KV) CommitWriteBatch(wb kv.IWriteBatch) error { - pwb, ok := wb.(*pebbleWriteBatch) + pwb, ok := wb.(*bitableWriteBatch) if !ok { panic("unknown type") } @@ -365,7 +402,7 @@ func (r *KV) BulkRemoveEntries(fk []byte, lk []byte) (err error) { // CompactEntries ... 
func (r *KV) CompactEntries(fk []byte, lk []byte) error { - return r.db.Compact(fk, lk) + return r.db.Compact(fk, lk, false) } // FullCompaction ... @@ -376,5 +413,5 @@ func (r *KV) FullCompaction() error { fk[i] = 0 lk[i] = 0xFF } - return r.db.Compact(fk, lk) + return r.db.Compact(fk, lk, false) } diff --git a/raft/internal/logdb/kv/pebble/monkey.go b/raft/internal/logdb/kv/bitable/monkey.go similarity index 97% rename from raft/internal/logdb/kv/pebble/monkey.go rename to raft/internal/logdb/kv/bitable/monkey.go index 2bcc10b..dc658e4 100644 --- a/raft/internal/logdb/kv/pebble/monkey.go +++ b/raft/internal/logdb/kv/bitable/monkey.go @@ -15,7 +15,7 @@ //go:build dragonboat_monkeytest // +build dragonboat_monkeytest -package pebble +package bitable const ( inMonkeyTesting = true diff --git a/raft/internal/logdb/kv/pebble/monkeynoop.go b/raft/internal/logdb/kv/bitable/monkeynoop.go similarity index 93% rename from raft/internal/logdb/kv/pebble/monkeynoop.go rename to raft/internal/logdb/kv/bitable/monkeynoop.go index 743c256..db22399 100644 --- a/raft/internal/logdb/kv/pebble/monkeynoop.go +++ b/raft/internal/logdb/kv/bitable/monkeynoop.go @@ -13,9 +13,8 @@ // limitations under the License. //go:build !dragonboat_monkeytest -// +build !dragonboat_monkeytest -package pebble +package bitable const ( inMonkeyTesting = false diff --git a/raft/internal/logdb/kv_default.go b/raft/internal/logdb/kv_default.go index bd2e6d6..a8b39b8 100644 --- a/raft/internal/logdb/kv_default.go +++ b/raft/internal/logdb/kv_default.go @@ -12,23 +12,17 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build !dragonboat_rocksdb_test && !dragonboat_memfs_test -// +build !dragonboat_rocksdb_test,!dragonboat_memfs_test +//go:build !dragonboat_memfs_test package logdb import ( "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" - "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv/pebble" + "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv/bitable" "github.com/zuoyebang/bitalostored/raft/internal/vfs" ) -const ( - // DefaultKVStoreTypeName is the type name of the default kv store - DefaultKVStoreTypeName = "rocksdb" -) - func newDefaultKVStore(config config.LogDBConfig, callback kv.LogDBCallback, dir string, wal string, fs vfs.IFS) (kv.IKVStore, error) { @@ -39,5 +33,5 @@ func newDefaultKVStore(config config.LogDBConfig, panic("invalid fs") } } - return pebble.NewKVStore(config, callback, dir, wal, fs) + return bitable.NewKVStore(config, callback, dir, wal, fs) } diff --git a/raft/internal/logdb/kv_pebble_memfs.go b/raft/internal/logdb/kv_pebble_memfs.go index 2496028..6e40bf1 100644 --- a/raft/internal/logdb/kv_pebble_memfs.go +++ b/raft/internal/logdb/kv_pebble_memfs.go @@ -20,15 +20,10 @@ package logdb import ( "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" - "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv/pebble" + "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv/bitable" "github.com/zuoyebang/bitalostored/raft/internal/vfs" ) -const ( - // DefaultKVStoreTypeName is the type name of the default kv store - DefaultKVStoreTypeName = "pebble" -) - func newDefaultKVStore(config config.LogDBConfig, callback kv.LogDBCallback, dir string, wal string, fs vfs.IFS) (kv.IKVStore, error) { @@ -40,5 +35,5 @@ func newDefaultKVStore(config config.LogDBConfig, panic("invalid fs") } } - return pebble.NewKVStore(config, callback, dir, wal, fs) + return bitable.NewKVStore(config, callback, dir, wal, fs) } diff --git 
a/raft/internal/logdb/kv_test.go b/raft/internal/logdb/kv_test.go index 501223a..6afa848 100644 --- a/raft/internal/logdb/kv_test.go +++ b/raft/internal/logdb/kv_test.go @@ -24,8 +24,8 @@ import ( "github.com/cockroachdb/errors" "github.com/lni/goutils/leaktest" - "github.com/zuoyebang/bitalostored/raft/config" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" diff --git a/raft/internal/logdb/logdb.go b/raft/internal/logdb/logdb.go index 94557bc..687da40 100644 --- a/raft/internal/logdb/logdb.go +++ b/raft/internal/logdb/logdb.go @@ -12,12 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package logdb implements the persistent log storage used by Dragonboat. - -This package is internally used by Dragonboat, applications are not expected -to import this package. 
-*/ package logdb import ( diff --git a/raft/internal/logdb/logreader.go b/raft/internal/logdb/logreader.go index 346ba77..e0bb95b 100644 --- a/raft/internal/logdb/logreader.go +++ b/raft/internal/logdb/logreader.go @@ -37,11 +37,11 @@ import ( "sync" "unsafe" + "github.com/lni/goutils/logutil" + "github.com/zuoyebang/bitalostored/raft/internal/raft" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/lni/goutils/logutil" ) const ( diff --git a/raft/internal/logdb/logreader_etcd_test.go b/raft/internal/logdb/logreader_etcd_test.go index efec554..55a4090 100644 --- a/raft/internal/logdb/logreader_etcd_test.go +++ b/raft/internal/logdb/logreader_etcd_test.go @@ -19,12 +19,11 @@ import ( "reflect" "testing" + "github.com/lni/goutils/leaktest" "github.com/zuoyebang/bitalostored/raft/internal/raft" "github.com/zuoyebang/bitalostored/raft/internal/vfs" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/lni/goutils/leaktest" ) // most tests below are ported from etcd rafts diff --git a/raft/internal/logdb/sharded.go b/raft/internal/logdb/sharded.go index 019266f..8e004aa 100644 --- a/raft/internal/logdb/sharded.go +++ b/raft/internal/logdb/sharded.go @@ -19,15 +19,15 @@ import ( "math" "sync/atomic" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/syncutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv" "github.com/zuoyebang/bitalostored/raft/internal/server" "github.com/zuoyebang/bitalostored/raft/internal/utils" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/syncutil" ) // ShardedDB is a LogDB implementation using sharded rocksdb instances. 
diff --git a/raft/internal/raft/logentry.go b/raft/internal/raft/logentry.go index 12d21b9..730e66c 100644 --- a/raft/internal/raft/logentry.go +++ b/raft/internal/raft/logentry.go @@ -15,11 +15,10 @@ package raft import ( + "github.com/cockroachdb/errors" "github.com/zuoyebang/bitalostored/raft/internal/server" "github.com/zuoyebang/bitalostored/raft/internal/settings" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" ) var ( diff --git a/raft/internal/raft/peer.go b/raft/internal/raft/peer.go index 0cbcf5d..0beb08b 100644 --- a/raft/internal/raft/peer.go +++ b/raft/internal/raft/peer.go @@ -210,6 +210,7 @@ func (p *Peer) GetUpdate(moreToApply bool, return ud, nil } +// 包含lower 不包含upper func (p *Peer) GetUpdateForFlush(lower, upper uint64) ([]pb.Update, error) { if lower >= upper { return []pb.Update{}, nil diff --git a/raft/internal/raft/raft.go b/raft/internal/raft/raft.go index 3d2e4f4..d0f1046 100644 --- a/raft/internal/raft/raft.go +++ b/raft/internal/raft/raft.go @@ -27,16 +27,17 @@ import ( "sort" "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/internal/server" - "github.com/zuoyebang/bitalostored/raft/internal/settings" - "github.com/zuoyebang/bitalostored/raft/logger" "github.com/zuoyebang/bitalostored/raft/order" - pb "github.com/zuoyebang/bitalostored/raft/raftpb" "github.com/cockroachdb/errors" "github.com/lni/goutils/logutil" "github.com/lni/goutils/random" + + "github.com/zuoyebang/bitalostored/raft/config" + "github.com/zuoyebang/bitalostored/raft/internal/server" + "github.com/zuoyebang/bitalostored/raft/internal/settings" + "github.com/zuoyebang/bitalostored/raft/logger" + pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) var ( @@ -267,6 +268,7 @@ func newRaft(c config.Config, logdb ILogDB, initAddress ...PeerAddress) *raft { } plog.Infof("%s raft log rate limit enabled: %t, %d", dn(r.clusterID, r.nodeID), r.rl.Enabled(), c.MaxInMemLogSize) + 
//从snapshot中加载membership为空,默认用初始化的initAddress st, members := logdb.NodeState() plog.Infof("nodestate from snapshot raft state:%v, members:%v", st, members) if len(members.Addresses) <= 0 { diff --git a/raft/internal/raft/raft_etcd_paper_test.go b/raft/internal/raft/raft_etcd_paper_test.go index b3aaaa1..67224e7 100644 --- a/raft/internal/raft/raft_etcd_paper_test.go +++ b/raft/internal/raft/raft_etcd_paper_test.go @@ -38,9 +38,8 @@ import ( "sort" "testing" - pb "github.com/zuoyebang/bitalostored/raft/raftpb" - "github.com/zuoyebang/bitalostored/raft/logger" + pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) func TestFollowerUpdateTermFromMessage(t *testing.T) { diff --git a/raft/internal/raft/readindex.go b/raft/internal/raft/readindex.go index ad85c8c..c8098cb 100644 --- a/raft/internal/raft/readindex.go +++ b/raft/internal/raft/readindex.go @@ -14,7 +14,9 @@ package raft -import "github.com/zuoyebang/bitalostored/raft/raftpb" +import ( + "github.com/zuoyebang/bitalostored/raft/raftpb" +) type readStatus struct { confirmed map[uint64]struct{} diff --git a/raft/internal/rsm/adapter.go b/raft/internal/rsm/adapter.go index a3a9f77..a4bf872 100644 --- a/raft/internal/rsm/adapter.go +++ b/raft/internal/rsm/adapter.go @@ -17,11 +17,11 @@ package rsm import ( "io" + "github.com/cockroachdb/errors" + "github.com/zuoyebang/bitalostored/raft/config" pb "github.com/zuoyebang/bitalostored/raft/raftpb" sm "github.com/zuoyebang/bitalostored/raft/statemachine" - - "github.com/cockroachdb/errors" ) // IStateMachine is an adapter interface for underlying sm.IStateMachine, diff --git a/raft/internal/rsm/managed.go b/raft/internal/rsm/managed.go index 31d3eac..d596b3a 100644 --- a/raft/internal/rsm/managed.go +++ b/raft/internal/rsm/managed.go @@ -18,11 +18,11 @@ import ( "io" "sync" + "github.com/cockroachdb/errors" + "github.com/zuoyebang/bitalostored/raft/config" pb "github.com/zuoyebang/bitalostored/raft/raftpb" sm "github.com/zuoyebang/bitalostored/raft/statemachine" - - 
"github.com/cockroachdb/errors" ) var ( diff --git a/raft/internal/rsm/statemachine.go b/raft/internal/rsm/statemachine.go index 61b80c0..c51ffb7 100644 --- a/raft/internal/rsm/statemachine.go +++ b/raft/internal/rsm/statemachine.go @@ -27,6 +27,9 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/logutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/raft" "github.com/zuoyebang/bitalostored/raft/internal/server" @@ -36,9 +39,6 @@ import ( "github.com/zuoyebang/bitalostored/raft/logger" pb "github.com/zuoyebang/bitalostored/raft/raftpb" sm "github.com/zuoyebang/bitalostored/raft/statemachine" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/logutil" ) var ( @@ -386,7 +386,9 @@ func (s *StateMachine) recover(ss pb.Snapshot, init bool) error { func (s *StateMachine) doRecover(ss pb.Snapshot, init bool) error { s.mu.Lock() defer s.mu.Unlock() - + //if s.GetLastApplied() >= ss.Index { + // return raft.ErrSnapshotOutOfDate + //} if s.aborted { return sm.ErrSnapshotStopped } @@ -439,6 +441,16 @@ func (s *StateMachine) apply(ss pb.Snapshot, init bool) { defer s.lastApplied.Unlock() s.lastApplied.index, s.lastApplied.term = ss.Index, ss.Term s.index, s.term = ss.Index, ss.Term + //s.lastApplied.index, s.lastApplied.term = ss.Index, ss.Term + //lastApplied从快照应用的OnDiskIndex开始 + //if ss.Imported { + // s.lastApplied.index, s.lastApplied.term = ss.Index, ss.Term + // s.index, s.term = ss.Index, ss.Term + //} else { + // //s.lastApplied.index, s.lastApplied.term = s.onDiskIndex, ss.Term + // s.lastApplied.term = ss.Term + // s.term = ss.Term + //} } func (s *StateMachine) applyOnDisk(ss pb.Snapshot, init bool) { @@ -724,8 +736,13 @@ func (s *StateMachine) GetSyncedIndex() uint64 { } func (s *StateMachine) setSyncedIndex(index uint64) { + //defer func() { + // plog.Infof("stack:%s", string(debug.Stack())) + //}() + //plog.Infof("synced index, s.GetSyncedIndex(): %d, index: 
%d", s.GetSyncedIndex(), index) if s.GetSyncedIndex() > index { plog.Panicf("synced index moving backward, s.GetSyncedIndex(): %d, index: %d", s.GetSyncedIndex(), index) + // panic("synced index moving backward") return } atomic.StoreUint64(&s.syncedIndex, index) @@ -931,6 +948,7 @@ func (s *StateMachine) handle(t []Task, a []sm.Entry) error { } }() update, noop := getEntryTypes(entries) + // plog.Infof("statemachine get entries originLen:%d, applyLen:%d, batch :%v, update: %v, noop: %v", len(t[idx].Entries), len(entries), batch, update, noop) membershipSnapshot := false if batch && update && noop { if err := s.handleBatch(entries, a, s.index); err != nil { @@ -938,6 +956,7 @@ func (s *StateMachine) handle(t []Task, a []sm.Entry) error { } } else { for i := range entries { + // 非replay的任务需要检查index值 if !t[idx].Replay && entries[i].Index <= s.index { continue } @@ -1073,7 +1092,7 @@ func (s *StateMachine) handleBatch(input []pb.Entry, ents []sm.Entry, applied ui s.setApplied(e.Index, e.Term) } } - + //plog.Infof("StateMachine handleBatch input len:%d, entries len:%d", len(input), len(ents)) if len(ents) > 0 { results, err := s.sm.BatchedUpdate(ents) if err != nil { @@ -1136,6 +1155,7 @@ func (s *StateMachine) noop(e pb.Entry) { } } +// result is a tuple of (result, should ignore, rejected, error) func (s *StateMachine) update(e pb.Entry) (sm.Result, bool, bool, error) { s.mu.Lock() defer s.mu.Unlock() diff --git a/raft/internal/rsm/statemachine_test.go b/raft/internal/rsm/statemachine_test.go index 8b4fb09..d9e3789 100644 --- a/raft/internal/rsm/statemachine_test.go +++ b/raft/internal/rsm/statemachine_test.go @@ -21,11 +21,10 @@ import ( "math/rand" "testing" - "github.com/zuoyebang/bitalostored/raft/client" - "github.com/cockroachdb/errors" "github.com/lni/goutils/leaktest" + "github.com/zuoyebang/bitalostored/raft/client" "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/raft" 
"github.com/zuoyebang/bitalostored/raft/internal/server" diff --git a/raft/internal/server/environment.go b/raft/internal/server/environment.go index dc2c997..b91aa78 100644 --- a/raft/internal/server/environment.go +++ b/raft/internal/server/environment.go @@ -20,19 +20,18 @@ import ( "os" "strings" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/logger" - "github.com/zuoyebang/bitalostored/raft/raftio" - "github.com/zuoyebang/bitalostored/raft/raftpb" - "github.com/cockroachdb/errors" "github.com/lni/goutils/random" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/id" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/utils" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + "github.com/zuoyebang/bitalostored/raft/logger" + "github.com/zuoyebang/bitalostored/raft/raftio" + "github.com/zuoyebang/bitalostored/raft/raftpb" ) var ( @@ -349,7 +348,11 @@ func compatibleLogDBType(saved string, name string) bool { if saved == name { return true } - return (saved == "rocksdb" && name == "pebble") || + return (saved == "rocksdb" && name == "bitable") || + (saved == "bitable" && name == "rocksdb") || + (saved == "sharded-bitable" && name == "sharded-rocksdb") || + (saved == "sharded-rocksdb" && name == "sharded-bitable") || + (saved == "rocksdb" && name == "pebble") || (saved == "pebble" && name == "rocksdb") || (saved == "sharded-pebble" && name == "sharded-rocksdb") || (saved == "sharded-rocksdb" && name == "sharded-pebble") diff --git a/raft/internal/server/environment_test.go b/raft/internal/server/environment_test.go index e80b4d5..7aacbde 100644 --- a/raft/internal/server/environment_test.go +++ b/raft/internal/server/environment_test.go @@ -346,6 +346,12 @@ func TestCompatibleLogDBType(t *testing.T) { {"rocksdb", "pebble", true}, {"pebble", "tee", false}, 
{"tee", "pebble", false}, + {"sharded-rocksdb", "sharded-bitable", true}, + {"sharded-bitable", "sharded-rocksdb", true}, + {"bitable", "rocksdb", true}, + {"rocksdb", "bitable", true}, + {"bitable", "tee", false}, + {"tee", "bitable", false}, {"rocksdb", "tee", false}, {"tee", "rocksdb", false}, {"tee", "tee", true}, diff --git a/raft/internal/settings/hard.go b/raft/internal/settings/hard.go index b185539..5bd306b 100644 --- a/raft/internal/settings/hard.go +++ b/raft/internal/settings/hard.go @@ -1,4 +1,4 @@ -// Copyright 2017-2019 Lei Ni (nilei81@gmail.com) and other contributors. +// Copyright 2017-2019 Lei Ni (nilei81@gmail.com), Bitalostored author and other contributors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/raft/internal/settings/soft.go b/raft/internal/settings/soft.go index 55f8dda..b29304e 100644 --- a/raft/internal/settings/soft.go +++ b/raft/internal/settings/soft.go @@ -1,4 +1,4 @@ -// Copyright 2017-2019 Lei Ni (nilei81@gmail.com) and other contributors. +// Copyright 2017-2019 Lei Ni (nilei81@gmail.com), Bitalostored author and other contributors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
diff --git a/raft/internal/tests/kvtest.go b/raft/internal/tests/kvtest.go index 3d82cdc..2371632 100644 --- a/raft/internal/tests/kvtest.go +++ b/raft/internal/tests/kvtest.go @@ -33,11 +33,10 @@ import ( "sync" "time" + "github.com/lni/goutils/random" "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/tests/kvpb" sm "github.com/zuoyebang/bitalostored/raft/statemachine" - - "github.com/lni/goutils/random" ) // random delays diff --git a/raft/internal/transport/chunk.go b/raft/internal/transport/chunk.go index 605e005..e608a6b 100644 --- a/raft/internal/transport/chunk.go +++ b/raft/internal/transport/chunk.go @@ -20,6 +20,9 @@ import ( "sync" "sync/atomic" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/logutil" + "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/rsm" "github.com/zuoyebang/bitalostored/raft/internal/server" @@ -28,9 +31,6 @@ import ( "github.com/zuoyebang/bitalostored/raft/internal/vfs" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/logutil" ) var ( diff --git a/raft/internal/transport/fuzz.go b/raft/internal/transport/fuzz.go index 88f874a..6c1c37d 100644 --- a/raft/internal/transport/fuzz.go +++ b/raft/internal/transport/fuzz.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-//go:build gofuzz // +build gofuzz package transport diff --git a/raft/internal/transport/gossip.go b/raft/internal/transport/gossip.go index 2684f27..8287d5c 100644 --- a/raft/internal/transport/gossip.go +++ b/raft/internal/transport/gossip.go @@ -20,11 +20,11 @@ import ( "sync" "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/cockroachdb/errors" "github.com/hashicorp/memberlist" "github.com/lni/goutils/syncutil" + + "github.com/zuoyebang/bitalostored/raft/config" ) // NodeHostIDRegistry is a node registry backed by gossip. It is capable of diff --git a/raft/internal/transport/gossip_test.go b/raft/internal/transport/gossip_test.go index 377c737..ac05a13 100644 --- a/raft/internal/transport/gossip_test.go +++ b/raft/internal/transport/gossip_test.go @@ -18,10 +18,9 @@ import ( "testing" "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/lni/goutils/leaktest" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/id" ) diff --git a/raft/internal/transport/job.go b/raft/internal/transport/job.go index 7e3cbad..0830456 100644 --- a/raft/internal/transport/job.go +++ b/raft/internal/transport/job.go @@ -18,12 +18,12 @@ import ( "context" "sync/atomic" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/logutil" + "github.com/zuoyebang/bitalostored/raft/internal/vfs" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/logutil" ) const ( diff --git a/raft/internal/transport/job_test.go b/raft/internal/transport/job_test.go index 2240554..f12bb4f 100644 --- a/raft/internal/transport/job_test.go +++ b/raft/internal/transport/job_test.go @@ -18,10 +18,9 @@ import ( "context" "testing" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/lni/goutils/syncutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/vfs" 
pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) diff --git a/raft/internal/transport/monkey.go b/raft/internal/transport/monkey.go index 466a125..1037a59 100644 --- a/raft/internal/transport/monkey.go +++ b/raft/internal/transport/monkey.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build dragonboat_monkeytest // +build dragonboat_monkeytest package transport diff --git a/raft/internal/transport/noop.go b/raft/internal/transport/noop.go index 62ad97c..0497672 100644 --- a/raft/internal/transport/noop.go +++ b/raft/internal/transport/noop.go @@ -20,12 +20,12 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/stringutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/raftio" "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/stringutil" ) var ( diff --git a/raft/internal/transport/registry.go b/raft/internal/transport/registry.go index f1daf57..0b31690 100644 --- a/raft/internal/transport/registry.go +++ b/raft/internal/transport/registry.go @@ -18,12 +18,12 @@ import ( "fmt" "sync" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/logutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/server" "github.com/zuoyebang/bitalostored/raft/raftio" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/logutil" ) var ( diff --git a/raft/internal/transport/snapshot.go b/raft/internal/transport/snapshot.go index 3ce3145..a2277a0 100644 --- a/raft/internal/transport/snapshot.go +++ b/raft/internal/transport/snapshot.go @@ -36,13 +36,13 @@ package transport import ( "sync/atomic" + "github.com/cockroachdb/errors" + "github.com/zuoyebang/bitalostored/raft/internal/rsm" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" 
"github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" ) var ( diff --git a/raft/internal/transport/tcp.go b/raft/internal/transport/tcp.go index 242c086..2925d0f 100644 --- a/raft/internal/transport/tcp.go +++ b/raft/internal/transport/tcp.go @@ -25,15 +25,15 @@ import ( "sync" "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/internal/settings" - "github.com/zuoyebang/bitalostored/raft/raftio" - pb "github.com/zuoyebang/bitalostored/raft/raftpb" - "github.com/cockroachdb/errors" "github.com/juju/ratelimit" "github.com/lni/goutils/netutil" "github.com/lni/goutils/syncutil" + + "github.com/zuoyebang/bitalostored/raft/config" + "github.com/zuoyebang/bitalostored/raft/internal/settings" + "github.com/zuoyebang/bitalostored/raft/raftio" + pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) var ( diff --git a/raft/internal/transport/tests/localhost.csr b/raft/internal/transport/tests/localhost.csr new file mode 100644 index 0000000..54b5a3b --- /dev/null +++ b/raft/internal/transport/tests/localhost.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIIClDCCAXwCAQAwTzELMAkGA1UEBhMCQ04xCzAJBgNVBAgMAkdEMQswCQYDVQQH +DAJTWjESMBAGA1UECgwJdGVzdCBJbmMuMRIwEAYDVQQDDAlsb2NhbGhvc3QwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDRjKyBu5H06wto4yQcavMN4vJ2 +oq4x4dBD/JXSEBTffrnubfGi6ZuCqdiq5Gh8iMEKHGQ5MCz8eMhLx4l51JmMcjA8 +m9F62dH84iRNfgSclpieSiNP9yY20FtcQsnvREX4+Pjiqx/1TuFDcm6OZ6NUg5In +klKBXOGuHkao1sAvIHwk32yzG/bhL/quGUoDIw2YVrX0o5UGQw1ZyVCXJxVway5W +jxcXqI4v8nj15tH7Yk3ALymROPQDTBtSHA9WVEkRFqn5p3AcqZYyz6ilrex4XYuM +PI2eSQH99vmhsen0I+qk54PCuf6nn8t0I5/rjLStBR2dOF4I1Im+an2F+0ErAgMB +AAGgADANBgkqhkiG9w0BAQsFAAOCAQEAnGA+u8XJ4s+tVlYSbE6w8eb/X+qllX6Y +N5OOzvIzBK1aIeRLXzqOgsgWYut+AtaR/WoQ3iO+nOz2gvElNFVp8fSrTo0TsOzD +ht7VZKOmyzgUY5SQpYfTRmoXbY5xNMcNUPQ5g/f8LQnsifnjJsA2R5ZuGoXLWAcf +HSXKa9cZq3e5bnFMuZM3VfJH1LV/Ma+xcXky4A3h86TwsTwBnqMDhbJYkYBxn3m5 
+qXRE7N6L01SGFOwLgbJVuEaj/4/ZyOZnRF2O/4jnqegXoEVETPNe3Ovytt/t3GeN +Q7DAgpxnpkOMNRkf+A5T/rIUghWdy//RQzxQhRfgQDrGN0Y0CyQDpw== +-----END CERTIFICATE REQUEST----- diff --git a/raft/internal/transport/transport.go b/raft/internal/transport/transport.go index 12d2689..5fba8bd 100644 --- a/raft/internal/transport/transport.go +++ b/raft/internal/transport/transport.go @@ -46,6 +46,12 @@ import ( "sync/atomic" "time" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/logutil" + "github.com/lni/goutils/netutil" + circuit "github.com/lni/goutils/netutil/rubyist/circuitbreaker" + "github.com/lni/goutils/syncutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/invariants" "github.com/zuoyebang/bitalostored/raft/internal/server" @@ -55,12 +61,6 @@ import ( ct "github.com/zuoyebang/bitalostored/raft/plugin/chan" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/logutil" - "github.com/lni/goutils/netutil" - circuit "github.com/lni/goutils/netutil/rubyist/circuitbreaker" - "github.com/lni/goutils/syncutil" ) const ( diff --git a/raft/internal/transport/transport_test.go b/raft/internal/transport/transport_test.go index 6983aca..4e6c731 100644 --- a/raft/internal/transport/transport_test.go +++ b/raft/internal/transport/transport_test.go @@ -27,18 +27,17 @@ import ( "testing" "time" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/raftio" - "github.com/zuoyebang/bitalostored/raft/raftpb" - "github.com/lni/goutils/leaktest" "github.com/lni/goutils/netutil" "github.com/lni/goutils/syncutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/rsm" "github.com/zuoyebang/bitalostored/raft/internal/server" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + 
"github.com/zuoyebang/bitalostored/raft/raftio" + "github.com/zuoyebang/bitalostored/raft/raftpb" ) var serverAddress = fmt.Sprintf("localhost:%d", getTestPort()) diff --git a/raft/internal/utils/dio/io.go b/raft/internal/utils/dio/io.go index 39272d6..947bbf8 100644 --- a/raft/internal/utils/dio/io.go +++ b/raft/internal/utils/dio/io.go @@ -19,9 +19,8 @@ import ( "math" "sync/atomic" - pb "github.com/zuoyebang/bitalostored/raft/raftpb" - "github.com/golang/snappy" + pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) // CompressionType is the type of the compression. diff --git a/raft/internal/vfs/defaultfs.go b/raft/internal/vfs/defaultfs.go index e023e08..4c7e746 100644 --- a/raft/internal/vfs/defaultfs.go +++ b/raft/internal/vfs/defaultfs.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build !dragonboat_memfs_test // +build !dragonboat_memfs_test package vfs diff --git a/raft/internal/vfs/memfs.go b/raft/internal/vfs/memfs.go index 04582ac..81491ef 100644 --- a/raft/internal/vfs/memfs.go +++ b/raft/internal/vfs/memfs.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build dragonboat_memfs_test // +build dragonboat_memfs_test package vfs diff --git a/raft/internal/vfs/vfs.go b/raft/internal/vfs/vfs.go index eb1e598..295dada 100644 --- a/raft/internal/vfs/vfs.go +++ b/raft/internal/vfs/vfs.go @@ -21,9 +21,8 @@ import ( "testing" "github.com/cockroachdb/errors/oserror" - pvfs "github.com/cockroachdb/pebble/vfs" - gvfs "github.com/lni/vfs" + pvfs "github.com/zuoyebang/bitalostable/vfs" ) // IFS is the vfs interface used by dragonboat. @@ -46,35 +45,39 @@ func NewMemFS() IFS { return gvfs.NewStrictMem() } -// PebbleFS is a wrapper struct that implements the pebble/vfs.FS interface. -type PebbleFS struct { +// BitableFS is a wrapper struct that implements the bitable/vfs.FS interface. 
+type BitableFS struct { fs IFS } -var _ pvfs.FS = (*PebbleFS)(nil) +func (p *BitableFS) GetDiskUsage(path string) (pvfs.DiskUsage, error) { + return pvfs.DiskUsage{}, nil +} + +var _ pvfs.FS = (*BitableFS)(nil) -// NewPebbleFS creates a new pebble/vfs.FS instance. -func NewPebbleFS(fs IFS) pvfs.FS { - return &PebbleFS{fs} +// NewBitableFS creates a new bitable/vfs.FS instance. +func NewBitableFS(fs IFS) pvfs.FS { + return &BitableFS{fs} } // GetFreeSpace ... -func (p *PebbleFS) GetFreeSpace(path string) (uint64, error) { +func (p *BitableFS) GetFreeSpace(path string) (uint64, error) { return p.fs.GetFreeSpace(path) } // Create ... -func (p *PebbleFS) Create(name string) (pvfs.File, error) { +func (p *BitableFS) Create(name string) (pvfs.File, error) { return p.fs.Create(name) } // Link ... -func (p *PebbleFS) Link(oldname, newname string) error { +func (p *BitableFS) Link(oldname, newname string) error { return p.fs.Link(oldname, newname) } // Open ... -func (p *PebbleFS) Open(name string, opts ...pvfs.OpenOption) (pvfs.File, error) { +func (p *BitableFS) Open(name string, opts ...pvfs.OpenOption) (pvfs.File, error) { f, err := p.fs.Open(name) if err != nil { return nil, err @@ -86,62 +89,62 @@ func (p *PebbleFS) Open(name string, opts ...pvfs.OpenOption) (pvfs.File, error) } // OpenDir ... -func (p *PebbleFS) OpenDir(name string) (pvfs.File, error) { +func (p *BitableFS) OpenDir(name string) (pvfs.File, error) { return p.fs.OpenDir(name) } // Remove ... -func (p *PebbleFS) Remove(name string) error { +func (p *BitableFS) Remove(name string) error { return p.fs.Remove(name) } // RemoveAll ... -func (p *PebbleFS) RemoveAll(name string) error { +func (p *BitableFS) RemoveAll(name string) error { return p.fs.RemoveAll(name) } // Rename ... -func (p *PebbleFS) Rename(oldname, newname string) error { +func (p *BitableFS) Rename(oldname, newname string) error { return p.fs.Rename(oldname, newname) } // ReuseForWrite ... 
-func (p *PebbleFS) ReuseForWrite(oldname, newname string) (pvfs.File, error) { +func (p *BitableFS) ReuseForWrite(oldname, newname string) (pvfs.File, error) { return p.fs.ReuseForWrite(oldname, newname) } // MkdirAll ... -func (p *PebbleFS) MkdirAll(dir string, perm os.FileMode) error { +func (p *BitableFS) MkdirAll(dir string, perm os.FileMode) error { return p.fs.MkdirAll(dir, perm) } // Lock ... -func (p *PebbleFS) Lock(name string) (io.Closer, error) { +func (p *BitableFS) Lock(name string) (io.Closer, error) { return p.fs.Lock(name) } // List ... -func (p *PebbleFS) List(dir string) ([]string, error) { +func (p *BitableFS) List(dir string) ([]string, error) { return p.fs.List(dir) } // Stat ... -func (p *PebbleFS) Stat(name string) (os.FileInfo, error) { +func (p *BitableFS) Stat(name string) (os.FileInfo, error) { return p.fs.Stat(name) } // PathBase ... -func (p *PebbleFS) PathBase(path string) string { +func (p *BitableFS) PathBase(path string) string { return p.fs.PathBase(path) } // PathJoin ... -func (p *PebbleFS) PathJoin(elem ...string) string { +func (p *BitableFS) PathJoin(elem ...string) string { return p.fs.PathJoin(elem...) } // PathDir ... 
-func (p *PebbleFS) PathDir(path string) string { +func (p *BitableFS) PathDir(path string) string { return p.fs.PathDir(path) } diff --git a/raft/nodehost_test.go b/raft/nodehost_test.go index 63663cb..9821cdb 100644 --- a/raft/nodehost_test.go +++ b/raft/nodehost_test.go @@ -2778,9 +2778,6 @@ func TestRegularStateMachineDoesNotAllowConcurrentSaveSnapshot(t *testing.T) { } func TestLogDBRateLimit(t *testing.T) { - if invariants.DragonboatRocksDBTest { - t.Skip("not supported on rocksdb") - } fs := vfs.GetTestFS() to := &testOption{ defaultTestNode: true, @@ -5031,9 +5028,6 @@ func testIOErrorIsHandled(t *testing.T, op vfs.Op) { } func TestIOErrorIsHandled(t *testing.T) { - if invariants.DragonboatRocksDBTest { - t.Skip("not supported on rocksdb") - } testIOErrorIsHandled(t, vfs.OpWrite) testIOErrorIsHandled(t, vfs.OpSync) } diff --git a/raft/plugin/chan/chan.go b/raft/plugin/chan/chan.go index 92f75f7..17199cd 100644 --- a/raft/plugin/chan/chan.go +++ b/raft/plugin/chan/chan.go @@ -18,12 +18,12 @@ import ( "context" "sync" + "github.com/cockroachdb/errors" + "github.com/lni/goutils/syncutil" + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" - - "github.com/cockroachdb/errors" - "github.com/lni/goutils/syncutil" ) var ( diff --git a/raft/raftio/transport.go b/raft/raftio/transport.go index f0d8cde..a9d605f 100644 --- a/raft/raftio/transport.go +++ b/raft/raftio/transport.go @@ -12,19 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -/* -Package raftio contains structs, interfaces and function definitions required -to build custom persistent Raft log storage and transport modules. - -Structs, interfaces and functions defined in the raftio package are only -required when building your custom persistent Raft log storage or transport -modules. 
Skip this package if you plan to use the default built-in LogDB and -transport modules provided by Dragonboat. - -Structs, interfaces and functions defined in the raftio package are not -considered as a part of Dragonboat's public APIs. Breaking changes might -happen in the coming minor releases. -*/ package raftio import ( diff --git a/raft/raftpb/fuzz.go b/raft/raftpb/fuzz.go index e5c79fa..6d440b8 100644 --- a/raft/raftpb/fuzz.go +++ b/raft/raftpb/fuzz.go @@ -12,7 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build gofuzz // +build gofuzz package raftpb diff --git a/raft/raftpb/raft.go b/raft/raftpb/raft.go index 1f6509d..c66af68 100644 --- a/raft/raftpb/raft.go +++ b/raft/raftpb/raft.go @@ -20,13 +20,12 @@ import ( "strings" "unsafe" - "github.com/zuoyebang/bitalostored/raft/client" - "github.com/zuoyebang/bitalostored/raft/logger" - "github.com/lni/goutils/stringutil" + "github.com/zuoyebang/bitalostored/raft/client" "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + "github.com/zuoyebang/bitalostored/raft/logger" ) var ( diff --git a/raft/tools/checkdisk/main.go b/raft/tools/checkdisk/main.go deleted file mode 100644 index 6bb8c5b..0000000 --- a/raft/tools/checkdisk/main.go +++ /dev/null @@ -1,339 +0,0 @@ -// Copyright 2018-2019 Lei Ni (nilei81@gmail.com) and other contributors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "flag" - "fmt" - "io" - "log" - "os" - "runtime" - "runtime/pprof" - "sync/atomic" - "time" - - "github.com/lni/goutils/syncutil" - "github.com/zuoyebang/bitalostored/raft" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/internal/logdb" - "github.com/zuoyebang/bitalostored/raft/internal/logdb/kv/pebble" - "github.com/zuoyebang/bitalostored/raft/internal/vfs" - "github.com/zuoyebang/bitalostored/raft/logger" - "github.com/zuoyebang/bitalostored/raft/raftio" - sm "github.com/zuoyebang/bitalostored/raft/statemachine" -) - -const ( - dataDirectoryName = "checkdisk-data-safe-to-delete/data1" - dataDirectoryName2 = "checkdisk-data-safe-to-delete/data2" - raftAddress = "localhost:26000" - raftAddress2 = "localhost:26001" -) - -var clustercount = flag.Int("num-of-clusters", 48, "number of raft clusters") -var read = flag.Bool("enable-read", false, "enable read") -var readonly = flag.Bool("read-only", false, "read only") -var batched = flag.Bool("batched-logdb", false, "use batched logdb") -var cpupprof = flag.Bool("cpu-profiling", false, "run CPU profiling") -var mempprof = flag.Bool("mem-profiling", false, "run mem profiling") -var inmemfs = flag.Bool("inmem-fs", false, "use in-memory filesystem") -var clientcount = flag.Int("num-of-clients", 10000, "number of clients to use") -var seconds = flag.Int("seconds-to-run", 60, "number of seconds to run") -var ckpt = flag.Int("checkpoint-interval", 0, "checkpoint interval") -var tiny = flag.Bool("tiny-memory", false, "tiny LogDB memory limit") -var twonh = flag.Bool("two-nodehosts", false, "use two nodehosts") - -type batchedLogDBFactory struct{} - -func (batchedLogDBFactory) Create(cfg config.NodeHostConfig, cb config.LogDBCallback, - dirs []string, lldirs []string) (raftio.ILogDB, error) { - return logdb.NewLogDB(cfg, - cb, dirs, lldirs, true, 
false, pebble.NewKVStore) -} - -func (batchedLogDBFactory) Name() string { - return "Sharded-Pebble" -} - -type dummyStateMachine struct{} - -func newDummyStateMachine(clusterID uint64, nodeID uint64) sm.IStateMachine { - return &dummyStateMachine{} -} - -func (s *dummyStateMachine) Lookup(query interface{}) (interface{}, error) { - return query, nil -} - -func (s *dummyStateMachine) Update(data []byte) (sm.Result, error) { - return sm.Result{Value: 1}, nil -} - -func (s *dummyStateMachine) SaveSnapshot(w io.Writer, - fc sm.ISnapshotFileCollection, done <-chan struct{}) error { - v := make([]byte, 4) - if _, err := w.Write(v); err != nil { - return err - } - return nil -} - -func (s *dummyStateMachine) RecoverFromSnapshot(r io.Reader, - files []sm.SnapshotFile, done <-chan struct{}) error { - v := make([]byte, 4) - if _, err := r.Read(v); err != nil { - return err - } - return nil -} - -func (s *dummyStateMachine) Close() error { return nil } - -func main() { - flag.Parse() - fs := vfs.DefaultFS - if *inmemfs { - log.Println("using in-memory fs") - fs = vfs.NewMemFS() - } - if *mempprof { - defer func() { - f, err := os.Create("mem.pprof") - if err != nil { - log.Fatal("could not create memory profile: ", err) - } - defer func() { - if err := f.Close(); err != nil { - panic(err) - } - }() - runtime.GC() - if err := pprof.WriteHeapProfile(f); err != nil { - log.Fatal("could not write memory profile: ", err) - } - }() - log.Println("memory profile will be saved into file mem.pprof") - } - if err := fs.RemoveAll(dataDirectoryName); err != nil { - panic(err) - } - if err := fs.RemoveAll(dataDirectoryName2); err != nil { - panic(err) - } - defer func() { - if err := fs.RemoveAll(dataDirectoryName); err != nil { - panic(err) - } - }() - defer func() { - if err := fs.RemoveAll(dataDirectoryName2); err != nil { - panic(err) - } - }() - logger.GetLogger("raft").SetLevel(logger.WARNING) - logger.GetLogger("rsm").SetLevel(logger.WARNING) - 
logger.GetLogger("logdb").SetLevel(logger.WARNING) - logger.GetLogger("transport").SetLevel(logger.WARNING) - logger.GetLogger("dragonboat").SetLevel(logger.WARNING) - lc := config.GetLargeMemLogDBConfig() - lc.SaveBufferSize = 64 * 1024 * 1024 - lc.KVMaxWriteBufferNumber = 8 - lc.KVWriteBufferSize = 256 * 1024 * 1024 - lc.KVLevel0FileNumCompactionTrigger = 6 - nhc := config.NodeHostConfig{ - NodeHostDir: dataDirectoryName, - RTTMillisecond: 200, - RaftAddress: raftAddress, - Expert: config.ExpertConfig{FS: fs, LogDB: lc}, - } - if *tiny { - log.Println("using tiny LogDB memory limit") - nhc.Expert.LogDB = config.GetTinyMemLogDBConfig() - } - if *batched { - log.Println("using batched logdb") - nhc.Expert.LogDBFactory = batchedLogDBFactory{} - } - nh, err := raft.NewNodeHost(nhc) - if err != nil { - panic(err) - } - defer nh.Close() - var nh2 *raft.NodeHost - if *twonh { - nhc.NodeHostDir = dataDirectoryName2 - nhc.RaftAddress = raftAddress2 - nh2, err = raft.NewNodeHost(nhc) - if err != nil { - panic(err) - } - defer nh2.Close() - } - rc := config.Config{ - ClusterID: 1, - NodeID: 1, - ElectionRTT: 10, - HeartbeatRTT: 1, - CheckQuorum: true, - SnapshotEntries: uint64(*ckpt), - } - nodes := make(map[uint64]string) - nodes[1] = raftAddress - if *twonh { - nodes[2] = raftAddress2 - } - nhList := make([]*raft.NodeHost, 0) - for i := uint64(1); i <= uint64(*clustercount); i++ { - rc.ClusterID = i - if err := nh.StartCluster(nodes, false, newDummyStateMachine, rc); err != nil { - panic(err) - } - if *twonh { - rc2 := rc - rc2.NodeID = 2 - if err := nh2.StartCluster(nodes, false, newDummyStateMachine, rc2); err != nil { - panic(err) - } - } - } - for i := uint64(1); i <= uint64(*clustercount); i++ { - for j := 0; j < 10000; j++ { - leaderID, ok, err := nh.GetLeaderID(i) - if err != nil { - panic(err) - } - if !*twonh { - if ok && leaderID == 1 { - nhList = append(nhList, nh) - break - } - } else { - if ok && (leaderID == 1 || leaderID == 2) { - if leaderID == 1 { - 
nhList = append(nhList, nh) - } else { - nhList = append(nhList, nh2) - } - break - } - } - time.Sleep(time.Millisecond) - if j == 9999 { - panic("failed to elect leader") - } - } - } - if len(nhList) != *clustercount { - panic(fmt.Sprintf("nhList len unexpected, %d", len(nhList))) - } - fmt.Printf("clusters are ready, will run for %d seconds\n", *seconds) - if *cpupprof { - f, err := os.Create("cpu.pprof") - if err != nil { - log.Fatal("could not create CPU profile: ", err) - } - defer func() { - if err := f.Close(); err != nil { - panic(err) - } - }() - if err := pprof.StartCPUProfile(f); err != nil { - log.Fatal("could not start CPU profile: ", err) - } - defer pprof.StopCPUProfile() - log.Println("cpu profile will be saved into file cpu.pprof") - } - - doneCh := make(chan struct{}, 1) - timer := time.NewTimer(time.Duration(*seconds) * time.Second) - defer timer.Stop() - go func() { - <-timer.C - close(doneCh) - }() - // keep proposing for 60 seconds - stopper := syncutil.NewStopper() - results := make([]uint64, *clientcount) - if !*readonly { - for i := uint64(0); i < uint64(*clientcount); i++ { - workerID := i - stopper.RunWorker(func() { - clusterID := (workerID % uint64(*clustercount)) + 1 - nh := nhList[clusterID-1] - cs := nh.GetNoOPSession(clusterID) - cmd := make([]byte, 16) - results[workerID] = 0 - for { - for j := 0; j < 32; j++ { - rs, err := nh.Propose(cs, cmd, 4*time.Second) - if err != nil { - panic(err) - } - v := <-rs.ResultC() - if v.Completed() { - results[workerID] = results[workerID] + 1 - rs.Release() - } - } - select { - case <-doneCh: - return - default: - } - } - }) - } - } - reads := struct{ v uint64 }{} - if *read || *readonly { - for i := uint64(0); i < uint64(*clientcount); i++ { - workerID := i - stopper.RunWorker(func() { - clusterID := (workerID % uint64(*clustercount)) + 1 - nh := nhList[clusterID-1] - for { - for j := 0; j < 32; j++ { - rs, err := nh.ReadIndex(clusterID, 4*time.Second) - if err != nil { - panic(err) - } - v := 
<-rs.ResultC() - if v.Completed() { - atomic.AddUint64(&reads.v, 1) - if _, err := nh.ReadLocalNode(rs, nil); err != nil { - panic(err) - } - rs.Release() - } - } - select { - case <-doneCh: - return - default: - } - } - }) - } - } - stopper.Stop() - total := uint64(0) - for _, v := range results { - total = total + v - } - rv := atomic.LoadUint64(&reads.v) - fmt.Printf("read %d, %d reads per second\n", rv, rv/uint64(*seconds)) - fmt.Printf("total %d, %d proposals per second\n", total, total/uint64(*seconds)) -} diff --git a/raft/tools/fsync/main.go b/raft/tools/fsync/main.go deleted file mode 100644 index f0cc44c..0000000 --- a/raft/tools/fsync/main.go +++ /dev/null @@ -1,90 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "os" - "sync" -) - -const ( - // BATCH is the number of write operations to repeat - BATCH = 10000 - // BUCKETS is the number of parallel write jobs - BUCKETS = 16 -) - -var data []byte -var fs []*os.File - -var parallel = flag.Bool("parallel", false, "whether to use parallel writes") -var size = flag.Int("size", 64*1024, "size of each write in bytes") - -func init() { - flag.Parse() - data = make([]byte, *size) - if *parallel { - fs = make([]*os.File, BUCKETS) - for i := 0; i < len(fs); i++ { - f, err := os.Create(fmt.Sprintf("%v.dat", i)) - if err != nil { - panic(err) - } - fs[i] = f - } - } else { - fs = make([]*os.File, 1) - f, err := os.Create("test.dat") - if err != nil { - panic(err) - } - fs[0] = f - } -} - -func main() { - if *parallel { - parallelWrite() - } else { - sequentialWrite() - } - for _, f := range fs { - f.Close() - } -} - -func sequentialWrite() { - for i := 0; i < BATCH; i++ { - for j := 0; j < BUCKETS; j++ { - if _, err := fs[0].Write(data); err != nil { - panic(err) - } - } - if err := fs[0].Sync(); err != nil { - panic(err) - } - } -} - -func parallelWrite() { - var wg sync.WaitGroup - if len(fs) != BUCKETS { - panic("unexpected file count") - } - for _, f := range fs { - wg.Add(1) - gf := f - go func() { - for i 
:= 0; i < BATCH; i++ { - if _, err := gf.Write(data); err != nil { - panic(err) - } - if err := gf.Sync(); err != nil { - panic(err) - } - } - wg.Done() - }() - } - wg.Wait() -} diff --git a/raft/tools/import.go b/raft/tools/import.go index 2548516..bdd4f13 100644 --- a/raft/tools/import.go +++ b/raft/tools/import.go @@ -26,12 +26,8 @@ import ( "runtime" "strings" - "github.com/zuoyebang/bitalostored/raft/config" - "github.com/zuoyebang/bitalostored/raft/logger" - "github.com/zuoyebang/bitalostored/raft/raftio" - "github.com/cockroachdb/errors" - + "github.com/zuoyebang/bitalostored/raft/config" "github.com/zuoyebang/bitalostored/raft/internal/fileutil" "github.com/zuoyebang/bitalostored/raft/internal/logdb" "github.com/zuoyebang/bitalostored/raft/internal/rsm" @@ -39,6 +35,8 @@ import ( "github.com/zuoyebang/bitalostored/raft/internal/settings" "github.com/zuoyebang/bitalostored/raft/internal/utils" "github.com/zuoyebang/bitalostored/raft/internal/vfs" + "github.com/zuoyebang/bitalostored/raft/logger" + "github.com/zuoyebang/bitalostored/raft/raftio" pb "github.com/zuoyebang/bitalostored/raft/raftpb" ) diff --git a/stored/cmd/main.go b/stored/cmd/main.go index fac6a90..b93bdac 100644 --- a/stored/cmd/main.go +++ b/stored/cmd/main.go @@ -16,18 +16,14 @@ package main import ( "fmt" + "net/http" "os" "os/signal" - "runtime" "syscall" "github.com/zuoyebang/bitalostored/stored/internal/config" + "github.com/zuoyebang/bitalostored/stored/internal/raft" "github.com/zuoyebang/bitalostored/stored/internal/tclock" - "github.com/zuoyebang/bitalostored/stored/plugin/anticc" - "github.com/zuoyebang/bitalostored/stored/plugin/catch_panic" - "github.com/zuoyebang/bitalostored/stored/plugin/info" - "github.com/zuoyebang/bitalostored/stored/plugin/pprof" - "github.com/zuoyebang/bitalostored/stored/plugin/raft" "github.com/zuoyebang/bitalostored/stored/server" "github.com/zuoyebang/bitalostored/stored/internal/log" @@ -46,8 +42,6 @@ func main() { panic(fmt.Sprintf("load global 
config failed err:%s", err.Error())) } - runtime.GOMAXPROCS(config.GlobalConfig.Server.Maxprocs) - log.NewLogger(&log.Options{ IsDebug: config.GlobalConfig.Log.IsDebug, RotationTime: config.GlobalConfig.Log.RotationTime, @@ -58,6 +52,8 @@ func main() { log.Infof("create server with config\n%s", config.GlobalConfig) + startPprof() + s, err := server.NewServer() if err != nil { log.Errorf("new server fail err:%s", err.Error()) @@ -67,11 +63,9 @@ func main() { log.Info("server is working ...") server.InitLuaPool(s) - info.Init() - catch_panic.Init() - pprof.Init() - raft.Init() - anticc.Init() + raft.RaftInit(s) + server.RunInfoCollection(s) + raft.RaftStart(s) sc := make(chan os.Signal, 1) signal.Notify(sc, @@ -82,7 +76,7 @@ func main() { syscall.SIGTERM, syscall.SIGQUIT) - go s.Run() + go s.ListenAndServe() <-sc @@ -92,3 +86,16 @@ func main() { s.Close() log.Info("server is closed ...") } + +func startPprof() { + if !config.GlobalConfig.Plugin.OpenPprof { + return + } + + go func() { + pprofAddr := config.GlobalConfig.Plugin.PprofAddr + if err := http.ListenAndServe(pprofAddr, nil); err != nil { + log.Fatal(err) + } + }() +} diff --git a/stored/engine/bitsdb/bitsdb/base/base_cmd.go b/stored/engine/bitsdb/bitsdb/base/base_cmd.go index c5477e0..431845c 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_cmd.go +++ b/stored/engine/bitsdb/bitsdb/base/base_cmd.go @@ -43,6 +43,12 @@ func (bo *BaseObject) Del(khash uint32, keys ...[]byte) (n int64, err error) { unlockKey := bo.LockKey(khash) defer unlockKey() + bitmapExist, _ := bo.BaseDb.ClearBitmap(key, true) + if bitmapExist { + n++ + return + } + mk, mkCloser := EncodeMetaKey(key, khash) defer mkCloser() mkv, err := bo.BaseDb.BaseGetMetaWithoutValue(mk) @@ -122,6 +128,10 @@ func (bo *BaseObject) BaseExpireAt(key []byte, khash uint32, when int64) (int64, unlockKey := bo.LockKey(khash) defer unlockKey() + if ret, ok := bo.bitmapMemExpireAt(key, uint64(when)); ok { + return ret, nil + } + mk, mkCloser := EncodeMetaKey(key, 
khash) mkv, mvCloser, err := bo.BaseDb.BaseGetMetaWithValue(mk) defer func() { @@ -170,6 +180,13 @@ func (bo *BaseObject) BasePTTL(key []byte, khash uint32, p bool) (int64, error) return -2, err } + if ttl, ok := bo.bitmapMemTTL(key); ok { + if !p && ttl > 0 { + ttl = tclock.SetTtlMilliToSec(ttl) + } + return ttl, nil + } + mk, mkCloser := EncodeMetaKey(key, khash) defer mkCloser() mkv, err := bo.BaseDb.BaseGetMetaWithoutValue(mk) @@ -205,6 +222,10 @@ func (bo *BaseObject) BaseExists(key []byte, khash uint32) (int64, error) { return 0, err } + if ret, ok := bo.bitmapMemExists(key); ok { + return ret, nil + } + mkv, err := bo.BaseDb.BaseGetMetaDataCheckAlive(key, khash) if mkv == nil { return 0, err @@ -219,6 +240,10 @@ func (bo *BaseObject) BasePersist(key []byte, khash uint32) (int64, error) { return 0, err } + if ret, ok := bo.bitmapMemPersist(key); ok { + return ret, nil + } + unlockKey := bo.LockKey(khash) defer unlockKey() diff --git a/stored/engine/bitsdb/bitsdb/base/base_data_zset_format.go b/stored/engine/bitsdb/bitsdb/base/base_data_zset_format.go index 9c12e1f..58f962c 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_data_zset_format.go +++ b/stored/engine/bitsdb/bitsdb/base/base_data_zset_format.go @@ -24,27 +24,21 @@ import ( "github.com/zuoyebang/bitalostored/stored/internal/utils" ) -func EncodeZsetDataKey(buf []byte, version uint64, khash uint32, member []byte) { - binary.LittleEndian.PutUint16(buf, utils.GetSlotId(khash)) - - var verBytes [8]byte - binary.LittleEndian.PutUint64(verBytes[:], version) - verMember := append(member, verBytes[:]...) 
- verMemberMd5 := md5.Sum(verMember) - - copy(buf[keySlotIdLength:DataKeyZsetLength], verMemberMd5[0:FieldMd5Length]) -} - -func DecodeZsetDataKey(ekf []byte) ([]byte, error) { - if ekf == nil { - return nil, nil +func EncodeZsetDataKey(buf []byte, version uint64, khash uint32, member []byte, isOld bool) int { + if !isOld { + PutDataKeyHeader(buf, version, khash) + memberMd5 := md5.Sum(member) + copy(buf[DataKeyHeaderLength:DataKeyZsetLength], memberMd5[0:FieldMd5Length]) + return DataKeyZsetLength + } else { + var verBytes [8]byte + binary.LittleEndian.PutUint16(buf, utils.GetSlotId(khash)) + binary.LittleEndian.PutUint64(verBytes[:], version) + verMember := append(member, verBytes[:]...) + verMemberMd5 := md5.Sum(verMember) + copy(buf[keySlotIdLength:DataKeyZsetOldLength], verMemberMd5[0:FieldMd5Length]) + return DataKeyZsetOldLength } - - if len(ekf) < DataKeyZsetLength { - return nil, errFieldEncodeKey - } - - return ekf[keySlotIdLength:DataKeyZsetLength], nil } func EncodeZsetIndexKeyScore(buf []byte, version uint64, khash uint32, score float64) { diff --git a/stored/engine/bitsdb/bitsdb/base/base_db.go b/stored/engine/bitsdb/bitsdb/base/base_db.go index fe0a748..2dc4245 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_db.go +++ b/stored/engine/bitsdb/bitsdb/base/base_db.go @@ -19,7 +19,6 @@ import ( "sync/atomic" "time" - "github.com/panjf2000/ants/v2" "github.com/zuoyebang/bitalostored/butils/vectormap" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/locker" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitskv" @@ -31,18 +30,21 @@ import ( ) const ( - cacheBucketNum int = 1024 - cacheEliminateThreadNum int = 1 - cacheEliminateDuration int = 900 + defaultCacheShardNum int = 1024 + defaultCacheEliminateThreadNum int = 1 + defaultCacheEliminateDuration int = 1080 + + missCacheValue = byte(btools.NoneType) ) type BaseDB struct { - DB *bitskv.DB - MetaCache *vectormap.VectorMap - DelExpirePool *ants.PoolWithFunc - IsKeyScan atomic.Int32 
- Ready atomic.Bool - KeyLocker *locker.ScopeLocker + DB *bitskv.DB + MetaCache *vectormap.VectorMap + EnableMissCache bool + IsKeyScan atomic.Int32 + Ready atomic.Bool + KeyLocker *locker.ScopeLocker + BitmapMem *BitmapMem } func NewBaseDB(cfg *dbconfig.Config) (*BaseDB, error) { @@ -52,21 +54,27 @@ func NewBaseDB(cfg *dbconfig.Config) (*BaseDB, error) { } baseDb := &BaseDB{ - DB: db, - KeyLocker: locker.NewScopeLocker(btools.KeyLockerPoolCap), - MetaCache: nil, + DB: db, + KeyLocker: locker.NewScopeLocker(true), + MetaCache: nil, + EnableMissCache: false, } + baseDb.BitmapMem = NewBitmapMem(baseDb) if cfg.CacheSize > 0 { if cfg.CacheEliminateDuration <= 0 { - cfg.CacheEliminateDuration = cacheEliminateDuration + cfg.CacheEliminateDuration = defaultCacheEliminateDuration + } + if cfg.CacheShardNum < defaultCacheShardNum { + cfg.CacheShardNum = defaultCacheShardNum } - eliminateDuration := time.Duration(cfg.CacheEliminateDuration) * time.Second + + baseDb.EnableMissCache = cfg.EnableMissCache baseDb.MetaCache = vectormap.NewVectorMap(uint32(cfg.CacheHashSize), vectormap.WithType(vectormap.MapTypeLRU), - vectormap.WithBuckets(cacheBucketNum), + vectormap.WithBuckets(cfg.CacheShardNum), vectormap.WithLogger(log.GetLogger()), - vectormap.WithEliminate(vectormap.Byte(cfg.CacheSize), cacheEliminateThreadNum, eliminateDuration)) + vectormap.WithEliminate(vectormap.Byte(cfg.CacheSize), defaultCacheEliminateThreadNum, time.Duration(cfg.CacheEliminateDuration)*time.Second)) } return baseDb, nil @@ -88,10 +96,15 @@ func (b *BaseDB) Close() { b.SetNoReady() b.DB.Close() if b.MetaCache != nil { - b.MetaCache = nil + b.MetaCache.Close() + log.Infof("MetaCache Close finish") } } +func (b *BaseDB) FlushBitmap() { + b.BitmapMem.Close() +} + func (b *BaseDB) ClearCache() { if b.MetaCache != nil { b.MetaCache.Clear() @@ -100,20 +113,28 @@ func (b *BaseDB) ClearCache() { func (b *BaseDB) GetMeta(key []byte) ([]byte, func(), error) { if b.MetaCache != nil { - v, closer, ok := 
b.MetaCache.Get(key) - if ok { + v, closer, exist := b.MetaCache.Get(key) + if exist { + if b.EnableMissCache && v != nil && v[0] == missCacheValue { + closer() + return nil, nil, nil + } return v, closer, nil } } val, closer, err := b.DB.GetMeta(key) if b.DB.IsNotFound(err) { + if b.EnableMissCache { + b.MetaCache.RePut(key, []byte{missCacheValue}) + } return nil, nil, nil } if b.MetaCache != nil && len(val) > 0 { b.MetaCache.RePut(key, val) } + return val, closer, err } @@ -158,7 +179,7 @@ func (b *BaseDB) getMetaWithValue(ek []byte, dt btools.DataType) (mkv *MetaData, return nil, nil, err } - if dt != btools.NoneType && dt != mkv.dt { + if mkv.IsWrongType(dt) { log.Errorf("getMetaWithValue dataType notmatch ek:%s exp:%d act:%d mkv:%v", string(ek), dt, mkv.dt, mkv) PutMkvToPool(mkv) return nil, nil, errn.ErrWrongType @@ -186,33 +207,6 @@ func (b *BaseDB) getMetaWithoutValue(ek []byte, dt btools.DataType) (*MetaData, return mkv, nil } -func (b *BaseDB) DeleteMetaKeyByExpire( - dt btools.DataType, key []byte, khash uint32, keyVersion uint64, expireTime uint64, -) (bool, error) { - var isDel bool - mk, mkCloser := EncodeMetaKey(key, khash) - defer mkCloser() - mkv, err := b.getMetaWithoutValue(mk, dt) - if mkv == nil { - return isDel, err - } - defer PutMkvToPool(mkv) - - if dt == btools.STRING { - if mkv.timestamp == expireTime { - isDel = true - } - } else if mkv.version <= keyVersion && mkv.timestamp > 0 && mkv.timestamp <= expireTime { - isDel = true - } - - if isDel { - return isDel, b.DeleteMetaKey(mk) - } - - return isDel, nil -} - func (b *BaseDB) DeleteMetaKey(key []byte) error { wb := b.DB.GetMetaWriteBatchFromPool() defer b.DB.PutWriteBatchToPool(wb) @@ -233,8 +227,20 @@ func (b *BaseDB) DeleteExpireKey(key []byte) error { return wb.Commit() } -func (b *BaseDB) SetDelExpireDataPool(pool *ants.PoolWithFunc) { - b.DelExpirePool = pool +func (b *BaseDB) ClearBitmap(key []byte, deleteDB bool) (bool, error) { + return b.BitmapMem.Delete(key, deleteDB) +} + 
+func (b *BaseDB) SetMetaDataByValues(ek []byte, vlen int, value ...[]byte) error { + wb := b.DB.GetMetaWriteBatchFromPool() + defer b.DB.PutWriteBatchToPool(wb) + + _ = wb.PutMultiValue(ek, value...) + err := wb.Commit() + if err == nil && b.MetaCache != nil { + b.MetaCache.PutMultiValue(ek, vlen, value...) + } + return err } func (b *BaseDB) GetAllDB() []kv.IKVStore { @@ -247,6 +253,7 @@ func (b *BaseDB) CacheInfo() string { } memCap := b.MetaCache.MaxMem() usedMem := b.MetaCache.UsedMem() + shardNum := b.MetaCache.Shards() effectiveMem := b.MetaCache.EffectiveMem() remainItemNum := b.MetaCache.Capacity() itemNum := b.MetaCache.Count() @@ -258,6 +265,6 @@ func (b *BaseDB) CacheInfo() string { hitRate = float64(queryCount-missCount) / float64(queryCount) } - return fmt.Sprintf("memCap:%d usedMem:%d effectiveMem:%d remainItem:%d Items:%d reputFailsCount:%d queryCount:%d missCount:%d hitRate:%.6f", - memCap, usedMem, effectiveMem, remainItemNum, itemNum, reputFailsCount, queryCount, missCount, hitRate) + return fmt.Sprintf("shardNum:%d memCap:%d usedMem:%d effectiveMem:%d remainItem:%d Items:%d reputFailsCount:%d queryCount:%d missCount:%d hitRate:%.6f", + shardNum, memCap, usedMem, effectiveMem, remainItemNum, itemNum, reputFailsCount, queryCount, missCount, hitRate) } diff --git a/stored/engine/bitsdb/bitsdb/base/base_expire_format.go b/stored/engine/bitsdb/bitsdb/base/base_expire_format.go index bc9c893..0e1f463 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_expire_format.go +++ b/stored/engine/bitsdb/bitsdb/base/base_expire_format.go @@ -83,7 +83,7 @@ func (bo *BaseObject) DeleteZsetIndexKeyByExpire(keyVersion uint64, keyHash uint return wb.Commit() } -func (bo *BaseObject) DeleteZsetKeyByExpire(keyVersion uint64, keyKind uint8, khash uint32) (bool, uint64, error) { +func (bo *BaseObject) DeleteZsetOldKeyByExpire(keyVersion uint64, keyKind uint8, khash uint32) (bool, uint64, error) { var cnt uint64 var dataKey [DataKeyZsetLength]byte var lowerBound
[DataKeyHeaderLength]byte @@ -99,17 +99,17 @@ func (bo *BaseObject) DeleteZsetKeyByExpire(keyVersion uint64, keyKind uint8, kh indexWb := bo.GetIndexWriteBatchFromPool() dataWb := bo.GetDataWriteBatchFromPool() defer func() { + it.Close() bo.PutWriteBatchToPool(indexWb) bo.PutWriteBatchToPool(dataWb) - it.Close() }() for it.Seek(lowerBound[:]); it.Valid() && it.ValidForPrefix(lowerBound[:]); it.Next() { indexKey := it.RawKey() _ = indexWb.Delete(indexKey) _, _, fp := DecodeZsetIndexKey(keyKind, indexKey, it.RawValue()) - EncodeZsetDataKey(dataKey[:], keyVersion, khash, fp.Merge()) - _ = dataWb.Delete(dataKey[:]) + dataKeyLen := EncodeZsetDataKey(dataKey[:], keyVersion, khash, fp.Merge(), true) + _ = dataWb.Delete(dataKey[:dataKeyLen]) cnt++ if cnt >= DeleteMixFieldMaxNum { break @@ -119,11 +119,9 @@ func (bo *BaseObject) DeleteZsetKeyByExpire(keyVersion uint64, keyKind uint8, kh if cnt == 0 { return true, 0, nil } - if err := dataWb.Commit(); err != nil { return false, 0, err } - if err := indexWb.Commit(); err != nil { return false, 0, err } diff --git a/stored/engine/bitsdb/bitsdb/base/base_meta.go b/stored/engine/bitsdb/bitsdb/base/base_meta.go index a30e00c..8a18cc2 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_meta.go +++ b/stored/engine/bitsdb/bitsdb/base/base_meta.go @@ -160,6 +160,26 @@ func (mkv *MetaData) SetDataType(dt btools.DataType) { mkv.dt = dt } +func (mkv *MetaData) IsWrongType(exp btools.DataType) bool { + if exp == btools.NoneType { + return false + } + + if exp == btools.ZSET { + if mkv.dt != btools.ZSET && mkv.dt != btools.ZSETOLD { + return true + } + } else if exp != mkv.dt { + return true + } + + return false +} + +func (mkv *MetaData) IsZsetOld() bool { + return mkv.dt == btools.ZSETOLD +} + func (mkv *MetaData) DecrSize(delat uint32) { if delat > mkv.size { mkv.size = 0 diff --git a/stored/engine/bitsdb/bitsdb/base/base_meta_format.go b/stored/engine/bitsdb/bitsdb/base/base_meta_format.go index c0da135..d9d27e9 100644 --- 
a/stored/engine/bitsdb/bitsdb/base/base_meta_format.go +++ b/stored/engine/bitsdb/bitsdb/base/base_meta_format.go @@ -43,7 +43,8 @@ const ( MetaListValueLen = MetaMixValueLen + MetaListPosIndex*2 DataKeyHeaderLength = keySlotIdLength + keyVersionLength - DataKeyZsetLength = keySlotIdLength + FieldMd5Length + DataKeyZsetLength = DataKeyHeaderLength + FieldMd5Length + DataKeyZsetOldLength = keySlotIdLength + FieldMd5Length DataKeyListIndex = DataKeyHeaderLength + 4 DataKeyUpperBoundLength = DataKeyHeaderLength + MaxFieldLength diff --git a/stored/engine/bitsdb/bitsdb/base/base_object.go b/stored/engine/bitsdb/bitsdb/base/base_object.go index b165867..1df413c 100644 --- a/stored/engine/bitsdb/bitsdb/base/base_object.go +++ b/stored/engine/bitsdb/bitsdb/base/base_object.go @@ -70,7 +70,7 @@ func (bo *BaseObject) Close() { func (bo *BaseObject) CheckMetaData(mkv *MetaData) (isAlive bool, err error) { if mkv.IsAlive() { isAlive = true - if mkv.dt != bo.DataType { + if mkv.IsWrongType(bo.DataType) { err = errn.ErrWrongType } } else { @@ -150,7 +150,7 @@ func (bo *BaseObject) SetMetaDataSize(ek []byte, khash uint32, delta int64) erro } switch mkv.dt { - case btools.ZSET, btools.SET, btools.HASH: + case btools.ZSET, btools.ZSETOLD, btools.SET, btools.HASH: var meta [MetaMixValueLen]byte EncodeMetaDbValueForMix(meta[:], mkv) return bo.SetMetaDataByValue(ek, meta[:]) diff --git a/stored/engine/bitsdb/bitsdb/base/bitmap_cmd.go b/stored/engine/bitsdb/bitsdb/base/bitmap_cmd.go new file mode 100644 index 0000000..3745d2d --- /dev/null +++ b/stored/engine/bitsdb/bitsdb/base/bitmap_cmd.go @@ -0,0 +1,62 @@ +package base + +import "github.com/zuoyebang/bitalostored/stored/internal/tclock" + +func (bo *BaseObject) bitmapMemExpireAt(key []byte, when uint64) (int64, bool) { + if bi, ok := bo.BaseDb.BitmapMem.Get(key); ok { + if bi.Expired() { + return 0, true + } else { + bi.SetExpire(uint64(when)) + return 1, true + } + } + + return 0, false +} + +func (bo *BaseObject) bitmapMemTTL(key 
[]byte) (int64, bool) { + if bi, ok := bo.BaseDb.BitmapMem.Get(key); ok { + expire := int64(bi.expireMs.Load()) + return checkTTL(expire), true + } + + return 0, false +} + +func checkTTL(expire int64) int64 { + if expire == 0 { + return ErrnoKeyPersist + } else { + nowtime := tclock.GetTimestampMilli() + if expire <= nowtime { + return ErrnoKeyNotFoundOrExpire + } else { + return int64(expire) - nowtime + } + } +} + +func (bo *BaseObject) bitmapMemPersist(key []byte) (int64, bool) { + if bi, ok := bo.BaseDb.BitmapMem.Get(key); ok { + if bi.Expired() { + return 0, true + } else { + bi.SetExpire(0) + return 1, true + } + } + + return 0, false +} + +func (bo *BaseObject) bitmapMemExists(key []byte) (int64, bool) { + if bi, ok := bo.BaseDb.BitmapMem.Get(key); ok { + if bi.Expired() { + return 0, true + } + return 1, true + } + + return 0, false +} diff --git a/stored/engine/bitsdb/bitsdb/base/bitmap_mem.go b/stored/engine/bitsdb/bitsdb/base/bitmap_mem.go new file mode 100644 index 0000000..51e6ffd --- /dev/null +++ b/stored/engine/bitsdb/bitsdb/base/bitmap_mem.go @@ -0,0 +1,472 @@ +package base + +import ( + "runtime/debug" + "sort" + "sync" + "sync/atomic" + "time" + + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/zuoyebang/bitalostored/butils/unsafe2" + "github.com/zuoyebang/bitalostored/stored/internal/log" + "github.com/zuoyebang/bitalostored/stored/internal/tclock" + "github.com/zuoyebang/bitalostored/stored/internal/utils" +) + +const bitmapItemMax = 2048 +const bitmapFlushSecond = 2700 + +type BitmapItem struct { + key []byte + khash uint32 + mu struct { + sync.RWMutex + rb *roaring64.Bitmap + } + expireMs atomic.Uint64 // millisecond + modify atomic.Int64 // second +} + +type BitmapMem struct { + enable bool + + mu struct { + sync.RWMutex + items map[string]*BitmapItem + count int + } + + migrating atomic.Bool + migrateSlot atomic.Uint32 + + baseDB *BaseDB + flushing atomic.Bool + fasting bool + flushLock sync.Mutex + flushSecond int64 + scanItems 
[]*BitmapItem + closeCh chan struct{} + wg sync.WaitGroup +} + +func NewBitmapItem(key []byte, khash uint32, rb *roaring64.Bitmap, timestamp uint64) *BitmapItem { + bi := &BitmapItem{ + key: key, + khash: khash, + } + bi.mu.rb = rb + bi.expireMs.Store(timestamp) + now := tclock.GetTimestampSecond() + bi.modify.Store(now) + return bi +} + +func (bi *BitmapItem) GetReader() (*roaring64.Bitmap, func()) { + if bi.Expired() { + return nil, nil + } else { + bi.mu.RLock() + return bi.mu.rb, func() { + bi.mu.RUnlock() + } + } +} + +func (bi *BitmapItem) Expired() bool { + expire := bi.expireMs.Load() + if expire == 0 { + return false + } else { + return expire <= uint64(tclock.GetTimestampMilli()) + } +} + +func (bi *BitmapItem) GetWriter() (*roaring64.Bitmap, func()) { + bi.mu.Lock() + if bi.Expired() { + bi.reset() + } + now := tclock.GetTimestampSecond() + bi.modify.Store(now) + return bi.mu.rb, func() { + bi.mu.Unlock() + } +} + +func (bi *BitmapItem) reset() { + bi.mu.rb = roaring64.NewBitmap() + bi.expireMs.Store(0) +} + +func (bi *BitmapItem) SetExpire(expire uint64) { + bi.expireMs.Store(expire) + bi.modify.Store(tclock.GetTimestampSecond()) +} + +func NewBitmapMem(db *BaseDB) *BitmapMem { + bm := &BitmapMem{ + baseDB: db, + flushSecond: bitmapFlushSecond, + scanItems: make([]*BitmapItem, 0, bitmapItemMax), + closeCh: make(chan struct{}), + flushLock: sync.Mutex{}, + } + + bm.mu.items = make(map[string]*BitmapItem, 10) + bm.wg.Add(1) + go bm.RunFlushWorker() + return bm +} + +func (bm *BitmapMem) SetEnable() { + if !bm.enable { + bm.enable = true + } +} + +func (bm *BitmapMem) GetEnable() bool { + return bm.enable +} + +func (bm *BitmapMem) Get(key []byte) (*BitmapItem, bool) { + if !bm.enable { + return nil, false + } + + bm.mu.RLock() + defer bm.mu.RUnlock() + + if v, ok := bm.mu.items[unsafe2.String(key)]; ok { + return v, true + } else { + return nil, false + } +} + +func (bm *BitmapMem) AddItem(key []byte, khash uint32, newBi func(k []byte) *BitmapItem) bool { 
+ if bm.flushing.Load() { + return false + } + + if bm.migrating.Load() && khash%utils.TotalSlot == bm.migrateSlot.Load() { + return false + } + + bm.mu.Lock() + defer bm.mu.Unlock() + + if bm.IsFull() { + return false + } + + keyStr := string(key) + bm.mu.items[keyStr] = newBi(unsafe2.ByteSlice(keyStr)) + bm.mu.count++ + return true +} + +func (bm *BitmapMem) Delete(key []byte, deleteDB bool) (bool, error) { + if !bm.enable { + return false, nil + } + + bm.mu.RLock() + _, exist := bm.mu.items[unsafe2.String(key)] + bm.mu.RUnlock() + if !exist { + return false, nil + } + + bm.mu.Lock() + defer bm.mu.Unlock() + return bm.doDeleteKey(key, deleteDB) +} + +func (bm *BitmapMem) deleteItem(it *BitmapItem, deleteDB bool) (bool, error) { + bm.mu.Lock() + defer bm.mu.Unlock() + return bm.doDeleteItem(it, deleteDB) +} + +func (bm *BitmapMem) checkItem(it *BitmapItem) bool { + bm.mu.RLock() + defer bm.mu.RUnlock() + return bm.doCheckItem(it) +} + +func (bm *BitmapMem) Close() { + close(bm.closeCh) + bm.wg.Wait() + log.Infof("bitmap mem closed") +} + +func (bm *BitmapMem) StartMigrate(slotId uint32) { + bm.migrateSlot.Store(slotId) + bm.migrating.Store(true) + bm.flushSlot(slotId) +} + +func (bm *BitmapMem) ClearMigrate() { + bm.migrating.Store(false) +} + +func (bm *BitmapMem) RunFlushWorker() { + log.Infof("bitmap flush starts to work") + defer func() { + bm.wg.Done() + log.Infof("bitmap flush closed") + }() + + worker := func() (closed bool) { + defer func() { + if r := recover(); r != nil { + log.Errorf("bitmap flush panic err:%v stack=%s", r, string(debug.Stack())) + } + }() + + tick := time.NewTicker(time.Duration(bm.flushSecond) * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + now := tclock.GetTimestampSecond() + bm.Flush(false) + bm.Evict(now) + case <-bm.closeCh: + closed = true + bm.Flush(true) + return + } + } + } + for { + if worker() { + break + } + } +} + +func (bm *BitmapMem) Flush(fast bool) { + if !bm.enable { + return + } + + if fast { 
+ bm.fasting = true + } + defer func() { + if fast { + bm.fasting = false + } + }() + + bm.flushLock.Lock() + defer bm.flushLock.Unlock() + + bm.flushing.Store(true) + defer bm.flushing.Store(false) + + now := tclock.GetTimestampSecond() + bm.scanItems = bm.scanItems[:0] + bm.mu.RLock() + for _, it := range bm.mu.items { + bm.scanItems = append(bm.scanItems, it) + } + bm.mu.RUnlock() + total := len(bm.scanItems) + + var expireNum, nullNum, flushNum, flushBytes int + var meta [MetaStringValueLen]byte + for _, it := range bm.scanItems { + if bm.fasting != fast { + break + } + + if it.Expired() { + bm.deleteItem(it, true) + expireNum++ + continue + } + + if now-it.modify.Load() >= 2*bm.flushSecond { + bm.deleteItem(it, false) + continue + } + + it.mu.RLock() + if it.mu.rb.IsEmpty() { + nullNum++ + it.mu.RUnlock() + bm.deleteItem(it, true) + continue + } + + val, err := it.mu.rb.MarshalBinary() + it.mu.RUnlock() + if err != nil { + continue + } + + if !bm.checkItem(it) { + continue + } + + flushNum++ + ek, ekCloser := EncodeMetaKey(it.key, it.khash) + EncodeMetaDbValueForString(meta[:], it.expireMs.Load()) + vlen := MetaStringValueLen + len(val) + bm.baseDB.SetMetaDataByValues(ek, vlen, meta[:], val) + ekCloser() + flushBytes += vlen + + if !fast { + time.Sleep(500 * time.Millisecond) + } + } + log.Infof("bitmap item flush. 
cost:%d(s) total:%d expireNum:%d nullNum:%d flushNum:%d flushBytes:%d", tclock.GetTimestampSecond()-now, total, expireNum, nullNum, flushNum, flushBytes) +} + +func (bm *BitmapMem) IsFull() bool { + return bm.mu.count >= bitmapItemMax +} + +func (bm *BitmapMem) doDeleteKey(key []byte, deleteDB bool) (bool, error) { + var err error + keyStr := unsafe2.String(key) + if v, ok := bm.mu.items[keyStr]; ok { + delete(bm.mu.items, keyStr) + bm.mu.count-- + + if deleteDB { + ek, ekCloser := EncodeMetaKey(v.key, v.khash) + defer ekCloser() + err = bm.baseDB.DeleteMetaKey(ek) + } + return true, err + } else { + return false, nil + } +} + +func (bm *BitmapMem) doDeleteItem(it *BitmapItem, deleteDB bool) (bool, error) { + var err error + keyStr := unsafe2.String(it.key) + if v, ok := bm.mu.items[keyStr]; ok && v == it { + delete(bm.mu.items, keyStr) + bm.mu.count-- + + if deleteDB { + ek, ekCloser := EncodeMetaKey(v.key, v.khash) + defer ekCloser() + err = bm.baseDB.DeleteMetaKey(ek) + } + return true, err + } + return false, nil +} + +func (bm *BitmapMem) doCheckItem(it *BitmapItem) bool { + if v, ok := bm.mu.items[unsafe2.String(it.key)]; ok && v == it { + return true + } + return false +} + +func (bm *BitmapMem) Evict(modifyTime int64) { + bm.mu.RLock() + if !bm.IsFull() { + bm.mu.RUnlock() + return + } + + bm.scanItems = bm.scanItems[:0] + for _, it := range bm.mu.items { + if it.modify.Load() < modifyTime { + bm.scanItems = append(bm.scanItems, it) + } + } + bm.mu.RUnlock() + + sort.Slice(bm.scanItems, func(i, j int) bool { + if bm.scanItems[i].modify.Load() <= bm.scanItems[j].modify.Load() { + return true + } else { + return false + } + }) + + evictMax := bitmapItemMax * 3 / 10 + evictCount := 0 + for _, it := range bm.scanItems { + if it.modify.Load() >= modifyTime { + continue + } + ok, _ := bm.deleteItem(it, false) + if ok { + evictCount++ + } + if evictCount >= evictMax { + break + } + } + log.Infof("bitmap evict itemNum:%d", evictCount) +} + +func (bm *BitmapMem) 
flushSlot(slotId uint32) { + if !bm.enable { + return + } + + bm.flushLock.Lock() + defer bm.flushLock.Unlock() + + bm.scanItems = bm.scanItems[:0] + bm.mu.RLock() + for _, it := range bm.mu.items { + if it.khash%utils.TotalSlot == slotId { + bm.scanItems = append(bm.scanItems, it) + } + } + bm.mu.RUnlock() + + var meta [MetaStringValueLen]byte + for _, it := range bm.scanItems { + bm.mu.Lock() + if it.Expired() { + bm.doDeleteItem(it, true) + bm.mu.Unlock() + continue + } + + it.mu.RLock() + if it.mu.rb.IsEmpty() { + it.mu.RUnlock() + bm.doDeleteItem(it, true) + bm.mu.Unlock() + continue + } + + val, err := it.mu.rb.MarshalBinary() + it.mu.RUnlock() + if err != nil { + log.Errorf("migrate flush bitmap err:%s key:%s", err, it.key) + bm.doDeleteItem(it, false) + bm.mu.Unlock() + continue + } + + if !bm.doCheckItem(it) { + bm.mu.Unlock() + continue + } + + ek, ekCloser := EncodeMetaKey(it.key, it.khash) + EncodeMetaDbValueForString(meta[:], it.expireMs.Load()) + vlen := MetaStringValueLen + len(val) + bm.baseDB.SetMetaDataByValues(ek, vlen, meta[:], val) + ekCloser() + bm.doDeleteItem(it, false) + bm.mu.Unlock() + } +} diff --git a/stored/engine/bitsdb/bitsdb/bitsdb.go b/stored/engine/bitsdb/bitsdb/bitsdb.go index e95e979..458f7b1 100644 --- a/stored/engine/bitsdb/bitsdb/bitsdb.go +++ b/stored/engine/bitsdb/bitsdb/bitsdb.go @@ -20,9 +20,7 @@ import ( "errors" "sync" "sync/atomic" - "time" - "github.com/panjf2000/ants/v2" "github.com/zuoyebang/bitalostored/butils" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/base" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/hash" @@ -47,21 +45,15 @@ type BitsDB struct { SetObj *set.SetObject ZsetObj *zset.ZSetObject - baseDb *base.BaseDB - isDelExpireRun atomic.Int32 - isCheckpoint atomic.Bool - ckpExpLock sync.Mutex - flushTask *FlushTask - isRaftRestore bool - statQPS atomic.Uint64 - - delExpireStat struct { - expireDbKeys atomic.Uint64 - metaDbKeys atomic.Uint64 - zsetDataDbKeys atomic.Uint64 - 
zsetIndexDbKeys atomic.Uint64 - prefixDeleteKeys atomic.Uint64 - } + baseDb *base.BaseDB + isDelExpireRun atomic.Int32 + isCheckpoint atomic.Bool + ckpExpLock sync.Mutex + flushTask *FlushTask + isRaftRestore bool + statQPS atomic.Uint64 + delExpireKeys atomic.Uint64 + delExpireZsetKeys atomic.Uint64 } func NewBitsDB(cfg *dbconfig.Config, meta *dbmeta.Meta) (*BitsDB, error) { @@ -77,7 +69,6 @@ func NewBitsDB(cfg *dbconfig.Config, meta *dbmeta.Meta) (*BitsDB, error) { flushTask: flushTask, isRaftRestore: cfg.EnableRaftlogRestore, } - cfg.IOWriteLoadThresholdFunc = bdb.CheckIOWriteLoadThreshold cfg.KvCheckExpireFunc = bdb.CheckKvExpire cfg.KvTimestampFunc = bdb.GetMetaValueTimestamp @@ -85,24 +76,14 @@ func NewBitsDB(cfg *dbconfig.Config, meta *dbmeta.Meta) (*BitsDB, error) { if err != nil { return nil, err } + bdb.baseDb = baseDb bdb.StringObj = rstring.NewStringObject(baseDb, cfg) bdb.ZsetObj = zset.NewZSetObject(baseDb, cfg) bdb.HashObj = hash.NewHashObject(baseDb, cfg) bdb.SetObj = set.NewSetObject(baseDb, cfg) bdb.ListObj = list.NewListObject(baseDb, cfg) - - delExpirePool, err := ants.NewPoolWithFunc(cfg.DelExpireDataPoolNum, - bdb.deleteExpireDataFunc, - ants.WithExpiryDuration(1*time.Hour), - ants.WithPreAlloc(true)) - if err != nil { - return nil, err - } - bdb.baseDb.SetDelExpireDataPool(delExpirePool) - bdb.flushTask.initTask(bdb) - bdb.baseDb.SetReady() return bdb, nil } @@ -156,6 +137,7 @@ func (bdb *BitsDB) ClearCache() { func (bdb *BitsDB) Close() { log.Infof("bitsDB Close start") + bdb.baseDb.FlushBitmap() bdb.Flush(btools.FlushTypeDbClose, 0) bdb.flushTask.Close() @@ -333,7 +315,7 @@ func (bdb *BitsDB) Flush(reason btools.FlushType, compactIndex uint64) { task.index = bdb.flushTask.meta.GetUpdateIndex() } - if flushCh, err := bdb.flushTask.AyncFlush(task); err != nil { + if flushCh, err := bdb.flushTask.AsyncFlush(task); err != nil { log.Errorf("async flush err:%s", err) } else { <-flushCh diff --git a/stored/engine/bitsdb/bitsdb/bitsdb_test.go 
b/stored/engine/bitsdb/bitsdb/bitsdb_test.go new file mode 100644 index 0000000..88f3e4e --- /dev/null +++ b/stored/engine/bitsdb/bitsdb/bitsdb_test.go @@ -0,0 +1,51 @@ +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package bitsdb + +import ( + "os" + "testing" + + "github.com/stretchr/testify/require" + "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/dbconfig" +) + +func TestCache_New(t *testing.T) { + dbPath := testCacheDBPath + os.RemoveAll(dbPath) + defer os.RemoveAll(dbPath) + cfg := dbconfig.NewConfigDefault() + cfg.CacheSize = 10 << 20 + cfg.CacheHashSize = 10000 + db := testOpenBitsDb(true, dbPath, cfg) + require.Equal(t, 1<<30, int(db.baseDb.MetaCache.MaxMem())) + require.Equal(t, 1024, db.baseDb.MetaCache.Shards()) + db.Close() + + cfg.CacheSize = 200 << 20 + cfg.CacheEliminateDuration = 10 + cfg.CacheShardNum = 3 + db = testOpenBitsDb(true, dbPath, cfg) + require.Equal(t, 1<<30, int(db.baseDb.MetaCache.MaxMem())) + require.Equal(t, 1024, db.baseDb.MetaCache.Shards()) + db.Close() + + cfg.CacheSize = 1<<30 + 1<<20 + cfg.CacheShardNum = 1100 + db = testOpenBitsDb(true, dbPath, cfg) + require.Equal(t, 1<<30+1<<20, int(db.baseDb.MetaCache.MaxMem())) + require.Equal(t, 2048, db.baseDb.MetaCache.Shards()) + db.Close() +} diff --git a/stored/engine/bitsdb/bitsdb/expire.go b/stored/engine/bitsdb/bitsdb/expire.go index 043bb96..e5d0a60 100644 --- 
a/stored/engine/bitsdb/bitsdb/expire.go +++ b/stored/engine/bitsdb/bitsdb/expire.go @@ -16,14 +16,10 @@ package bitsdb import ( "encoding/binary" - "errors" - "fmt" "runtime/debug" - "sync" "time" "github.com/zuoyebang/bitalostored/butils/hash" - "github.com/zuoyebang/bitalostored/butils/unsafe2" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/base" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitskv" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitskv/kv" @@ -33,16 +29,6 @@ import ( "github.com/zuoyebang/bitalostored/stored/internal/tclock" ) -type expirePoolArgs struct { - expireKey []byte - expireTime uint64 - version uint64 - kind uint8 - key []byte - dt btools.DataType - wg *sync.WaitGroup -} - func (bdb *BitsDB) CheckKvExpire(dbId int, key, value []byte) bool { switch dbId { case kv.DB_ID_META: @@ -57,50 +43,30 @@ func (bdb *BitsDB) CheckKvExpire(dbId int, key, value []byte) bool { } func (bdb *BitsDB) ScanDeleteExpireDb(jobId uint64) { - if !bdb.isDelExpireRun.CompareAndSwap(0, 1) { - log.Infof("[DELEXPIRE %d] ScanDelExpire is running, do nothing", jobId) + if !bdb.IsReady() || bdb.IsCheckpointHighPriority() { return } - logTag := fmt.Sprintf("[DELEXPIRE %d] scan delete expireDb", jobId) - log.Infof("%s start", logTag) - - start := time.Now() - delKeyNum := 0 - delKeyThreshold := base.DeleteMixKeyMaxNum - + if !bdb.isDelExpireRun.CompareAndSwap(0, 1) { + return + } defer func() { bdb.isDelExpireRun.Store(0) - if r := recover(); r != nil { - log.Errorf("%s panic err:%s stack:%s", logTag, r, string(debug.Stack())) + log.Errorf("[DELEXPIRE %d] panic err:%s stack:%s", jobId, r, string(debug.Stack())) return } - - log.Infof("%s end delKeys:%d expireDbKeys:%d metaDbKeys:%d prefixDeleteKeys:%d zsetDataDbKeys:%d zsetIndexDbKeys:%d cost:%.3fs", - logTag, - delKeyNum, - bdb.delExpireStat.expireDbKeys.Load(), - bdb.delExpireStat.metaDbKeys.Load(), - bdb.delExpireStat.prefixDeleteKeys.Load(), - 
bdb.delExpireStat.zsetDataDbKeys.Load(), - bdb.delExpireStat.zsetIndexDbKeys.Load(), - time.Now().Sub(start).Seconds()) }() - if !bdb.IsReady() || bdb.IsCheckpointHighPriority() { - return - } + start := time.Now() + delKeyNum := 0 + bdb.delExpireKeys.Store(0) + bdb.delExpireZsetKeys.Store(0) + log.Infof("[DELEXPIRE %d] scan delete start", jobId) bdb.CheckpointExpireLock(true) defer bdb.CheckpointExpireLock(false) - bdb.delExpireStat.expireDbKeys.Store(0) - bdb.delExpireStat.metaDbKeys.Store(0) - bdb.delExpireStat.prefixDeleteKeys.Store(0) - bdb.delExpireStat.zsetDataDbKeys.Store(0) - bdb.delExpireStat.zsetIndexDbKeys.Store(0) - var nowTimeBuf [8]byte nowTime := uint64(tclock.GetTimestampMilli()) binary.BigEndian.PutUint64(nowTimeBuf[:], nowTime+1) @@ -111,113 +77,62 @@ func (bdb *BitsDB) ScanDeleteExpireDb(jobId uint64) { it := bdb.baseDb.DB.NewIteratorExpire(iterOpts) defer it.Close() - wg := &sync.WaitGroup{} for it.First(); it.Valid(); it.Next() { if !bdb.IsReady() || bdb.IsCheckpointHighPriority() { break } - iterKey := it.Key() - timestamp, dt, keyVersion, keyKind, key, err := base.DecodeExpireKey(iterKey) + iterKey := it.RawKey() + timestamp, dataType, keyVersion, keyKind, key, err := base.DecodeExpireKey(iterKey) if err != nil { - log.Errorf("%s decode expireKey fail key:%s err:%s", logTag, string(iterKey), err) - continue - } - if dt == btools.STRING { + log.Errorf("[DELEXPIRE %d] decode expireKey fail key:%s err:%s", jobId, string(iterKey), err) continue } - if timestamp > nowTime || delKeyNum >= delKeyThreshold { + if timestamp > nowTime || delKeyNum >= base.DeleteMixKeyMaxNum { break } - delKeyNum++ - wg.Add(1) - ep := &expirePoolArgs{ - expireKey: iterKey, - expireTime: timestamp, - version: keyVersion, - kind: keyKind, - key: key, - dt: dt, - wg: wg, - } - _ = bdb.baseDb.DelExpirePool.Invoke(ep) - } - wg.Wait() -} - -func (bdb *BitsDB) deleteExpireDataFunc(args interface{}) { - ep, ok := args.(*expirePoolArgs) - if !ok { - return - } - - defer func() 
{ - ep.wg.Done() - if r := recover(); r != nil { - log.Errorf("deleteExpireDataFunc panic dt:%s err:%v stack:%s", ep.dt, r, string(debug.Stack())) - } - }() - - expireKey := ep.expireKey - expireTime := ep.expireTime - keyVersion := ep.version - keyKind := ep.kind - dataType := ep.dt - key := ep.key - keyHash := hash.Fnv32(key) - - var retErr error - defer func() { - if retErr != nil { - log.Errorf("deleteExpireDataFunc fail dt:%s key:%s err:%s", dataType, unsafe2.String(key), retErr) - } - }() - - if dataType == btools.ZSET { - var finished bool - var zetDelCnt uint64 - finished, zetDelCnt, retErr = bdb.ZsetObj.DeleteZsetKeyByExpire(keyVersion, keyKind, keyHash) - if retErr != nil { - return - } - - bdb.delExpireStat.zsetDataDbKeys.Add(zetDelCnt) - bdb.delExpireStat.zsetIndexDbKeys.Add(zetDelCnt) - - if !finished { - return - } - } else { + keyHash := hash.Fnv32(key) switch dataType { + case btools.HASH: + err = bdb.HashObj.DeleteDataKeyByExpire(keyVersion, keyHash) case btools.SET: - retErr = bdb.SetObj.DeleteDataKeyByExpire(keyVersion, keyHash) + err = bdb.SetObj.DeleteDataKeyByExpire(keyVersion, keyHash) case btools.LIST: - retErr = bdb.ListObj.DeleteDataKeyByExpire(keyVersion, keyHash) - case btools.HASH: - retErr = bdb.HashObj.DeleteDataKeyByExpire(keyVersion, keyHash) + err = bdb.ListObj.DeleteDataKeyByExpire(keyVersion, keyHash) + case btools.ZSET: + err = bdb.ZsetObj.DeleteZsetIndexKeyByExpire(keyVersion, keyHash) + if err == nil { + err = bdb.ZsetObj.DeleteDataKeyByExpire(keyVersion, keyHash) + } + case btools.ZSETOLD: + finished, zetDelCnt, err := bdb.ZsetObj.DeleteZsetOldKeyByExpire(keyVersion, keyKind, keyHash) + if err != nil { + continue + } + bdb.delExpireZsetKeys.Add(zetDelCnt) + if !finished { + continue + } default: - retErr = errors.New("not support dataType") + err = errn.ErrDataType } - if retErr != nil { - return + if err == nil { + err = bdb.baseDb.DeleteExpireKey(iterKey) + } + if err != nil { + log.Errorf("[DELEXPIRE %d] delete key fail 
dt:%s err:%s", jobId, dataType, err) + continue } - bdb.delExpireStat.prefixDeleteKeys.Add(1) - } - - isDelMetaKey, err := bdb.baseDb.DeleteMetaKeyByExpire(dataType, key, keyHash, keyVersion, expireTime) - if err != nil && !errors.Is(err, errn.ErrWrongType) { - log.Errorf("delete metaKey fail dt:%s key:%s err:%s", dataType, unsafe2.String(key), err) - } - - if err = bdb.baseDb.DeleteExpireKey(expireKey); err != nil { - log.Errorf("delete expireKey fail dt:%s key:%s err:%s", dataType, unsafe2.String(key), err) + bdb.delExpireKeys.Add(1) + delKeyNum++ } - bdb.delExpireStat.expireDbKeys.Add(1) - if isDelMetaKey { - bdb.delExpireStat.metaDbKeys.Add(1) - } + log.Infof("[DELEXPIRE %d] scan delete end delKeys:%d expireKeys:%d zsetKeys:%d cost:%.3fs", + jobId, delKeyNum, + bdb.delExpireKeys.Load(), + bdb.delExpireZsetKeys.Load(), + time.Now().Sub(start).Seconds()) } diff --git a/stored/engine/bitsdb/bitsdb/expire_test.go b/stored/engine/bitsdb/bitsdb/expire_test.go index e2a8d97..f34e114 100644 --- a/stored/engine/bitsdb/bitsdb/expire_test.go +++ b/stored/engine/bitsdb/bitsdb/expire_test.go @@ -23,15 +23,29 @@ import ( "github.com/zuoyebang/bitalostored/butils/hash" "github.com/zuoyebang/bitalostored/butils/numeric" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/base" - "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/log" "github.com/zuoyebang/bitalostored/stored/internal/tclock" ) -func TestDB_ScanExpireData(t *testing.T) { +func testNewLogger() { + log.NewLogger(&log.Options{ + LogPath: testLogPath + "/log", + }) +} + +func Cap4Size(vSize uint32) uint32 { + if vSize&3 != 0 { + return (vSize>>2 + 1) << 2 + } + return vSize +} + +func TestExpireScanDeleteExpireDb(t *testing.T) { cores := testTwoBitsCores() defer closeCores(cores) - for _, cr := range cores { + for dbi, cr := range cores { + fmt.Printf("run db=%d\n", dbi) bdb := cr.db jobId := uint64(0) @@ -63,7 +77,16 @@ func 
TestDB_ScanExpireData(t *testing.T) { zskeyStale := []byte("zset_stale") zskeyStaleField := []byte("zset_stale_field") zskeyStaleHash := hash.Fnv32(zskeyStale) - if n, err := bdb.ZsetObj.ZAdd(zskeyStale, zskeyStaleHash, btools.ScorePair{Score: 10, Member: zskeyStaleField}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(zskeyStale, zskeyStaleHash, false, spair(10, zskeyStaleField)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + + zsoldkeyStale := []byte("zsetold_stale") + zsoldkeyStaleField := []byte("zsetold_stale_field") + zsoldkeyStaleHash := hash.Fnv32(zsoldkeyStale) + if n, err := bdb.ZsetObj.ZAdd(zsoldkeyStale, zsoldkeyStaleHash, true, spair(10, zsoldkeyStaleField)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) @@ -139,17 +162,31 @@ func TestDB_ScanExpireData(t *testing.T) { zskeyHash := hash.Fnv32(zskey) for j := 0; j < 100; j++ { zskeyField := []byte(fmt.Sprintf("zset_field_%d_%d", i, j)) - if n, err := bdb.ZsetObj.ZAdd(zskey, zskeyHash, btools.ScorePair{Score: float64(j), Member: zskeyField}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(zskey, zskeyHash, false, spair(float64(j), zskeyField)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) } - if j == 0 { setKeyVerMap(zskey) } } opKey(i, zskey, zskeyHash) + + zsoldkey := []byte(fmt.Sprintf("zsetold_key_%d", i)) + zsoldkeyHash := hash.Fnv32(zsoldkey) + for j := 0; j < 100; j++ { + zsoldkeyField := []byte(fmt.Sprintf("zsetold_field_%d_%d", i, j)) + if n, err := bdb.ZsetObj.ZAdd(zsoldkey, zsoldkeyHash, true, spair(float64(j), zsoldkeyField)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if j == 0 { + setKeyVerMap(zsoldkey) + } + } + opKey(i, zsoldkey, zsoldkeyHash) } for i := 0; i < 100; i++ { @@ -182,6 +219,17 @@ func TestDB_ScanExpireData(t *testing.T) { } else if n != 100 { t.Fatalf("zcard exp=100, key=%s, n=%d", string(zskey), n) } + + zsoldkey := []byte(fmt.Sprintf("zsetold_key_%d", i)) + if n, err := bdb.ZsetObj.ZCard(zsoldkey, 
hash.Fnv32(zsoldkey)); err != nil { + t.Fatal(err) + } else if i >= 20 && i < 40 { + if n != 0 { + t.Fatalf("zsoldkey zcard exp=0, key=%s, n=%d", string(zsoldkey), n) + } + } else if n != 100 { + t.Fatalf("zsoldkey zcard exp=100, key=%s, n=%d", string(zsoldkey), n) + } } time.Sleep(3 * time.Second) @@ -207,20 +255,16 @@ func TestDB_ScanExpireData(t *testing.T) { } checkDataDbNum(100*100 + 1) - checkIndexDbNum(100*100 + 1) + checkIndexDbNum(2*100*100 + 2) bdb.ScanDeleteExpireDb(jobId) - require.Equal(t, uint64(80), bdb.delExpireStat.metaDbKeys.Load()) - require.Equal(t, uint64(80), bdb.delExpireStat.expireDbKeys.Load()) - require.Equal(t, uint64(4000), bdb.delExpireStat.zsetDataDbKeys.Load()) - require.Equal(t, uint64(4000), bdb.delExpireStat.zsetIndexDbKeys.Load()) - - checkDataDbNum(100*100 + 1) - checkIndexDbNum(100*60 + 1) + require.Equal(t, uint64(120), bdb.delExpireKeys.Load()) + require.Equal(t, uint64(4000), bdb.delExpireZsetKeys.Load()) bdb.FlushAllDB() checkDataDbNum(100*60 + 1) + checkIndexDbNum(2*100*60 + 2) var keepNum, delNum int for i := 0; i < 100; i++ { @@ -259,6 +303,25 @@ func TestDB_ScanExpireData(t *testing.T) { require.Equal(t, 40, delNum) require.Equal(t, 60, keepNum) + delNum = 0 + keepNum = 0 + for i := 0; i < 100; i++ { + zskey := []byte(fmt.Sprintf("zsetold_key_%d", i)) + zsetMkv, err := bdb.ZsetObj.GetMetaDataCheckAlive(zskey, hash.Fnv32(zskey)) + if err != nil { + t.Fatal(err) + } + if zsetMkv != nil { + if i >= 40 { + keepNum++ + } + } else { + delNum++ + } + } + require.Equal(t, 40, delNum) + require.Equal(t, 60, keepNum) + delNum = 0 keepNum = 0 for i := 0; i < 100; i++ { @@ -304,25 +367,24 @@ func TestDB_ScanExpireData(t *testing.T) { zskeyHash := hash.Fnv32(zskey) count, err := bdb.ZsetObj.ZCard(zskey, zskeyHash) require.NoError(t, err) + var dataKey [base.DataKeyZsetLength]byte if i < 40 { require.Equal(t, count, int64(0)) for j := 0; j < 100; j++ { zskeyField := []byte(fmt.Sprintf("zset_field_%d_%d", i, j)) - var dataKey 
[base.DataKeyZsetLength]byte - base.EncodeZsetDataKey(dataKey[:], zskeyVer, zskeyHash, zskeyField) - _, dataValExist, _, err := bdb.ZsetObj.GetDataValue(dataKey[:]) + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], zskeyVer, zskeyHash, zskeyField, false) + _, dataValExist, _, err := bdb.ZsetObj.GetDataValue(dataKey[:dataKeyLen]) require.NoError(t, err) if dataValExist { - t.Fatal("zset notexist key dataKey found", string(zskey), zskeyField) + t.Fatal("zset dataKey expire found", string(zskey), string(zskeyField)) } } } else { require.Equal(t, count, int64(100)) for j := 0; j < 100; j++ { zskeyField := []byte(fmt.Sprintf("zset_field_%d_%d", i, j)) - var dataKey [base.DataKeyZsetLength]byte - base.EncodeZsetDataKey(dataKey[:], zskeyVer, zskeyHash, zskeyField) - dataVal, dataValExist, dataValCloser, err := bdb.ZsetObj.GetDataValue(dataKey[:]) + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], zskeyVer, zskeyHash, zskeyField, false) + dataVal, dataValExist, dataValCloser, err := bdb.ZsetObj.GetDataValue(dataKey[:dataKeyLen]) require.NoError(t, err) if !dataValExist { t.Fatal("zset dataKey not found", string(zskey), string(zskeyField)) @@ -342,5 +404,50 @@ func TestDB_ScanExpireData(t *testing.T) { } else if n != 1 { t.Fatal("zskeyStale scard err", n) } + + for i := 0; i < 100; i++ { + zsoldkey := []byte(fmt.Sprintf("zsetold_key_%d", i)) + zsoldkeyVer := keyVerMap[string(zsoldkey)] + zsoldkeyHash := hash.Fnv32(zsoldkey) + count, err := bdb.ZsetObj.ZCard(zsoldkey, zsoldkeyHash) + require.NoError(t, err) + if i < 40 { + require.Equal(t, count, int64(0)) + for j := 0; j < 100; j++ { + zsoldkeyField := []byte(fmt.Sprintf("zsetold_field_%d_%d", i, j)) + var dataKey [base.DataKeyZsetLength]byte + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], zsoldkeyVer, zsoldkeyHash, zsoldkeyField, true) + _, dataValExist, _, err := bdb.ZsetObj.GetDataValue(dataKey[:dataKeyLen]) + require.NoError(t, err) + if dataValExist { + t.Fatal("zsetold notexist key dataKey found", 
string(zsoldkey), zsoldkeyField) + } + } + } else { + require.Equal(t, count, int64(100)) + for j := 0; j < 100; j++ { + zsoldkeyField := []byte(fmt.Sprintf("zsetold_field_%d_%d", i, j)) + var dataKey [base.DataKeyZsetLength]byte + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], zsoldkeyVer, zsoldkeyHash, zsoldkeyField, true) + dataVal, dataValExist, dataValCloser, err := bdb.ZsetObj.GetDataValue(dataKey[:dataKeyLen]) + require.NoError(t, err) + if !dataValExist { + t.Fatal("zsetold dataKey not found", string(zsoldkey), string(zsoldkeyField)) + } + dataScore := numeric.ByteSortToFloat64(dataVal) + if dataScore != float64(j) { + t.Fatal("zsetold dataKey score not eq", string(zsoldkey), string(zsoldkeyField), dataScore, j) + } + if dataValCloser != nil { + dataValCloser() + } + } + } + } + if n, err := bdb.ZsetObj.ZCard(zsoldkeyStale, zsoldkeyStaleHash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal("zsoldkeyStale scard err", n) + } } } diff --git a/stored/engine/bitsdb/bitsdb/flush.go b/stored/engine/bitsdb/bitsdb/flush.go index 1b4e947..00094f5 100644 --- a/stored/engine/bitsdb/bitsdb/flush.go +++ b/stored/engine/bitsdb/bitsdb/flush.go @@ -151,7 +151,7 @@ func (task *FlushTask) SyncFlush(data flushData) { task.flushEnd(data.index) } -func (task *FlushTask) AyncFlush(data flushData) (<-chan struct{}, error) { +func (task *FlushTask) AsyncFlush(data flushData) (<-chan struct{}, error) { if task.isClosed() { return nil, errors.New("task closed") } diff --git a/stored/engine/bitsdb/bitsdb/keys.go b/stored/engine/bitsdb/bitsdb/keys.go index c525964..d46855d 100644 --- a/stored/engine/bitsdb/bitsdb/keys.go +++ b/stored/engine/bitsdb/bitsdb/keys.go @@ -85,7 +85,7 @@ func (bdb *BitsDB) Scan( return nil, nil, err } - if dt != btools.NoneType && mkv.GetDataType() != dt { + if mkv.IsWrongType(dt) { continue } diff --git a/stored/engine/bitsdb/bitsdb/keys_test.go b/stored/engine/bitsdb/bitsdb/keys_test.go index 04e9331..544663f 100644 --- 
a/stored/engine/bitsdb/bitsdb/keys_test.go +++ b/stored/engine/bitsdb/bitsdb/keys_test.go @@ -303,7 +303,7 @@ func TestKeys_Expire_Persist_TTL_Type(t *testing.T) { key = []byte("zset_persist_test") khash = hash.Fnv32(key) - if n, err := bdb.ZsetObj.ZAdd(key, khash, btools.ScorePair{1, []byte("a")}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(key, khash, false, spair(1, []byte("a"))); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) @@ -319,8 +319,7 @@ func TestKeys_Expire_Persist_TTL_Type(t *testing.T) { func TestKeys_Expire_Dels(t *testing.T) { for _, isFlush := range []bool{false, true} { - func() { - fmt.Println("run isFlush=", isFlush) + t.Run(fmt.Sprintf("isFlush=%v", isFlush), func(t *testing.T) { cores := testTwoBitsCores() defer closeCores(cores) @@ -388,7 +387,7 @@ func TestKeys_Expire_Dels(t *testing.T) { } else if n != 1 { t.Fatal(n) } - if n, err := bdb.ZsetObj.ZAdd(zkey, zkhash, btools.ScorePair{1, zfield}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(zkey, zkhash, false, spair(1, zfield)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) @@ -487,10 +486,9 @@ func TestKeys_Expire_Dels(t *testing.T) { } else { if !bytes.Equal(v, []byte(fmt.Sprintf("string_del_value_%d", i))) { t.Fatal("string val get fail", v) - // } else if vcloser == nil { - // t.Fatal("vcloser return is nil") } } + if vcloser != nil { vcloser() } @@ -555,7 +553,7 @@ func TestKeys_Expire_Dels(t *testing.T) { } } } - }() + }) } } @@ -595,7 +593,7 @@ func TestKeys_FlushCheckExpire(t *testing.T) { zkey := []byte("TestKeys_CheckExpire_zset_key") zkhash := hash.Fnv32(zkey) zfield := []byte("TestKeys_CheckExpire_zset_field") - if n, err := bdb.ZsetObj.ZAdd(zkey, zkhash, btools.ScorePair{Score: 1, Member: zfield}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(zkey, zkhash, false, spair(1, zfield)); err != nil { t.Fatal(err) } else if n != 1 { t.Fatal(n) @@ -743,8 +741,8 @@ func TestKeys_WrongType(t *testing.T) { } } if dt != btools.ZSET { - args := btools.ScorePair{Score: 1, 
Member: zfield} - if _, err := bdb.ZsetObj.ZAdd(key, khash, args); err != errn.ErrWrongType { + args := spair(1, zfield) + if _, err := bdb.ZsetObj.ZAdd(key, khash, false, args); err != errn.ErrWrongType { t.Fatal("ZAdd ErrWrongType check fail", err) } } @@ -820,7 +818,7 @@ func TestKeys_WrongType(t *testing.T) { } time.Sleep(time.Second) - if n, err := bdb.ZsetObj.ZAdd(key, khash, btools.ScorePair{Score: 1, Member: zfield}); err != nil { + if n, err := bdb.ZsetObj.ZAdd(key, khash, false, spair(1, zfield)); err != nil { t.Fatal("Zadd err", err) } else if n != 1 { t.Fatal("Zadd return n err", n) @@ -859,8 +857,8 @@ func TestKeys_ScanBySlotId(t *testing.T) { for _, cr := range cores { bdb := cr.db - var keys []string + var isZetOld bool slotId := uint32(1) count := 10000 sfield := []byte("TestKeys_set_field") @@ -901,7 +899,12 @@ func TestKeys_ScanBySlotId(t *testing.T) { t.Fatal("SAdd return n err", n) } case 2: - if n, err := bdb.ZsetObj.ZAdd(key, khash, btools.ScorePair{1, zfield}); err != nil { + if i%2 == 0 { + isZetOld = true + } else { + isZetOld = false + } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isZetOld, spair(1, zfield)); err != nil { t.Fatal("Zadd err", err) } else if n != 1 { t.Fatal("Zadd return n err", n) @@ -943,7 +946,11 @@ func TestKeys_ScanBySlotId(t *testing.T) { case 1: expDt = btools.SET case 2: - expDt = btools.ZSET + if i%2 == 0 { + expDt = btools.ZSETOLD + } else { + expDt = btools.ZSET + } case 3: expDt = btools.HASH case 4: diff --git a/stored/engine/bitsdb/bitsdb/locker/scope_locker.go b/stored/engine/bitsdb/bitsdb/locker/scope_locker.go index 5660606..80648bc 100644 --- a/stored/engine/bitsdb/bitsdb/locker/scope_locker.go +++ b/stored/engine/bitsdb/bitsdb/locker/scope_locker.go @@ -16,46 +16,69 @@ package locker import ( "sync" + + "github.com/zuoyebang/bitalostored/stored/internal/resp" +) + +const ( + lockerPoolSizeNormal uint32 = 4 << 10 + lockerPoolSizeLarge uint32 = 16 << 10 ) type locker struct { sync.RWMutex } -func (c *locker) 
getWLock() func() { - c.Lock() +func (l *locker) getWLock() func() { + l.Lock() return func() { - c.Unlock() + l.Unlock() } } -func (c *locker) getRLock() func() { - c.RLock() +func (l *locker) getRLock() func() { + l.RLock() return func() { - c.RUnlock() + l.RUnlock() + } +} + +func (l *locker) lockKey(cmd string) func() { + if resp.IsWriteCmd(cmd) { + return l.getWLock() + } else { + return l.getRLock() } } type ScopeLocker struct { - cap uint32 + size uint32 lockers []*locker } -func NewScopeLocker(num uint32) *ScopeLocker { - lockers := make([]*locker, 0, num) - for i := 0; i < int(num); i++ { +func NewScopeLocker(large bool) *ScopeLocker { + size := lockerPoolSizeNormal + if large { + size = lockerPoolSizeLarge + } + lockers := make([]*locker, 0, size) + for i := uint32(0); i < size; i++ { lockers = append(lockers, &locker{}) } return &ScopeLocker{ - cap: num, + size: size - 1, lockers: lockers, } } func (sl *ScopeLocker) LockWriteKey(khash uint32) func() { - return sl.lockers[khash%sl.cap].getWLock() + return sl.lockers[khash&sl.size].getWLock() } func (sl *ScopeLocker) LockReadKey(khash uint32) func() { - return sl.lockers[khash%sl.cap].getRLock() + return sl.lockers[khash&sl.size].getRLock() +} + +func (sl *ScopeLocker) LockKey(khash uint32, cmd string) func() { + return sl.lockers[khash&sl.size].lockKey(cmd) } diff --git a/stored/engine/bitsdb/bitsdb/locker/scope_locker_test.go b/stored/engine/bitsdb/bitsdb/locker/scope_locker_test.go index 4073184..1a3110a 100644 --- a/stored/engine/bitsdb/bitsdb/locker/scope_locker_test.go +++ b/stored/engine/bitsdb/bitsdb/locker/scope_locker_test.go @@ -18,13 +18,11 @@ import ( "testing" "time" - "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" - "github.com/zuoyebang/bitalostored/butils/hash" ) func TestScopeLocker(t *testing.T) { - l := NewScopeLocker(btools.KeyLockerPoolCap) + l := NewScopeLocker(true) key := []byte("a") khash := hash.Fnv32(key) unlockFunc := l.LockWriteKey(khash) diff --git 
a/stored/engine/bitsdb/bitsdb/rstring/string_bitmap.go b/stored/engine/bitsdb/bitsdb/rstring/string_bitmap.go index 0e5ab66..840a600 100644 --- a/stored/engine/bitsdb/bitsdb/rstring/string_bitmap.go +++ b/stored/engine/bitsdb/bitsdb/rstring/string_bitmap.go @@ -21,77 +21,85 @@ import ( "github.com/RoaringBitmap/roaring/roaring64" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/base" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" - "github.com/zuoyebang/bitalostored/stored/internal/resp" + "github.com/zuoyebang/bitalostored/stored/internal/errn" ) func (so *StringObject) SetBit(key []byte, khash uint32, offset int, on int) (int64, error) { if offset < 0 { - return 0, resp.ErrBitOffset + return 0, errn.ErrBitOffset } if err := btools.CheckKeySize(key); err != nil { return 0, err } if (on & ^1) != 0 { - return 0, resp.ErrBitValue + return 0, errn.ErrBitValue } + so.BaseDb.BitmapMem.SetEnable() + unlockKey := so.LockKey(khash) defer unlockKey() - ek, ekCloser := base.EncodeMetaKey(key, khash) - value, timestamp, valCloser, err := so.getValueCheckAliveForString(ek) - defer func() { - ekCloser() - if valCloser != nil { - valCloser() - } - }() - if err != nil { - return 0, err - } - - x := uint64(offset) - var ( - isExist bool - changed bool - ret int64 - ) + existFunc := func(rb *roaring64.Bitmap) (int64, bool) { + x := uint64(offset) + var ( + isExist bool + changed bool + ret int64 + ) - rb := roaring64.NewBitmap() - if value != nil { - if err = rb.UnmarshalBinary(value); err != nil { - return 0, resp.ErrBitUnmarshal - } isExist = rb.Contains(x) - } - - if isExist { - ret = 1 - } - - if on == 1 { if isExist { - return ret, nil + ret = 1 } - rb.Add(x) - changed = true - } else if on == 0 { - if isExist { - rb.Remove(x) + if on == 1 { + if isExist { + return ret, changed + } + rb.Add(x) changed = true + } else if on == 0 { + if isExist { + rb.Remove(x) + changed = true + } + } + return ret, changed + } + + rb, ok, closer := 
so.getBitmapFromMem(key) + defer func() { + if closer != nil { + closer() } + }() + if ok { + ret, _ := existFunc(rb) + return ret, nil + } + + rb, timestamp, err := so.getBitmapFromDB(key, khash) + if err != nil { + return 0, err } + so.BaseDb.BitmapMem.AddItem(key, khash, func(k []byte) *base.BitmapItem { + return base.NewBitmapItem(k, khash, rb, timestamp) + }) + + ret, changed := existFunc(rb) if changed { + ek, ekCloser := base.EncodeMetaKey(key, khash) + defer ekCloser() if rb.GetCardinality() == 0 { if err = so.BaseDb.DeleteMetaKey(ek); err != nil { return 0, err } } else { - value, err = rb.MarshalBinary() + value, err := rb.MarshalBinary() if err != nil { - return 0, resp.ErrBitMarshal + return 0, errn.ErrBitMarshal } if err = so.setValueForString(ek, value, timestamp); err != nil { @@ -99,7 +107,6 @@ func (so *StringObject) SetBit(key []byte, khash uint32, offset int, on int) (in } } } - return ret, nil } @@ -108,27 +115,19 @@ func (so *StringObject) GetBit(key []byte, khash uint32, offset int) (int64, err return 0, err } - ek, ekCloser := base.EncodeMetaKey(key, khash) - value, _, valCloser, err := so.getValueCheckAliveForString(ek) + rb, closer, err := so.getBitmapReader(key, khash) defer func() { - ekCloser() - if valCloser != nil { - valCloser() + if closer != nil { + closer() } }() if err != nil { return 0, err } - - if value == nil { + if rb == nil { return 0, nil } - rb := roaring64.NewBitmap() - if err = rb.UnmarshalBinary(value); err != nil { - return 0, resp.ErrBitUnmarshal - } - if rb.Contains(uint64(offset)) { return 1, nil } else { @@ -141,27 +140,19 @@ func (so *StringObject) BitCount(key []byte, khash uint32, start int, end int) ( return 0, err } - ek, ekCloser := base.EncodeMetaKey(key, khash) - value, _, valCloser, err := so.getValueCheckAliveForString(ek) + rb, closer, err := so.getBitmapReader(key, khash) defer func() { - ekCloser() - if valCloser != nil { - valCloser() + if closer != nil { + closer() } }() if err != nil { return 0, err 
} - - if value == nil { + if rb == nil { return 0, nil } - rb := roaring64.NewBitmap() - if err = rb.UnmarshalBinary(value); err != nil { - return 0, resp.ErrBitUnmarshal - } - if start == 0 && end == -1 { return int64(rb.GetCardinality()), nil } @@ -197,30 +188,22 @@ func (so *StringObject) BitPos(key []byte, khash uint32, on int, start int, end return 0, fmt.Errorf("bit must be 0 or 1, not %d", on) } - ek, ekCloser := base.EncodeMetaKey(key, khash) - value, _, valCloser, err := so.getValueCheckAliveForString(ek) + rb, closer, err := so.getBitmapReader(key, khash) defer func() { - ekCloser() - if valCloser != nil { - valCloser() + if closer != nil { + closer() } }() if err != nil { return -1, err } - - if value == nil { + if rb == nil { if on == 1 { return -1, nil } return 0, nil } - rb := roaring64.NewBitmap() - if err = rb.UnmarshalBinary(value); err != nil { - return -1, resp.ErrBitUnmarshal - } - begin, stop, hasRange := getBitmapRange(start, end) if !hasRange { return -1, nil @@ -254,6 +237,65 @@ func (so *StringObject) BitPos(key []byte, khash uint32, on int, start int, end } } +func (so *StringObject) getBitmapFromDB(key []byte, khash uint32) (*roaring64.Bitmap, uint64, error) { + ek, ekCloser := base.EncodeMetaKey(key, khash) + value, timestamp, valCloser, err := so.getValueCheckAliveForString(ek) + defer func() { + ekCloser() + if valCloser != nil { + valCloser() + } + }() + if err != nil { + return nil, timestamp, err + } + if value == nil { + return roaring64.NewBitmap(), timestamp, nil + } + + rb := roaring64.NewBitmap() + if err = rb.UnmarshalBinary(value); err != nil { + return nil, timestamp, errn.ErrBitUnmarshal + } + return rb, timestamp, nil +} + +func (so *StringObject) getBitmapReader(key []byte, khash uint32) (*roaring64.Bitmap, func(), error) { + bi, ok := so.BaseDb.BitmapMem.Get(key) + if ok { + rb, closer := bi.GetReader() + return rb, closer, nil + } + + ek, ekCloser := base.EncodeMetaKey(key, khash) + value, _, valCloser, err := 
so.getValueCheckAliveForString(ek) + ekCloser() + if err != nil || value == nil { + if valCloser != nil { + valCloser() + } + return nil, nil, err + } + + rb := roaring64.NewBitmap() + if _, err = rb.FromUnsafeBytes(value); err != nil { + if valCloser != nil { + valCloser() + } + return nil, nil, errn.ErrBitUnmarshal + } + return rb, valCloser, nil +} + +func (so *StringObject) getBitmapFromMem(key []byte) (*roaring64.Bitmap, bool, func()) { + bi, ok := so.BaseDb.BitmapMem.Get(key) + if ok { + rb, closer := bi.GetWriter() + return rb, true, closer + } + return nil, false, nil +} + func getBitmapRange(start, end int) (uint64, uint64, bool) { if start < 0 { start = math.MaxInt64 + start + 1 @@ -273,3 +315,22 @@ func getBitmapRange(start, end int) (uint64, uint64, bool) { return uint64(start), uint64(end), start <= end } + +func (so *StringObject) bitmapStrlen(key []byte) (int64, bool, error) { + bi, ok := so.BaseDb.BitmapMem.Get(key) + if !ok { + return 0, false, nil + } + + bitmapExist := true + rb, closer := bi.GetReader() + defer func() { + if closer != nil { + closer() + } + }() + if rb == nil { + return 0, bitmapExist, nil + } + return int64(rb.GetSizeInBytes()), bitmapExist, nil +} diff --git a/stored/engine/bitsdb/bitsdb/rstring/string_read.go b/stored/engine/bitsdb/bitsdb/rstring/string_read.go index 31746bc..137cf59 100644 --- a/stored/engine/bitsdb/bitsdb/rstring/string_read.go +++ b/stored/engine/bitsdb/bitsdb/rstring/string_read.go @@ -141,6 +141,11 @@ func (so *StringObject) StrLen(key []byte, khash uint32) (int64, error) { return 0, err } + t, ok, err := so.bitmapStrlen(key) + if ok { + return t, err + } + ek, ekCloser := base.EncodeMetaKey(key, khash) value, _, valueCloser, err := so.getValueCheckAliveForString(ek) defer func() { diff --git a/stored/engine/bitsdb/bitsdb/string_test.go b/stored/engine/bitsdb/bitsdb/string_test.go index 734af47..8f603d5 100644 --- a/stored/engine/bitsdb/bitsdb/string_test.go +++ 
b/stored/engine/bitsdb/bitsdb/string_test.go @@ -44,6 +44,7 @@ type BitalosDBMinCore struct { const testDBPath = "./test_cores" const testCacheDBPath = "./test_cache_cores" +const testLogPath = "./test_log" func testGetDefaultConfig() *dbconfig.Config { cfg := dbconfig.NewConfigDefault() @@ -74,6 +75,7 @@ func closeCores(cores []*BitalosDBMinCore) { for _, c := range cores { c.Close() } + //os.RemoveAll(testLogPath) } func testNewBitsDB() *BitsDB { @@ -150,12 +152,8 @@ func testCheckKeyValue(t *testing.T, b *BitsDB, key []byte, khash uint32, value if v != nil { t.Fatal("find not exist key value is not nil", string(key)) } - } else { - if !bytes.Equal(v, value) { - t.Fatal("v not eq", string(key), v, value) - // } else if closer == nil { - // t.Fatal("closer return nil", string(key)) - } + } else if !bytes.Equal(v, value) { + t.Fatal("v not eq", string(key), v, value) } if closer != nil { closer() @@ -414,8 +412,6 @@ func TestKVCmd(t *testing.T) { t.Fatal(err) } else if string(v) != "18.2" { t.Fatal(string(v)) - // } else if closer == nil { - // t.Fatal("key8 return closer is nil") } else { if closer != nil { closer() @@ -482,7 +478,7 @@ func TestKVSetEX(t *testing.T) { } } -func TestMSetAndDel(t *testing.T) { +func TestKVMSetAndDel(t *testing.T) { cores := testTwoBitsCores() defer closeCores(cores) @@ -568,36 +564,6 @@ func TestMSetAndDel(t *testing.T) { } } -func TestKVSetBitLen(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("TestKVSetBitLen") - khash := hash.Fnv32(key) - n, err := bdb.StringObj.SetBit(key, khash, 123456, 1) - require.NoError(t, err) - require.Equal(t, int64(0), n) - n, err = bdb.StringObj.GetBit(key, khash, 123456) - require.NoError(t, err) - require.Equal(t, int64(1), n) - - n, err = bdb.StringObj.SetBit(key, khash, 123457, 1) - require.NoError(t, err) - require.Equal(t, int64(0), n) - - l, err := bdb.StringObj.StrLen(key, khash) - require.NoError(t, err) - 
require.Equal(t, int64(32), l) - - n, err = bdb.StringObj.GetBit(key, khash, 123456) - require.NoError(t, err) - require.Equal(t, int64(1), n) - } -} - func TestKVSetBitGetBit(t *testing.T) { cores := testTwoBitsCores() defer closeCores(cores) @@ -661,31 +627,6 @@ func TestKVSetBit(t *testing.T) { } } -func TestKVSetBitDelete(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("TestKVSetBitDelete") - khash := hash.Fnv32(key) - n, err := bdb.StringObj.SetBit(key, khash, 1, 1) - require.NoError(t, err) - require.Equal(t, int64(0), n) - ex, err := bdb.StringObj.Exists(key, khash) - require.NoError(t, err) - require.Equal(t, int64(1), ex) - - n, err = bdb.StringObj.SetBit(key, khash, 1, 0) - require.NoError(t, err) - require.Equal(t, int64(1), n) - ex, err = bdb.StringObj.Exists(key, khash) - require.NoError(t, err) - require.Equal(t, int64(0), ex) - } -} - func TestKVBitCount(t *testing.T) { cores := testTwoBitsCores() defer closeCores(cores) @@ -888,3 +829,39 @@ func TestKVExpire(t *testing.T) { checkKey(key3, []byte(nil), false, "Del key3 2") } } + +func TestKVMissCache(t *testing.T) { + dbPath := testCacheDBPath + os.RemoveAll(dbPath) + cfg := testCacheDefaultConfig() + cfg.EnableMissCache = true + db := testOpenBitsDb(true, dbPath, cfg) + defer func() { + db.Close() + os.RemoveAll(dbPath) + config.GlobalConfig.Plugin.OpenRaft = true + }() + + key1 := []byte("testdb_kv_set_1") + khash1 := hash.Fnv32(key1) + require.NoError(t, db.StringObj.Set(key1, khash1, key1)) + testCheckKeyValue(t, db, key1, khash1, key1) + + key2 := []byte("testdb_kv_set_2") + khash2 := hash.Fnv32(key2) + testCheckKeyValue(t, db, key2, khash2, nil) + key2encode, key2encodeCloser := base.EncodeMetaKey(key2, khash2) + cv, ccloser, cexist := db.baseDb.MetaCache.Get(key2encode) + key2encodeCloser() + require.Equal(t, true, cexist) + require.Equal(t, byte(0), cv[0]) + ccloser() + + require.NoError(t, 
db.StringObj.Set(key2, khash2, key2)) + testCheckKeyValue(t, db, key2, khash2, key2) + cv, ccloser, cexist = db.baseDb.MetaCache.Get(key2encode) + key2encodeCloser() + require.Equal(t, true, cexist) + require.Equal(t, byte(btools.STRING), cv[0]) + ccloser() +} diff --git a/stored/engine/bitsdb/bitsdb/zset/zset.go b/stored/engine/bitsdb/bitsdb/zset/zset.go index 43932b5..c64e4b7 100644 --- a/stored/engine/bitsdb/bitsdb/zset/zset.go +++ b/stored/engine/bitsdb/bitsdb/zset/zset.go @@ -63,9 +63,9 @@ func (zo *ZSetObject) getZsetValue(key []byte, khash uint32, field []byte) ([]by defer base.PutMkvToPool(mkv) var ekf [base.DataKeyZsetLength]byte - base.EncodeZsetDataKey(ekf[:], mkv.Version(), khash, field) + ekfLen := base.EncodeZsetDataKey(ekf[:], mkv.Version(), khash, field, mkv.IsZsetOld()) - return zo.GetDataValue(ekf[:]) + return zo.GetDataValue(ekf[:ekfLen]) } func (zo *ZSetObject) zrank(key []byte, khash uint32, member []byte, reverse bool) (int64, error) { @@ -87,8 +87,9 @@ func (zo *ZSetObject) zrank(key []byte, khash uint32, member []byte, reverse boo var ekf [base.DataKeyZsetLength]byte keyVersion := mkv.Version() keyKind := mkv.Kind() - base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member) - _, fexist, fCloser, err := zo.GetDataValue(ekf[:]) + isZsetOld := mkv.IsZsetOld() + ekfLen := base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member, isZsetOld) + _, fexist, fCloser, err := zo.GetDataValue(ekf[:ekfLen]) defer func() { if fCloser != nil { fCloser() diff --git a/stored/engine/bitsdb/bitsdb/zset/zset_write.go b/stored/engine/bitsdb/bitsdb/zset/zset_write.go index c22f334..67ba047 100644 --- a/stored/engine/bitsdb/bitsdb/zset/zset_write.go +++ b/stored/engine/bitsdb/bitsdb/zset/zset_write.go @@ -25,7 +25,13 @@ import ( "github.com/zuoyebang/bitalostored/stored/internal/errn" ) -func (zo *ZSetObject) ZAdd(key []byte, khash uint32, args ...btools.ScorePair) (int64, error) { +func setZsetOldDataType(mkv *base.MetaData) { + if mkv.GetDataType() != 
btools.ZSETOLD { + mkv.SetDataType(btools.ZSETOLD) + } +} + +func (zo *ZSetObject) ZAdd(key []byte, khash uint32, isOld bool, args ...btools.ScorePair) (int64, error) { if err := btools.CheckKeySize(key); err != nil { return 0, err } @@ -35,13 +41,6 @@ func (zo *ZSetObject) ZAdd(key []byte, khash uint32, args ...btools.ScorePair) ( return 0, errn.ErrArgsEmpty } - var count int64 - uniqArgs := make(map[string]btools.ScorePair, argsNum) - for i := 0; i < argsNum; i++ { - member := args[i].Member - uniqArgs[unsafe2.String(member)] = args[i] - } - unlockKey := zo.LockKey(khash) defer unlockKey() @@ -57,57 +56,69 @@ func (zo *ZSetObject) ZAdd(key []byte, khash uint32, args ...btools.ScorePair) ( return 0, err } - wb := zo.GetDataWriteBatchFromPool() - defer zo.PutWriteBatchToPool(wb) + if isOld { + setZsetOldDataType(mkv) + } + + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) indexWb := zo.GetIndexWriteBatchFromPool() defer zo.PutWriteBatchToPool(indexWb) + + var count int64 + var scoreBuf [base.ScoreLength]byte + var ekfBuf [base.DataKeyZsetLength]byte keyVersion := mkv.Version() keyKind := mkv.Kind() - var scoreBuf [base.ScoreLength]byte - var ekf [base.DataKeyZsetLength]byte - for i := range uniqArgs { - err = func() error { - score := uniqArgs[i].Score - member := uniqArgs[i].Member - if e := btools.CheckFieldSize(member); e != nil { - return e - } + isZsetOld := mkv.IsZsetOld() - base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member) - value, mbexist, valCloser, e := zo.GetDataValue(ekf[:]) - defer func() { - if valCloser != nil { - valCloser() - } - }() - if e != nil { - return e - } + zadd := func(score float64, member []byte) error { + if e := btools.CheckFieldSize(member); e != nil { + return e + } - if !mbexist { - count++ - mkv.IncrSize(1) - } else { - oldScore := numeric.ByteSortToFloat64(value) - if oldScore == score { - return nil - } + ekfLen := base.EncodeZsetDataKey(ekfBuf[:], keyVersion, khash, member, isZsetOld) + 
ekf := ekfBuf[:ekfLen] + value, exist, closer, e := zo.GetDataValue(ekf) + if e != nil { + return e + } + defer func() { + if closer != nil { + closer() + } + }() - _ = zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, oldScore, member) + if !exist { + count++ + mkv.IncrSize(1) + } else { + oldScore := numeric.ByteSortToFloat64(value) + if oldScore == score { + return nil } + zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, oldScore, member) + } - _ = wb.Put(ekf[:], numeric.Float64ToByteSort(score, scoreBuf[:])) + dataWb.Put(ekf, numeric.Float64ToByteSort(score, scoreBuf[:])) + zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, score, member) - _ = zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, score, member) + return nil + } - return nil - }() - if err != nil { + argsDup := make(map[string]struct{}, argsNum) + for i := range args { + member := unsafe2.String(args[i].Member) + if _, exist := argsDup[member]; exist { + continue + } + if err = zadd(args[i].Score, args[i].Member); err != nil { return 0, err } + argsDup[member] = struct{}{} } - if err = wb.Commit(); err != nil { + if err = dataWb.Commit(); err != nil { return 0, err } if err = indexWb.Commit(); err != nil { @@ -122,85 +133,7 @@ func (zo *ZSetObject) ZAdd(key []byte, khash uint32, args ...btools.ScorePair) ( return count, err } -func (zo *ZSetObject) ZRem(key []byte, khash uint32, members ...[]byte) (int64, error) { - if err := btools.CheckKeySize(key); err != nil { - return 0, err - } - - if len(members) == 0 { - return 0, nil - } - - unlockKey := zo.LockKey(khash) - defer unlockKey() - - mk, mkCloser := base.EncodeMetaKey(key, khash) - defer mkCloser() - mkv, err := zo.GetMetaData(mk) - if err != nil { - return 0, err - } - defer base.PutMkvToPool(mkv) - if !mkv.IsAlive() { - return 0, nil - } - - wb := zo.GetDataWriteBatchFromPool() - defer zo.PutWriteBatchToPool(wb) - indexWb := zo.GetIndexWriteBatchFromPool() - defer zo.PutWriteBatchToPool(indexWb) - - var count 
int64 - keyVersion := mkv.Version() - keyKind := mkv.Kind() - var ekf [base.DataKeyZsetLength]byte - for i := 0; i < len(members); i++ { - err = func() error { - if e := btools.CheckFieldSize(members[i]); e != nil { - return e - } - - base.EncodeZsetDataKey(ekf[:], keyVersion, khash, members[i]) - value, mbexist, valCloser, e := zo.GetDataValue(ekf[:]) - defer func() { - if valCloser != nil { - valCloser() - } - }() - if e != nil { - return e - } - - if mbexist { - count++ - mkv.DecrSize(1) - _ = wb.Delete(ekf[:]) - score := numeric.ByteSortToFloat64(value) - _ = zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, score, members[i]) - } - - return nil - }() - if err != nil { - return 0, err - } - } - - if count > 0 { - if err = wb.Commit(); err != nil { - return 0, err - } - if err = indexWb.Commit(); err != nil { - return 0, err - } - if err = zo.SetMetaData(mk, mkv); err != nil { - return 0, err - } - } - return count, err -} - -func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member []byte) (float64, error) { +func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, isOld bool, delta float64, member []byte) (float64, error) { if err := btools.CheckKeyAndFieldSize(key, member); err != nil { return 0, err } @@ -221,8 +154,12 @@ func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member [] mkv.Reuse(zo.DataType, zo.GetNextKeyId()) } - wb := zo.GetDataWriteBatchFromPool() - defer zo.PutWriteBatchToPool(wb) + if isOld { + setZsetOldDataType(mkv) + } + + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) indexWb := zo.GetIndexWriteBatchFromPool() defer zo.PutWriteBatchToPool(indexWb) metaWb := zo.GetMetaWriteBatchFromPool() @@ -230,10 +167,12 @@ func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member [] var newScore float64 var scoreBuf [base.ScoreLength]byte - var ekf [base.DataKeyZsetLength]byte + var ekfBuf [base.DataKeyZsetLength]byte keyVersion := mkv.Version() 
keyKind := mkv.Kind() - base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member) + isZsetOld := mkv.IsZsetOld() + ekfLen := base.EncodeZsetDataKey(ekfBuf[:], keyVersion, khash, member, isZsetOld) + ekf := ekfBuf[:ekfLen] var updateCache func() = nil @@ -242,17 +181,17 @@ func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member [] newScore = delta var meta [base.MetaMixValueLen]byte base.EncodeMetaDbValueForMix(meta[:], mkv) - _ = metaWb.Put(mk, meta[:]) + metaWb.Put(mk, meta[:]) updateCache = func() { if zo.BaseDb.MetaCache != nil { zo.BaseDb.MetaCache.Put(mk, meta[:]) } } - _ = wb.Put(ekf[:], numeric.Float64ToByteSort(delta, scoreBuf[:])) - _ = zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, newScore, member) + dataWb.Put(ekf, numeric.Float64ToByteSort(delta, scoreBuf[:])) + zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, newScore, member) } else { - value, mbexist, valCloser, e := zo.GetDataValue(ekf[:]) + value, mbexist, valCloser, e := zo.GetDataValue(ekf) defer func() { if valCloser != nil { valCloser() @@ -271,20 +210,20 @@ func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member [] mkv.IncrSize(1) var meta [base.MetaMixValueLen]byte base.EncodeMetaDbValueForMix(meta[:], mkv) - _ = metaWb.Put(mk, meta[:]) + metaWb.Put(mk, meta[:]) updateCache = func() { if zo.BaseDb.MetaCache != nil { zo.BaseDb.MetaCache.Put(mk, meta[:]) } } } - _ = zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, oldScore, member) + zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, oldScore, member) newScore = oldScore + delta - _ = wb.Put(ekf[:], numeric.Float64ToByteSort(newScore, scoreBuf[:])) - _ = zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, newScore, member) + dataWb.Put(ekf, numeric.Float64ToByteSort(newScore, scoreBuf[:])) + zo.setZsetIndexValue(indexWb, keyVersion, keyKind, khash, newScore, member) } - if err = wb.Commit(); err != nil { + if err = dataWb.Commit(); err != nil { return 0, 
err } if err = indexWb.Commit(); err != nil { @@ -299,6 +238,87 @@ func (zo *ZSetObject) ZIncrBy(key []byte, khash uint32, delta float64, member [] return newScore, nil } +func (zo *ZSetObject) ZRem(key []byte, khash uint32, members ...[]byte) (int64, error) { + if err := btools.CheckKeySize(key); err != nil { + return 0, err + } + + if len(members) == 0 { + return 0, nil + } + + unlockKey := zo.LockKey(khash) + defer unlockKey() + + mk, mkCloser := base.EncodeMetaKey(key, khash) + defer mkCloser() + mkv, err := zo.GetMetaData(mk) + if err != nil { + return 0, err + } + defer base.PutMkvToPool(mkv) + if !mkv.IsAlive() { + return 0, nil + } + + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) + indexWb := zo.GetIndexWriteBatchFromPool() + defer zo.PutWriteBatchToPool(indexWb) + + var count int64 + var ekf [base.DataKeyZsetLength]byte + + keyVersion := mkv.Version() + keyKind := mkv.Kind() + isZsetOld := mkv.IsZsetOld() + + zrem := func(member []byte) error { + if e := btools.CheckFieldSize(member); e != nil { + return e + } + + ekfLen := base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member, isZsetOld) + value, exist, closer, e := zo.GetDataValue(ekf[:ekfLen]) + if e != nil { + return e + } + defer func() { + if closer != nil { + closer() + } + }() + + if exist { + count++ + mkv.DecrSize(1) + dataWb.Delete(ekf[:ekfLen]) + zo.deleteZsetIndexKey(indexWb, keyVersion, keyKind, khash, numeric.ByteSortToFloat64(value), member) + } + + return nil + } + + for i := range members { + if err = zrem(members[i]); err != nil { + return 0, err + } + } + + if count > 0 { + if err = dataWb.Commit(); err != nil { + return 0, err + } + if err = indexWb.Commit(); err != nil { + return 0, err + } + if err = zo.SetMetaData(mk, mkv); err != nil { + return 0, err + } + } + return count, err +} + func (zo *ZSetObject) ZRemRangeByRank(key []byte, khash uint32, start int64, stop int64) (int64, error) { if err := btools.CheckKeySize(key); err != nil { return 
0, err @@ -321,19 +341,19 @@ func (zo *ZSetObject) ZRemRangeByRank(key []byte, khash uint32, start int64, sto return 0, nil } - wb := zo.GetDataWriteBatchFromPool() + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) indexWb := zo.GetIndexWriteBatchFromPool() - defer func() { - zo.PutWriteBatchToPool(wb) - zo.PutWriteBatchToPool(indexWb) - }() + defer zo.PutWriteBatchToPool(indexWb) var index, delCnt int64 var dataKey [base.DataKeyZsetLength]byte var lowerBound [base.DataKeyHeaderLength]byte var upperBound [base.IndexKeyScoreLength]byte + keyVersion := mkv.Version() keyKind := mkv.Kind() + isZsetOld := mkv.IsZsetOld() base.EncodeDataKeyLowerBound(lowerBound[:], keyVersion, khash) base.EncodeZsetIndexKeyUpperBound(upperBound[:], keyVersion, khash) iterOpts := &bitskv.IterOptions{ @@ -348,9 +368,9 @@ func (zo *ZSetObject) ZRemRangeByRank(key []byte, khash uint32, start int64, sto if index >= startIndex { indexKey := it.RawKey() _, _, fp := base.DecodeZsetIndexKey(keyKind, indexKey, it.RawValue()) - base.EncodeZsetDataKey(dataKey[:], keyVersion, khash, fp.Merge()) - _ = wb.Delete(dataKey[:]) - _ = indexWb.Delete(indexKey) + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], keyVersion, khash, fp.Merge(), isZsetOld) + dataWb.Delete(dataKey[:dataKeyLen]) + indexWb.Delete(indexKey) delCnt++ } index++ @@ -360,7 +380,7 @@ func (zo *ZSetObject) ZRemRangeByRank(key []byte, khash uint32, start int64, sto } if delCnt > 0 { - if err = wb.Commit(); err != nil { + if err = dataWb.Commit(); err != nil { return 0, err } if err = indexWb.Commit(); err != nil { @@ -391,20 +411,20 @@ func (zo *ZSetObject) ZRemRangeByScore( return 0, nil } - stopIndex := mkv.Size() - 1 - keyVersion := mkv.Version() - keyKind := mkv.Kind() - wb := zo.GetDataWriteBatchFromPool() + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) indexWb := zo.GetIndexWriteBatchFromPool() - defer func() { - zo.PutWriteBatchToPool(wb) - 
zo.PutWriteBatchToPool(indexWb) - }() + defer zo.PutWriteBatchToPool(indexWb) var index, delCnt int64 var dataKey [base.DataKeyZsetLength]byte var lowerBound [base.IndexKeyScoreLength]byte var upperBound [base.IndexKeyScoreUpperBoundLength]byte + + stopIndex := mkv.Size() - 1 + keyVersion := mkv.Version() + keyKind := mkv.Kind() + isZsetOld := mkv.IsZsetOld() base.EncodeZsetIndexKeyScore(lowerBound[:], keyVersion, khash, min) base.EncodeZsetIndexKeyScoreUpperBound(upperBound[:], keyVersion, khash, max) iterOpts := &bitskv.IterOptions{ @@ -429,10 +449,9 @@ func (zo *ZSetObject) ZRemRangeByScore( rightPass = true } if leftPass && rightPass { - base.EncodeZsetDataKey(dataKey[:], mkv.Version(), khash, fp.Merge()) - _ = wb.Delete(dataKey[:]) - - _ = indexWb.Delete(indexKey) + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], mkv.Version(), khash, fp.Merge(), isZsetOld) + dataWb.Delete(dataKey[:dataKeyLen]) + indexWb.Delete(indexKey) delCnt++ } if !rightPass { @@ -445,7 +464,7 @@ func (zo *ZSetObject) ZRemRangeByScore( } if delCnt > 0 { - if err = wb.Commit(); err != nil { + if err = dataWb.Commit(); err != nil { return 0, err } if err = indexWb.Commit(); err != nil { @@ -482,20 +501,20 @@ func (zo *ZSetObject) ZRemRangeByLex(key []byte, khash uint32, min []byte, max [ rightNotLimit = true } - stopIndex := mkv.Size() - 1 - keyVersion := mkv.Version() - keyKind := mkv.Kind() - wb := zo.GetDataWriteBatchFromPool() + dataWb := zo.GetDataWriteBatchFromPool() + defer zo.PutWriteBatchToPool(dataWb) indexWb := zo.GetIndexWriteBatchFromPool() - defer func() { - zo.PutWriteBatchToPool(wb) - zo.PutWriteBatchToPool(indexWb) - }() + defer zo.PutWriteBatchToPool(indexWb) var index, delCnt int64 var dataKey [base.DataKeyZsetLength]byte var lowerBound [base.DataKeyHeaderLength]byte var upperBound [base.IndexKeyScoreLength]byte + + stopIndex := mkv.Size() - 1 + keyVersion := mkv.Version() + keyKind := mkv.Kind() + isZsetOld := mkv.IsZsetOld() base.EncodeDataKeyLowerBound(lowerBound[:], 
keyVersion, khash) base.EncodeZsetIndexKeyUpperBound(upperBound[:], keyVersion, khash) iterOpts := &bitskv.IterOptions{ @@ -526,9 +545,9 @@ func (zo *ZSetObject) ZRemRangeByLex(key []byte, khash uint32, min []byte, max [ rightPass = true } if leftPass && rightPass { - base.EncodeZsetDataKey(dataKey[:], keyVersion, khash, member) - _ = wb.Delete(dataKey[:]) - _ = indexWb.Delete(indexKey) + dataKeyLen := base.EncodeZsetDataKey(dataKey[:], keyVersion, khash, member, isZsetOld) + dataWb.Delete(dataKey[:dataKeyLen]) + indexWb.Delete(indexKey) delCnt++ } if !rightPass { @@ -541,7 +560,7 @@ func (zo *ZSetObject) ZRemRangeByLex(key []byte, khash uint32, min []byte, max [ } if delCnt > 0 { - if err = wb.Commit(); err != nil { + if err = dataWb.Commit(); err != nil { return 0, err } if err = indexWb.Commit(); err != nil { diff --git a/stored/engine/bitsdb/bitsdb/zset_test.go b/stored/engine/bitsdb/bitsdb/zset_test.go index db2994c..1a9787b 100644 --- a/stored/engine/bitsdb/bitsdb/zset_test.go +++ b/stored/engine/bitsdb/bitsdb/zset_test.go @@ -26,6 +26,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/zuoyebang/bitalostored/butils/extend" "github.com/zuoyebang/bitalostored/butils/hash" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/base" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" @@ -70,17 +71,11 @@ func TestZSetCodec(t *testing.T) { t.Fatal("key is not eq", key, k) } - var verBytes [8]byte - binary.LittleEndian.PutUint64(verBytes[:], keyVersion) - verMember := append(member, verBytes[:]...) 
- memberMd5 := md5.Sum(verMember) - - var edk [base.DataKeyZsetLength]byte - base.EncodeZsetDataKey(edk[:], keyVersion, khash, member) - if m, err := base.DecodeZsetDataKey(edk[:]); err != nil { - t.Fatal(err) - } else if !bytes.Equal(m, memberMd5[:]) { - t.Fatal("memberMd5 err", m, memberMd5[:]) + var ekf [base.DataKeyZsetLength]byte + memberMd5 := md5.Sum(member) + ekfLen := base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member, false) + if !bytes.Equal(ekf[base.DataKeyHeaderLength:ekfLen], memberMd5[:]) { + t.Fatal("member md5 err") } efk, efkCloser, isCompress := base.EncodeZsetIndexKey(keyVersion, keyKind, khash, 100, member) @@ -118,785 +113,918 @@ func TestZSetCodec(t *testing.T) { } } -func TestDBZSet(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("testdb_zset_a") - khash := hash.Fnv32(key) - member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) - member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) - member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) - member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) - - if n, err := bdb.ZsetObj.ZAdd(key, khash, - spair(0, member1), - spair(1, member2), - spair(2, member3), - spair(3, member4), - ); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } +func TestZSetOldCodec(t *testing.T) { + bdb := testNewBitsDB() + defer closeDb(bdb) + + key := []byte("key") + member := []byte("member") + keyId := bdb.ZsetObj.GetNextKeyId() + mkv := &base.MetaData{} + mkv.SetDataType(btools.ZSET) + mkv.Reset(keyId) + keyVersion := mkv.Version() + khash := hash.Fnv32(key) + ek, ekCloser := base.EncodeMetaKey(key, khash) + defer ekCloser() + if k, err := base.DecodeMetaKey(ek); err != nil { + t.Fatal(err) + } else if !bytes.Equal(k, key) { + t.Fatal("key is not eq", key, k) + } - mk, mkCloser := 
base.EncodeMetaKey(key, khash) - mkv, err := bdb.ZsetObj.GetMetaData(mk) - mkCloser() - if err != nil { - t.Fatal(err) - } - require.Equal(t, base.KeyKindFieldCompress, mkv.Kind()) + var verBytes [8]byte + binary.LittleEndian.PutUint64(verBytes[:], keyVersion) + verMember := append(member, verBytes[:]...) + memberMd5 := md5.Sum(verMember) + var ekf [base.DataKeyZsetLength]byte + ekfLen := base.EncodeZsetDataKey(ekf[:], keyVersion, khash, member, true) + if !bytes.Equal(ekf[2:ekfLen], memberMd5[:]) { + t.Fatal("memberMd5 err") + } +} - if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } +func TestZSet(t *testing.T) { + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("testdb_zset_old") + khash := hash.Fnv32(key) + member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) + member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) + member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) + member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) + + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, + spair(0, member1), + spair(1, member2), + spair(2, member3), + spair(3, member4), + ); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } - if s, err := bdb.ZsetObj.ZScore(key, khash, member4); err != nil { - t.Fatal(err) - } else if int(s) != 3 { - t.Fatal(s) - } + mk, mkCloser := base.EncodeMetaKey(key, khash) + mkv, err := bdb.ZsetObj.GetMetaData(mk) + mkCloser() + if err != nil { + t.Fatal(err) + } + require.Equal(t, base.KeyKindFieldCompress, mkv.Kind()) - if s, err := bdb.ZsetObj.ZScore(key, khash, []byte("zzz")); err != nil && err != errn.ErrZsetMemberNil { - t.Fatal(fmt.Sprintf("s=[%d] err=[%s]", 
int(s), err)) - } + if n, err := bdb.StringObj.Exists(key, khash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } + if s, err := bdb.ZsetObj.ZScore(key, khash, member4); err != nil { + t.Fatal(err) + } else if int(s) != 3 { + t.Fatal(s) + } - if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) - } + if s, err := bdb.ZsetObj.ZScore(key, khash, []byte("zzz")); err != nil && err != errn.ErrZsetMemberNil { + t.Fatal(fmt.Sprintf("s=[%d] err=[%s]", int(s), err)) + } - if n, err := bdb.StringObj.Del(khash, key); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } - } -} + if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } -func TestDBZSetKeyKind(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) + if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } - for _, cr := range cores { - bdb := cr.db + if n, err := bdb.StringObj.Del(khash, key); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - key1 := []byte("testdb_zset_keykind1") - khash1 := hash.Fnv32(key1) - key2 := []byte("testdb_zset_keykind2") - khash2 := hash.Fnv32(key2) - member1 := []byte(fmt.Sprintf("a%s", 
string(testRandBytes(10)))) - member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) - member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) - member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) - - if n, err := bdb.ZsetObj.ZAdd(key1, khash1, - spair(0, member1), - spair(1, member2), - spair(2, member3), - spair(3, member4), - ); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } + if n, err := bdb.StringObj.Exists(key, khash); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } - checkKeyKind := func(k []byte, h uint32, kind uint8) { - mk, mkCloser := base.EncodeMetaKey(k, h) - mkv, err := bdb.ZsetObj.GetMetaData(mk) - mkCloser() - if err != nil { - t.Fatal(err) + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } } - require.Equal(t, kind, mkv.Kind()) - } + }) + } +} - checkCmd := func(key []byte, khash uint32, kind uint8) { - checkKeyKind(key, khash, kind) - if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } +func TestZSetIncrBy(t *testing.T) { + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("testdb_zincrby") + khash := hash.Fnv32(key) + + total := float64(55) + for i := 1; i <= 10; i++ { + tmp := 0 + for j := 1; j <= 10; j++ { + tmp += j + if s, err := bdb.ZsetObj.ZIncrBy(key, khash, isOld, float64(j), extend.FormatIntToSlice(i)); err != nil { + t.Fatal(err) + } else if s != float64(tmp) { + t.Fatalf("ZIncrBy err exp:%v act:%v", tmp, s) + } + } + } - if zpair, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(zpair) != 4 { - t.Fatal("zrange len err", 
len(zpair)) - } else if !bytes.Equal(member1, zpair[0].Member) { - t.Fatal("zrange 0 member err", string(zpair[0].Member)) - } else if 0 != zpair[0].Score { - t.Fatal("zrange 0 score err", zpair[0].Score) - } else if !bytes.Equal(member2, zpair[1].Member) { - t.Fatal("zrange 1 member err", string(zpair[1].Member)) - } else if 1 != zpair[1].Score { - t.Fatal("zrange 1 score err", zpair[1].Score) - } else if !bytes.Equal(member3, zpair[2].Member) { - t.Fatal("zrange 2 member err", string(zpair[2].Member)) - } else if 2 != zpair[2].Score { - t.Fatal("zrange 2 score err", zpair[2].Score) - } else if !bytes.Equal(member4, zpair[3].Member) { - t.Fatal("zrange 3 member err", string(zpair[3].Member)) - } else if 3 != zpair[3].Score { - t.Fatal("zrange 3 score err", zpair[3].Score) - } + for i := 1; i <= 10; i++ { + if s, err := bdb.ZsetObj.ZScore(key, khash, extend.FormatIntToSlice(i)); err != nil { + t.Fatal(err) + } else if s != total { + t.Fatalf("ZScore err exp:%v act:%v", i, s) + } + } - if zpair, err := bdb.ZsetObj.ZRevRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(zpair) != 4 { - t.Fatal("zrevrange len err", len(zpair)) - } else if !bytes.Equal(member4, zpair[0].Member) { - t.Fatal("zrevrange 0 member err", string(zpair[0].Member)) - } else if 3 != zpair[0].Score { - t.Fatal("zrevrange 0 score err", zpair[0].Score) - } else if !bytes.Equal(member3, zpair[1].Member) { - t.Fatal("zrevrange 1 member err", string(zpair[1].Member)) - } else if 2 != zpair[1].Score { - t.Fatal("zrevrange 1 score err", zpair[1].Score) - } else if !bytes.Equal(member2, zpair[2].Member) { - t.Fatal("zrevrange 2 member err", string(zpair[2].Member)) - } else if 1 != zpair[2].Score { - t.Fatal("zrevrange 2 score err", zpair[2].Score) - } else if !bytes.Equal(member1, zpair[3].Member) { - t.Fatal("zrevrange 3 member err", string(zpair[3].Member)) - } else if 0 != zpair[3].Score { - t.Fatal("zrevrange 3 score err", zpair[3].Score) - } + if n, err := bdb.ZsetObj.ZCount(key, 
khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 10 { + t.Fatal(n) + } - if s, err := bdb.ZsetObj.ZScore(key, khash, member4); err != nil { - t.Fatal(err) - } else if int(s) != 3 { - t.Fatal(s) - } + if _, err := bdb.ZsetObj.ZScore(key, khash, []byte("zzz")); err != nil && err != errn.ErrZsetMemberNil { + t.Fatal(err) + } - if s, err := bdb.ZsetObj.ZScore(key, khash, []byte("zzz")); err != nil && err != errn.ErrZsetMemberNil { - t.Fatal(fmt.Sprintf("s=[%d] err=[%s]", int(s), err)) - } + for i := 1; i <= 6; i += 2 { + if n, err := bdb.ZsetObj.ZRem(key, khash, extend.FormatIntToSlice(i), extend.FormatIntToSlice(i+1)); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + } - if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } + if n, err := bdb.StringObj.Del(khash, key); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } } + }) + } +} - checkKeyKind(key, khash, kind) - - if n, err := bdb.StringObj.Del(khash, key); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } +func TestZSetKeyKind(t *testing.T) { + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key1 := []byte("testdb_zset_keykind1") + khash1 := hash.Fnv32(key1) + key2 := []byte("testdb_zset_keykind2") + khash2 := hash.Fnv32(key2) + 
member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) + member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) + member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) + member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) + + checkKeyKind := func(k []byte, h uint32, kind uint8) { + mk, mkCloser := base.EncodeMetaKey(k, h) + mkv, err := bdb.ZsetObj.GetMetaData(mk) + mkCloser() + if err != nil { + t.Fatal(err) + } + require.Equal(t, kind, mkv.Kind()) + } - if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } - } + checkCmd := func(key []byte, khash uint32, kind uint8) { + checkKeyKind(key, khash, kind) + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } + + if zpair, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(zpair) != 4 { + t.Fatal("zrange len err", len(zpair)) + } else if !bytes.Equal(member1, zpair[0].Member) { + t.Fatal("zrange 0 member err", string(zpair[0].Member)) + } else if 0 != zpair[0].Score { + t.Fatal("zrange 0 score err", zpair[0].Score) + } else if !bytes.Equal(member2, zpair[1].Member) { + t.Fatal("zrange 1 member err", string(zpair[1].Member)) + } else if 1 != zpair[1].Score { + t.Fatal("zrange 1 score err", zpair[1].Score) + } else if !bytes.Equal(member3, zpair[2].Member) { + t.Fatal("zrange 2 member err", string(zpair[2].Member)) + } else if 2 != zpair[2].Score { + t.Fatal("zrange 2 score err", zpair[2].Score) + } else if !bytes.Equal(member4, zpair[3].Member) { + t.Fatal("zrange 3 member err", string(zpair[3].Member)) + } else if 3 != zpair[3].Score { + t.Fatal("zrange 3 score err", zpair[3].Score) + } + + if zpair, err := bdb.ZsetObj.ZRevRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(zpair) != 4 { + 
t.Fatal("zrevrange len err", len(zpair)) + } else if !bytes.Equal(member4, zpair[0].Member) { + t.Fatal("zrevrange 0 member err", string(zpair[0].Member)) + } else if 3 != zpair[0].Score { + t.Fatal("zrevrange 0 score err", zpair[0].Score) + } else if !bytes.Equal(member3, zpair[1].Member) { + t.Fatal("zrevrange 1 member err", string(zpair[1].Member)) + } else if 2 != zpair[1].Score { + t.Fatal("zrevrange 1 score err", zpair[1].Score) + } else if !bytes.Equal(member2, zpair[2].Member) { + t.Fatal("zrevrange 2 member err", string(zpair[2].Member)) + } else if 1 != zpair[2].Score { + t.Fatal("zrevrange 2 score err", zpair[2].Score) + } else if !bytes.Equal(member1, zpair[3].Member) { + t.Fatal("zrevrange 3 member err", string(zpair[3].Member)) + } else if 0 != zpair[3].Score { + t.Fatal("zrevrange 3 score err", zpair[3].Score) + } + + if s, err := bdb.ZsetObj.ZScore(key, khash, member4); err != nil { + t.Fatal(err) + } else if int(s) != 3 { + t.Fatal(s) + } + + if s, err := bdb.ZsetObj.ZScore(key, khash, []byte("zzz")); err != nil && err != errn.ErrZsetMemberNil { + t.Fatal(fmt.Sprintf("s=[%d] err=[%s]", int(s), err)) + } + + if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + + if n, err := bdb.ZsetObj.ZRem(key, khash, member1, member2); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + + if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + + checkKeyKind(key, khash, kind) + + if n, err := bdb.StringObj.Del(khash, key); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + } - checkCmd(key1, khash1, base.KeyKindFieldCompress) + if n, err := bdb.ZsetObj.ZAdd(key1, khash1, isOld, + spair(0, member1), + spair(1, member2), + spair(2, member3), + spair(3, member4), + ); 
err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZAdd(key2, khash2, isOld, + spair(0, member1), + spair(1, member2), + spair(2, member3), + spair(3, member4), + ); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZAdd(key2, khash2, - spair(0, member1), - spair(1, member2), - spair(2, member3), - spair(3, member4), - ); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } - checkCmd(key2, khash2, base.KeyKindFieldCompress) + checkCmd(key1, khash1, base.KeyKindFieldCompress) + checkCmd(key2, khash2, base.KeyKindFieldCompress) + } + }) } } func TestZSetOrder(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("testdb_zset_order") - khash := hash.Fnv32(key) - member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) - member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize/2)))) - member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) - member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize)))) - member5 := []byte(fmt.Sprintf("e%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) - member6 := []byte(fmt.Sprintf("f%s", string(testRandBytes(base.KeyFieldCompressSize*5)))) - membs := [][]byte{member1, member2, member3, member4, member5, member6} - membCnt := len(membs) - - for i := 0; i < membCnt; i++ { - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(float64(i), membs[i])); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - } - - if n, _ := bdb.ZsetObj.ZCount(key, khash, -math.MaxFloat64, math.MaxFloat64, false, false); int(n) != membCnt { - t.Fatal(n) - } - if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, true, false); n != 5 { - t.Fatal(n) - } - if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, true, true); n != 4 { - t.Fatal(n) - } - if n, _ := bdb.ZsetObj.ZCount(key, 
khash, 0, 5, false, false); n != 6 { - t.Fatal(n) - } - if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, false, true); n != 5 { - t.Fatal(n) - } - if n, _ := bdb.ZsetObj.ZCount(key, khash, 0.1, 4, true, true); n != 3 { - t.Fatal(n) - } + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("testdb_zset_order") + khash := hash.Fnv32(key) + member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) + member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize/2)))) + member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) + member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize)))) + member5 := []byte(fmt.Sprintf("e%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) + member6 := []byte(fmt.Sprintf("f%s", string(testRandBytes(base.KeyFieldCompressSize*5)))) + membs := [][]byte{member1, member2, member3, member4, member5, member6} + membCnt := len(membs) + + for i := 0; i < membCnt; i++ { + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(float64(i), membs[i])); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + } - for i := 0; i < membCnt; i++ { - if pos, err := bdb.ZsetObj.ZRank(key, khash, membs[i]); err != nil { - t.Fatal(err) - } else if int(pos) != i { - t.Fatal(pos) - } + if n, _ := bdb.ZsetObj.ZCount(key, khash, -math.MaxFloat64, math.MaxFloat64, false, false); int(n) != membCnt { + t.Fatal(n) + } + if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, true, false); n != 5 { + t.Fatal(n) + } + if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, true, true); n != 4 { + t.Fatal(n) + } + if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, false, false); n != 6 { + t.Fatal(n) + } + if n, _ := bdb.ZsetObj.ZCount(key, khash, 0, 5, false, true); n != 5 { + t.Fatal(n) + } + if n, _ := 
bdb.ZsetObj.ZCount(key, khash, 0.1, 4, true, true); n != 3 { + t.Fatal(n) + } - pos, err := bdb.ZsetObj.ZRevRank(key, khash, membs[i]) - if err != nil { - t.Fatal(err) - } else if int(pos) != membCnt-i-1 { - t.Fatal(pos) - } - } + for i := 0; i < membCnt; i++ { + if pos, err := bdb.ZsetObj.ZRank(key, khash, membs[i]); err != nil { + t.Fatal(err) + } else if int(pos) != i { + t.Fatal(pos) + } + + pos, err := bdb.ZsetObj.ZRevRank(key, khash, membs[i]) + if err != nil { + t.Fatal(err) + } else if int(pos) != membCnt-i-1 { + t.Fatal(pos) + } + } - if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(qMembs) != membCnt { - t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) - } else { - for i := 0; i < membCnt; i++ { - if !bytes.Equal(membs[i], qMembs[i].Member) { - t.Fatal("ZRange member not eq", i) + if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(qMembs) != membCnt { + t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) + } else { + for i := 0; i < membCnt; i++ { + if !bytes.Equal(membs[i], qMembs[i].Member) { + t.Fatal("ZRange member not eq", i) + } + } } - } - } - if qMembs, err := bdb.ZsetObj.ZRevRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(qMembs) != membCnt { - t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) - } else { - for i := 0; i < membCnt; i++ { - if !bytes.Equal(membs[membCnt-1-i], qMembs[i].Member) { - t.Fatal("ZRevRange member not eq", i) + if qMembs, err := bdb.ZsetObj.ZRevRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(qMembs) != membCnt { + t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) + } else { + for i := 0; i < membCnt; i++ { + if !bytes.Equal(membs[membCnt-1-i], qMembs[i].Member) { + t.Fatal("ZRevRange member not eq", i) + } + } } - } - } - if qMembs, err := bdb.ZsetObj.ZRangeByScore(key, khash, -1, 0xFFFF, false, false, 0, membCnt); err != nil { - t.Fatal(err) - } else if 
len(qMembs) != membCnt { - t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) - } else { - for i := 0; i < membCnt; i++ { - if !bytes.Equal(membs[i], qMembs[i].Member) { - t.Fatal("ZRangeByScore member not eq", i) + if qMembs, err := bdb.ZsetObj.ZRangeByScore(key, khash, -1, 0xFFFF, false, false, 0, membCnt); err != nil { + t.Fatal(err) + } else if len(qMembs) != membCnt { + t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) + } else { + for i := 0; i < membCnt; i++ { + if !bytes.Equal(membs[i], qMembs[i].Member) { + t.Fatal("ZRangeByScore member not eq", i) + } + } } - } - } - if qMembs, err := bdb.ZsetObj.ZRevRangeByScore(key, khash, -1, 0xFFFF, false, false, 0, membCnt); err != nil { - t.Fatal(err) - } else if len(qMembs) != membCnt { - t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) - } else { - for i := 0; i < membCnt; i++ { - if !bytes.Equal(membs[membCnt-1-i], qMembs[i].Member) { - t.Fatal("ZRevRangeByScore member not eq", i) + if qMembs, err := bdb.ZsetObj.ZRevRangeByScore(key, khash, -1, 0xFFFF, false, false, 0, membCnt); err != nil { + t.Fatal(err) + } else if len(qMembs) != membCnt { + t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), membCnt)) + } else { + for i := 0; i < membCnt; i++ { + if !bytes.Equal(membs[membCnt-1-i], qMembs[i].Member) { + t.Fatal("ZRevRangeByScore member not eq", i) + } + } } - } - } - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(999, member4)); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(999, member4)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } - if pos, _ := bdb.ZsetObj.ZRank(key, khash, member4); pos != int64(membCnt-1) { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRank(key, khash, member4); pos != int64(membCnt-1) { + t.Fatal(pos) + } - if pos, _ := bdb.ZsetObj.ZRevRank(key, khash, member4); pos != 0 { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRevRank(key, khash, member4); pos != 0 { + t.Fatal(pos) + } - 
if pos, _ := bdb.ZsetObj.ZRank(key, khash, member5); pos != 3 { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRank(key, khash, member5); pos != 3 { + t.Fatal(pos) + } - if pos, _ := bdb.ZsetObj.ZRank(key, khash, member6); pos != 4 { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRank(key, khash, member6); pos != 4 { + t.Fatal(pos) + } - if qMembs, err := bdb.ZsetObj.ZRangeByScore(key, khash, 999, 0xFFFF, false, false, 0, membCnt); err != nil { - t.Fatal(err) - } else if len(qMembs) != 1 { - t.Fatal(len(qMembs)) - } + if qMembs, err := bdb.ZsetObj.ZRangeByScore(key, khash, 999, 0xFFFF, false, false, 0, membCnt); err != nil { + t.Fatal(err) + } else if len(qMembs) != 1 { + t.Fatal(len(qMembs)) + } - if s, err := bdb.ZsetObj.ZIncrBy(key, khash, 2, member5); err != nil { - t.Fatal(err) - } else if s != 6 { - t.Fatal(s) - } + if s, err := bdb.ZsetObj.ZIncrBy(key, khash, isOld, 2, member5); err != nil { + t.Fatal(err) + } else if s != 6 { + t.Fatal(s) + } - if pos, _ := bdb.ZsetObj.ZRank(key, khash, member5); int(pos) != 4 { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRank(key, khash, member5); int(pos) != 4 { + t.Fatal(pos) + } - if pos, _ := bdb.ZsetObj.ZRevRank(key, khash, member5); int(pos) != 1 { - t.Fatal(pos) - } + if pos, _ := bdb.ZsetObj.ZRevRank(key, khash, member5); int(pos) != 1 { + t.Fatal(pos) + } - if datas, _ := bdb.ZsetObj.ZRange(key, khash, 0, -1); len(datas) != 6 { - t.Fatal(len(datas)) - } else { - scores := []int64{0, 1, 2, 5, 6, 999} - for i := 0; i < len(datas); i++ { - if int64(datas[i].Score) != scores[i] { - t.Fatal(fmt.Sprintf("[%d]=%v", i, datas[i])) + if datas, _ := bdb.ZsetObj.ZRange(key, khash, 0, -1); len(datas) != 6 { + t.Fatal(len(datas)) + } else { + scores := []int64{0, 1, 2, 5, 6, 999} + for i := 0; i < len(datas); i++ { + if int64(datas[i].Score) != scores[i] { + t.Fatal(fmt.Sprintf("[%d]=%v", i, datas[i])) + } + } } } - } - } -} - -func TestZsetIncrby(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - 
for _, cr := range cores { - bdb := cr.db - incrKey := []byte("zset_incrby_case") - khash := hash.Fnv32(incrKey) - member := []byte("zincr_field") - - newScore, err := bdb.ZsetObj.ZIncrBy(incrKey, khash, 1, member) - if err != nil { - t.Fatal(err) - } else if newScore != 1 { - t.Fatal("newScore != 1") - } - - member2 := []byte("zincr_field_two") - _, err = bdb.ZsetObj.ZIncrBy(incrKey, khash, 1, member2) - if err != nil { - t.Fatal(err) - } + }) } } func TestZsetScore(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("a") - khash := hash.Fnv32(key) - - member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(base.KeyFieldCompressSize-10)))) - member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(10)))) - member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize*10)))) - member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) - member5 := []byte(fmt.Sprintf("e%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(1, member3)); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(15, member1)); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(-15, member5)); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(0, member4)); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(13, member2)); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } - if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(qMembs) != 5 { - t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), 1)) - } else { - if !bytes.Equal(qMembs[0].Member, member5) { - t.Fatal("ZRange 0 member err", 
string(qMembs[0].Member)) - } - if !bytes.Equal(qMembs[1].Member, member4) { - t.Fatal("ZRange 1 member err", string(qMembs[1].Member)) - } - if !bytes.Equal(qMembs[2].Member, member3) { - t.Fatal("ZRange 2 member err", string(qMembs[2].Member)) - } - if !bytes.Equal(qMembs[3].Member, member2) { - t.Fatal("ZRange 3 member err", string(qMembs[3].Member)) - } - if !bytes.Equal(qMembs[4].Member, member1) { - t.Fatal("ZRange 4 member err", string(qMembs[4].Member)) + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("a") + khash := hash.Fnv32(key) + + member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(base.KeyFieldCompressSize-10)))) + member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(10)))) + member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize*10)))) + member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) + member5 := []byte(fmt.Sprintf("e%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) + + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(1, member3)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(15, member1)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(-15, member5)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(0, member4)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(13, member2)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(qMembs) != 5 { + t.Fatal(fmt.Sprintf("%d vs 
%d", len(qMembs), 1)) + } else { + if !bytes.Equal(qMembs[0].Member, member5) { + t.Fatal("ZRange 0 member err", string(qMembs[0].Member)) + } + if !bytes.Equal(qMembs[1].Member, member4) { + t.Fatal("ZRange 1 member err", string(qMembs[1].Member)) + } + if !bytes.Equal(qMembs[2].Member, member3) { + t.Fatal("ZRange 2 member err", string(qMembs[2].Member)) + } + if !bytes.Equal(qMembs[3].Member, member2) { + t.Fatal("ZRange 3 member err", string(qMembs[3].Member)) + } + if !bytes.Equal(qMembs[4].Member, member1) { + t.Fatal("ZRange 4 member err", string(qMembs[4].Member)) + } + } } - } + }) } } func TestZSetPersist(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("persist") - khash := hash.Fnv32(key) - if n, err := bdb.ZsetObj.ZAdd(key, khash, spair(1, []byte("a"))); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("persist") + khash := hash.Fnv32(key) + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, spair(1, []byte("a"))); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { + t.Fatal(err) + } else if len(qMembs) != 1 { + t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), 1)) + } else { + for i := 0; i < 1; i++ { + if string(qMembs[i].Member) != "a" { + t.Fatalf("[%v] vs [%v]", qMembs[i], "a") + } + } + } - if qMembs, err := bdb.ZsetObj.ZRange(key, khash, 0, -1); err != nil { - t.Fatal(err) - } else if len(qMembs) != 1 { - 
t.Fatal(fmt.Sprintf("%d vs %d", len(qMembs), 1)) - } else { - for i := 0; i < 1; i++ { - if string(qMembs[i].Member) != "a" { - t.Fatalf("[%v] vs [%v]", qMembs[i], "a") + if n, err := bdb.StringObj.BasePersist(key, khash); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) } - } - } - if n, err := bdb.StringObj.BasePersist(key, khash); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } + if _, err := bdb.StringObj.Expire(key, khash, 10); err != nil { + t.Fatal(err) + } - if _, err := bdb.StringObj.Expire(key, khash, 10); err != nil { - t.Fatal(err) - } + if n, err := bdb.StringObj.BasePersist(key, khash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.StringObj.BasePersist(key, khash); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + if n, err := bdb.StringObj.TTL(key, khash); err != nil { + t.Fatal(err) + } else if n != -1 { + t.Fatal(n) + } - if n, err := bdb.StringObj.TTL(key, khash); err != nil { - t.Fatal(err) - } else if n != -1 { - t.Fatal(n) - } + if _, err := bdb.StringObj.Expire(key, khash, 2); err != nil { + t.Fatal(err) + } + time.Sleep(3 * time.Second) + if n, err := bdb.StringObj.TTL(key, khash); err != nil { + t.Fatal(err) + } else if n != -2 { + t.Fatal(n) + } + } + }) } } -func TestZLex(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("myzset") - khash := hash.Fnv32(key) - if _, err := bdb.ZsetObj.ZAdd(key, khash, - btools.ScorePair{0, []byte("a")}, - btools.ScorePair{0, []byte("b")}, - btools.ScorePair{0, []byte("c")}, - btools.ScorePair{0, []byte("d")}, - btools.ScorePair{0, []byte("e")}, - btools.ScorePair{0, []byte("f")}, - btools.ScorePair{0, []byte("g")}); err != nil { - t.Fatal(err) - } +func TestZsetLex(t *testing.T) { + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer 
closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("test_zlex") + khash := hash.Fnv32(key) + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, + spair(0, []byte("a")), + spair(0, []byte("b")), + spair(0, []byte("c")), + spair(0, []byte("d")), + spair(0, []byte("e")), + spair(0, []byte("f")), + spair(0, []byte("g"))); err != nil { + t.Fatal(err) + } else if n != 7 { + t.Fatal(n) + } - if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, nil, []byte("c"), false, false, 0, 100); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b"), []byte("c")}) { - t.Fatal("must equal a, b, c", ay) - } + if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, nil, []byte("c"), false, false, 0, 100); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b"), []byte("c")}) { + t.Fatal("must equal a, b, c", ay) + } - if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, nil, []byte("c"), false, true, 0, 100); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b")}) { - t.Fatal("must equal a, b") - } + if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, nil, []byte("c"), false, true, 0, 100); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("a"), []byte("b")}) { + t.Fatal("must equal a, b") + } - if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, []byte("aaa"), []byte("g"), false, true, 0, 100); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(ay, [][]byte{[]byte("b"), - []byte("c"), []byte("d"), []byte("e"), []byte("f")}) { - t.Fatal("must equal b, c, d, e, f", fmt.Sprintf("%q", ay)) - } + if ay, err := bdb.ZsetObj.ZRangeByLex(key, khash, []byte("aaa"), []byte("g"), false, true, 0, 100); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(ay, [][]byte{[]byte("b"), + []byte("c"), []byte("d"), []byte("e"), []byte("f")}) { + t.Fatal("must equal b, c, d, e, f", fmt.Sprintf("%q", ay)) + } - if n, err := 
bdb.ZsetObj.ZLexCount(key, khash, []byte{'-'}, []byte{'+'}, false, false); err != nil { - t.Fatal(err) - } else if n != 7 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZLexCount(key, khash, []byte{'-'}, []byte{'+'}, false, false); err != nil { + t.Fatal(err) + } else if n != 7 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZRemRangeByLex(key, khash, []byte("aaa"), []byte("g"), false, true); err != nil { - t.Fatal(err) - } else if n != 5 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZRemRangeByLex(key, khash, []byte("aaa"), []byte("g"), false, true); err != nil { + t.Fatal(err) + } else if n != 5 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZLexCount(key, khash, []byte{'-'}, []byte{'+'}, false, false); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZLexCount(key, khash, []byte{'-'}, []byte{'+'}, false, false); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } + } + }) } } func TestZsetExists(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("zkeyexists_test") - khash := hash.Fnv32(key) - if n, err := bdb.StringObj.Exists(key, khash); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal("invalid value ", n) - } + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("test_zset_exists") + khash := hash.Fnv32(key) + if n, err := bdb.StringObj.Exists(key, khash); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal("invalid value ", n) + } - bdb.ZsetObj.ZAdd(key, khash, spair(0, []byte("a")), spair(0, []byte("b"))) - if n, err := bdb.StringObj.Exists(key, khash); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal("invalid value ", n) - } + bdb.ZsetObj.ZAdd(key, khash, isOld, spair(0, []byte("a")), spair(0, []byte("b"))) + if n, err := 
bdb.StringObj.Exists(key, khash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal("invalid value ", n) + } + } + }) } } -func TestZsetDBZScan(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("zscan_z_key") - key1 := []byte("myzset") - khash := hash.Fnv32(key) - k1hash := hash.Fnv32(key1) - defer bdb.StringObj.Del(khash, key, key1) - bdb.ZsetObj.ZAdd(key, khash, - spair(1, []byte("1")), - spair(2, []byte("222")), - spair(3, []byte("19")), - spair(4, []byte("1234"))) - keyKind := base.KeyKindFieldCompress - keyVersion := base.EncodeKeyVersion(bdb.ZsetObj.GetCurrentKeyId(), keyKind) - bdb.ZsetObj.ZAdd(key1, k1hash, spair(10, []byte("fff")), spair(20, []byte("ggg"))) - cursor, v, err := bdb.ZsetObj.ZScan(key, khash, nil, 100, "*") - if err != nil { - t.Fatal(err) - } else if len(v) != 4 { - t.Fatal("invalid count", len(v)) - } +func TestZsetZScan(t *testing.T) { + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("test_zset_zscan") + key1 := []byte("test_zset_zscan_key1") + khash := hash.Fnv32(key) + k1hash := hash.Fnv32(key1) + defer bdb.StringObj.Del(khash, key, key1) + + bdb.ZsetObj.ZAdd(key, khash, isOld, + spair(1, []byte("1")), + spair(2, []byte("222")), + spair(3, []byte("19")), + spair(4, []byte("1234"))) + bdb.ZsetObj.ZAdd(key1, k1hash, isOld, spair(10, []byte("fff")), spair(20, []byte("ggg"))) + + cursor, v, err := bdb.ZsetObj.ZScan(key, khash, nil, 100, "*") + if err != nil { + t.Fatal(err) + } else if len(v) != 4 { + t.Fatal("invalid count", len(v)) + } - seek, seekCloser, _ := base.EncodeZsetIndexKey(keyVersion, keyKind, khash, 3, []byte("19")) - defer seekCloser() - _, _, _, seekCursor := base.DecodeZsetIndexKeyByCursor(keyKind, seek, base.NilDataVal) - seekNext, seekNextCloser, _ := 
base.EncodeZsetIndexKey(keyVersion, keyKind, khash, 4, []byte("1234")) - defer seekNextCloser() - _, _, _, seekNextCursor := base.DecodeZsetIndexKeyByCursor(keyKind, seekNext, base.NilDataVal) - cursor, v, err = bdb.ZsetObj.ZScan(key, khash, seekCursor, 1, "*") - if err != nil { - t.Fatal(err) - } else if len(v) != 1 { - t.Fatal("invalid count", len(v)) - } else if v[0].Score != 3 { - t.Fatal("score err", v[0].Score) - } else if !bytes.Equal(v[0].Member, []byte("19")) { - t.Fatal("member err", string(v[0].Member)) - } else if !bytes.Equal(cursor, seekNextCursor) { - t.Fatal("cursor err", seekNextCursor, cursor) - } + keyKind := base.KeyKindFieldCompress + keyVersion := base.EncodeKeyVersion(bdb.ZsetObj.GetCurrentKeyId(), keyKind) + seek, seekCloser, _ := base.EncodeZsetIndexKey(keyVersion, keyKind, khash, 3, []byte("19")) + defer seekCloser() + _, _, _, seekCursor := base.DecodeZsetIndexKeyByCursor(keyKind, seek, base.NilDataVal) + seekNext, seekNextCloser, _ := base.EncodeZsetIndexKey(keyVersion, keyKind, khash, 4, []byte("1234")) + defer seekNextCloser() + _, _, _, seekNextCursor := base.DecodeZsetIndexKeyByCursor(keyKind, seekNext, base.NilDataVal) + cursor, v, err = bdb.ZsetObj.ZScan(key, khash, seekCursor, 1, "*") + if err != nil { + t.Fatal(err) + } else if len(v) != 1 { + t.Fatal("invalid count", len(v)) + } else if v[0].Score != 3 { + t.Fatal("score err", v[0].Score) + } else if !bytes.Equal(v[0].Member, []byte("19")) { + t.Fatal("member err", string(v[0].Member)) + } else if !bytes.Equal(cursor, seekNextCursor) { + t.Fatal("cursor err", seekNextCursor, cursor) + } + } + }) } } -func TestZsetDBScan(t *testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - _, oldV, _ := bdb.Scan(nil, 100, "", btools.ZSET) - for _, d := range oldV { - dhash := hash.Fnv32(d) - bdb.StringObj.Del(dhash, d) - } +func TestZsetScan(t *testing.T) { + for _, isOld := range []bool{true, false} { + 
t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + _, oldV, _ := bdb.Scan(nil, 100, "", btools.ZSET) + for _, d := range oldV { + dhash := hash.Fnv32(d) + bdb.StringObj.Del(dhash, d) + } - key := []byte("scan_aaa") - key1 := []byte("scan_bbb") - khash := hash.Fnv32(key) - k1hash := hash.Fnv32(key1) - bdb.ZsetObj.ZAdd(key, khash, - spair(1, []byte("1")), - spair(2, []byte("222")), - spair(3, []byte("19")), - spair(4, []byte("1234"))) - bdb.ZsetObj.ZAdd(key1, k1hash, spair(10, []byte("fff")), spair(20, []byte("ggg"))) - cursor, v, err := bdb.Scan(nil, 100, "", btools.ZSET) - if err != nil { - t.Fatal(err) - } else if len(v) != 2 { - t.Fatal("invalid count", len(v)) - } + key := []byte("scan_aaa") + key1 := []byte("scan_bbb") + khash := hash.Fnv32(key) + k1hash := hash.Fnv32(key1) + + bdb.ZsetObj.ZAdd(key, khash, isOld, + spair(1, []byte("1")), + spair(2, []byte("222")), + spair(3, []byte("19")), + spair(4, []byte("1234"))) + bdb.ZsetObj.ZAdd(key1, k1hash, isOld, + spair(10, []byte("fff")), + spair(20, []byte("ggg"))) + + cursor, v, err := bdb.Scan(nil, 100, "", btools.ZSET) + if err != nil { + t.Fatal(err) + } else if len(v) != 2 { + t.Fatal("invalid count", len(v)) + } - cursor, v, err = bdb.Scan([]byte("scan_aaa"), 1, "**", btools.ZSET) - t.Log(string(cursor)) - for _, d := range v { - t.Log(string(d)) - } - if err != nil { - t.Fatal(err) - } else if len(v) != 1 { - t.Fatal("invalid count", len(v)) - } else if string(v[0]) != "scan_aaa" { - t.Fatal(string(v[0])) - } else if string(cursor) != "scan_bbb" { - t.Fatal(cursor) - } + cursor, v, err = bdb.Scan([]byte("scan_aaa"), 1, "**", btools.ZSET) + if err != nil { + t.Fatal(err) + } else if len(v) != 1 { + t.Fatal("invalid count", len(v)) + } else if string(v[0]) != "scan_aaa" { + t.Fatal(string(v[0])) + } else if string(cursor) != "scan_bbb" { + t.Fatal(cursor) + } + } + }) } } func TestZsetZRem(t 
*testing.T) { - cores := testTwoBitsCores() - defer closeCores(cores) - - for _, cr := range cores { - bdb := cr.db - - key := []byte("testdb_zrem_a") - khash := hash.Fnv32(key) - member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) - member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) - member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) - member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) - - if n, err := bdb.ZsetObj.ZAdd(key, khash, - spair(0, member1), - spair(1, member2), - spair(2, member3), - spair(3, member4), - ); err != nil { - t.Fatal(err) - } else if n != 4 { - t.Fatal(n) - } + for _, isOld := range []bool{true, false} { + t.Run(fmt.Sprintf("isOld=%v", isOld), func(t *testing.T) { + cores := testTwoBitsCores() + defer closeCores(cores) + + for _, cr := range cores { + bdb := cr.db + key := []byte("test_zset_zrem") + khash := hash.Fnv32(key) + member1 := []byte(fmt.Sprintf("a%s", string(testRandBytes(10)))) + member2 := []byte(fmt.Sprintf("b%s", string(testRandBytes(base.KeyFieldCompressSize-1)))) + member3 := []byte(fmt.Sprintf("c%s", string(testRandBytes(base.KeyFieldCompressSize)))) + member4 := []byte(fmt.Sprintf("d%s", string(testRandBytes(base.KeyFieldCompressSize*2)))) + + if n, err := bdb.ZsetObj.ZAdd(key, khash, isOld, + spair(0, member1), + spair(1, member2), + spair(2, member3), + spair(3, member4), + ); err != nil { + t.Fatal(err) + } else if n != 4 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZRemRangeByRank(key, khash, 0, 1); err != nil { - t.Fatal(err) - } else if n != 2 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZRemRangeByRank(key, khash, 0, 1); err != nil { + t.Fatal(err) + } else if n != 2 { + t.Fatal(n) + } - if res, err := bdb.ZsetObj.ZRange(key, khash, 0, 0xff); err == nil { - if !bytes.Equal(res[0].Member, member3) && !bytes.Equal(res[1].Member, member4) { - t.Fatal("member error") - } - } + if res, err 
:= bdb.ZsetObj.ZRange(key, khash, 0, 0xff); err == nil { + if !bytes.Equal(res[0].Member, member3) && !bytes.Equal(res[1].Member, member4) { + t.Fatal("member error") + } + } - if n, err := bdb.ZsetObj.ZRemRangeByScore(key, khash, 0, 2, false, false); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatalf("actual(%d) vs expect(%d)", n, 1) - } + if n, err := bdb.ZsetObj.ZRemRangeByScore(key, khash, 0, 2, false, false); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatalf("actual(%d) vs expect(%d)", n, 1) + } - if res, err := bdb.ZsetObj.ZRange(key, khash, 0, 0xff); err == nil { - require.Equal(t, member4, res[0].Member) - } + if res, err := bdb.ZsetObj.ZRange(key, khash, 0, 0xff); err == nil { + require.Equal(t, member4, res[0].Member) + } - if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZCard(key, khash); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.StringObj.Del(khash, key); err != nil { - t.Fatal(err) - } else if n != 1 { - t.Fatal(n) - } + if n, err := bdb.StringObj.Del(khash, key); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } - if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { - t.Fatal(err) - } else if n != 0 { - t.Fatal(n) - } + if n, err := bdb.ZsetObj.ZCount(key, khash, 0, 0xFF, false, false); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + } + }) } } diff --git a/stored/engine/bitsdb/bitskv/kv/bitalosdb/kv_bitalosdb.go b/stored/engine/bitsdb/bitskv/kv/bitalosdb/kv_bitalosdb.go index ed35ac6..375dbcc 100644 --- a/stored/engine/bitsdb/bitskv/kv/bitalosdb/kv_bitalosdb.go +++ b/stored/engine/bitsdb/bitskv/kv/bitalosdb/kv_bitalosdb.go @@ -90,37 +90,34 @@ func openBitalosDB(dirname string, cfg *dbconfig.Config, dataType btools.DataTyp } opts := &bitalosdb.Options{ - CompressionType: cfg.BithashCompressionType, - MemTableSize: cfg.WriteBufferSize, - 
MemTableStopWritesThreshold: cfg.MaxWriteBufferNum, - Logger: log.GetLogger(), - Verbose: true, - AutoCompact: true, - CompactInfo: compactOpt, - DataType: dataType.String(), - DisableWAL: cfg.DisableWAL, - UseBithash: false, - UseBitable: false, - UseMapIndex: true, - FlushReporter: cfg.FlushReporterFunc, - Id: kv.GetDbId(dataType, dbType), - UsePrefixCompress: true, - UseBlockCompress: cfg.EnablePageBlockCompression, - BlockCacheSize: int64(cfg.PageBlockCacheSize), - IOWriteLoadThresholdFunc: cfg.IOWriteLoadThresholdFunc, - BytesPerSync: 1 << 20, - DeleteFileInternal: 8, - KvCheckExpireFunc: nil, - KvTimestampFunc: nil, - KeyPrefixDeleteFunc: nil, + CompressionType: cfg.BithashCompressionType, + MemTableSize: cfg.WriteBufferSize, + MemTableStopWritesThreshold: cfg.MaxWriteBufferNum, + Logger: log.GetLogger(), + Verbose: true, + AutoCompact: true, + CompactInfo: compactOpt, + DataType: dataType.String(), + DisableWAL: cfg.DisableWAL, + UseBithash: false, + UseBitable: false, + UseMapIndex: true, + FlushReporter: cfg.FlushReporterFunc, + Id: kv.GetDbId(dataType, dbType), + UsePrefixCompress: true, + UseBlockCompress: cfg.EnablePageBlockCompression, + BlockCacheSize: int64(cfg.PageBlockCacheSize), + IOWriteLoadThresholdFunc: cfg.IOWriteLoadThresholdFunc, + BytesPerSync: 1 << 20, + DeleteFileInternal: 8, + KvCheckExpireFunc: nil, + KvTimestampFunc: nil, + KeyPrefixDeleteFunc: nil, + FlushPrefixDeleteKeyMultiplier: cfg.FlushPrefixDeleteKeyMultiplier, + FlushFileLifetime: cfg.FlushFileLifetime, } - if dataType == btools.ZSET && dbType == kv.DB_TYPE_INDEX { - opts.DataType += kv.GetDbTypeDir(dbType) - opts.LogTag = fmt.Sprintf("[bitalosdb/%s]", opts.DataType) - } else { - opts.LogTag = fmt.Sprintf("[bitalosdb/%s%s]", opts.DataType, kv.GetDbTypeDir(dbType)) - } + opts.LogTag = fmt.Sprintf("[bitalosdb/%s%s]", opts.DataType, kv.GetDbTypeDir(dbType)) opts.KeyHashFunc = func(k []byte) int { return int(binary.LittleEndian.Uint16(k[0:2])) @@ -131,6 +128,13 @@ func 
openBitalosDB(dirname string, cfg *dbconfig.Config, dataType btools.DataTyp opts.KvTimestampFunc = cfg.KvTimestampFunc opts.KvCheckExpireFunc = cfg.KvCheckExpireFunc } else { + opts.KeyPrefixDeleteFunc = func(k []byte) uint64 { + if len(k) < 10 { + return 0 + } + return binary.LittleEndian.Uint64(k[2:10]) + } + if dataType == btools.HASH || dataType == btools.LIST { opts.UseBithash = true } @@ -139,13 +143,6 @@ func openBitalosDB(dirname string, cfg *dbconfig.Config, dataType btools.DataTyp } if dataType == btools.ZSET && dbType == kv.DB_TYPE_DATA { opts.UsePrefixCompress = false - } else { - opts.KeyPrefixDeleteFunc = func(k []byte) uint64 { - if len(k) < 10 { - return 0 - } - return binary.LittleEndian.Uint64(k[2:10]) - } } } diff --git a/stored/engine/bitsdb/btools/datatype.go b/stored/engine/bitsdb/btools/datatype.go index be7cdc5..efb8e3c 100644 --- a/stored/engine/bitsdb/btools/datatype.go +++ b/stored/engine/bitsdb/btools/datatype.go @@ -23,16 +23,18 @@ const ( STRING HASH LIST - ZSET + ZSETOLD SET + ZSET ) const ( - StringName = "string" - HashName = "hash" - ListName = "list" - ZSetName = "zset" - SetName = "set" + StringName = "string" + HashName = "hash" + ListName = "list" + ZSetName = "zset" + ZSetOldName = "zsetold" + SetName = "set" ) var DataTypeList = []DataType{STRING, HASH, LIST, SET, ZSET} @@ -46,10 +48,12 @@ func (d DataType) String() string { return HashName case LIST: return ListName - case ZSET: - return ZSetName case SET: return SetName + case ZSET: + return ZSetName + case ZSETOLD: + return ZSetOldName default: return "" } @@ -73,7 +77,7 @@ func StringToDataType(t string) DataType { } func IsDataTypeFieldCompress(dt DataType) bool { - return dt == SET || dt == ZSET + return dt == SET || dt == ZSET || dt == ZSETOLD } type ScanPair struct { diff --git a/stored/engine/bitsdb/btools/define.go b/stored/engine/bitsdb/btools/define.go index 5600093..37eee9e 100644 --- a/stored/engine/bitsdb/btools/define.go +++ 
b/stored/engine/bitsdb/btools/define.go @@ -24,7 +24,6 @@ import ( const ( DefaultScanCount int = 10 LuaScriptSlot uint16 = 2048 - KeyLockerPoolCap uint32 = 16 << 10 ConfigMaxFieldSize int = 60 << 10 ) diff --git a/stored/engine/bitsdb/dbconfig/dbconfig.go b/stored/engine/bitsdb/dbconfig/dbconfig.go index 4a10c1d..d1775c0 100644 --- a/stored/engine/bitsdb/dbconfig/dbconfig.go +++ b/stored/engine/bitsdb/dbconfig/dbconfig.go @@ -18,36 +18,45 @@ import ( "sync/atomic" ) +const ( + DefaultWriteBufferSize = 256 << 20 + DefaultMaxWriteBufferNum = 8 +) + type Config struct { - DBPath string - DelExpireDataPoolNum int - GetNextKeyId func() uint64 - GetCurrentKeyId func() uint64 - WriteBufferSize int - MaxWriteBufferNum int - DisableWAL bool - CacheSize int - CacheHashSize int - CacheEliminateDuration int - CompactStartTime int - CompactEndTime int - BithashGcThreshold float64 - CompactInterval int - BithashCompressionType int - EnablePageBlockCompression bool - PageBlockCacheSize int - EnableRaftlogRestore bool - KvCheckExpireFunc func(int, []byte, []byte) bool - KvTimestampFunc func([]byte, uint8) (bool, uint64) - FlushReporterFunc func(int) - IOWriteLoadThresholdFunc func() bool + DBPath string + DelExpireDataPoolNum int + GetNextKeyId func() uint64 + GetCurrentKeyId func() uint64 + WriteBufferSize int + MaxWriteBufferNum int + DisableWAL bool + CacheSize int + CacheHashSize int + CacheShardNum int + CacheEliminateDuration int + EnableMissCache bool + CompactStartTime int + CompactEndTime int + BithashGcThreshold float64 + CompactInterval int + BithashCompressionType int + EnablePageBlockCompression bool + PageBlockCacheSize int + EnableRaftlogRestore bool + KvCheckExpireFunc func(int, []byte, []byte) bool + KvTimestampFunc func([]byte, uint8) (bool, uint64) + FlushReporterFunc func(int) + IOWriteLoadThresholdFunc func() bool + FlushPrefixDeleteKeyMultiplier int + FlushFileLifetime int } func NewConfigDefault() *Config { cfg := &Config{} cfg.DelExpireDataPoolNum = 8 - 
cfg.WriteBufferSize = getDefault(256<<20, cfg.WriteBufferSize) - cfg.MaxWriteBufferNum = getDefault(8, cfg.MaxWriteBufferNum) + cfg.WriteBufferSize = getDefault(DefaultWriteBufferSize, cfg.WriteBufferSize) + cfg.MaxWriteBufferNum = getDefault(DefaultMaxWriteBufferNum, cfg.MaxWriteBufferNum) cfg.CacheSize = getDefault(0, cfg.CacheSize) if cfg.GetNextKeyId == nil { cfg.GetNextKeyId = DefaultGetNextKeyId diff --git a/stored/engine/logic_zset.go b/stored/engine/logic_zset.go index f4064c8..bf5a380 100644 --- a/stored/engine/logic_zset.go +++ b/stored/engine/logic_zset.go @@ -19,13 +19,13 @@ import "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" func (b *Bitalos) ZAdd( key []byte, khash uint32, args ...btools.ScorePair, ) (int64, error) { - return b.bitsdb.ZsetObj.ZAdd(key, khash, args...) + return b.bitsdb.ZsetObj.ZAdd(key, khash, false, args...) } func (b *Bitalos) ZIncrBy( key []byte, khash uint32, delta float64, member []byte, ) (float64, error) { - return b.bitsdb.ZsetObj.ZIncrBy(key, khash, delta, member) + return b.bitsdb.ZsetObj.ZIncrBy(key, khash, false, delta, member) } func (b *Bitalos) ZRem( diff --git a/stored/engine/migrate.go b/stored/engine/migrate.go index 2f1b200..3f6c291 100644 --- a/stored/engine/migrate.go +++ b/stored/engine/migrate.go @@ -27,6 +27,7 @@ import ( "github.com/zuoyebang/bitalostored/butils/hash" "github.com/zuoyebang/bitalostored/butils/unsafe2" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb" + "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb/locker" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/log" @@ -52,7 +53,7 @@ type Migrate struct { toHost string slotId uint32 migrateDelToSlave func(uint32, [][]byte) error - lockerPool *KeyLockerPool + keyLocker *locker.ScopeLocker db *bitsdb.BitsDB status int64 total int64 @@ -424,7 +425,7 @@ func (m *Migrate) 
migrateRunTask(isMaster func() bool) (err error) { khash, _ := m.getKeyHash(key) func() { - defer m.lockerPool.LockKey(khash, resp.SET)() + defer m.keyLocker.LockKey(khash, resp.SET)() var e error atomic.AddInt64(&m.total, 1) @@ -433,12 +434,12 @@ func (m *Migrate) migrateRunTask(isMaster func() bool) (err error) { e = m.migrateString(key, conn) case btools.HASH: e = m.migrateHash(key, conn) - case btools.ZSET: - e = m.migrateZSet(key, conn) case btools.SET: e = m.migrateSet(key, conn) case btools.LIST: e = m.migrateList(key, conn) + case btools.ZSET, btools.ZSETOLD: + e = m.migrateZSet(key, conn) } if e != nil { atomic.AddInt64(&m.fails, 1) @@ -487,7 +488,7 @@ func (b *Bitalos) CheckRedirectAndLockFunc(cmd string, key []byte, khash uint32) return false, nil } - lockFunc := b.Migrate.lockerPool.LockKey(khash, cmd) + lockFunc := b.Migrate.keyLocker.LockKey(khash, cmd) if n, _ := b.bitsdb.StringObj.Exists(key, khash); n == 1 { return false, lockFunc @@ -496,7 +497,7 @@ func (b *Bitalos) CheckRedirectAndLockFunc(cmd string, key []byte, khash uint32) } } -func (b *Bitalos) Redirect(cmd string, key []byte, reqData [][]byte, rw *resp.RespWriter) error { +func (b *Bitalos) Redirect(cmd string, key []byte, reqData [][]byte, rw *resp.Writer) error { log.Info("redirect cmd: ", cmd, " key: ", string(key)) var arg []interface{} for _, v := range reqData[1:] { @@ -531,12 +532,12 @@ func (b *Bitalos) Redirect(cmd string, key []byte, reqData [][]byte, rw *resp.Re func (b *Bitalos) NewMigrate(slot uint32, tohost string, fromhost string) *Migrate { mg := &Migrate{ - fromHost: fromhost, - toHost: tohost, - slotId: slot, - beginTime: time.Now(), - endTime: time.Now(), - lockerPool: NewKeyLockerPool(), + fromHost: fromhost, + toHost: tohost, + slotId: slot, + beginTime: time.Now(), + endTime: time.Now(), + keyLocker: locker.NewScopeLocker(false), Conn: &redis.Pool{ MaxIdle: 10, Dial: func() (redis.Conn, error) { @@ -594,6 +595,7 @@ func (b *Bitalos) MigrateStart( return nil }) } + 
b.bitsdb.StringObj.BaseDb.BitmapMem.StartMigrate(slot) b.Meta.SetMigrateStatus(MigrateStatusProcess) b.Meta.SetMigrateSlotid(uint64(slot)) log.Infof("migrate end toHost:%s slotId:%d", host, slot) @@ -609,6 +611,7 @@ func (b *Bitalos) MigrateOver(slotId uint64) error { } else { log.Infof("migrate over slotId:%d", slotId) } + b.bitsdb.StringObj.BaseDb.BitmapMem.ClearMigrate() b.Meta.SetMigrateStatus(MigrateStatusPrepare) return nil } diff --git a/stored/internal/config/config.go b/stored/internal/config/config.go index a77f2b0..656e60b 100644 --- a/stored/internal/config/config.go +++ b/stored/internal/config/config.go @@ -125,13 +125,16 @@ type LogConfig struct { } type ServerConfig struct { - ProductName string `toml:"product_name" mapstructure:"product_name"` - Address string `toml:"address" mapstructure:"address"` - Maxclient int64 `toml:"max_client" mapstructure:"max_client"` - Keepalive timesize.Duration `toml:"keep_alive" mapstructure:"keep_alive"` - Maxprocs int `toml:"max_procs" mapstructure:"max_procs"` - ConfigFile string `toml:"config_file" mapstructure:"config_file"` - DBPath string `toml:"db_path" mapstructure:"db_path"` + ProductName string `toml:"product_name" mapstructure:"product_name"` + Address string `toml:"address" mapstructure:"address"` + Maxclient int64 `toml:"max_client" mapstructure:"max_client"` + Keepalive timesize.Duration `toml:"keep_alive" mapstructure:"keep_alive"` + Maxprocs int `toml:"max_procs" mapstructure:"max_procs"` + ConfigFile string `toml:"config_file" mapstructure:"config_file"` + DBPath string `toml:"db_path" mapstructure:"db_path"` + DisableEdgeTriggered bool `toml:"disable_edge_triggered" mapstructure:"disable_edge_triggered"` + NetEventLoopNum int `toml:"net_event_loop_num" mapstructure:"net_event_loop_num"` + NetWriteBuffer bytesize.Int64 `toml:"net_write_buffer" mapstructure:"net_write_buffer"` SlowShield bool `toml:"slow_shield" mapstructure:"slow_shield"` SlowTime timesize.Duration `toml:"slow_time" 
mapstructure:"slow_time"` @@ -150,7 +153,9 @@ type BitalosConfig struct { WriteBufferSize bytesize.Int64 `toml:"write_buffer_size" mapstructure:"write_buffer_size"` CacheSize bytesize.Int64 `toml:"cache_size" mapstructure:"cache_size"` CacheHashSize int `toml:"cache_hash_size" mapstructure:"cache_hash_size"` + CacheShardNum int `toml:"cache_shard_num" mapstructure:"cache_shard_num"` CacheEliminateDuration int `toml:"cache_eliminate_duration" mapstructure:"cache_eliminate_duration"` + EnableMissCache bool `toml:"enable_miss_cache" mapstructure:"enable_miss_cache"` CompactStartTime int `toml:"compact_start_time" mapstructure:"compact_start_time"` CompactEndTime int `toml:"compact_end_time" mapstructure:"compact_end_time"` CompactInterval int `toml:"compact_interval" mapstructure:"compact_interval"` @@ -168,6 +173,8 @@ type BitalosConfig struct { EnablePageBlockCompression bool `toml:"enable_page_block_compression" mapstructure:"enable_page_block_compression"` PageBlockCacheSize bytesize.Int64 `toml:"page_block_cache_size" mapstructure:"page_block_cache_size"` EnableClockCache bool `toml:"enable_clock_cache" mapstructure:"enable_clock_cache"` + FlushPrefixDeleteKeyMultiplier int `toml:"flush_prefix_delete_key_multiplier" mapstructure:"flush_prefixdeletekey_multiplier"` + FlushFileLifetime int `toml:"flush_file_lifetime" mapstructure:"flush_file_lifetime"` } type RaftQueueConfig struct { @@ -213,10 +220,8 @@ type RaftClusterConfig struct { type PluginConfig struct { OpenRaft bool `toml:"open_raft" mapstructure:"open_raft"` - OpenPanic bool `toml:"open_panic" mapstructure:"open_panic"` OpenPprof bool `toml:"open_pprof" mapstructure:"open_pprof"` PprofAddr string `toml:"pprof_addr" mapstructure:"pprof_addr"` - OpenGoPs bool `toml:"open_gops" mapstructure:"open_gops"` } type DynamicDeadline struct { diff --git a/stored/internal/config/default.go b/stored/internal/config/default.go index 00ebcbe..6132ff7 100644 --- a/stored/internal/config/default.go +++ 
b/stored/internal/config/default.go @@ -19,7 +19,8 @@ const DefaultConfig = ` address = ":10091" max_client = 5000 keep_alive = "3600s" -max_procs = 8 +max_procs = 4 +net_event_loop_num = 8 db_path = "bitalosdb" slow_time = "40ms" slow_key_window_time = "2000ms" @@ -42,7 +43,7 @@ is_debug = true rotation_time = "Daily" [bitalos] -write_buffer_size = "256mb" +write_buffer_size = "1gb" [worker_queue] enable = true @@ -54,7 +55,7 @@ set_worker = 30 zset_worker = 30 [raft_queue] -workers = 20 +workers = 32 length = 1000000 [raft_cluster] diff --git a/stored/internal/config/validate.go b/stored/internal/config/validate.go index ded2fb7..c6730f5 100644 --- a/stored/internal/config/validate.go +++ b/stored/internal/config/validate.go @@ -25,6 +25,15 @@ import ( "github.com/zuoyebang/bitalostored/stored/internal/log" ) +const ( + MinProcs = 2 + MaxProcs = 40 + MinCores = 1 + MaxCores = 20 + MinNetEventLoopNum = 8 + MaxNetEventLoopNum = 256 +) + func (c *Config) Validate() error { if err := c.checkServerConfig(); err != nil { return err @@ -60,7 +69,7 @@ func (c *Config) checkServerConfig() error { } if c.Server.Keepalive <= 0 { - c.Server.Keepalive = timesize.Duration(3600 * time.Second) + c.Server.Keepalive = timesize.Duration(1800 * time.Second) } if c.Server.SlowTime <= 0 { c.Server.SlowTime = timesize.Duration(30 * time.Millisecond) @@ -68,8 +77,20 @@ func (c *Config) checkServerConfig() error { if c.Server.Maxclient < 5000 { c.Server.Maxclient = 5000 } - if c.Server.Maxprocs < 1 { - c.Server.Maxprocs = 1 + if c.Server.Maxprocs < MinProcs { + c.Server.Maxprocs = MinProcs + } + if c.Server.Maxprocs > MaxProcs { + c.Server.Maxprocs = MaxProcs + } + if c.Server.NetEventLoopNum < c.Server.Maxprocs*2 { + c.Server.NetEventLoopNum = c.Server.Maxprocs * 2 + } + if c.Server.NetEventLoopNum < MinNetEventLoopNum { + c.Server.NetEventLoopNum = MinNetEventLoopNum + } + if c.Server.NetEventLoopNum > MaxNetEventLoopNum { + c.Server.NetEventLoopNum = MaxNetEventLoopNum } return nil 
diff --git a/stored/internal/errn/errn.go b/stored/internal/errn/errn.go index ebafa4e..115a992 100644 --- a/stored/internal/errn/errn.go +++ b/stored/internal/errn/errn.go @@ -16,34 +16,56 @@ package errn import ( "errors" + "fmt" ) var ( - ErrSyntax = errors.New("Err syntax error") - ErrLenArg = errors.New("Err args len is wrong") - ErrTxDisable = errors.New("Err tx command disable") - ErrWatchKeyChanged = errors.New("Err watch key changed") - ErrPrepareLockFail = errors.New("Err prepare lock fail") - ErrPrepareLockTimeout = errors.New("Err prepare lock timeout") - ErrTxNotInMaster = errors.New("ERR tx in slave node") - ErrMultiNested = errors.New("ERR MULTI calls can not be nested") - ErrTxQpsLimit = errors.New("ERR tx qps too high") - ErrPrepareNoMulti = errors.New("ERR PREPARE without MULTI") - ErrPrepareNested = errors.New("ERR PREPARE calls can not be nested") - ErrExecNotPrepared = errors.New("ERR Exec not prepared") - ErrDiscardNoMulti = errors.New("ERR DISCARD without MULTI") - ErrProtocol = errors.New("invalid request") - ErrServerClosed = errors.New("server is closed") - ErrRaftNotReady = errors.New("raft is not ready") - ErrWrongType = errors.New("WRONGTYPE Operation against a key holding the wrong kind of value") - ErrKeySize = errors.New("invalid key size") - ErrValueSize = errors.New("invalid value size") - ErrArgsEmpty = errors.New("invalid args empty") - ErrFieldSize = errors.New("invalid field size") - ErrExpireValue = errors.New("invalid expire value") - ErrZSetScoreRange = errors.New("invalid zset score range") - ErrZsetMemberNil = errors.New("zset member is nil") - ErrClientQuit = errors.New("remote client quit") - ErrSlotIdNotMatch = errors.New("migrate slotId not match") - ErrMigrateRunning = errors.New("migrate running") + ErrSyntax = errors.New("ERR syntax error") + ErrLenArg = errors.New("ERR args len is wrong") + ErrTxDisable = errors.New("ERR tx command disable") + ErrWatchKeyChanged = errors.New("ERR watch key changed") + 
ErrPrepareLockFail = errors.New("ERR prepare lock fail") + ErrPrepareLockTimeout = errors.New("ERR prepare lock timeout") + ErrTxNotInMaster = errors.New("ERR tx in slave node") + ErrMultiNested = errors.New("ERR MULTI calls can not be nested") + ErrTxQpsLimit = errors.New("ERR tx qps too high") + ErrPrepareNoMulti = errors.New("ERR PREPARE without MULTI") + ErrPrepareNested = errors.New("ERR PREPARE calls can not be nested") + ErrExecNotPrepared = errors.New("ERR Exec not prepared") + ErrDiscardNoMulti = errors.New("ERR DISCARD without MULTI") + ErrProtocol = errors.New("invalid request") + ErrRaftNotReady = errors.New("raft is not ready") + ErrWrongType = errors.New("WRONGTYPE Operation against a key holding the wrong kind of value") + ErrKeySize = errors.New("invalid key size") + ErrValueSize = errors.New("invalid value size") + ErrArgsEmpty = errors.New("invalid args empty") + ErrFieldSize = errors.New("invalid field size") + ErrExpireValue = errors.New("invalid expire value") + ErrZSetScoreRange = errors.New("invalid zset score range") + ErrZsetMemberNil = errors.New("zset member is nil") + ErrClientQuit = errors.New("remote client quit") + ErrSlotIdNotMatch = errors.New("migrate slotId not match") + ErrMigrateRunning = errors.New("migrate running") + ErrDataType = errors.New("not support dataType") + ErrDbSyncFailRefuse = errors.New("ERR db syncing/fail, refuse request") + ErrNotImplement = errors.New("command not implement") + ErrRangeOffset = errors.New("ERR offset is out of range") + ErrValue = errors.New("ERR value is not an integer or out of range") + ErrInvalidRangeItem = errors.New("ERR min or max not valid string range item") + ErrBitOffset = errors.New("ERR bit offset is not an integer or out of range") + ErrBitValue = errors.New("ERR bit is not an integer or out of range") + ErrBitUnmarshal = errors.New("ERR bitmap unmarshal fail") + ErrBitMarshal = errors.New("ERR bitmap marshal fail") + ErrSlowShield = errors.New("slow query shield, wait 1s to 
retry") + ErrUnbalancedQuotes = errors.New("ERR unbalanced quotes in request") + ErrInvalidBulkLength = errors.New("ERR invalid bulk length") + ErrInvalidMultiBulkLength = errors.New("ERR invalid multibulk length") ) + +func CmdEmptyErr(cmd string) error { + return fmt.Errorf("ERR empty command for '%s' command", cmd) +} + +func CmdParamsErr(cmd string) error { + return fmt.Errorf("ERR wrong number of arguments for '%s' command", cmd) +} diff --git a/stored/internal/log/rotate.go b/stored/internal/log/rotate.go index 30b3374..42e968b 100644 --- a/stored/internal/log/rotate.go +++ b/stored/internal/log/rotate.go @@ -66,7 +66,6 @@ func getRotateLogs(path, rotation string) *rotatelogs.RotateLogs { param := getRotationParam(rotation) rl, _ := rotatelogs.New( path+param.format, - rotatelogs.WithLinkName(path), rotatelogs.WithMaxAge(time.Hour*24*14), rotatelogs.WithRotationTime(param.duration), ) diff --git a/stored/plugin/raft/cluster.go b/stored/internal/raft/cluster.go similarity index 100% rename from stored/plugin/raft/cluster.go rename to stored/internal/raft/cluster.go diff --git a/stored/plugin/raft/cmd.go b/stored/internal/raft/cmd.go similarity index 100% rename from stored/plugin/raft/cmd.go rename to stored/internal/raft/cmd.go diff --git a/stored/plugin/raft/cmd_exe.go b/stored/internal/raft/cmd_exe.go similarity index 87% rename from stored/plugin/raft/cmd_exe.go rename to stored/internal/raft/cmd_exe.go index 57cda2c..887c721 100644 --- a/stored/plugin/raft/cmd_exe.go +++ b/stored/internal/raft/cmd_exe.go @@ -40,7 +40,7 @@ func addRaftClusterNode(raft *StartRun, c *server.Client) error { ret, err := raft.AddNode(nNodeId, unsafe2.String(c.Args[0]), raft.RetryTimes) if ret == R_SUCCESS { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } else { return err @@ -59,7 +59,7 @@ func addObserver(raft *StartRun, c *server.Client) error { ret, err := raft.AddObserver(nNodeId, unsafe2.String(c.Args[0])) if ret == R_SUCCESS { - 
c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } else { return err @@ -78,7 +78,7 @@ func addWitness(raft *StartRun, c *server.Client) error { ret, err := raft.AddWitness(nNodeId, unsafe2.String(c.Args[0])) if ret == R_SUCCESS { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } else { return err @@ -97,7 +97,7 @@ func removeRaftClusterNode(raft *StartRun, c *server.Client) error { ret, err := raft.DelNode(nNodeId, raft.RetryTimes) if ret == R_SUCCESS { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } else { return err @@ -116,7 +116,7 @@ func transferRaftClusterNode(raft *StartRun, c *server.Client) error { ret, err := raft.LeaderTransfer(targetNodeID) if ret == R_SUCCESS { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } else { return err @@ -126,7 +126,7 @@ func transferRaftClusterNode(raft *StartRun, c *server.Client) error { func getLeaderFrmRaftCluster(raft *StartRun, c *server.Client) error { id, ret, err := raft.GetLeaderId() if ret == R_SUCCESS { - c.RespWriter.WriteStatus(strconv.FormatUint(id, 10)) + c.Writer.WriteStatus(strconv.FormatUint(id, 10)) return nil } else { return err @@ -136,7 +136,7 @@ func getLeaderFrmRaftCluster(raft *StartRun, c *server.Client) error { func getNodeHostInfo(raft *StartRun, c *server.Client) error { out, ret, err := raft.GetNodeHostInfo() if ret == R_SUCCESS { - c.RespWriter.WriteStatus(out) + c.Writer.WriteStatus(out) return nil } else { return err @@ -146,7 +146,7 @@ func getNodeHostInfo(raft *StartRun, c *server.Client) error { func getClusterMemberShip(raft *StartRun, c *server.Client) error { out, ret, err := raft.GetClusterMembership() if ret == R_SUCCESS { - c.RespWriter.WriteStatus(out) + c.Writer.WriteStatus(out) return nil } else { return err @@ -163,7 +163,7 @@ func removeRaftNodeData(raft *StartRun, c *server.Client) error { } ret, err := 
raft.RemoveData(targetNodeID) if ret == R_SUCCESS { - c.RespWriter.WriteStatus("remove data request sent successfully !") + c.Writer.WriteStatus("remove data request sent successfully !") return nil } else { return err @@ -172,7 +172,7 @@ func removeRaftNodeData(raft *StartRun, c *server.Client) error { func deraft(s *server.Server, raft *StartRun, c *server.Client) error { if len(c.Args) != 1 { - return resp.CmdParamsErr(DERAFT) + return errn.CmdParamsErr(DERAFT) } if string(c.Args[0]) != config.GlobalConfig.Server.Token { @@ -187,16 +187,16 @@ func deraft(s *server.Server, raft *StartRun, c *server.Client) error { } s.GetDB().RaftReset() if err := config.GlobalConfig.SetDegradeSingleNode(); err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil } func reRaft(s *server.Server, raft *StartRun, c *server.Client) error { if len(c.Args) != 2 { - return resp.CmdParamsErr(RERAFT) + return errn.CmdParamsErr(RERAFT) } if string(c.Args[0]) != config.GlobalConfig.Server.Token { @@ -217,12 +217,12 @@ func reRaft(s *server.Server, raft *StartRun, c *server.Client) error { if err = raft.Clean(); err == nil { if err = ReraftInit(s, port); err == nil { if err = config.GlobalConfig.WriteFile(config.GlobalConfig.Server.ConfigFile); err == nil { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } } } if err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } return err } @@ -241,9 +241,9 @@ func logCompact(raft *StartRun, c *server.Client) error { _, err := raft.Nh.SyncRequestSnapshot(ctx, raft.ClusterId, opt) if err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return err } @@ -257,7 +257,7 @@ func okNodeHost(raft *StartRun, c *server.Client) error { } else { sRet = "false" } - c.RespWriter.WriteStatus(sRet) + 
c.Writer.WriteStatus(sRet) } else { return err } @@ -270,7 +270,7 @@ func fullSync(raft *StartRun, c *server.Client) error { } err := raft.FullSync() if err == nil { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } return err @@ -279,7 +279,7 @@ func fullSync(raft *StartRun, c *server.Client) error { func statInfo(raft *StartRun, c *server.Client) error { info, ret, err := raft.StatInfo() if R_SUCCESS == ret { - c.RespWriter.WriteStatus(info) + c.Writer.WriteStatus(info) } else { return err } diff --git a/stored/plugin/raft/commfun.go b/stored/internal/raft/commfun.go similarity index 100% rename from stored/plugin/raft/commfun.go rename to stored/internal/raft/commfun.go diff --git a/stored/plugin/raft/common.go b/stored/internal/raft/common.go similarity index 82% rename from stored/plugin/raft/common.go rename to stored/internal/raft/common.go index 9eff1c4..97e53ac 100644 --- a/stored/plugin/raft/common.go +++ b/stored/internal/raft/common.go @@ -29,8 +29,10 @@ import ( "github.com/zuoyebang/bitalostored/raft/statemachine" "github.com/zuoyebang/bitalostored/stored/internal/config" "github.com/zuoyebang/bitalostored/stored/internal/log" + "github.com/zuoyebang/bitalostored/stored/internal/marshal/update" "github.com/zuoyebang/bitalostored/stored/internal/utils" "github.com/zuoyebang/bitalostored/stored/server" + "google.golang.org/protobuf/proto" ) var raftInstance = &StartRun{} @@ -77,7 +79,7 @@ func (p *StartRun) LoadConfig(s *server.Server) { logger.GetLogger("grpc").SetLevel(logger.ERROR) logger.GetLogger("logdb").SetLevel(logger.ERROR) logger.GetLogger("raftpb").SetLevel(logger.ERROR) - logger.GetLogger("raft").SetLevel(logger.ERROR) + logger.GetLogger("dragonboat").SetLevel(logger.ERROR) logger.GetLogger("dbconfig").SetLevel(logger.ERROR) logger.GetLogger("settings").SetLevel(logger.ERROR) logger.GetLogger("order").SetLevel(logger.ERROR) @@ -110,9 +112,13 @@ func (p *StartRun) LoadConfig(s *server.Server) { 
p.Nhc.Expert.Engine = dconfig.GetDefaultEngineConfig() p.Nhc.Expert.LogDB.Shards = 1 - p.Nhc.Expert.LogDB.KVWriteBufferSize = 256 << 20 - p.Nhc.Expert.LogDB.KVTargetFileSizeBase = 32 << 20 + p.Nhc.Expert.LogDB.KVWriteBufferSize = 128 << 20 + p.Nhc.Expert.LogDB.KVTargetFileSizeBase = 128 << 20 p.Nhc.Expert.Engine.ExecShards = 1 + p.Nhc.Expert.Engine.CommitShards = 1 + p.Nhc.Expert.Engine.ApplyShards = 1 + p.Nhc.Expert.Engine.SnapshotShards = 1 + p.Nhc.Expert.Engine.CloseShards = 1 var flushCallback func(uint64) if !s.IsWitness { @@ -250,16 +256,89 @@ func (p *StartRun) getAddr() string { return p.Addr } +func (p *StartRun) Start(s *server.Server) { + p.LoadConfig(s) + + p.registerRaftCommand(s) + p.registerIsMasterCF(s) + p.registerSyncToSlave(s) + + if !config.GlobalConfig.Plugin.OpenRaft || config.GlobalConfig.CheckIsDegradeSingleNode() { + s.Info.Cluster.Role = "single" + s.Info.Cluster.Status = true + } else { + node, err := braft.NewNodeHost(p.Nhc) + if err != nil { + log.Error("new host: ", err) + panic(err) + } + p.Nh = node + + if err := p.Nh.StartOnDiskCluster(p.AddrList, p.Join, func(clusterID uint64, nodeID uint64) statemachine.IOnDiskStateMachine { + s.Info.Cluster.ClusterId = clusterID + s.Info.Cluster.CurrentNodeId = nodeID + return NewDiskKV(clusterID, nodeID, s, p) + }, p.Rc); err != nil { + log.Error("start cluster: ", err) + panic(err) + } + p.RaftReady = true + } + + p.doRaftClusterStat(s) +} + +func (p *StartRun) Stop() { + if p != nil && p.Nh != nil { + p.StopNodeHost() + } +} + +func (p *StartRun) Sync(keyHash uint32, data [][]byte) ([]byte, error) { + migrate := false + + b, err := proto.Marshal(&update.ByteSlice{ + IsMigrate: &migrate, + NodeId: &p.NodeID, + Data: data, + KeyHash: &keyHash, + }) + if err != nil { + return nil, err + } + + if p.AsyncPropose { + _, err = p.Propose(b, p.RetryTimes) + return nil, err + } else { + res, err := p.SyncPropose(b) + if err != nil { + return nil, err + } + + if bytes.Equal(res.Data, UpdateSelfNodeDoing) 
{ + return nil, nil + } else { + return res.Data, nil + } + } +} + func GetClusterNodeOK(nCluster uint64) bool { return order.G_NodeSates.OK(nCluster) } -func Init() { +func RaftInit(s *server.Server) { logger.SetLoggerFactory(func(name string) logger.ILogger { return DefaultLogger }) - addPluginStartInitRaft(raftInstance) - addPluginPreparePropose(raftInstance) + + s.DoRaftSync = raftInstance.Sync + s.DoRaftStop = raftInstance.Stop +} + +func RaftStart(s *server.Server) { + raftInstance.Start(s) } func ReraftInit(s *server.Server, port string) error { @@ -271,7 +350,7 @@ func ReraftInit(s *server.Server, port string) error { node, err := braft.NewNodeHost(raftInstance.Nhc) if err != nil { - log.Error("new host: ", err) + log.Errorf("new node host fail err:%v", err) return err } raftInstance.Nh = node @@ -283,7 +362,7 @@ func ReraftInit(s *server.Server, port string) error { config.GlobalConfig.Server.DegradeSingleNode = false return NewDiskKV(clusterID, nodeID, s, raftInstance) }, raftInstance.Rc); err != nil { - log.Error("start cluster: ", err) + log.Errorf("start cluster fail err:%v", err) return err } raftInstance.RaftReady = true diff --git a/stored/plugin/raft/define.go b/stored/internal/raft/define.go similarity index 99% rename from stored/plugin/raft/define.go rename to stored/internal/raft/define.go index 4a832a9..38e398c 100644 --- a/stored/plugin/raft/define.go +++ b/stored/internal/raft/define.go @@ -15,9 +15,8 @@ package raft import ( - braft "github.com/zuoyebang/bitalostored/raft" - jsoniter "github.com/json-iterator/go" + braft "github.com/zuoyebang/bitalostored/raft" ) type RetType int diff --git a/stored/plugin/raft/logger.go b/stored/internal/raft/logger.go similarity index 100% rename from stored/plugin/raft/logger.go rename to stored/internal/raft/logger.go diff --git a/stored/plugin/raft/ondisk.go b/stored/internal/raft/ondisk.go similarity index 97% rename from stored/plugin/raft/ondisk.go rename to stored/internal/raft/ondisk.go index 
f171eb5..e42418e 100644 --- a/stored/plugin/raft/ondisk.go +++ b/stored/internal/raft/ondisk.go @@ -21,7 +21,6 @@ import ( sm "github.com/zuoyebang/bitalostored/raft/statemachine" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/dbmeta" "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/log" "github.com/zuoyebang/bitalostored/stored/internal/marshal/update" "github.com/zuoyebang/bitalostored/stored/server" @@ -61,10 +60,6 @@ func (pD *DiskKV) Open(stopc <-chan struct{}) (uint64, uint64, error) { } func (pD *DiskKV) Update(es []sm.Entry) ([]sm.Entry, error) { - if pD.s.GetIsClosed() { - return nil, errn.ErrServerClosed - } - var originUpdateIndex uint64 var originFlushIndex uint64 var res []sm.Entry diff --git a/stored/plugin/raft/propose.go b/stored/internal/raft/propose.go similarity index 100% rename from stored/plugin/raft/propose.go rename to stored/internal/raft/propose.go diff --git a/stored/plugin/raft/queue.go b/stored/internal/raft/queue.go similarity index 84% rename from stored/plugin/raft/queue.go rename to stored/internal/raft/queue.go index d65505e..86c4c74 100644 --- a/stored/plugin/raft/queue.go +++ b/stored/internal/raft/queue.go @@ -22,7 +22,6 @@ import ( "github.com/zuoyebang/bitalostored/butils/unsafe2" "github.com/zuoyebang/bitalostored/stored/internal/log" - "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/server" ) @@ -32,7 +31,7 @@ const ( ) type Queue struct { - worknum uint32 + workNum uint32 length uint32 pD *DiskKV qchans []chan *QData @@ -45,27 +44,27 @@ type QData struct { keyHash uint32 } -func NewQueue(worknum, length int, pD *DiskKV) *Queue { - if worknum < DefaultWorkNum { - worknum = DefaultWorkNum +func NewQueue(workNum, length int, pD *DiskKV) *Queue { + if workNum < DefaultWorkNum { + workNum = DefaultWorkNum } if length < DefaultQueueLength { length = 
DefaultQueueLength } queue := &Queue{ - worknum: uint32(worknum), + workNum: uint32(workNum), length: uint32(length), - qchans: make([]chan *QData, worknum), + qchans: make([]chan *QData, workNum), pD: pD, } - for i := 0; i < worknum; i++ { + for i := 0; i < workNum; i++ { queue.qchans[i] = make(chan *QData, length) queue.consume(queue.qchans[i]) } - log.Infof("raft consume queue start worknum:%d length:%d", worknum, length) + log.Infof("raft consume queue start workNum:%d length:%d", workNum, length) return queue } @@ -93,7 +92,7 @@ func (q *Queue) push(data [][]byte, isMigrate bool, keyHash uint32) error { return errors.New("raft consume queue push data err") } - index := (keyHash + uint32(data[1][len(data[1])/2])) % q.worknum + index := (keyHash + uint32(data[1][len(data[1])/2])) % q.workNum q.qchans[index] <- &QData{ data: data, isMigrate: isMigrate, @@ -131,7 +130,7 @@ func (q *Queue) consume(qchan chan *QData) { server.PutRaftClientToPool(c) continue } - c.Cmd = c.Cmd + unsafe2.String(resp.LowerSlice(c.Args[0])) + c.Cmd = c.Cmd + unsafe2.String(server.LowerSlice(c.Args[0])) } if err := c.ApplyDB(0); err != nil { log.Errorf("qchans consume applydb fail command:%s err:%v", c.Cmd, err) diff --git a/stored/internal/resp/cmd.go b/stored/internal/resp/cmd.go index 23eb90b..77253f6 100644 --- a/stored/internal/resp/cmd.go +++ b/stored/internal/resp/cmd.go @@ -15,14 +15,14 @@ package resp const ( - DBSYNC string = "dbsync" - DBSYNCCONNECT string = "dbsyncconnect" - - PING string = "ping" - PONG string = "pong" - ECHO string = "echo" - TYPE string = "type" - CONFIG string = "config" + PING string = "ping" + PONG string = "pong" + ECHO string = "echo" + TYPE string = "type" + CONFIG string = "config" + INFO string = "info" + TIME string = "time" + SHUTDOWN string = "shutdown" DEL string = "del" TTL string = "ttl" @@ -59,10 +59,8 @@ const ( GETRANGE string = "getrange" SETRANGE string = "setrange" STRLEN string = "strlen" - GETSLICE string = "getslice" BITCOUNT string = 
"bitcount" - BITOP string = "bitop" BITPOS string = "bitpos" GETBIT string = "getbit" SETBIT string = "setbit" @@ -157,10 +155,6 @@ const ( XSSCAN string = "xsscan" XZSCAN string = "xzscan" - SELECT string = "select" - INFO string = "info" - TIME string = "time" - GEOADD string = "geoadd" GEODIST string = "geodist" GEOPOS string = "geopos" @@ -349,3 +343,8 @@ func IsWriteCmd(cmd string) bool { } return false } + +type Command struct { + Raw []byte + Args [][]byte +} diff --git a/stored/internal/resp/error.go b/stored/internal/resp/error.go deleted file mode 100644 index bc6c915..0000000 --- a/stored/internal/resp/error.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resp - -import ( - "errors" - "fmt" -) - -var ( - ErrNotImplement = errors.New("command not implement") - ErrSyntax = errors.New("ERR syntax error") - ErrRangeOffset = errors.New("ERR offset is out of range") - ErrValue = errors.New("ERR value is not an integer or out of range") - ErrInvalidRangeItem = errors.New("ERR min or max not valid string range item") - ErrBitOffset = errors.New("ERR bit offset is not an integer or out of range") - ErrBitValue = errors.New("ERR bit is not an integer or out of range") - ErrBitUnmarshal = errors.New("ERR bitmap unmarshal fail") - ErrBitMarshal = errors.New("ERR bitmap marshal fail") - ErrSlowShield = errors.New("slow query shield, wait 1s to retry") -) - -func CmdEmptyErr(cmd string) error { - return fmt.Errorf("ERR empty command for '%s' command", cmd) -} - -func CmdParamsErr(cmd string) error { - return fmt.Errorf("ERR wrong number of arguments for '%s' command", cmd) -} diff --git a/stored/internal/resp/reader.go b/stored/internal/resp/reader.go new file mode 100644 index 0000000..1227cdb --- /dev/null +++ b/stored/internal/resp/reader.go @@ -0,0 +1,222 @@ +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resp + +import ( + "bytes" + "fmt" + + "github.com/zuoyebang/bitalostored/stored/internal/errn" +) + +type Reader struct { + bytes.Buffer + Offset int +} + +func NewReader() *Reader { + return &Reader{Offset: 0} +} + +func parseInt(b []byte) (int, bool) { + if len(b) == 1 && b[0] >= '0' && b[0] <= '9' { + return int(b[0] - '0'), true + } + var n int + var sign bool + var i int + if len(b) > 0 && b[0] == '-' { + sign = true + i++ + } + for ; i < len(b); i++ { + if b[i] < '0' || b[i] > '9' { + return 0, false + } + n = n*10 + int(b[i]-'0') + } + if sign { + n *= -1 + } + return n, true +} + +func ParseCommands(buf []byte, marks []int) ([]Command, []byte, error) { + var cmds []Command + var writeBack []byte + b := buf + if len(b) > 0 { + next: + switch b[0] { + default: + for i := 0; i < len(b); i++ { + if b[i] == '\n' { + var line []byte + if i > 0 && b[i-1] == '\r' { + line = b[:i-1] + } else { + line = b[:i] + } + var cmd Command + var quote bool + var quoteCh byte + var escape bool + outer: + for { + nline := make([]byte, 0, len(line)) + for i := 0; i < len(line); i++ { + c := line[i] + if !quote { + if c == ' ' { + if len(nline) > 0 { + cmd.Args = append(cmd.Args, nline) + } + line = line[i+1:] + continue outer + } + if c == '"' || c == '\'' { + if i != 0 { + return nil, writeBack, errn.ErrUnbalancedQuotes + } + quoteCh = c + quote = true + line = line[i+1:] + continue outer + } + } else { + if escape { + escape = false + switch c { + case 'n': + c = '\n' + case 'r': + c = '\r' + case 't': + c = '\t' + } + } else if c == quoteCh { + quote = false + quoteCh = 0 + cmd.Args = append(cmd.Args, nline) + line = line[i+1:] + if len(line) > 0 && line[0] != ' ' { + return nil, writeBack, errn.ErrUnbalancedQuotes + } + continue outer + } else if c == '\\' { + escape = true + continue + } + } + nline = append(nline, c) + } + if quote { + return nil, writeBack, errn.ErrUnbalancedQuotes + } + if len(line) > 0 { + cmd.Args = append(cmd.Args, line) + } + break + } + 
if len(cmd.Args) > 0 { + var wr Writer2 + wr.WriteArray(len(cmd.Args)) + for i := range cmd.Args { + wr.WriteBulk(cmd.Args[i]) + cmd.Args[i] = append([]byte(nil), cmd.Args[i]...) + } + cmd.Raw = wr.b + cmds = append(cmds, cmd) + } + b = b[i+1:] + if len(b) > 0 { + goto next + } else { + goto done + } + } + } + case '*': + outer2: + for i := 1; i < len(b); i++ { + if b[i] == '\n' { + if b[i-1] != '\r' { + return nil, writeBack, errn.ErrInvalidMultiBulkLength + } + count, ok := parseInt(b[1 : i-1]) + if !ok || count <= 0 { + return nil, writeBack, errn.ErrInvalidMultiBulkLength + } + marks = marks[:0] + for j := 0; j < count; j++ { + i++ + if i < len(b) { + if b[i] != '$' { + return nil, writeBack, fmt.Errorf("expected '$', got '%v'", string(b[i])) + } + si := i + for ; i < len(b); i++ { + if b[i] == '\n' { + if b[i-1] != '\r' { + return nil, writeBack, errn.ErrInvalidBulkLength + } + size, ok := parseInt(b[si+1 : i-1]) + if !ok || size < 0 { + return nil, writeBack, errn.ErrInvalidBulkLength + } + if i+size+2 >= len(b) { + break outer2 + } + if b[i+size+2] != '\n' || b[i+size+1] != '\r' { + return nil, writeBack, errn.ErrInvalidBulkLength + } + i++ + marks = append(marks, i, i+size) + i += size + 1 + break + } + } + } + } + if len(marks) == count*2 { + var cmd Command + cmd.Raw = b[:i+1] + cmd.Args = make([][]byte, len(marks)/2) + for h := 0; h < len(marks); h += 2 { + cmd.Args[h/2] = cmd.Raw[marks[h]:marks[h+1]] + } + cmds = append(cmds, cmd) + b = b[i+1:] + if len(b) > 0 { + goto next + } else { + goto done + } + } + } + } + } + done: + //rd.start = rd.end - len(b) + } + if len(b) > 0 { + writeBack = b + } + if len(cmds) > 0 { + return cmds, writeBack, nil + } else { + return nil, writeBack, nil + } +} diff --git a/stored/internal/resp/resp_reader.go b/stored/internal/resp/resp_reader.go deleted file mode 100644 index 59fc22b..0000000 --- a/stored/internal/resp/resp_reader.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and 
Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resp - -import ( - "bufio" - "errors" - "fmt" - "io" - "net" -) - -type Error string - -func (err Error) Error() string { return string(err) } - -type RespReader struct { - br *bufio.Reader -} - -func NewRespReader(conn net.Conn, size int) *RespReader { - br := bufio.NewReaderSize(conn, size) - r := &RespReader{br} - return r -} - -func (resp *RespReader) Parse() (interface{}, error) { - line, err := readLine(resp.br) - if err != nil { - return nil, err - } - if len(line) == 0 { - return nil, errors.New("short resp line") - } - switch line[0] { - case '+': - switch { - case len(line) == 3 && line[1] == 'O' && line[2] == 'K': - return ReplyOK, nil - case len(line) == 5 && line[1] == 'P' && line[2] == 'O' && line[3] == 'N' && line[4] == 'G': - return ReplyPONG, nil - default: - return string(line[1:]), nil - } - case '-': - return Error(string(line[1:])), nil - case ':': - n, err := parseInt(line[1:]) - return n, err - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - _, err = io.ReadFull(resp.br, p) - if err != nil { - return nil, err - } - if line, err := readLine(resp.br); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, errors.New("bad bulk rstring format") - } - return p, nil - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([]interface{}, n) 
- for i := range r { - r[i], err = resp.Parse() - if err != nil { - return nil, err - } - } - return r, nil - } - return nil, errors.New("unexpected response line") -} - -func (resp *RespReader) ParseRequest() ([][]byte, error) { - line, err := readLine(resp.br) - if err != nil { - return nil, err - } - if len(line) == 0 { - return resp.ParseRequest() - } - switch line[0] { - case '*': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - r := make([][]byte, n) - for i := range r { - r[i], err = parseBulk(resp.br) - if err != nil { - return nil, err - } - } - return r, nil - default: - return nil, fmt.Errorf("not invalid array of bulk rstring type, but %c", line[0]) - } -} - -func (resp *RespReader) ParseBulkTo(w io.Writer) error { - line, err := readLine(resp.br) - if err != nil { - return err - } - if len(line) == 0 { - return errors.New("ledis: short response line") - } - - switch line[0] { - case '-': - return Error(line[1:]) - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return err - } - - var nn int64 - if nn, err = io.CopyN(w, resp.br, int64(n)); err != nil { - return err - } else if nn != int64(n) { - return io.ErrShortWrite - } - - if line, err := readLine(resp.br); err != nil { - return err - } else if len(line) != 0 { - return errors.New("bad bulk rstring format") - } - return nil - default: - return fmt.Errorf("not invalid bulk rstring type, but %c", line[0]) - } -} - -func readLine(br *bufio.Reader) ([]byte, error) { - p, err := br.ReadSlice('\n') - if err == bufio.ErrBufferFull { - return nil, errors.New("long resp line") - } - if err != nil { - return nil, err - } - i := len(p) - 2 - if i < 0 || p[i] != '\r' { - return nil, errors.New("bad resp line terminator") - } - return p[:i], nil -} - -func parseLen(p []byte) (int, error) { - if len(p) == 0 { - return -1, errors.New("malformed length") - } - - if p[0] == '-' && len(p) == 2 && p[1] == '1' { - return -1, nil - } - - var n int - for _, b := 
range p { - n *= 10 - if b < '0' || b > '9' { - return -1, errors.New("illegal bytes in length") - } - n += int(b - '0') - } - - return n, nil -} - -func parseInt(p []byte) (int64, error) { - if len(p) == 0 { - return 0, errors.New("malformed integer") - } - - var negate bool - if p[0] == '-' { - negate = true - p = p[1:] - if len(p) == 0 { - return 0, errors.New("malformed integer") - } - } - - var n int64 - for _, b := range p { - n *= 10 - if b < '0' || b > '9' { - return 0, errors.New("illegal bytes in length") - } - n += int64(b - '0') - } - - if negate { - n = -n - } - return n, nil -} - -func parseBulk(br *bufio.Reader) ([]byte, error) { - line, err := readLine(br) - if err != nil { - return nil, err - } else if len(line) == 0 { - return nil, errors.New("short resp line") - } - - switch line[0] { - case '$': - n, err := parseLen(line[1:]) - if n < 0 || err != nil { - return nil, err - } - p := make([]byte, n) - if _, err = io.ReadFull(br, p); err != nil { - return nil, err - } - if line, err := readLine(br); err != nil { - return nil, err - } else if len(line) != 0 { - return nil, errors.New("bad bulk rstring format") - } else { - return p, nil - } - default: - return nil, fmt.Errorf("not invalid bulk rstring type, but %c", line[0]) - } -} diff --git a/stored/internal/resp/session.go b/stored/internal/resp/session.go deleted file mode 100644 index d285bfa..0000000 --- a/stored/internal/resp/session.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package resp - -import ( - "net" - "sync/atomic" - "time" - - "github.com/zuoyebang/bitalostored/stored/internal/dostats" - "github.com/zuoyebang/bitalostored/stored/plugin/anticc" - - "github.com/zuoyebang/bitalostored/stored/internal/log" -) - -type Session struct { - Cmd string - Args [][]byte - Keys []byte - Data [][]byte - ID int - RespWriter *RespWriter - RespReader *RespReader - - conn net.Conn - remoteAddr string - keepalive time.Duration - isAuthed bool - isRaftSession bool -} - -var ( - SessionId int32 = 0 - ConnBufferSize int = 8 << 10 -) - -func NewSession(conn net.Conn, keepalive time.Duration) *Session { - var s *Session - - if conn == nil { - s = &Session{ - RespWriter: NewRespWriter(ConnBufferSize), - isRaftSession: true, - } - s.ID = int(atomic.AddInt32(&SessionId, 1)) - } else { - s = &Session{ - conn: conn, - Cmd: "", - Args: nil, - keepalive: keepalive, - isAuthed: false, - RespReader: NewRespReader(conn, ConnBufferSize), - RespWriter: NewRespWriter(ConnBufferSize), - isRaftSession: false, - } - s.ID = int(atomic.AddInt32(&SessionId, 1)) - } - s.doStats() - return s -} - -func (s *Session) SetReadDeadline() { - if anticc.Enable && !s.isRaftSession { - s.conn.SetReadDeadline(anticc.GetConfigDeadline()) - } else { - if s.keepalive > 0 { - s.conn.SetReadDeadline(time.Now().Add(s.keepalive)) - } - } -} - -func (s *Session) Close() { - if s.conn == nil { - return - } - - if err := s.conn.Close(); err != nil { - log.Errorf("conn close err:%v", err) - return - } - - if !s.isRaftSession { - dostats.DecrConns() - } -} - -func (s *Session) doStats() { - if s.isRaftSession { - return - } - dostats.IncrConns() -} diff --git a/stored/internal/resp/resp_writer.go b/stored/internal/resp/writer.go similarity index 64% rename from stored/internal/resp/resp_writer.go rename to stored/internal/resp/writer.go index 0d5a50e..dad4603 100644 --- 
a/stored/internal/resp/resp_writer.go +++ b/stored/internal/resp/writer.go @@ -19,14 +19,15 @@ import ( "io" "strconv" - "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" - "github.com/zuoyebang/bitalostored/stored/internal/log" - "github.com/zuoyebang/bitalostored/butils/deepcopy" "github.com/zuoyebang/bitalostored/butils/extend" "github.com/zuoyebang/bitalostored/butils/unsafe2" + "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/log" ) +const writerBufferSize = 8 << 10 + var ( respArray byte = '*' respInt byte = ':' @@ -49,10 +50,10 @@ var ( ReplyQUEUED = "QUEUED" ) -type RespWriter struct { - BuffNew *bytes.Buffer - Cached bool - Resps []RespOuput +type Writer struct { + Buf *bytes.Buffer + Cached bool + Resps []RespOuput } type RespOuput struct { @@ -61,25 +62,25 @@ type RespOuput struct { Output interface{} } -func NewRespWriter(size int) *RespWriter { - w := &RespWriter{ - BuffNew: bytes.NewBuffer(make([]byte, 0, size)), +func NewWriter() *Writer { + w := &Writer{ + Buf: bytes.NewBuffer(make([]byte, 0, writerBufferSize)), } return w } -func (w *RespWriter) SetCached() { +func (w *Writer) SetCached() { w.Cached = true } -func (w *RespWriter) UnsetCached() { +func (w *Writer) UnsetCached() { w.Cached = false } -func (w *RespWriter) FlushCached() { - w.BuffNew.WriteByte(respArray) - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(w.Resps)))) - w.BuffNew.Write(Delims) +func (w *Writer) FlushCached() { + w.Buf.WriteByte(respArray) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(w.Resps)))) + w.Buf.Write(Delims) for _, resp := range w.Resps { switch resp.Type { @@ -139,45 +140,45 @@ func (w *RespWriter) FlushCached() { w.Resps = w.Resps[:0] } -func (w *RespWriter) WriteError(err error) { +func (w *Writer) WriteError(err error) { if w.Cached { w.Resps = append(w.Resps, RespOuput{Type: respErr, Output: err}) return } - w.BuffNew.WriteByte(respErr) + w.Buf.WriteByte(respErr) if err 
!= nil { - w.BuffNew.Write(unsafe2.ByteSlice(err.Error())) + w.Buf.Write(unsafe2.ByteSlice(err.Error())) } - w.BuffNew.Write(Delims) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteStatus(status string) { +func (w *Writer) WriteStatus(status string) { if w.Cached { w.Resps = append(w.Resps, RespOuput{Type: respSinge, Output: status}) return } - w.BuffNew.WriteByte(respSinge) - w.BuffNew.Write(unsafe2.ByteSlice(status)) - w.BuffNew.Write(Delims) + w.Buf.WriteByte(respSinge) + w.Buf.Write(unsafe2.ByteSlice(status)) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteInteger(n int64) { +func (w *Writer) WriteInteger(n int64) { if w.Cached { w.Resps = append(w.Resps, RespOuput{Type: respInt, Output: n}) return } - w.BuffNew.WriteByte(respInt) - w.BuffNew.Write(extend.FormatInt64ToSlice(n)) - w.BuffNew.Write(Delims) + w.Buf.WriteByte(respInt) + w.Buf.Write(extend.FormatInt64ToSlice(n)) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteLen(n int) { - w.BuffNew.WriteByte(respArray) - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(n))) - w.BuffNew.Write(Delims) +func (w *Writer) WriteLen(n int) { + w.Buf.WriteByte(respArray) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(n))) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteBulk(b []byte) { +func (w *Writer) WriteBulk(b []byte) { if w.Cached { if b == nil { w.Resps = append(w.Resps, RespOuput{Type: respMutil, Output: nil}) @@ -188,19 +189,19 @@ func (w *RespWriter) WriteBulk(b []byte) { } return } - w.BuffNew.WriteByte(respMutil) + w.Buf.WriteByte(respMutil) if b == nil { - w.BuffNew.Write(NullBulk) + w.Buf.Write(NullBulk) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(b)))) - w.BuffNew.Write(Delims) - w.BuffNew.Write(b) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(b)))) + w.Buf.Write(Delims) + w.Buf.Write(b) } - w.BuffNew.Write(Delims) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteBulkMulti(bs ...[]byte) { - w.BuffNew.WriteByte(respMutil) +func (w *Writer) WriteBulkMulti(bs ...[]byte) { + 
w.Buf.WriteByte(respMutil) blen := 0 for i := range bs { @@ -208,21 +209,21 @@ func (w *RespWriter) WriteBulkMulti(bs ...[]byte) { } if blen == 0 { - w.BuffNew.Write(NullBulk) + w.Buf.Write(NullBulk) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(blen))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(blen))) + w.Buf.Write(Delims) for i := range bs { if len(bs[i]) > 0 { - w.BuffNew.Write(bs[i]) + w.Buf.Write(bs[i]) } } } - w.BuffNew.Write(Delims) + w.Buf.Write(Delims) } -func (w *RespWriter) WriteArray(lst []interface{}) { +func (w *Writer) WriteArray(lst []interface{}) { if w.Cached { if lst == nil { w.Resps = append(w.Resps, RespOuput{Type: respInternalArray, Output: nil}) @@ -231,14 +232,14 @@ func (w *RespWriter) WriteArray(lst []interface{}) { } return } - w.BuffNew.WriteByte(respArray) + w.Buf.WriteByte(respArray) if lst == nil { - w.BuffNew.Write(NullBulk) - w.BuffNew.Write(Delims) + w.Buf.Write(NullBulk) + w.Buf.Write(Delims) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) + w.Buf.Write(Delims) for i := 0; i < len(lst); i++ { switch v := lst[i].(type) { @@ -263,7 +264,7 @@ func (w *RespWriter) WriteArray(lst []interface{}) { } } -func (w *RespWriter) WriteSliceArray(lst [][]byte) { +func (w *Writer) WriteSliceArray(lst [][]byte) { if w.Cached { if lst == nil { w.Resps = append(w.Resps, RespOuput{Type: respInternalSliceArray, Output: nil}) @@ -272,14 +273,14 @@ func (w *RespWriter) WriteSliceArray(lst [][]byte) { } return } - w.BuffNew.WriteByte(respArray) + w.Buf.WriteByte(respArray) if lst == nil { - w.BuffNew.Write(NullArray) - w.BuffNew.Write(Delims) + w.Buf.Write(NullArray) + w.Buf.Write(Delims) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) + w.Buf.Write(Delims) for i := 0; i < len(lst); i++ { 
w.WriteBulk(lst[i]) @@ -287,7 +288,7 @@ func (w *RespWriter) WriteSliceArray(lst [][]byte) { } } -func (w *RespWriter) WriteFVPairArray(lst []btools.FVPair) { +func (w *Writer) WriteFVPairArray(lst []btools.FVPair) { if w.Cached { if lst == nil { w.Resps = append(w.Resps, RespOuput{Type: respInternalFVPair, Output: nil}) @@ -296,14 +297,14 @@ func (w *RespWriter) WriteFVPairArray(lst []btools.FVPair) { } return } - w.BuffNew.WriteByte(respArray) + w.Buf.WriteByte(respArray) if lst == nil { - w.BuffNew.Write(NullArray) - w.BuffNew.Write(Delims) + w.Buf.Write(NullArray) + w.Buf.Write(Delims) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) + w.Buf.Write(Delims) for i := 0; i < len(lst); i++ { w.WriteBulk(lst[i].Field) @@ -312,7 +313,7 @@ func (w *RespWriter) WriteFVPairArray(lst []btools.FVPair) { } } -func (w *RespWriter) WriteFieldPairArray(lst []btools.FieldPair) { +func (w *Writer) WriteFieldPairArray(lst []btools.FieldPair) { if w.Cached { if lst == nil { w.Resps = append(w.Resps, RespOuput{Type: respInternalFieldPair, Output: nil}) @@ -321,14 +322,14 @@ func (w *RespWriter) WriteFieldPairArray(lst []btools.FieldPair) { } return } - w.BuffNew.WriteByte(respArray) + w.Buf.WriteByte(respArray) if lst == nil { - w.BuffNew.Write(NullArray) - w.BuffNew.Write(Delims) + w.Buf.Write(NullArray) + w.Buf.Write(Delims) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) + w.Buf.Write(Delims) for i := 0; i < len(lst); i++ { w.WriteBulkMulti(lst[i].Prefix, lst[i].Suffix) @@ -336,7 +337,7 @@ func (w *RespWriter) WriteFieldPairArray(lst []btools.FieldPair) { } } -func (w *RespWriter) WriteScorePairArray(lst []btools.ScorePair, withScores bool) { +func (w *Writer) WriteScorePairArray(lst []btools.ScorePair, withScores bool) { if w.Cached { if lst == nil { 
w.Resps = append(w.Resps, RespOuput{Type: respInternalScorePair, WithScores: withScores, Output: nil}) @@ -345,19 +346,19 @@ func (w *RespWriter) WriteScorePairArray(lst []btools.ScorePair, withScores bool } return } - w.BuffNew.WriteByte(respArray) + w.Buf.WriteByte(respArray) if lst == nil { - w.BuffNew.Write(NullArray) - w.BuffNew.Write(Delims) + w.Buf.Write(NullArray) + w.Buf.Write(Delims) } else { if withScores { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst) * 2))) + w.Buf.Write(Delims) } else { - w.BuffNew.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) - w.BuffNew.Write(Delims) + w.Buf.Write(unsafe2.ByteSlice(strconv.Itoa(len(lst)))) + w.Buf.Write(Delims) } for i := 0; i < len(lst); i++ { @@ -370,25 +371,21 @@ func (w *RespWriter) WriteScorePairArray(lst []btools.ScorePair, withScores bool } } -func (w *RespWriter) WriteBytes(args ...[]byte) { +func (w *Writer) WriteBytes(args ...[]byte) { for _, v := range args { - w.BuffNew.Write(v) + w.Buf.Write(v) } } -func (w *RespWriter) FlushBytesEmpty() { - w.BuffNew.Reset() +func (w *Writer) Bytes() []byte { + return w.Buf.Bytes() } -func (w *RespWriter) FlushToBytes() []byte { - defer w.BuffNew.Reset() - respData := w.BuffNew.Bytes() - copyResp := make([]byte, len(respData)) - copy(copyResp, respData) - return copyResp +func (w *Writer) Reset() { + w.Buf.Reset() } -func (w *RespWriter) FlushToWriterIO(writer io.Writer) (int, error) { - defer w.BuffNew.Reset() - return writer.Write(w.BuffNew.Bytes()) +func (w *Writer) FlushToWriterIO(writer io.Writer) (int, error) { + defer w.Buf.Reset() + return writer.Write(w.Buf.Bytes()) } diff --git a/stored/internal/resp/writer2.go b/stored/internal/resp/writer2.go new file mode 100644 index 0000000..e9424ce --- /dev/null +++ b/stored/internal/resp/writer2.go @@ -0,0 +1,365 @@ +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache 
License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resp + +import ( + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// Writer2 allows for writing RESP messages. +type Writer2 struct { + b []byte +} + +// WriteNull writes a null to the client +func (w *Writer2) WriteNull() { + w.b = AppendNull(w.b) +} + +// WriteArray writes an array header. You must then write additional +// sub-responses to the client to complete the response. +// For example to write two strings: +// +// c.WriteArray(2) +// c.WriteBulk("item 1") +// c.WriteBulk("item 2") +func (w *Writer2) WriteArray(count int) { + w.b = AppendArray(w.b, count) +} + +// WriteBulk writes bulk bytes to the client. +func (w *Writer2) WriteBulk(bulk []byte) { + w.b = AppendBulk(w.b, bulk) +} + +// WriteBulkString writes a bulk string to the client. +func (w *Writer2) WriteBulkString(bulk string) { + w.b = AppendBulkString(w.b, bulk) +} + +// Buffer returns the unflushed buffer. This is a copy so changes +// to the resulting []byte will not affect the writer. +func (w *Writer2) Buffer() []byte { + return append([]byte(nil), w.b...) +} + +// SetBuffer replaces the unflushed buffer with new bytes. +func (w *Writer2) SetBuffer(raw []byte) { + w.b = w.b[:0] + w.b = append(w.b, raw...) +} + +// Flush writes all unflushed Write* calls to the underlying writer. +func (w *Writer2) Flush() error { + w.b = w.b[:0] + return nil +} + +// WriteError writes an error to the client. 
+func (w *Writer2) WriteError(msg string) { + w.b = AppendError(w.b, msg) +} + +// WriteString writes a string to the client. +func (w *Writer2) WriteString(msg string) { + w.b = AppendString(w.b, msg) +} + +// WriteInt writes an integer to the client. +func (w *Writer2) WriteInt(num int) { + w.WriteInt64(int64(num)) +} + +// WriteInt64 writes a 64-bit signed integer to the client. +func (w *Writer2) WriteInt64(num int64) { + w.b = AppendInt(w.b, num) +} + +// WriteUint64 writes a 64-bit unsigned integer to the client. +func (w *Writer2) WriteUint64(num uint64) { + w.b = AppendUint(w.b, num) +} + +// WriteRaw writes raw data to the client. +func (w *Writer2) WriteRaw(data []byte) { + w.b = append(w.b, data...) +} + +// WriteAny writes any type to client. +// +// nil -> null +// error -> error (adds "ERR " when first word is not uppercase) +// string -> bulk-string +// numbers -> bulk-string +// []byte -> bulk-string +// bool -> bulk-string ("0" or "1") +// slice -> array +// map -> array with key/value pairs +// SimpleString -> string +// SimpleInt -> integer +// everything-else -> bulk-string representation using fmt.Sprint() +func (w *Writer2) WriteAny(v interface{}) { + w.b = AppendAny(w.b, v) +} + +// appendPrefix will append a "$3\r\n" style redis prefix for a message. +func appendPrefix(b []byte, c byte, n int64) []byte { + if n >= 0 && n <= 9 { + return append(b, c, byte('0'+n), '\r', '\n') + } + b = append(b, c) + b = strconv.AppendInt(b, n, 10) + return append(b, '\r', '\n') +} + +// AppendUint appends a Redis protocol uint64 to the input bytes. +func AppendUint(b []byte, n uint64) []byte { + b = append(b, ':') + b = strconv.AppendUint(b, n, 10) + return append(b, '\r', '\n') +} + +// AppendInt appends a Redis protocol int64 to the input bytes. +func AppendInt(b []byte, n int64) []byte { + return appendPrefix(b, ':', n) +} + +// AppendArray appends a Redis protocol array to the input bytes. 
+func AppendArray(b []byte, n int) []byte { + return appendPrefix(b, '*', int64(n)) +} + +// AppendBulk appends a Redis protocol bulk byte slice to the input bytes. +func AppendBulk(b []byte, bulk []byte) []byte { + b = appendPrefix(b, '$', int64(len(bulk))) + b = append(b, bulk...) + return append(b, '\r', '\n') +} + +// AppendBulkString appends a Redis protocol bulk string to the input bytes. +func AppendBulkString(b []byte, bulk string) []byte { + b = appendPrefix(b, '$', int64(len(bulk))) + b = append(b, bulk...) + return append(b, '\r', '\n') +} + +// AppendString appends a Redis protocol string to the input bytes. +func AppendString(b []byte, s string) []byte { + b = append(b, '+') + b = append(b, stripNewlines(s)...) + return append(b, '\r', '\n') +} + +// AppendError appends a Redis protocol error to the input bytes. +func AppendError(b []byte, s string) []byte { + b = append(b, '-') + b = append(b, stripNewlines(s)...) + return append(b, '\r', '\n') +} + +// AppendOK appends a Redis protocol OK to the input bytes. +func AppendOK(b []byte) []byte { + return append(b, '+', 'O', 'K', '\r', '\n') +} +func stripNewlines(s string) string { + for i := 0; i < len(s); i++ { + if s[i] == '\r' || s[i] == '\n' { + s = strings.Replace(s, "\r", " ", -1) + s = strings.Replace(s, "\n", " ", -1) + break + } + } + return s +} + +// AppendTile38 appends a Tile38 message to the input bytes. +func AppendTile38(b []byte, data []byte) []byte { + b = append(b, '$') + b = strconv.AppendInt(b, int64(len(data)), 10) + b = append(b, ' ') + b = append(b, data...) + return append(b, '\r', '\n') +} + +// AppendNull appends a Redis protocol null to the input bytes. +func AppendNull(b []byte) []byte { + return append(b, '$', '-', '1', '\r', '\n') +} + +// AppendBulkFloat appends a float64, as bulk bytes. +func AppendBulkFloat(dst []byte, f float64) []byte { + return AppendBulk(dst, strconv.AppendFloat(nil, f, 'f', -1, 64)) +} + +// AppendBulkInt appends an int64, as bulk bytes. 
+func AppendBulkInt(dst []byte, x int64) []byte { + return AppendBulk(dst, strconv.AppendInt(nil, x, 10)) +} + +// AppendBulkUint appends an uint64, as bulk bytes. +func AppendBulkUint(dst []byte, x uint64) []byte { + return AppendBulk(dst, strconv.AppendUint(nil, x, 10)) +} + +func prefixERRIfNeeded(msg string) string { + msg = strings.TrimSpace(msg) + firstWord := strings.Split(msg, " ")[0] + addERR := len(firstWord) == 0 + for i := 0; i < len(firstWord); i++ { + if firstWord[i] < 'A' || firstWord[i] > 'Z' { + addERR = true + break + } + } + if addERR { + msg = strings.TrimSpace("ERR " + msg) + } + return msg +} + +// SimpleString is for representing a non-bulk representation of a string +// from an *Any call. +type SimpleString string + +// SimpleInt is for representing a non-bulk representation of a int +// from an *Any call. +type SimpleInt int + +// Marshaler is the interface implemented by types that +// can marshal themselves into a Redis response type from an *Any call. +// The return value is not check for validity. +type Marshaler interface { + MarshalRESP() []byte +} + +// AppendAny appends any type to valid Redis type. 
+// +// nil -> null +// error -> error (adds "ERR " when first word is not uppercase) +// string -> bulk-string +// numbers -> bulk-string +// []byte -> bulk-string +// bool -> bulk-string ("0" or "1") +// slice -> array +// map -> array with key/value pairs +// SimpleString -> string +// SimpleInt -> integer +// Marshaler -> raw bytes +// everything-else -> bulk-string representation using fmt.Sprint() +func AppendAny(b []byte, v interface{}) []byte { + switch v := v.(type) { + case SimpleString: + b = AppendString(b, string(v)) + case SimpleInt: + b = AppendInt(b, int64(v)) + case nil: + b = AppendNull(b) + case error: + b = AppendError(b, prefixERRIfNeeded(v.Error())) + case string: + b = AppendBulkString(b, v) + case []byte: + b = AppendBulk(b, v) + case bool: + if v { + b = AppendBulkString(b, "1") + } else { + b = AppendBulkString(b, "0") + } + case int: + b = AppendBulkInt(b, int64(v)) + case int8: + b = AppendBulkInt(b, int64(v)) + case int16: + b = AppendBulkInt(b, int64(v)) + case int32: + b = AppendBulkInt(b, int64(v)) + case int64: + b = AppendBulkInt(b, v) + case uint: + b = AppendBulkUint(b, uint64(v)) + case uint8: + b = AppendBulkUint(b, uint64(v)) + case uint16: + b = AppendBulkUint(b, uint64(v)) + case uint32: + b = AppendBulkUint(b, uint64(v)) + case uint64: + b = AppendBulkUint(b, v) + case float32: + b = AppendBulkFloat(b, float64(v)) + case float64: + b = AppendBulkFloat(b, float64(v)) + case Marshaler: + b = append(b, v.MarshalRESP()...) 
+ default: + vv := reflect.ValueOf(v) + switch vv.Kind() { + case reflect.Slice: + n := vv.Len() + b = AppendArray(b, n) + for i := 0; i < n; i++ { + b = AppendAny(b, vv.Index(i).Interface()) + } + case reflect.Map: + n := vv.Len() + b = AppendArray(b, n*2) + var i int + var strKey bool + var strsKeyItems []strKeyItem + + iter := vv.MapRange() + for iter.Next() { + key := iter.Key().Interface() + if i == 0 { + if _, ok := key.(string); ok { + strKey = true + strsKeyItems = make([]strKeyItem, n) + } + } + if strKey { + strsKeyItems[i] = strKeyItem{ + key.(string), iter.Value().Interface(), + } + } else { + b = AppendAny(b, key) + b = AppendAny(b, iter.Value().Interface()) + } + i++ + } + if strKey { + sort.Slice(strsKeyItems, func(i, j int) bool { + return strsKeyItems[i].key < strsKeyItems[j].key + }) + for _, item := range strsKeyItems { + b = AppendBulkString(b, item.key) + b = AppendAny(b, item.value) + } + } + default: + b = AppendBulkString(b, fmt.Sprint(v)) + } + } + return b +} + +type strKeyItem struct { + key string + value interface{} +} diff --git a/stored/internal/dostats/conn_stats.go b/stored/internal/trycatch/panic.go similarity index 65% rename from stored/internal/dostats/conn_stats.go rename to stored/internal/trycatch/panic.go index 20eb610..6546f89 100644 --- a/stored/internal/dostats/conn_stats.go +++ b/stored/internal/trycatch/panic.go @@ -12,30 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package dostats +package trycatch import ( - "sync/atomic" -) - -var connStats struct { - total atomic.Int64 - alive atomic.Int64 -} - -func IncrConns() int64 { - connStats.total.Add(1) - return connStats.alive.Add(1) -} + "runtime" -func DecrConns() { - connStats.alive.Add(-1) -} - -func ConnsTotal() int64 { - return connStats.total.Load() -} + "github.com/zuoyebang/bitalostored/butils/unsafe2" + "github.com/zuoyebang/bitalostored/stored/internal/log" +) -func ConnsAlive() int64 { - return connStats.alive.Load() +func Panic(s string, err any) { + if err != nil { + buf := make([]byte, 2048) + n := runtime.Stack(buf, false) + log.Errorf("%s panic err:%v stack:%s", s, err, unsafe2.String(buf[0:n])) + } } diff --git a/stored/plugin/anticc/anticc.go b/stored/plugin/anticc/anticc.go deleted file mode 100644 index 0310127..0000000 --- a/stored/plugin/anticc/anticc.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package anticc - -import ( - "errors" - "time" - - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/internal/dostats" - - "github.com/zuoyebang/bitalostored/stored/internal/log" - - "github.com/zuoyebang/bitalostored/butils/timesize" -) - -var Enable bool - -func Init() { - if len(config.GlobalConfig.DynamicDeadline.ClientRatios) == 0 || len(config.GlobalConfig.DynamicDeadline.DeadlineThreshold) == 0 { - return - } - err := LoadConfig(config.GlobalConfig.DynamicDeadline.ClientRatios, config.GlobalConfig.DynamicDeadline.DeadlineThreshold, int(config.GlobalConfig.Server.Maxclient)) - if err != nil { - log.Errorf("load anticc config fail err:%s", err.Error()) - return - } - Enable = true -} - -func LoadConfig(aliveConnRatios []int, deadlineThreshold []timesize.Duration, maxClients int) error { - if len(aliveConnRatios) != len(deadlineThreshold) { - return errors.New("length of array client_ratio_threshold and deadline_threshold should be equal") - } - if len(aliveConnRatios) == 0 { - return errors.New("missing client deadline config") - } - dd.aliveConnRatios = aliveConnRatios[:] - dd.deadlineThreshold = deadlineThreshold[:] - dd.maxClients = maxClients - return nil -} - -type dynamicDeadline struct { - maxClients int - aliveConnRatios []int - deadlineThreshold []timesize.Duration -} - -var dd dynamicDeadline - -func GetConfigDeadline() time.Time { - for index, connThreshold := range dd.aliveConnRatios { - if (dostats.ConnsAlive()*100)/int64(dd.maxClients) < int64(connThreshold) { - if index == 0 { - return time.Now().Add(dd.deadlineThreshold[0].Duration()) - } - return time.Now().Add(dd.deadlineThreshold[index-1].Duration()) - } - } - t := dd.deadlineThreshold[len(dd.deadlineThreshold)-1].Duration() - return time.Now().Add(t) -} diff --git a/stored/plugin/catch_panic/panic.go b/stored/plugin/catch_panic/panic.go deleted file mode 100644 index eb06d69..0000000 --- a/stored/plugin/catch_panic/panic.go +++ 
/dev/null @@ -1,40 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package catch_panic - -import ( - "runtime" - - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/server" - - "github.com/zuoyebang/bitalostored/stored/internal/log" - - "github.com/zuoyebang/bitalostored/butils/unsafe2" -) - -func Init() { - if !config.GlobalConfig.Plugin.OpenPanic { - return - } - - server.AddPlugin(&server.Proc{Disconn: func(s *server.Server, c *server.Client, err interface{}) { - if err != nil { - buf := make([]byte, 2048) - n := runtime.Stack(buf, false) - log.Errorf("client run panic err:%v stack:%s", err, unsafe2.String(buf[0:n])) - } - }}) -} diff --git a/stored/plugin/info/info.go b/stored/plugin/info/info.go deleted file mode 100644 index a9f77f1..0000000 --- a/stored/plugin/info/info.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package info - -import ( - "math" - "runtime/debug" - "time" - - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/server" - - "github.com/zuoyebang/bitalostored/stored/internal/log" -) - -const ( - RuntimeInterval = 4 - ClientInterval = 16 - DiskInterval = 120 -) - -func Init() { - interval := time.Second - - server.AddPlugin(&server.Proc{Start: func(s *server.Server) { - go func() { - dataInterval := 60 - info := func() { - defer func() { - if r := recover(); r != nil { - log.Errorf("plugin doinfo panic err:%v stack=%s", r, string(debug.Stack())) - } - }() - - start := time.Now() - total := s.Info.Stats.TotolCmd.Load() - - time.Sleep(interval) - - delta := s.Info.Stats.TotolCmd.Load() - total - normalized := math.Max(0, float64(delta)) * float64(time.Second) / float64(time.Since(start)) - qps := uint64(normalized + 0.5) - s.Info.Stats.QPS.Store(qps) - db := s.GetDB() - if db != nil { - db.SetQPS(qps) - s.Info.Stats.RaftLogIndex = db.Meta.GetUpdateIndex() - if db.Migrate != nil { - s.Info.Stats.IsMigrate.Store(db.Migrate.IsMigrate.Load()) - } - s.Info.Stats.IsDelExpire = db.GetIsDelExpire() - } - - singleDegradeChange := s.Info.Server.SingleDegrade != config.GlobalConfig.Server.DegradeSingleNode - s.Info.Server.SingleDegrade = config.GlobalConfig.Server.DegradeSingleNode - if singleDegradeChange { - s.Info.Server.UpdateCache() - } - - if dataInterval%RuntimeInterval == 0 { - s.Info.Stats.UpdateCache() - s.Info.RuntimeStats.Samples() - } - - if dataInterval%ClientInterval == 0 { - 
s.Info.Client.UpdateCache() - } - - if dataInterval%DiskInterval == 0 { - s.Info.Data.Samples() - if db != nil { - db.BitalosdbUsage(s.Info.BitalosdbUsage) - } - } - - dataInterval++ - } - - for { - info() - } - }() - }}) -} diff --git a/stored/plugin/pprof/pprof.go b/stored/plugin/pprof/pprof.go deleted file mode 100644 index d3288e6..0000000 --- a/stored/plugin/pprof/pprof.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package pprof - -import ( - "net/http" - _ "net/http/pprof" - - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/server" - - "github.com/zuoyebang/bitalostored/stored/internal/log" -) - -func Init() { - if !config.GlobalConfig.Plugin.OpenPprof { - return - } - - server.AddPlugin(&server.Proc{Start: func(s *server.Server) { - go func() { - pprofAddr := config.GlobalConfig.Plugin.PprofAddr - if err := http.ListenAndServe(pprofAddr, nil); err != nil { - log.Errorf("pprof ListenAndServe err:%v", err) - } else { - log.Infof("pprof addr:%s", pprofAddr) - } - }() - }}) -} diff --git a/stored/plugin/raft/addplugin.go b/stored/plugin/raft/addplugin.go deleted file mode 100644 index fc6a77d..0000000 --- a/stored/plugin/raft/addplugin.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package raft - -import ( - "bytes" - "time" - - braft "github.com/zuoyebang/bitalostored/raft" - "github.com/zuoyebang/bitalostored/raft/statemachine" - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/internal/log" - "github.com/zuoyebang/bitalostored/stored/internal/marshal/update" - "github.com/zuoyebang/bitalostored/stored/server" - "google.golang.org/protobuf/proto" -) - -func addPluginStartInitRaft(raft *StartRun) { - server.AddPlugin(&server.Proc{ - Start: func(s *server.Server) { - raft.LoadConfig(s) - - raft.registerRaftCommand(s) - raft.registerIsMasterCF(s) - raft.registerSyncToSlave(s) - - if !config.GlobalConfig.Plugin.OpenRaft || config.GlobalConfig.CheckIsDegradeSingleNode() { - s.Info.Cluster.Role = "single" - s.Info.Cluster.Status = true - } else { - node, err := braft.NewNodeHost(raft.Nhc) - if err != nil { - log.Error("new host: ", err) - panic(err) - } - raft.Nh = node - - if err := raft.Nh.StartOnDiskCluster(raft.AddrList, raft.Join, func(clusterID uint64, nodeID uint64) statemachine.IOnDiskStateMachine { - s.Info.Cluster.ClusterId = clusterID - s.Info.Cluster.CurrentNodeId = nodeID - return NewDiskKV(clusterID, nodeID, s, raft) - }, raft.Rc); err != nil { - log.Error("start cluster: ", err) - panic(err) - } - raft.RaftReady = true - } - - raft.doRaftClusterStat(s) - }, - Stop: func(s *server.Server, e interface{}) { - if raft != nil && raft.Nh != nil { - raft.StopNodeHost() - } - }, - }) -} - -func addPluginPreparePropose(raft *StartRun) { - server.AddRaftPlugin(&server.Proc{DoRaftSync: func(c *server.Client, cmd *server.Cmd, key string) error { - migrate := false - if b, e := proto.Marshal(&update.ByteSlice{IsMigrate: &migrate, NodeId: &raft.NodeID, Data: c.Data, KeyHash: &c.KeyHash}); e != nil { - return e - } else { - start := time.Now() - if raft.AsyncPropose { - if ret, err := raft.Propose(b, raft.RetryTimes); ret != R_SUCCESS { - c.RespWriter.WriteError(err) - } else { - 
raftSyncCostUs := time.Since(start).Nanoseconds() - return c.ApplyDB(raftSyncCostUs) - } - } else { - if res, err := raft.SyncPropose(b); err != nil { - return err - } else { - if bytes.Equal(res.Data, UpdateSelfNodeDoing) { - raftSyncCostNs := time.Since(start).Nanoseconds() - return c.ApplyDB(raftSyncCostNs) - } else { - c.RespWriter.WriteBytes(res.Data) - } - } - } - } - return nil - }}) -} diff --git a/stored/server/client.go b/stored/server/client.go index 75282c8..9b20ecc 100644 --- a/stored/server/client.go +++ b/stored/server/client.go @@ -15,9 +15,7 @@ package server import ( - "errors" "fmt" - "net" "sync" "sync/atomic" "time" @@ -50,22 +48,25 @@ const ( var raftClientPool sync.Pool type Client struct { - *resp.Session - + Cmd string + Args [][]byte + Keys []byte + Data [][]byte + ParseMarks []int + Reader *resp.Reader + Writer *resp.Writer DB *engine.Bitalos QueryStartTime time.Time KeyHash uint32 IsMaster func() bool - server *Server - closed atomic.Bool - conn net.Conn - - txState int - txCommandQueued bool - watchKeys map[string]int64 - commandQueue [][][]byte - + server *Server + remoteAddr string + closed atomic.Bool + txState int + txCommandQueued bool + watchKeys map[string]int64 + commandQueue [][][]byte hasPrepareLock atomic.Bool prepareState atomic.Int32 prepareUnlockSig chan struct{} @@ -76,9 +77,7 @@ type Client struct { func init() { raftClientPool = sync.Pool{ New: func() interface{} { - return &Client{ - Session: resp.NewSession(nil, 0), - } + return newRaftClient() }, } for i := 0; i < 128; i++ { @@ -105,84 +104,49 @@ func GetVmFromPool(s *Server) *Client { } func PutRaftClientToPool(c *Client) { - c.RespWriter.FlushBytesEmpty() + c.Writer.Reset() raftClientPool.Put(c) } -func NewClientRESP(conn net.Conn, s *Server) *Client { - c := new(Client) - s.connWait.Add(1) +func newRaftClient() *Client { + return &Client{ + Writer: resp.NewWriter(), + } +} - keepAlive := config.GlobalConfig.Server.Keepalive.Duration() +func newConnClient(s 
*Server, remoteAddr string) *Client { + c := &Client{ + DB: s.GetDB(), + IsMaster: s.IsMaster, + ParseMarks: make([]int, 0, 1<<4), + Reader: resp.NewReader(), + Writer: resp.NewWriter(), + remoteAddr: remoteAddr, + server: s, + } + + s.Info.Client.ClientTotal.Add(1) + s.Info.Client.ClientAlive.Add(1) - c.conn = conn - c.Session = resp.NewSession(conn, keepAlive) - c.DB = s.GetDB() - c.IsMaster = s.IsMaster - c.KeyHash = 0 - c.server = s if s.openDistributedTx { c.prepareUnlockSig = make(chan struct{}, 1) c.queueCommandDone = make(chan struct{}, 1) c.prepareUnlockDone = make(chan struct{}, 1) } + return c } func (c *Client) Close() { - if c.closed.Load() { + if !c.closed.CompareAndSwap(false, true) { return } - c.closed.Store(true) - if c.server.openDistributedTx { c.discard() } - c.Session.Close() -} - -func (c *Client) run() { - c.server.addRespClient(c) - - defer func() { - c.Close() - - c.server.delRespClient(c) - c.server.connWait.Done() - runPluginDisconn(c.server, c, recover()) - }() - runPluginConnect(c.server, c) - - isPlugin := config.GlobalConfig.Plugin.OpenRaft - - for { - c.Session.SetReadDeadline() - - c.Cmd = "" - c.Args = nil - reqData, err := c.RespReader.ParseRequest() - if err != nil { - return - } - - if c.server.Info.Stats.DbSyncStatus == DB_SYNC_RECVING_FAIL || c.server.Info.Stats.DbSyncStatus == DB_SYNC_RECVING { - c.RespWriter.WriteError(errors.New("ERR db syncing/fail, refuse request")) - n, err := c.RespWriter.FlushToWriterIO(c.conn) - if err != nil { - log.Errorf("FlushToWriterIO length:%d error:%v", n, err) - } - log.Info("db syncing/fail, refuse request") - } else { - if err = c.HandleRequest(isPlugin, reqData, false); err != nil { - log.Errorf("handleRequest error:%v", err) - } - if n, err := c.RespWriter.FlushToWriterIO(c.conn); err != nil { - log.Errorf("FlushToWriterIO length:%d error:%v", n, err) - } - } - } + c.server.Info.Client.ClientAlive.Add(-1) } func (c *Client) ResetQueryStartTime() { @@ -192,11 +156,13 @@ func (c *Client) 
ResetQueryStartTime() { func (c *Client) FormatData(reqData [][]byte) { c.ResetQueryStartTime() c.Data = reqData - if c.Cmd = ""; len(reqData) == 0 { + c.Cmd = "" + if len(reqData) == 0 { c.Args = reqData[0:0] } else { - c.Cmd = unsafe2.String(resp.LowerSlice(reqData[0])) - if c.Args = reqData[1:]; len(c.Args) > 0 { + c.Cmd = unsafe2.String(LowerSlice(reqData[0])) + c.Args = reqData[1:] + if len(c.Args) > 0 { c.Keys = c.Args[0] } else { c.Keys = c.Keys[0:0] @@ -204,17 +170,22 @@ func (c *Client) FormatData(reqData [][]byte) { } } -func (c *Client) HandleRequest(plugin bool, reqData [][]byte, isHashTag bool) (err error) { +func (c *Client) HandleRequest(reqData [][]byte, isHashTag bool) (err error) { c.FormatData(reqData) + if len(c.Cmd) == 0 { - err = resp.CmdEmptyErr(c.Cmd) - c.RespWriter.WriteError(err) + err = errn.CmdEmptyErr(c.Cmd) + c.Writer.WriteError(err) return err } if c.server.openDistributedTx && c.checkCommandEnterQueue() { - c.commandQueue = append(c.commandQueue, reqData) - c.RespWriter.WriteStatus(resp.ReplyQUEUED) + txReqData := make([][]byte, len(reqData)) + for i := range reqData { + txReqData[i] = append([]byte{}, reqData[i]...) 
+ } + c.commandQueue = append(c.commandQueue, txReqData) + c.Writer.WriteStatus(resp.ReplyQUEUED) return nil } @@ -231,20 +202,20 @@ func (c *Client) HandleRequest(plugin bool, reqData [][]byte, isHashTag bool) (e if c.Cmd == "script" { if len(c.Args) < 1 { - err = resp.CmdParamsErr(c.Cmd) - c.RespWriter.WriteError(err) + err = errn.CmdParamsErr(c.Cmd) + c.Writer.WriteError(err) return err } - c.Cmd = c.Cmd + unsafe2.String(resp.LowerSlice(c.Args[0])) + c.Cmd = c.Cmd + unsafe2.String(LowerSlice(c.Args[0])) } if c.Cmd == "QUIT" { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return errn.ErrClientQuit } - if !c.checkCommand(c.Cmd) { - c.RespWriter.WriteBulk(nil) + if !c.checkCommand() { + c.Writer.WriteBulk(nil) return nil } @@ -252,28 +223,27 @@ func (c *Client) HandleRequest(plugin bool, reqData [][]byte, isHashTag bool) (e var execCmd *Cmd if execCmd, ok = commands[c.Cmd]; !ok { - err = resp.CmdEmptyErr(c.Cmd) - c.RespWriter.WriteError(err) + err = errn.CmdEmptyErr(c.Cmd) + c.Writer.WriteError(err) return err } if c.server.openDistributedTx && c.txState&TxStateMulti != 0 && execCmd.NotAllowedInTx { err = fmt.Errorf("ERR %s inside MULTI is not allowed", c.Cmd) - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) return err } if c.server.IsWitness { err = c.ApplyDB(0) if err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } return err } - if plugin && c.server.slowQuery != nil && c.server.slowQuery.CheckSlowShield(c.Cmd, c.Keys) { - c.RespWriter.WriteError(resp.ErrSlowShield) - return resp.ErrSlowShield + if c.server.isOpenRaft && c.server.slowQuery != nil && c.server.slowQuery.CheckSlowShield(c.Cmd, c.Keys) { + c.Writer.WriteError(errn.ErrSlowShield) + return errn.ErrSlowShield } - defer runPluginHandled(c, execCmd, c.Cmd) if !isHashTag { c.KeyHash = hash.Fnv32(c.Keys) @@ -293,28 +263,43 @@ func (c *Client) HandleRequest(plugin bool, reqData [][]byte, isHashTag bool) (e if c.server.openDistributedTx { 
updateKeyModifyTs = c.markWatchKeyModified(execCmd) } - err = c.DB.Redirect(c.Cmd, c.Keys, reqData, c.RespWriter) + err = c.DB.Redirect(c.Cmd, c.Keys, reqData, c.Writer) if updateKeyModifyTs != nil { updateKeyModifyTs() } - } else if plugin && execCmd.Sync && !config.GlobalConfig.CheckIsDegradeSingleNode() { - err = runPluginRaft(c, execCmd, c.Cmd) + } else if c.server.isOpenRaft && execCmd.Sync && !config.GlobalConfig.CheckIsDegradeSingleNode() { + err = c.RaftSync() } else { err = c.ApplyDB(0) } if err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } return err } +func (c *Client) RaftSync() error { + start := time.Now() + resData, err := c.server.DoRaftSync(c.KeyHash, c.Data) + if err != nil { + return err + } + + if resData == nil { + return c.ApplyDB(time.Since(start).Nanoseconds()) + } else { + c.Writer.WriteBytes(resData) + return nil + } +} + func (c *Client) ApplyDB(raftSyncCostNs int64) error { var err error var ok bool var execCmd *Cmd if execCmd, ok = commands[c.Cmd]; !ok { - err = resp.CmdEmptyErr(c.Cmd) + err = errn.CmdEmptyErr(c.Cmd) return err } @@ -342,11 +327,7 @@ func (c *Client) ApplyDB(raftSyncCostNs int64) error { } costUs := costNs / 1000 raftSyncCostUs := raftSyncCostNs / 1000 - if c.conn == nil { - log.SlowLog("", costUs, raftSyncCostUs, c.Data, err) - } else { - log.SlowLog(c.conn.RemoteAddr().String(), costUs, raftSyncCostUs, c.Data, err) - } + log.SlowLog(c.remoteAddr, costUs, raftSyncCostUs, c.Data, err) } return err } @@ -355,7 +336,7 @@ func (c *Client) GetInfo() *SInfo { return c.server.Info } -func (c *Client) checkCommand(command string) bool { +func (c *Client) checkCommand() bool { if !c.server.IsWitness { return true } @@ -367,7 +348,7 @@ func (c *Client) checkCommand(command string) bool { return true case resp.ECHO: return true - case "shutdown": + case resp.SHUTDOWN: return true default: return false @@ -460,7 +441,7 @@ func (c *Client) resetTx() { } func (c *Client) addWatchKey(txLock *TxLocker, key 
[]byte, ts time.Time) { - keyStr := unsafe2.String(key) + keyStr := string(key) txLock.addWatchKey(c, keyStr, true) if len(c.watchKeys) == 0 { diff --git a/stored/server/cmd_config.go b/stored/server/cmd_config.go index 0249490..a8569aa 100644 --- a/stored/server/cmd_config.go +++ b/stored/server/cmd_config.go @@ -15,10 +15,11 @@ package server import ( + "strconv" "strings" "github.com/zuoyebang/bitalostored/butils/unsafe2" - "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" ) @@ -35,23 +36,39 @@ func init() { func configCommand(c *Client) error { args := c.Args - if len(args) != 2 { - return resp.CmdParamsErr(resp.CONFIG) + if len(args) < 2 { + return errn.CmdParamsErr(resp.CONFIG) } op := strings.ToUpper(unsafe2.String(args[0])) - if op != CONFIGGET { - return resp.ErrNotImplement + if op != CONFIGSET { + return errn.ErrNotImplement } - configName := strings.ToLower(unsafe2.String(args[1])) - if configName == "maxmemory" { - fvPair := btools.FVPair{ - Field: []byte("maxmemory"), - Value: []byte("268435456"), + configName := strings.ToUpper(unsafe2.String(args[1])) + if configName == "AUTOCOMPACT" { + if len(args) < 3 { + return errn.CmdParamsErr(resp.CONFIG) + } + configValue, err := strconv.Atoi(string(args[2])) + if err != nil { + return err } - c.RespWriter.WriteFVPairArray([]btools.FVPair{fvPair}) - } + db := c.server.GetDB() + if db != nil { + if configValue == 1 { + db.SetAutoCompact(true) + c.server.Info.Server.AutoCompact = true + } else { + db.SetAutoCompact(false) + c.server.Info.Server.AutoCompact = false + } + c.server.Info.Server.UpdateCache() + c.Writer.WriteStatus(resp.ReplyOK) + } + } else { + return errn.ErrNotImplement + } return nil } diff --git a/stored/server/cmd_dbsync.go b/stored/server/cmd_dbsync.go deleted file mode 100644 index 7615642..0000000 --- a/stored/server/cmd_dbsync.go +++ /dev/null @@ -1,85 +0,0 @@ 
-// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package server - -import ( - "errors" - "fmt" - - "github.com/gomodule/redigo/redis" - "github.com/zuoyebang/bitalostored/stored/internal/config" - "github.com/zuoyebang/bitalostored/stored/internal/resp" -) - -func init() { - AddCommand(map[string]*Cmd{ - resp.DBSYNC: {Sync: false, Handler: dbsyncCommand, NoKey: true}, - resp.DBSYNCCONNECT: {Sync: false, Handler: dbsyncconnectCommand, NoKey: true}, - }) -} - -func dbsyncCommand(c *Client) error { - if len(c.Args) != 3 { - return resp.CmdParamsErr(resp.DBSYNC) - } - token := string(c.Args[0]) - if token != config.GlobalConfig.Server.Token { - return errors.New("valid token err") - } - - ip := string(c.Args[1]) - port := string(c.Args[2]) - if len(ip) <= 0 || len(port) <= 0 { - return errors.New("valid ip/port err") - } - - host := fmt.Sprintf("%s:%s", ip, port) - rs, err := redis.Dial("tcp", host) - - defer rs.Close() - - if err != nil { - return err - } - - if addr, err := redis.String(rs.Do(resp.DBSYNCCONNECT, token)); err != nil { - c.server.Info.Stats.DbSyncErr = err.Error() - c.server.Info.Stats.DbSyncStatus = DB_SYNC_NOTHING - c.RespWriter.WriteError(err) - return err - } else { - c.server.buildDbAsyncConn(addr) - c.RespWriter.WriteStatus(resp.ReplyOK) - } - return nil -} - -func dbsyncconnectCommand(c *Client) error { - if len(c.Args) != 1 { - return 
resp.CmdParamsErr(resp.DBSYNCCONNECT) - } - if string(c.Args[0]) != config.GlobalConfig.Server.Token { - return errors.New("valid token err") - } - - if addr, err := c.server.buildDbSyncListener(); err != nil { - c.RespWriter.WriteError(err) - return err - } else { - c.RespWriter.WriteBulk([]byte(addr)) - } - - return nil -} diff --git a/stored/server/cmd_geo.go b/stored/server/cmd_geo.go index 0a06757..b25ea02 100644 --- a/stored/server/cmd_geo.go +++ b/stored/server/cmd_geo.go @@ -22,6 +22,7 @@ import ( "strings" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/geohash" "github.com/zuoyebang/bitalostored/stored/internal/resp" ) @@ -48,7 +49,7 @@ func init() { func geoaddCommand(c *Client) error { args := c.Args if len(args) < 3 || len(args[1:])%3 != 0 { - return resp.CmdParamsErr(resp.GEOADD) + return errn.CmdParamsErr(resp.GEOADD) } key, args := args[0], args[1:] @@ -80,7 +81,7 @@ func geoaddCommand(c *Client) error { n, err := c.DB.ZAdd(key, c.KeyHash, params...) 
if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -89,10 +90,10 @@ func geoaddCommand(c *Client) error { func geodistCommand(c *Client) error { args := c.Args if len(args) < 3 { - return resp.CmdParamsErr(resp.GEODIST) + return errn.CmdParamsErr(resp.GEODIST) } if len(args) > 4 { - return resp.ErrSyntax + return errn.ErrSyntax } key, from, to, args := args[0], args[1], args[2], args[3:] @@ -108,18 +109,18 @@ func geodistCommand(c *Client) error { fromD, errFrom := c.DB.ZScore(key, c.KeyHash, from) toD, errTo := c.DB.ZScore(key, c.KeyHash, to) if errFrom != nil || errTo != nil { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) return nil } dist := geohash.DistBetweenGeoHashWGS84(uint64(fromD), uint64(toD)) / toMeter - c.RespWriter.WriteBulk([]byte(fmt.Sprintf("%.4f", dist))) + c.Writer.WriteBulk([]byte(fmt.Sprintf("%.4f", dist))) return nil } func geoposCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.GEOPOS) + return errn.CmdParamsErr(resp.GEOPOS) } key, args := args[0], args[1:] arr := []interface{}{} @@ -131,14 +132,14 @@ func geoposCommand(c *Client) error { arr = append(arr, []interface{}{[]byte(strconv.FormatFloat(long, 'f', 17, 64)), []byte(strconv.FormatFloat(lat, 'f', 17, 64))}) } } - c.RespWriter.WriteArray(arr) + c.Writer.WriteArray(arr) return nil } func geohashCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.GEOHASH) + return errn.CmdParamsErr(resp.GEOHASH) } key, args := args[0], args[1:] arr := []interface{}{} @@ -156,32 +157,32 @@ func geohashCommand(c *Client) error { arr = append(arr, geohash.EncodeToBase32(code.Bits)) } } - c.RespWriter.WriteArray(arr) + c.Writer.WriteArray(arr) return nil } func georadiusCommand(c *Client) error { args := c.Args if len(args) < 5 { - return resp.CmdParamsErr(resp.GEORADIUS) + return errn.CmdParamsErr(resp.GEORADIUS) } key := args[0] longitude, err := 
strconv.ParseFloat(string(args[1]), 64) if err != nil { - return resp.CmdParamsErr(resp.GEORADIUS) + return errn.CmdParamsErr(resp.GEORADIUS) } latitude, err := strconv.ParseFloat(string(args[2]), 64) if err != nil { - return resp.CmdParamsErr(resp.GEORADIUS) + return errn.CmdParamsErr(resp.GEORADIUS) } radius, err := strconv.ParseFloat(string(args[3]), 64) if err != nil || radius < 0 { - return resp.CmdParamsErr(resp.GEORADIUS) + return errn.CmdParamsErr(resp.GEORADIUS) } toMeter := parseUnit(string(args[4])) if toMeter == 0 { - return resp.CmdParamsErr(resp.GEORADIUS) + return errn.CmdParamsErr(resp.GEORADIUS) } args = args[5:] @@ -208,11 +209,11 @@ func georadiusCommand(c *Client) error { direction = desc case "COUNT": if len(args) == 0 { - return resp.ErrSyntax + return errn.ErrSyntax } n, err := strconv.Atoi(string(args[0])) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n <= 0 { return errors.New("ERR COUNT must be > 0") @@ -220,7 +221,7 @@ func georadiusCommand(c *Client) error { args = args[1:] count = n default: - return resp.ErrSyntax + return errn.ErrSyntax } } @@ -265,26 +266,26 @@ func georadiusCommand(c *Client) error { } arr = append(arr, item) } - c.RespWriter.WriteArray(arr) + c.Writer.WriteArray(arr) return nil } func georadiusbymemberCommand(c *Client) error { args := c.Args if len(args) < 4 { - return resp.CmdParamsErr(resp.GEORADIUSBYMEMBER) + return errn.CmdParamsErr(resp.GEORADIUSBYMEMBER) } key := args[0] member := args[1] radius, err := strconv.ParseFloat(string(args[2]), 64) if err != nil { - return resp.CmdParamsErr(resp.GEORADIUSBYMEMBER) + return errn.CmdParamsErr(resp.GEORADIUSBYMEMBER) } toMeter := parseUnit(string(args[3])) if toMeter == 0 { - return resp.CmdParamsErr(resp.GEORADIUSBYMEMBER) + return errn.CmdParamsErr(resp.GEORADIUSBYMEMBER) } args = args[4:] @@ -311,11 +312,11 @@ func georadiusbymemberCommand(c *Client) error { direction = desc case "COUNT": if len(args) == 0 { - return resp.ErrSyntax + return 
errn.ErrSyntax } n, err := strconv.Atoi(string(args[0])) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n <= 0 { return errors.New("ERR COUNT must be > 0") @@ -323,7 +324,7 @@ func georadiusbymemberCommand(c *Client) error { args = args[1:] count = n default: - return resp.ErrSyntax + return errn.ErrSyntax } } @@ -374,7 +375,7 @@ func georadiusbymemberCommand(c *Client) error { } arr = append(arr, item) } - c.RespWriter.WriteArray(arr) + c.Writer.WriteArray(arr) return nil } diff --git a/stored/server/cmd_global.go b/stored/server/cmd_global.go index b842872..8ab4c2f 100644 --- a/stored/server/cmd_global.go +++ b/stored/server/cmd_global.go @@ -15,9 +15,7 @@ package server import ( - "os" "runtime/debug" - "syscall" "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" @@ -33,63 +31,51 @@ func init() { "debuginfo": {Sync: false, Handler: debugInfoCommand, NoKey: true}, "cacheinfo": {Sync: false, Handler: cacheInfoCommand, NoKey: true}, "freememory": {Sync: false, Handler: freeOsMemoryCommand, NoKey: true}, - "shutdown": {Sync: false, Handler: shutdownCommand, NoKey: true}, }) } -func shutdownCommand(c *Client) error { - c.conn.Close() - - p, _ := os.FindProcess(os.Getpid()) - - p.Signal(syscall.SIGTERM) - p.Signal(os.Interrupt) - - return errn.ErrClientQuit -} - func freeOsMemoryCommand(c *Client) error { debug.FreeOSMemory() - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) return nil } func keyslotCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.TYPE) + return errn.CmdParamsErr(resp.TYPE) } slotId := c.KeyHash % utils.TotalSlot - c.RespWriter.WriteInteger(int64(slotId)) + c.Writer.WriteInteger(int64(slotId)) return nil } func keyUniqIdCommand(c *Client) error { id := c.DB.Meta.GetCurrentKeyUniqId() - c.RespWriter.WriteInteger(int64(id)) + c.Writer.WriteInteger(int64(id)) return nil } func compactCommand(c 
*Client) error { c.DB.Compact() - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } func debugInfoCommand(c *Client) error { info := c.DB.DebugInfo() - c.RespWriter.WriteBulk(info) + c.Writer.WriteBulk(info) return nil } func cacheInfoCommand(c *Client) error { info := c.DB.CacheInfo() - c.RespWriter.WriteBulk(info) + c.Writer.WriteBulk(info) return nil } func delExpireCommand(c *Client) error { c.DB.ScanDelExpireAsync() - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } diff --git a/stored/server/cmd_hash.go b/stored/server/cmd_hash.go index 2a6eba4..326cb42 100644 --- a/stored/server/cmd_hash.go +++ b/stored/server/cmd_hash.go @@ -16,24 +16,24 @@ package server import ( "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" ) func init() { AddCommand(map[string]*Cmd{ - resp.HDEL: {Sync: resp.IsWriteCmd(resp.HDEL), Handler: hdelCommand}, - resp.HINCRBY: {Sync: resp.IsWriteCmd(resp.HINCRBY), Handler: hincrbyCommand}, - resp.HMSET: {Sync: resp.IsWriteCmd(resp.HMSET), Handler: hmsetCommand}, - resp.HSET: {Sync: resp.IsWriteCmd(resp.HSET), Handler: hsetCommand}, - resp.HVALS: {Sync: resp.IsWriteCmd(resp.HVALS), Handler: hvalsCommand}, - resp.HEXISTS: {Sync: resp.IsWriteCmd(resp.HEXISTS), Handler: hexistsCommand}, - resp.HGET: {Sync: resp.IsWriteCmd(resp.HGET), Handler: hgetCommand}, - resp.HGETALL: {Sync: resp.IsWriteCmd(resp.HGETALL), Handler: hgetallCommand}, - resp.HKEYS: {Sync: resp.IsWriteCmd(resp.HKEYS), Handler: hkeysCommand}, - resp.HLEN: {Sync: resp.IsWriteCmd(resp.HLEN), Handler: hlenCommand}, - resp.HMGET: {Sync: resp.IsWriteCmd(resp.HMGET), Handler: hmgetCommand}, - + resp.HDEL: {Sync: resp.IsWriteCmd(resp.HDEL), Handler: hdelCommand}, + resp.HINCRBY: {Sync: resp.IsWriteCmd(resp.HINCRBY), Handler: hincrbyCommand}, + resp.HMSET: 
{Sync: resp.IsWriteCmd(resp.HMSET), Handler: hmsetCommand}, + resp.HSET: {Sync: resp.IsWriteCmd(resp.HSET), Handler: hsetCommand}, + resp.HVALS: {Sync: resp.IsWriteCmd(resp.HVALS), Handler: hvalsCommand}, + resp.HEXISTS: {Sync: resp.IsWriteCmd(resp.HEXISTS), Handler: hexistsCommand}, + resp.HGET: {Sync: resp.IsWriteCmd(resp.HGET), Handler: hgetCommand}, + resp.HGETALL: {Sync: resp.IsWriteCmd(resp.HGETALL), Handler: hgetallCommand}, + resp.HKEYS: {Sync: resp.IsWriteCmd(resp.HKEYS), Handler: hkeysCommand}, + resp.HLEN: {Sync: resp.IsWriteCmd(resp.HLEN), Handler: hlenCommand}, + resp.HMGET: {Sync: resp.IsWriteCmd(resp.HMGET), Handler: hmgetCommand}, resp.HCLEAR: {Sync: resp.IsWriteCmd(resp.HCLEAR), Handler: hclearCommand, KeySkip: 1}, resp.HEXPIRE: {Sync: resp.IsWriteCmd(resp.HEXPIRE), Handler: hexpireCommand}, resp.HEXPIREAT: {Sync: resp.IsWriteCmd(resp.HEXPIREAT), Handler: hexpireAtCommand}, @@ -46,13 +46,13 @@ func init() { func hsetCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.HSET) + return errn.CmdParamsErr(resp.HSET) } if n, err := c.DB.HSet(args[0], c.KeyHash, args[1], args[2]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -61,7 +61,7 @@ func hsetCommand(c *Client) error { func hgetCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.HGET) + return errn.CmdParamsErr(resp.HGET) } v, vCloser, err := c.DB.HGet(args[0], c.KeyHash, args[1]) @@ -74,14 +74,14 @@ func hgetCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } func hexistsCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.HEXISTS) + return errn.CmdParamsErr(resp.HEXISTS) } var n int64 = 1 @@ -99,7 +99,7 @@ func hexistsCommand(c *Client) error { n = 0 } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -107,13 +107,13 @@ func 
hexistsCommand(c *Client) error { func hdelCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.HDEL) + return errn.CmdParamsErr(resp.HDEL) } if n, err := c.DB.HDel(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -122,13 +122,13 @@ func hdelCommand(c *Client) error { func hlenCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HLEN) + return errn.CmdParamsErr(resp.HLEN) } if n, err := c.DB.HLen(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -137,13 +137,13 @@ func hlenCommand(c *Client) error { func hincrbyCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.HINCRBY) + return errn.CmdParamsErr(resp.HINCRBY) } delta, err := utils.ByteToInt64(args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -151,7 +151,7 @@ func hincrbyCommand(c *Client) error { if n, err = c.DB.HIncrBy(args[0], c.KeyHash, args[1], delta); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -159,11 +159,11 @@ func hincrbyCommand(c *Client) error { func hmsetCommand(c *Client) error { args := c.Args if len(args) < 3 { - return resp.CmdParamsErr(resp.HMSET) + return errn.CmdParamsErr(resp.HMSET) } if len(args[1:])%2 != 0 { - return resp.CmdParamsErr(resp.HMSET) + return errn.CmdParamsErr(resp.HMSET) } key := args[0] @@ -179,7 +179,7 @@ func hmsetCommand(c *Client) error { if err := c.DB.HMset(key, c.KeyHash, kvs...); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -188,7 +188,7 @@ func hmsetCommand(c *Client) error { func hmgetCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.HMGET) + return 
errn.CmdParamsErr(resp.HMGET) } v, vClosers, err := c.DB.HMget(args[0], c.KeyHash, args[1:]...) @@ -203,14 +203,14 @@ func hmgetCommand(c *Client) error { return err } - c.RespWriter.WriteSliceArray(v) + c.Writer.WriteSliceArray(v) return nil } func hgetallCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HGETALL) + return errn.CmdParamsErr(resp.HGETALL) } v, closers, err := c.DB.HGetAll(args[0], c.KeyHash) @@ -225,14 +225,14 @@ func hgetallCommand(c *Client) error { return err } - c.RespWriter.WriteFVPairArray(v) + c.Writer.WriteFVPairArray(v) return nil } func hkeysCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HKEYS) + return errn.CmdParamsErr(resp.HKEYS) } v, closers, err := c.DB.HKeys(args[0], c.KeyHash) @@ -247,14 +247,14 @@ func hkeysCommand(c *Client) error { return err } - c.RespWriter.WriteSliceArray(v) + c.Writer.WriteSliceArray(v) return nil } func hvalsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HVALS) + return errn.CmdParamsErr(resp.HVALS) } v, closers, err := c.DB.HValues(args[0], c.KeyHash) @@ -269,20 +269,20 @@ func hvalsCommand(c *Client) error { return err } - c.RespWriter.WriteSliceArray(v) + c.Writer.WriteSliceArray(v) return nil } func hclearCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.HCLEAR) + return errn.CmdParamsErr(resp.HCLEAR) } if n, err := c.DB.HClear(c.KeyHash, args...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -291,12 +291,12 @@ func hclearCommand(c *Client) error { func hexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.HEXPIRE) + return errn.CmdParamsErr(resp.HEXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -304,19 +304,19 @@ func hexpireCommand(c 
*Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func hexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.HEXPIREAT) + return errn.CmdParamsErr(resp.HEXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -324,20 +324,20 @@ func hexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func httlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HTTL) + return errn.CmdParamsErr(resp.HTTL) } if v, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(v) + c.Writer.WriteInteger(v) } return nil @@ -346,13 +346,13 @@ func httlCommand(c *Client) error { func hpersistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HPERSIST) + return errn.CmdParamsErr(resp.HPERSIST) } if n, err := c.DB.Persist(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -361,13 +361,13 @@ func hpersistCommand(c *Client) error { func hkeyexistsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.HKEYEXISTS) + return errn.CmdParamsErr(resp.HKEYEXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } diff --git a/stored/server/cmd_keys.go b/stored/server/cmd_keys.go index 30d49a5..16eccf8 100644 --- a/stored/server/cmd_keys.go +++ b/stored/server/cmd_keys.go @@ -16,6 +16,7 @@ package server import ( "github.com/zuoyebang/bitalostored/butils/unsafe2" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" 
"github.com/zuoyebang/bitalostored/stored/internal/utils" ) @@ -39,13 +40,13 @@ func init() { func typeCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.TYPE) + return errn.CmdParamsErr(resp.TYPE) } if t, err := c.DB.Type(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteStatus(t) + c.Writer.WriteStatus(t) return nil } } @@ -54,26 +55,26 @@ func delCommand(c *Client) error { args := c.Args argsLen := len(args) if argsLen == 0 { - return resp.CmdParamsErr(resp.DEL) + return errn.CmdParamsErr(resp.DEL) } n, err := c.DB.Del(c.KeyHash, args...) if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func expireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.EXPIRE) + return errn.CmdParamsErr(resp.EXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -81,19 +82,19 @@ func expireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func expireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.EXPIREAT) + return errn.CmdParamsErr(resp.EXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -101,19 +102,19 @@ func expireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func pexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.EXPIRE) + return errn.CmdParamsErr(resp.EXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -121,19 +122,19 @@ func pexpireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + 
c.Writer.WriteInteger(n) return nil } func pexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.EXPIREAT) + return errn.CmdParamsErr(resp.EXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -141,20 +142,20 @@ func pexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func existsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.EXISTS) + return errn.CmdParamsErr(resp.EXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } } @@ -162,13 +163,13 @@ func existsCommand(c *Client) error { func ttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.TTL) + return errn.CmdParamsErr(resp.TTL) } if n, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } } @@ -176,13 +177,13 @@ func ttlCommand(c *Client) error { func pttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.PTTL) + return errn.CmdParamsErr(resp.PTTL) } if n, err := c.DB.PTTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } } @@ -190,13 +191,13 @@ func pttlCommand(c *Client) error { func persistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.PERSIST) + return errn.CmdParamsErr(resp.PERSIST) } if n, err := c.DB.Persist(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -223,7 +224,7 @@ func infoCommand(c *Client) error { info = []byte(sinfo.Server.ServerAddress) } } - c.RespWriter.WriteBulk(info) 
+ c.Writer.WriteBulk(info) if closer != nil { closer() } diff --git a/stored/server/cmd_kv.go b/stored/server/cmd_kv.go index d44bb0d..a58b66f 100644 --- a/stored/server/cmd_kv.go +++ b/stored/server/cmd_kv.go @@ -18,6 +18,7 @@ import ( "strconv" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" ) @@ -58,7 +59,7 @@ func init() { func getCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.GET) + return errn.CmdParamsErr(resp.GET) } v, closer, err := c.DB.Get(args[0], c.KeyHash) @@ -71,7 +72,7 @@ func getCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } @@ -79,58 +80,58 @@ func setCommand(c *Client) error { args := c.Args if len(c.Args) < 2 { - return resp.CmdParamsErr(resp.SET) + return errn.CmdParamsErr(resp.SET) } - exType, sec, setCondition, err := resp.ParseSetArgs(args[2:]) + exType, sec, setCondition, err := ParseSetArgs(args[2:]) if err != nil { return err } - if exType == resp.NO_TYPE && setCondition == resp.NO_CONDITION { + if exType == NO_TYPE && setCondition == NO_CONDITION { if err := c.DB.Set(args[0], c.KeyHash, args[1]); err != nil { return err } - c.RespWriter.WriteStatus(resp.ReplyOK) - } else if exType == resp.NO_TYPE && setCondition == resp.NX { + c.Writer.WriteStatus(resp.ReplyOK) + } else if exType == NO_TYPE && setCondition == NX { if n, err := c.DB.SetNX(args[0], c.KeyHash, args[1]); err != nil { return err } else if n == 1 { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } else { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } - } else if exType == resp.EX && setCondition == resp.NO_CONDITION { + } else if exType == EX && setCondition == NO_CONDITION { if err := c.DB.SetEX(args[0], c.KeyHash, sec, args[1]); err != nil { 
return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } - } else if exType == resp.EX && setCondition == resp.NX { + } else if exType == EX && setCondition == NX { if n, err := c.DB.SetNXEX(args[0], c.KeyHash, sec, args[1]); err != nil { return err } else if n == 1 { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } else { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } - } else if exType == resp.PX && setCondition == resp.NO_CONDITION { + } else if exType == PX && setCondition == NO_CONDITION { if err := c.DB.PSetEX(args[0], c.KeyHash, sec, args[1]); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } - } else if exType == resp.PX && setCondition == resp.NX { + } else if exType == PX && setCondition == NX { if n, err := c.DB.PSetNXEX(args[0], c.KeyHash, sec, args[1]); err != nil { return err } else if n == 1 { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } else { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } } else { - return resp.ErrNotImplement + return errn.ErrNotImplement } return nil @@ -139,7 +140,7 @@ func setCommand(c *Client) error { func getsetCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.GETSET) + return errn.CmdParamsErr(resp.GETSET) } v, closer, err := c.DB.GetSet(args[0], c.KeyHash, args[1]) @@ -152,20 +153,20 @@ func getsetCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } func setnxCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.SETNX) + return errn.CmdParamsErr(resp.SETNX) } if n, err := c.DB.SetNX(args[0], c.KeyHash, args[1]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -173,18 +174,18 @@ func setnxCommand(c *Client) error { func setexCommand(c 
*Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.SETEX) + return errn.CmdParamsErr(resp.SETEX) } sec, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if err := c.DB.SetEX(args[0], c.KeyHash, sec, args[2]); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -193,18 +194,18 @@ func setexCommand(c *Client) error { func psetexCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.SETEX) + return errn.CmdParamsErr(resp.SETEX) } mills, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if err := c.DB.PSetEX(args[0], c.KeyHash, mills, args[2]); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -213,13 +214,13 @@ func psetexCommand(c *Client) error { func kexistsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.EXISTS) + return errn.CmdParamsErr(resp.EXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -228,13 +229,13 @@ func kexistsCommand(c *Client) error { func incrCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.INCR) + return errn.CmdParamsErr(resp.INCR) } if n, err := c.DB.Incr(c.Args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -243,13 +244,13 @@ func incrCommand(c *Client) error { func decrCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.DECR) + return errn.CmdParamsErr(resp.DECR) } if n, err := c.DB.Decr(c.Args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil 
@@ -258,18 +259,18 @@ func decrCommand(c *Client) error { func incrbyCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.INCRBY) + return errn.CmdParamsErr(resp.INCRBY) } delta, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.IncrBy(c.Args[0], c.KeyHash, delta); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -278,17 +279,17 @@ func incrbyCommand(c *Client) error { func incrbyfloatCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.INCRBYFLOAT) + return errn.CmdParamsErr(resp.INCRBYFLOAT) } delta, err := utils.ByteToFloat64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.IncrByFloat(c.Args[0], c.KeyHash, delta); err != nil { return err } else { - c.RespWriter.WriteBulk([]byte(strconv.FormatFloat(n, 'f', -1, 64))) + c.Writer.WriteBulk([]byte(strconv.FormatFloat(n, 'f', -1, 64))) } return nil @@ -297,18 +298,18 @@ func incrbyfloatCommand(c *Client) error { func decrbyCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.DECRBY) + return errn.CmdParamsErr(resp.DECRBY) } delta, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.DecrBy(c.Args[0], c.KeyHash, delta); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -317,13 +318,13 @@ func decrbyCommand(c *Client) error { func kdelCommand(c *Client) error { args := c.Args if len(args) == 0 { - return resp.CmdParamsErr(resp.KDEL) + return errn.CmdParamsErr(resp.KDEL) } if n, err := c.DB.Del(c.KeyHash, args...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -332,7 +333,7 @@ func kdelCommand(c *Client) error { func msetCommand(c *Client) error { args := 
c.Args if len(args) == 0 || len(args)%2 != 0 { - return resp.CmdParamsErr(resp.MSET) + return errn.CmdParamsErr(resp.MSET) } kvs := make([]btools.KVPair, len(args)/2) @@ -344,7 +345,7 @@ func msetCommand(c *Client) error { if err := c.DB.MSet(c.KeyHash, kvs...); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -353,7 +354,7 @@ func msetCommand(c *Client) error { func mgetCommand(c *Client) error { args := c.Args if len(args) == 0 { - return resp.CmdParamsErr(resp.MGET) + return errn.CmdParamsErr(resp.MGET) } v, closers, err := c.DB.MGet(c.KeyHash, args...) @@ -368,19 +369,19 @@ func mgetCommand(c *Client) error { return err } - c.RespWriter.WriteSliceArray(v) + c.Writer.WriteSliceArray(v) return nil } func kexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.KEXPIRE) + return errn.CmdParamsErr(resp.KEXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -388,19 +389,19 @@ func kexpireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func kexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.KEXPIREAT) + return errn.CmdParamsErr(resp.KEXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -408,20 +409,20 @@ func kexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func kttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.KTTL) + return errn.CmdParamsErr(resp.KTTL) } if v, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(v) + c.Writer.WriteInteger(v) } return nil @@ -430,13 +431,13 @@ func 
kttlCommand(c *Client) error { func kpersistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.PERSIST) + return errn.CmdParamsErr(resp.PERSIST) } if n, err := c.DB.Persist(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -445,13 +446,13 @@ func kpersistCommand(c *Client) error { func appendCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.APPEND) + return errn.CmdParamsErr(resp.APPEND) } if n, err := c.DB.Append(args[0], c.KeyHash, args[1]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -459,7 +460,7 @@ func appendCommand(c *Client) error { func getrangeCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.GETRANGE) + return errn.CmdParamsErr(resp.GETRANGE) } key := args[0] @@ -482,7 +483,7 @@ func getrangeCommand(c *Client) error { if err != nil { return err } else { - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) } return nil @@ -492,7 +493,7 @@ func getrangeCommand(c *Client) error { func setrangeCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.SETRANGE) + return errn.CmdParamsErr(resp.SETRANGE) } key := args[0] @@ -501,7 +502,7 @@ func setrangeCommand(c *Client) error { return err } if offset < 0 { - return resp.ErrRangeOffset + return errn.ErrRangeOffset } value := args[2] @@ -509,20 +510,20 @@ func setrangeCommand(c *Client) error { if n, err := c.DB.SetRange(key, c.KeyHash, offset, value); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } func strlenCommand(c *Client) error { if len(c.Args) != 1 { - return resp.CmdParamsErr(resp.STRLEN) + return errn.CmdParamsErr(resp.STRLEN) } if n, err := c.DB.StrLen(c.Args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + 
c.Writer.WriteInteger(n) } return nil } @@ -548,7 +549,7 @@ func bitcountCommand(c *Client) error { args := c.Args if len(args) != 1 && len(args) != 3 { - return resp.CmdParamsErr(resp.BITCOUNT) + return errn.CmdParamsErr(resp.BITCOUNT) } key := args[0] @@ -557,14 +558,14 @@ func bitcountCommand(c *Client) error { return err } if start > end && len(args[1:]) != 0 { - c.RespWriter.WriteInteger(0) + c.Writer.WriteInteger(0) return nil } if n, err := c.DB.BitCount(key, c.KeyHash, start, end); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -572,7 +573,7 @@ func bitcountCommand(c *Client) error { func bitposCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.BITPOS) + return errn.CmdParamsErr(resp.BITPOS) } key := args[0] @@ -588,7 +589,7 @@ func bitposCommand(c *Client) error { if n, err := c.DB.BitPos(key, c.KeyHash, bit, start, end); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -596,7 +597,7 @@ func bitposCommand(c *Client) error { func getbitCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.GETBIT) + return errn.CmdParamsErr(resp.GETBIT) } key := args[0] @@ -605,13 +606,13 @@ func getbitCommand(c *Client) error { return err } if offset < 0 { - return resp.ErrBitOffset + return errn.ErrBitOffset } if n, err := c.DB.GetBit(key, c.KeyHash, offset); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -619,7 +620,7 @@ func getbitCommand(c *Client) error { func setbitCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.SETBIT) + return errn.CmdParamsErr(resp.SETBIT) } key := args[0] @@ -628,7 +629,7 @@ func setbitCommand(c *Client) error { return err } if offset < 0 { - return resp.ErrBitOffset + return errn.ErrBitOffset } value, err := strconv.Atoi(string(args[2])) @@ 
-639,7 +640,7 @@ func setbitCommand(c *Client) error { if n, err := c.DB.SetBit(key, c.KeyHash, offset, value); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } diff --git a/stored/server/cmd_list.go b/stored/server/cmd_list.go index 4f1d256..81ec025 100644 --- a/stored/server/cmd_list.go +++ b/stored/server/cmd_list.go @@ -17,26 +17,26 @@ package server import ( "bytes" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" ) func init() { AddCommand(map[string]*Cmd{ - resp.LPOP: {Sync: resp.IsWriteCmd(resp.LPOP), Handler: lpopCommand}, - resp.LPUSH: {Sync: resp.IsWriteCmd(resp.LPUSH), Handler: lpushCommand}, - resp.RPOP: {Sync: resp.IsWriteCmd(resp.RPOP), Handler: rpopCommand}, - resp.RPUSH: {Sync: resp.IsWriteCmd(resp.RPUSH), Handler: rpushCommand}, - resp.LINDEX: {Sync: resp.IsWriteCmd(resp.LINDEX), Handler: lindexCommand}, - resp.LLEN: {Sync: resp.IsWriteCmd(resp.LLEN), Handler: llenCommand}, - resp.LRANGE: {Sync: resp.IsWriteCmd(resp.LRANGE), Handler: lrangeCommand}, - resp.LTRIM: {Sync: resp.IsWriteCmd(resp.LTRIM), Handler: lTrimCommand}, - resp.LREM: {Sync: resp.IsWriteCmd(resp.LREM), Handler: lremCommand}, - resp.LINSERT: {Sync: resp.IsWriteCmd(resp.LINSERT), Handler: linsertCommand}, - resp.LPUSHX: {Sync: resp.IsWriteCmd(resp.LPUSHX), Handler: lpushxCommand}, - resp.RPUSHX: {Sync: resp.IsWriteCmd(resp.RPUSHX), Handler: rpushxCommand}, - resp.LSET: {Sync: resp.IsWriteCmd(resp.LSET), Handler: lsetCommand}, - + resp.LPOP: {Sync: resp.IsWriteCmd(resp.LPOP), Handler: lpopCommand}, + resp.LPUSH: {Sync: resp.IsWriteCmd(resp.LPUSH), Handler: lpushCommand}, + resp.RPOP: {Sync: resp.IsWriteCmd(resp.RPOP), Handler: rpopCommand}, + resp.RPUSH: {Sync: resp.IsWriteCmd(resp.RPUSH), Handler: rpushCommand}, + resp.LINDEX: {Sync: resp.IsWriteCmd(resp.LINDEX), Handler: lindexCommand}, + resp.LLEN: 
{Sync: resp.IsWriteCmd(resp.LLEN), Handler: llenCommand}, + resp.LRANGE: {Sync: resp.IsWriteCmd(resp.LRANGE), Handler: lrangeCommand}, + resp.LTRIM: {Sync: resp.IsWriteCmd(resp.LTRIM), Handler: lTrimCommand}, + resp.LREM: {Sync: resp.IsWriteCmd(resp.LREM), Handler: lremCommand}, + resp.LINSERT: {Sync: resp.IsWriteCmd(resp.LINSERT), Handler: linsertCommand}, + resp.LPUSHX: {Sync: resp.IsWriteCmd(resp.LPUSHX), Handler: lpushxCommand}, + resp.RPUSHX: {Sync: resp.IsWriteCmd(resp.RPUSHX), Handler: rpushxCommand}, + resp.LSET: {Sync: resp.IsWriteCmd(resp.LSET), Handler: lsetCommand}, resp.LCLEAR: {Sync: resp.IsWriteCmd(resp.LCLEAR), Handler: lclearCommand, KeySkip: 1}, resp.LPERSIST: {Sync: resp.IsWriteCmd(resp.LPERSIST), Handler: lpersistCommand}, resp.LEXPIRE: {Sync: resp.IsWriteCmd(resp.LEXPIRE), Handler: lexpireCommand}, @@ -51,18 +51,18 @@ func init() { func lremCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.LREM) + return errn.CmdParamsErr(resp.LREM) } count, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.LRem(args[0], c.KeyHash, count, args[2]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -71,21 +71,21 @@ func lremCommand(c *Client) error { func linsertCommand(c *Client) error { args := c.Args if len(args) != 4 { - return resp.CmdParamsErr(resp.LINSERT) + return errn.CmdParamsErr(resp.LINSERT) } isbefore := false - if bytes.Equal(resp.LowerSlice(args[1]), resp.BEFORE) { + if bytes.Equal(LowerSlice(args[1]), BEFORE) { isbefore = true - } else if bytes.Equal(resp.LowerSlice(args[1]), resp.AFTER) { + } else if bytes.Equal(LowerSlice(args[1]), AFTER) { isbefore = false } else { - return resp.ErrSyntax + return errn.ErrSyntax } if n, err := c.DB.LInsert(args[0], c.KeyHash, isbefore, args[2], args[3]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } 
return nil } @@ -93,13 +93,13 @@ func linsertCommand(c *Client) error { func lpushCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.LPUSH) + return errn.CmdParamsErr(resp.LPUSH) } if n, err := c.DB.LPush(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -107,13 +107,13 @@ func lpushCommand(c *Client) error { func lpushxCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.LPUSHX) + return errn.CmdParamsErr(resp.LPUSHX) } if n, err := c.DB.LPushX(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -122,13 +122,13 @@ func lpushxCommand(c *Client) error { func rpushCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.RPUSH) + return errn.CmdParamsErr(resp.RPUSH) } if n, err := c.DB.RPush(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -137,13 +137,13 @@ func rpushCommand(c *Client) error { func rpushxCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.RPUSHX) + return errn.CmdParamsErr(resp.RPUSHX) } if n, err := c.DB.RPushX(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -152,7 +152,7 @@ func rpushxCommand(c *Client) error { func lpopCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.LPOP) + return errn.CmdParamsErr(resp.LPOP) } v, vcloser, err := c.DB.LPop(args[0], c.KeyHash) @@ -165,14 +165,14 @@ func lpopCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } func rpopCommand(c *Client) error { args := c.Args if len(args) != 1 { - return 
resp.CmdParamsErr(resp.RPOP) + return errn.CmdParamsErr(resp.RPOP) } v, vcloser, err := c.DB.RPop(args[0], c.KeyHash) @@ -185,20 +185,20 @@ func rpopCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } func llenCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.LLEN) + return errn.CmdParamsErr(resp.LLEN) } if n, err := c.DB.LLen(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -207,12 +207,12 @@ func llenCommand(c *Client) error { func lindexCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.LINDEX) + return errn.CmdParamsErr(resp.LINDEX) } index, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } v, closer, err := c.DB.LIndex(args[0], c.KeyHash, index) @@ -225,25 +225,25 @@ func lindexCommand(c *Client) error { return err } - c.RespWriter.WriteBulk(v) + c.Writer.WriteBulk(v) return nil } func lsetCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.LSET) + return errn.CmdParamsErr(resp.LSET) } index, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if err := c.DB.LSet(args[0], c.KeyHash, index, args[2]); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -252,7 +252,7 @@ func lsetCommand(c *Client) error { func lrangeCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.LRANGE) + return errn.CmdParamsErr(resp.LRANGE) } var start int64 @@ -261,18 +261,18 @@ func lrangeCommand(c *Client) error { start, err = utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } stop, err = utils.ByteToInt64(args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if v, 
err := c.DB.LRange(args[0], c.KeyHash, start, stop); err != nil { return err } else { - c.RespWriter.WriteSliceArray(v) + c.Writer.WriteSliceArray(v) } return nil @@ -281,13 +281,13 @@ func lrangeCommand(c *Client) error { func lclearCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.LCLEAR) + return errn.CmdParamsErr(resp.LCLEAR) } if n, err := c.DB.LClear(c.KeyHash, args...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -295,12 +295,12 @@ func lclearCommand(c *Client) error { func lexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.LEXPIRE) + return errn.CmdParamsErr(resp.LEXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -308,19 +308,19 @@ func lexpireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func lexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.LEXPIREAT) + return errn.CmdParamsErr(resp.LEXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -328,20 +328,20 @@ func lexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func lttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.LTTL) + return errn.CmdParamsErr(resp.LTTL) } if v, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(v) + c.Writer.WriteInteger(v) } return nil @@ -350,13 +350,13 @@ func lttlCommand(c *Client) error { func lpersistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.LPERSIST) + return errn.CmdParamsErr(resp.LPERSIST) } if n, err := 
c.DB.Persist(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -365,13 +365,13 @@ func lpersistCommand(c *Client) error { func lkeyexistsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.LKEYEXISTS) + return errn.CmdParamsErr(resp.LKEYEXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -379,7 +379,7 @@ func lkeyexistsCommand(c *Client) error { func lTrimCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.LTRIM) + return errn.CmdParamsErr(resp.LTRIM) } var start int64 @@ -388,17 +388,17 @@ func lTrimCommand(c *Client) error { start, err = utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } stop, err = utils.ByteToInt64(args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if err := c.DB.LTrim(args[0], c.KeyHash, start, stop); err != nil { return err } else { - c.RespWriter.WriteStatus(resp.ReplyOK) + c.Writer.WriteStatus(resp.ReplyOK) } return nil @@ -407,18 +407,18 @@ func lTrimCommand(c *Client) error { func lTrimFrontCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.LTRIMFRONT) + return errn.CmdParamsErr(resp.LTRIMFRONT) } trimSize, err := utils.ByteToInt64(args[1]) if err != nil || trimSize < 0 { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.LTrimFront(args[0], c.KeyHash, trimSize); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -427,18 +427,18 @@ func lTrimFrontCommand(c *Client) error { func lTrimBackCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.LTRIMBACK) + return errn.CmdParamsErr(resp.LTRIMBACK) } trimSize, err := utils.ByteToInt64(args[1]) if err != nil || 
trimSize < 0 { - return resp.ErrValue + return errn.ErrValue } if n, err := c.DB.LTrimBack(args[0], c.KeyHash, trimSize); err != nil { return err } else { - c.RespWriter.WriteInteger(int64(n)) + c.Writer.WriteInteger(int64(n)) } return nil diff --git a/stored/server/cmd_lua.go b/stored/server/cmd_lua.go index ddeedb2..1a3cc48 100644 --- a/stored/server/cmd_lua.go +++ b/stored/server/cmd_lua.go @@ -20,12 +20,12 @@ import ( "strconv" "strings" + lua "github.com/yuin/gopher-lua" + "github.com/yuin/gopher-lua/parse" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/log" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" - - lua "github.com/yuin/gopher-lua" - "github.com/yuin/gopher-lua/parse" ) var LuaShardCount uint32 = 64 @@ -80,9 +80,9 @@ func init() { } func evalCommand(c *Client) error { - args := resp.StringSlice(c.Args) + args := StringSlice(c.Args) if len(args) < 2 { - return resp.CmdParamsErr(resp.EVAL) + return errn.CmdParamsErr(resp.EVAL) } script, args := args[0], args[1:] @@ -137,7 +137,6 @@ func runLuaScript(c *Client, script string, args []string) error { luaClient.Remove(1) luaClient.Remove(lua.GlobalsIndex) }() - if err := luaClient.DoString(script); err != nil { log.Errorf("ERR Error compiling script (new function): %s", err.Error()) return errors.New(ErrLuaParseError(err)) @@ -148,9 +147,9 @@ func runLuaScript(c *Client, script string, args []string) error { } func evalShaCommand(c *Client) error { - args := resp.StringSlice(c.Args) + args := StringSlice(c.Args) if len(args) < 2 { - return resp.CmdParamsErr(resp.EVALSHA) + return errn.CmdParamsErr(resp.EVALSHA) } sha1, args := args[0], args[1:] @@ -163,10 +162,10 @@ func evalShaCommand(c *Client) error { }() if script == nil { - c.RespWriter.WriteError(errors.New(MsgNoScriptFound)) + c.Writer.WriteError(errors.New(MsgNoScriptFound)) } else { if err := runLuaScript(c, 
string(script), args); err != nil { - c.RespWriter.WriteError(err) + c.Writer.WriteError(err) } } @@ -174,7 +173,7 @@ func evalShaCommand(c *Client) error { } func scriptLoadCmd(c *Client) error { - args := resp.StringSlice(c.Args) + args := StringSlice(c.Args) if len(args) != 2 { return errors.New(fmt.Sprintf(MsgFScriptUsage, "LOAD")) } @@ -184,7 +183,7 @@ func scriptLoadCmd(c *Client) error { if sha1, err = saveLuaScript(c, script); err != nil { return err } - c.RespWriter.WriteBulk([]byte(sha1)) + c.Writer.WriteBulk([]byte(sha1)) return nil } @@ -204,25 +203,25 @@ func saveLuaScript(c *Client, script string) (string, error) { } func scriptExistsCmd(c *Client) error { - args := resp.StringSlice(c.Args) + args := StringSlice(c.Args) if len(args) < 2 { return errors.New(fmt.Sprintf(MsgFScriptUsage, "EXISTS")) } args = args[1:] - c.RespWriter.WriteLen(len(args)) + c.Writer.WriteLen(len(args)) for _, arg := range args { if n, _ := c.DB.ExistsLuaScript([]byte(arg)); n >= 1 { - c.RespWriter.WriteInteger(1) + c.Writer.WriteInteger(1) } else { - c.RespWriter.WriteInteger(0) + c.Writer.WriteInteger(0) } } return nil } func scriptFlushCmd(c *Client) error { - args := resp.StringSlice(c.Args) + args := StringSlice(c.Args) if len(args) != 1 { return errors.New(fmt.Sprintf(MsgFScriptUsage, "FLUSH")) } @@ -230,13 +229,13 @@ func scriptFlushCmd(c *Client) error { if err := c.DB.FlushLuaScript(); err != nil { return err } else { - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") } return nil } func scriptLenCmd(c *Client) error { n := c.DB.LuaScriptLen() - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } diff --git a/stored/server/cmd_scan.go b/stored/server/cmd_scan.go index e1df245..c7191bb 100644 --- a/stored/server/cmd_scan.go +++ b/stored/server/cmd_scan.go @@ -24,6 +24,7 @@ import ( "github.com/zuoyebang/bitalostored/butils/extend" "github.com/zuoyebang/bitalostored/butils/unsafe2" 
"github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" ) @@ -60,7 +61,7 @@ func parseXScanArgs(args [][]byte) (cursor []byte, match string, count int, err switch strings.ToUpper(unsafe2.String(args[i])) { case "MATCH": if i+1 >= len(args) { - err = resp.CmdParamsErr("scan") + err = errn.CmdParamsErr("scan") return } @@ -68,7 +69,7 @@ func parseXScanArgs(args [][]byte) (cursor []byte, match string, count int, err i++ case "COUNT": if i+1 >= len(args) { - err = resp.CmdParamsErr("scan") + err = errn.CmdParamsErr("scan") return } @@ -105,7 +106,7 @@ func (scg scanCommandGroup) xhscanCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.HSCAN) + return errn.CmdParamsErr(resp.HSCAN) } key := args[0] @@ -133,7 +134,7 @@ func (scg scanCommandGroup) xhscanCommand(c *Client) error { data[0] = cursor data[1] = vv - c.RespWriter.WriteArray(data) + c.Writer.WriteArray(data) return nil } @@ -141,7 +142,7 @@ func (scg scanCommandGroup) xsscanCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.SSCAN) + return errn.CmdParamsErr(resp.SSCAN) } key := args[0] @@ -164,7 +165,7 @@ func (scg scanCommandGroup) xsscanCommand(c *Client) error { data[0] = cursor data[1] = ay - c.RespWriter.WriteArray(data) + c.Writer.WriteArray(data) return nil } @@ -172,7 +173,7 @@ func (scg scanCommandGroup) xzscanCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.ZSCAN) + return errn.CmdParamsErr(resp.ZSCAN) } key := args[0] @@ -199,14 +200,14 @@ func (scg scanCommandGroup) xzscanCommand(c *Client) error { data[0] = cursor data[1] = vv - c.RespWriter.WriteArray(data[:]) + c.Writer.WriteArray(data[:]) return nil } func scanCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.SCAN) + return errn.CmdParamsErr(resp.SCAN) } 
cursor, match, count, tp, err := parseGScanArgs(args) @@ -215,7 +216,7 @@ func scanCommand(c *Client) error { } if count < 0 { - return resp.ErrSyntax + return errn.ErrSyntax } else if count > 5000 { return errors.New("ERR count more than 5000") } @@ -232,7 +233,7 @@ func scanCommand(c *Client) error { if cur == nil { cur = []byte("0") } - c.RespWriter.WriteArray([]interface{}{cur, ks}) + c.Writer.WriteArray([]interface{}{cur, ks}) return nil } @@ -248,7 +249,7 @@ func parseGScanArgs(args [][]byte) (cursor []byte, match string, count int, tp s switch strings.ToUpper(unsafe2.String(args[i])) { case "MATCH": if i+1 >= len(args) { - err = resp.CmdParamsErr("scan") + err = errn.CmdParamsErr("scan") return } @@ -256,7 +257,7 @@ func parseGScanArgs(args [][]byte) (cursor []byte, match string, count int, tp s i++ case "COUNT": if i+1 >= len(args) { - err = resp.CmdParamsErr("scan") + err = errn.CmdParamsErr("scan") return } @@ -268,7 +269,7 @@ func parseGScanArgs(args [][]byte) (cursor []byte, match string, count int, tp s i++ case "TYPE": if i+1 >= len(args) { - err = resp.CmdParamsErr("scan") + err = errn.CmdParamsErr("scan") return } diff --git a/stored/server/cmd_server.go b/stored/server/cmd_server.go index 904cfb0..0c136e9 100644 --- a/stored/server/cmd_server.go +++ b/stored/server/cmd_server.go @@ -15,37 +15,41 @@ package server import ( + "os" + "syscall" "time" "github.com/zuoyebang/bitalostored/butils/extend" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" ) func init() { AddCommand(map[string]*Cmd{ - resp.PING: {Sync: false, Handler: pingCommand, NoKey: true}, - resp.ECHO: {Sync: false, Handler: echoCommand, NoKey: true}, - resp.TIME: {Sync: false, Handler: timeCommand, NoKey: true}, + resp.PING: {Sync: false, Handler: pingCommand, NoKey: true}, + resp.ECHO: {Sync: false, Handler: echoCommand, NoKey: true}, + resp.TIME: {Sync: false, Handler: timeCommand, NoKey: true}, + resp.SHUTDOWN: {Sync: 
false, Handler: shutdownCommand, NoKey: true}, }) } func pingCommand(c *Client) error { - c.RespWriter.WriteStatus(resp.ReplyPONG) + c.Writer.WriteStatus(resp.ReplyPONG) return nil } func echoCommand(c *Client) error { if len(c.Args) != 1 { - return resp.CmdParamsErr(resp.ECHO) + return errn.CmdParamsErr(resp.ECHO) } - c.RespWriter.WriteBulk(c.Args[0]) + c.Writer.WriteBulk(c.Args[0]) return nil } func timeCommand(c *Client) error { if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.TIME) + return errn.CmdParamsErr(resp.TIME) } t := time.Now() @@ -59,6 +63,15 @@ func timeCommand(c *Client) error { extend.FormatInt64ToSlice(m), } - c.RespWriter.WriteArray(ay) + c.Writer.WriteArray(ay) + return nil +} + +func shutdownCommand(c *Client) error { + p, _ := os.FindProcess(os.Getpid()) + p.Signal(syscall.SIGTERM) + p.Signal(os.Interrupt) + + c.Writer.WriteStatus(resp.ReplyOK) return nil } diff --git a/stored/server/cmd_set.go b/stored/server/cmd_set.go index bdcb24a..48e28e0 100644 --- a/stored/server/cmd_set.go +++ b/stored/server/cmd_set.go @@ -17,6 +17,7 @@ package server import ( "strconv" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" ) @@ -30,26 +31,25 @@ func init() { resp.SISMEMBER: {Sync: resp.IsWriteCmd(resp.SISMEMBER), Handler: sismemberCommand}, resp.SMEMBERS: {Sync: resp.IsWriteCmd(resp.SMEMBERS), Handler: smembersCommand}, resp.SRANDMEMBER: {Sync: resp.IsWriteCmd(resp.SRANDMEMBER), Handler: srandmemberCommand}, - - resp.SCLEAR: {Sync: resp.IsWriteCmd(resp.SCLEAR), Handler: sclearCommand, KeySkip: 1}, - resp.SEXPIRE: {Sync: resp.IsWriteCmd(resp.SEXPIRE), Handler: sexpireCommand}, - resp.SEXPIREAT: {Sync: resp.IsWriteCmd(resp.SEXPIREAT), Handler: sexpireAtCommand}, - resp.SPERSIST: {Sync: resp.IsWriteCmd(resp.SPERSIST), Handler: spersistCommand}, - resp.STTL: {Sync: resp.IsWriteCmd(resp.STTL), Handler: sttlCommand}, - 
resp.SKEYEXISTS: {Sync: resp.IsWriteCmd(resp.SKEYEXISTS), Handler: skeyexistsCommand}, + resp.SCLEAR: {Sync: resp.IsWriteCmd(resp.SCLEAR), Handler: sclearCommand, KeySkip: 1}, + resp.SEXPIRE: {Sync: resp.IsWriteCmd(resp.SEXPIRE), Handler: sexpireCommand}, + resp.SEXPIREAT: {Sync: resp.IsWriteCmd(resp.SEXPIREAT), Handler: sexpireAtCommand}, + resp.SPERSIST: {Sync: resp.IsWriteCmd(resp.SPERSIST), Handler: spersistCommand}, + resp.STTL: {Sync: resp.IsWriteCmd(resp.STTL), Handler: sttlCommand}, + resp.SKEYEXISTS: {Sync: resp.IsWriteCmd(resp.SKEYEXISTS), Handler: skeyexistsCommand}, }) } func saddCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.SADD) + return errn.CmdParamsErr(resp.SADD) } if n, err := c.DB.SAdd(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -58,13 +58,13 @@ func saddCommand(c *Client) error { func scardCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.SCARD) + return errn.CmdParamsErr(resp.SCARD) } if n, err := c.DB.SCard(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -73,13 +73,13 @@ func scardCommand(c *Client) error { func sismemberCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.SISMEMBER) + return errn.CmdParamsErr(resp.SISMEMBER) } if n, err := c.DB.SIsMember(args[0], c.KeyHash, args[1]); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -88,7 +88,7 @@ func sismemberCommand(c *Client) error { func smembersCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.SMEMBERS) + return errn.CmdParamsErr(resp.SMEMBERS) } res, err := c.DB.SMembers(args[0], c.KeyHash) @@ -96,7 +96,7 @@ func smembersCommand(c *Client) error { return err } - 
c.RespWriter.WriteSliceArray(res) + c.Writer.WriteSliceArray(res) return nil } @@ -104,16 +104,16 @@ func smembersCommand(c *Client) error { func srandmemberCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.SRANDMEMBER) + return errn.CmdParamsErr(resp.SRANDMEMBER) } else if len(args) > 2 { - return resp.ErrSyntax + return errn.ErrSyntax } count := 1 if len(args) == 2 { var err error if count, err = strconv.Atoi(string(args[1])); err != nil { - return resp.ErrValue + return errn.ErrValue } } @@ -122,12 +122,12 @@ func srandmemberCommand(c *Client) error { return err } if len(args) == 2 { - c.RespWriter.WriteSliceArray(res) + c.Writer.WriteSliceArray(res) } else { if len(res) >= 1 { - c.RespWriter.WriteBulk(res[0]) + c.Writer.WriteBulk(res[0]) } else { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } } return nil @@ -137,13 +137,13 @@ func srandmemberCommand(c *Client) error { func sremCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.SREM) + return errn.CmdParamsErr(resp.SREM) } if n, err := c.DB.SRem(args[0], c.KeyHash, args[1:]...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -153,7 +153,7 @@ func spopCommand(c *Client) error { args := c.Args if len(args) < 1 || len(args) > 2 { - return resp.CmdParamsErr(resp.SPOP) + return errn.CmdParamsErr(resp.SPOP) } var count int64 = 1 @@ -161,10 +161,10 @@ func spopCommand(c *Client) error { var err error count, err = utils.ByteToInt64(args[1]) if err != nil || count < 0 { - return resp.ErrValue + return errn.ErrValue } if count == 0 { - c.RespWriter.WriteSliceArray(nil) + c.Writer.WriteSliceArray(nil) return nil } } @@ -174,12 +174,12 @@ func spopCommand(c *Client) error { return err } if len(args) == 2 { - c.RespWriter.WriteSliceArray(res) + c.Writer.WriteSliceArray(res) } else { if len(res) >= 1 { - c.RespWriter.WriteBulk(res[0]) + c.Writer.WriteBulk(res[0]) } else { 
- c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } } return nil @@ -188,13 +188,13 @@ func spopCommand(c *Client) error { func sclearCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.SCLEAR) + return errn.CmdParamsErr(resp.SCLEAR) } if n, err := c.DB.SClear(c.KeyHash, args...); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -203,12 +203,12 @@ func sclearCommand(c *Client) error { func sexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.SEXPIRE) + return errn.CmdParamsErr(resp.SEXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -216,19 +216,19 @@ func sexpireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func sexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.SEXPIREAT) + return errn.CmdParamsErr(resp.SEXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -236,20 +236,20 @@ func sexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func sttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.STTL) + return errn.CmdParamsErr(resp.STTL) } if v, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(v) + c.Writer.WriteInteger(v) } return nil @@ -259,13 +259,13 @@ func sttlCommand(c *Client) error { func spersistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.SPERSIST) + return errn.CmdParamsErr(resp.SPERSIST) } if n, err := c.DB.Persist(args[0], c.KeyHash); err != nil { return err } else { - 
c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -274,13 +274,13 @@ func spersistCommand(c *Client) error { func skeyexistsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.SKEYEXISTS) + return errn.CmdParamsErr(resp.SKEYEXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } diff --git a/stored/server/cmd_test/cmd_global_test.go b/stored/server/cmd_test/cmd_global_test.go index f898546..d377ed1 100644 --- a/stored/server/cmd_test/cmd_global_test.go +++ b/stored/server/cmd_test/cmd_global_test.go @@ -20,8 +20,9 @@ import ( "testing" "time" - "github.com/gomodule/redigo/redis" "github.com/zuoyebang/bitalostored/stored/internal/resp" + + "github.com/gomodule/redigo/redis" ) func TestExpireKey(t *testing.T) { diff --git a/stored/server/cmd_test/cmd_hash_test.go b/stored/server/cmd_test/cmd_hash_test.go index 5d4a0f0..59e7880 100644 --- a/stored/server/cmd_test/cmd_hash_test.go +++ b/stored/server/cmd_test/cmd_hash_test.go @@ -22,9 +22,10 @@ import ( "testing" "time" + "github.com/zuoyebang/bitalostored/stored/internal/resp" + "github.com/gomodule/redigo/redis" "github.com/stretchr/testify/require" - "github.com/zuoyebang/bitalostored/stored/internal/resp" ) func TestHash(t *testing.T) { diff --git a/stored/server/cmd_test/cmd_keys_test.go b/stored/server/cmd_test/cmd_keys_test.go index 7a650a2..521391c 100644 --- a/stored/server/cmd_test/cmd_keys_test.go +++ b/stored/server/cmd_test/cmd_keys_test.go @@ -18,11 +18,12 @@ import ( "fmt" "testing" - "github.com/gomodule/redigo/redis" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/tclock" + + "github.com/gomodule/redigo/redis" ) func TestKeysCmd(t *testing.T) { diff --git 
a/stored/server/cmd_test/cmd_migrate_test.go b/stored/server/cmd_test/cmd_migrate_test.go index 010a197..7a10bae 100644 --- a/stored/server/cmd_test/cmd_migrate_test.go +++ b/stored/server/cmd_test/cmd_migrate_test.go @@ -5,9 +5,11 @@ import ( "testing" "time" - "github.com/gomodule/redigo/redis" - "github.com/zuoyebang/bitalostored/stored/internal/log" "github.com/zuoyebang/bitalostored/stored/internal/utils" + + "github.com/zuoyebang/bitalostored/stored/internal/log" + + "github.com/gomodule/redigo/redis" ) func testMigrateExpire(t *testing.T) { diff --git a/stored/server/cmd_test/cmd_pipeline_test.go b/stored/server/cmd_test/cmd_pipeline_test.go new file mode 100644 index 0000000..36a6f68 --- /dev/null +++ b/stored/server/cmd_test/cmd_pipeline_test.go @@ -0,0 +1,80 @@ +// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cmd_test + +import ( + "reflect" + "testing" +) + +func TestPipeline(t *testing.T) { + c := getTestConn() + defer c.Close() + + key1 := []byte("testpipekey1") + val1 := testRandBytes(10 << 10) + key2 := []byte("testpipekey2") + val2 := testRandBytes(10 << 10) + key3 := []byte("testpipekey3") + val3 := testRandBytes(10 << 10) + + testCommands := []struct { + args []interface{} + expected interface{} + }{ + { + []interface{}{"SET", key1, val1}, + "OK", + }, + { + []interface{}{"SET", key2, val2}, + "OK", + }, + { + []interface{}{"SET", key3, val3}, + "OK", + }, + { + []interface{}{"GET", key1}, + val1, + }, + { + []interface{}{"GET", key2}, + val2, + }, + { + []interface{}{"GET", key3}, + val3, + }, + } + + for _, cmd := range testCommands { + if err := c.Send(cmd.args[0].(string), cmd.args[1:]...); err != nil { + t.Fatalf("Send(%v) returned error %v", cmd.args, err) + } + } + if err := c.Flush(); err != nil { + t.Errorf("Flush() returned error %v", err) + } + for _, cmd := range testCommands { + actual, err := c.Receive() + if err != nil { + t.Fatalf("Receive(%v) returned error %v", cmd.args, err) + } + if !reflect.DeepEqual(actual, cmd.expected) { + t.Errorf("Receive(%v) = %v, want %v", cmd.args, actual, cmd.expected) + } + } +} diff --git a/stored/server/cmd_test/cmd_set_test.go b/stored/server/cmd_test/cmd_set_test.go index 9a01d11..b6a6c40 100644 --- a/stored/server/cmd_test/cmd_set_test.go +++ b/stored/server/cmd_test/cmd_set_test.go @@ -17,8 +17,9 @@ package cmd_test import ( "testing" + "github.com/zuoyebang/bitalostored/stored/internal/errn" + "github.com/gomodule/redigo/redis" - "github.com/zuoyebang/bitalostored/stored/internal/resp" ) func TestSet(t *testing.T) { @@ -277,13 +278,13 @@ func TestSetRandMember(t *testing.T) { for i := 0; i < readNum; i++ { if _, err := redis.Values(c.Do("srandmember", key, 2, -1)); err == nil { t.Fatal(" err should not nil") - } else if err.Error() != resp.ErrSyntax.Error() { + } else if err.Error() != 
errn.ErrSyntax.Error() { t.Fatal(err) } if _, err := redis.Values(c.Do("srandmember", key, 1.3)); err == nil { t.Fatal(" err should not nil") - } else if err.Error() != resp.ErrValue.Error() { + } else if err.Error() != errn.ErrValue.Error() { t.Fatal(err) } diff --git a/stored/server/cmd_test/cmd_string_bit_test.go b/stored/server/cmd_test/cmd_string_bit_test.go new file mode 100644 index 0000000..feb2f09 --- /dev/null +++ b/stored/server/cmd_test/cmd_string_bit_test.go @@ -0,0 +1,370 @@ +package cmd_test + +import ( + "fmt" + "github.com/zuoyebang/bitalostored/stored/internal/errn" + "testing" + "time" + + "github.com/RoaringBitmap/roaring/roaring64" + "github.com/gomodule/redigo/redis" + "github.com/stretchr/testify/assert" +) + +var bitmapFlushEnable bool + +func TestBitBase(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_base" + c.Do("del", bitKey) + + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 1 { + t.Fatal(n) + } + if n, err := redis.Int(c.Do("getbit", bitKey, pos)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + if n, err := redis.Int(c.Do("getbit", bitKey, pos+1)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 0, n) + } + if n, err := redis.Int(c.Do("bitpos", bitKey, 1, 0, 100)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 7, n) + } + if n, err := redis.Int(c.Do("bitcount", bitKey, 0, 100)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } +} + +func TestBitStrlen(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_strlen" + c.Do("del", bitKey) + + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + if n, err := redis.Int(c.Do("strlen", bitKey)); err != nil || n <= 0 
{ + t.Fatal(err, n) + } +} + +func TestBitExist(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_exist" + c.Do("del", bitKey) + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + if n, err := redis.Int(c.Do("exists", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + if n, err := redis.Int(c.Do("del", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + if n, err := redis.Int(c.Do("exists", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 0, n) + } +} + +func TestBitGetSet(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_getset" + c.Do("del", bitKey) + pos := 7 + + rb := roaring64.NewBitmap() + rb.Add(uint64(pos)) + bin, _ := rb.MarshalBinary() + + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + + newVal := "abc" + if res, err := redis.Bytes(c.Do("getset", bitKey, newVal)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, bin, res) + } + + if res, err := redis.String(c.Do("get", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, newVal, res) + } +} + +func TestBitStringSetNx(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_setnx" + c.Do("del", bitKey) + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + + newVal := "abc" + if n, err := redis.Int(c.Do("setnx", bitKey, newVal)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 0, n) + } +} + +func TestBitStringExpire(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_expire" + c.Do("del", bitKey) + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + + if n, err := 
redis.Int(c.Do("expire", bitKey, 10)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + + if n, err := redis.Int(c.Do("ttl", bitKey)); err != nil { + t.Fatal(err) + } else { + if n <= 0 || n > 10 { + t.Fatal("ttl", n) + } + } + + newTtl := time.Now().Unix() + 10 + if n, err := redis.Int(c.Do("expireAt", bitKey, newTtl)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + + if n, err := redis.Int(c.Do("ttl", bitKey)); err != nil { + t.Fatal(err) + } else { + if n <= 0 || n > 10 { + t.Fatal("ttl", n) + } + } + if n, err := redis.Int(c.Do("pttl", bitKey)); err != nil { + t.Fatal(err) + } else { + if n <= 0 || n > 10000 { + t.Fatal("ttl", n) + } + } + + if _, err := redis.Int(c.Do("persist", bitKey)); err != nil { + t.Fatal(err) + } + if n, err := redis.Int(c.Do("ttl", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, -1, n) + } + + newTtl = time.Now().Unix() - 10 + if n, err := redis.Int(c.Do("expireAt", bitKey, newTtl)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, 1, n) + } + if n, err := redis.Int(c.Do("ttl", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, -2, n) + } + + if _, err := redis.Int(c.Do("del", bitKey)); err != nil { + t.Fatal(err) + } + if n, err := redis.Int(c.Do("ttl", bitKey)); err != nil { + t.Fatal(err) + } else { + assert.Equal(t, -2, n) + } +} + +func TestBitWrongType(t *testing.T) { + c := getTestConn() + defer c.Close() + + bitKey := "bit_key_wrongtype" + c.Do("del", bitKey) + pos := 7 + if n, err := redis.Int(c.Do("setbit", bitKey, pos, 1)); err != nil { + t.Fatal(err) + } else if n != 0 { + t.Fatal(n) + } + + if _, err := (c.Do("hlen", bitKey)); err != nil { + if err.Error() != errn.ErrWrongType.Error() { + t.Fatal(err) + } + } +} + +func TestBitWriteRead20Key(t *testing.T) { + if !bitmapFlushEnable { + return + } + num := 20 + posBit1 := 7 + prefix := "TestBitWriteRead20Key" + delSeqExec(prefix, num) + setbitSeqExec(prefix, posBit1, num) + 
getbitSeqExec(prefix, posBit1, num) +} + +func TestBitWrite20ExpireKey(t *testing.T) { + // set bitmapItemMax = 20 + // set bitmapFlushSecond = 60 + if !bitmapFlushEnable { + return + } + + prefix := "TestBitWrite20ExpireKey" + n := 20 + pos := 7 + delSeqExec(prefix, n) + + // check log manually: bitmap item flush. expireNum:20 nullNum:0 flushNum:0 + c := getTestConn() + defer c.Close() + for i := 0; i < n; i++ { + bitKey := getBitKey(prefix, i) + c.Do("setbit", bitKey, pos, 1) + c.Do("expire", bitKey, 1) + } +} + +func TestBitWrite20EmptyKey(t *testing.T) { + // set bitmapItemMax = 20 + // set bitmapFlushSecond = 60 + if !bitmapFlushEnable { + return + } + + prefix := "TestBitWrite20EmptyKey" + n := 20 + pos := 7 + + // check log manually: bitmap item flush. expireNum:0 nullNum:20 flushNum:0 + c := getTestConn() + defer c.Close() + for i := 0; i < n; i++ { + bitKey := getBitKey(prefix, i) + c.Do("setbit", bitKey, pos, 1) + c.Do("setbit", bitKey, pos, 0) + } +} + +func TestBitEvictPolicy(t *testing.T) { + // set bitmapItemMax = 20 + // set bitmapFlushSecond = 60 + if !bitmapFlushEnable { + return + } + + n := 20 + pos := 7 + prefix := "TestBitEvictPolicy" + delSeqExec(prefix, n) + setbitSeqExec(prefix, pos, n) + + fmt.Println("wait 60 seconds...") + time.Sleep(60 * time.Second) + // check log manually: bitmap evict itemNum:6 + + // check get ok + getbitSeqExec(prefix, pos, n) +} + +func delSeqExec(prefix string, num int) { + c := getTestConn() + defer c.Close() + for index := 0; index < num; index++ { + bitKey := getBitKey(prefix, index) + _, err := c.Do("del", bitKey) + if err != nil { + fmt.Printf("del key:%s err:%s", bitKey, err) + } + } +} + +func setbitSeqExec(prefix string, pos, num int) { + c := getTestConn() + defer c.Close() + for index := 0; index < num; index++ { + bitKey := getBitKey(prefix, index) + _, err := c.Do("setbit", bitKey, pos, 1) + if err != nil { + fmt.Printf("setbit key:%s err:%s", bitKey, err) + } + } +} + +func getbitSeqExec(prefix 
string, pos, num int) { + c := getTestConn() + defer c.Close() + for index := 0; index < num; index++ { + bitKey := getBitKey(prefix, index) + n, err := redis.Int(c.Do("getbit", bitKey, pos)) + if n != 1 || err != nil { + fmt.Printf("key:%s pos:%d expect:1 actual:%d err:%s", bitKey, pos, 1, err) + } + } +} + +func getBitKey(prefix string, index int) string { + return fmt.Sprintf("%s_%d", prefix, index) +} diff --git a/stored/server/cmd_test/cmd_string_test.go b/stored/server/cmd_test/cmd_string_test.go index 0253302..5eeb2be 100644 --- a/stored/server/cmd_test/cmd_string_test.go +++ b/stored/server/cmd_test/cmd_string_test.go @@ -15,8 +15,8 @@ package cmd_test import ( - "crypto/md5" "fmt" + "math/rand" "sync" "sync/atomic" "testing" @@ -29,17 +29,37 @@ import ( const defaultValBytes = "1qaz2wsx3edc4rfv5tgb6yhn7ujm8ik9ol0p1qaz2wsx3edc4rfv5tgb6yhn7ujm8ik9ol0p1qaz2wsx3edc4rfv5tgb6yhn7ujm8ik9ol0p" +func testRandBytes(len int) []byte { + val := make([]byte, len) + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < len; i++ { + b := r.Intn(26) + 65 + val[i] = byte(b) + } + return val +} + func TestKVSet(t *testing.T) { c := getTestConn() defer c.Close() - key := fmt.Sprintf("%x", md5.Sum([]byte("xingfu"))) - val := "helloworldhelloworld" - if ok, err := redis.String(c.Do("set", key, val)); err != nil { - t.Fatal(err) - } else if ok != resp.ReplyOK { - t.Fatal(ok) - } + key := []byte("testkvkey1") + val := testRandBytes(6 << 20) + ok, err := redis.String(c.Do("set", key, val)) + require.NoError(t, err) + require.Equal(t, resp.ReplyOK, ok) + v, err := redis.String(c.Do("get", key)) + require.NoError(t, err) + require.Equal(t, string(val), v) + + key2 := []byte("testkvkey2") + val2 := testRandBytes(6 << 20) + ok, err = redis.String(c.Do("set", key2, val2)) + require.NoError(t, err) + require.Equal(t, resp.ReplyOK, ok) + v, err = redis.String(c.Do("get", key2)) + require.NoError(t, err) + require.Equal(t, string(val2), v) } func TestKVSetEx(t *testing.T) 
{ @@ -94,7 +114,7 @@ func TestKVSetEx(t *testing.T) { for i := 0; i < readNum; i++ { if n, err := redis.Int64(c.Do("pttl", key)); err != nil { t.Fatal(err) - } else if n != 1300 { + } else if n < 1299 { t.Fatalf("ttl fail exp:%d act:%d", 1300, n) } if n, err := redis.Int64(c.Do("ttl", key)); err != nil { diff --git a/stored/server/cmd_test/cmd_tx_test.go b/stored/server/cmd_test/cmd_tx_test.go index 5d1ab28..2d64b82 100644 --- a/stored/server/cmd_test/cmd_tx_test.go +++ b/stored/server/cmd_test/cmd_tx_test.go @@ -20,9 +20,18 @@ import ( "time" "github.com/gomodule/redigo/redis" + "github.com/zuoyebang/bitalostored/stored/internal/errn" ) +func isSkipTestTx() bool { + return skipTx +} + func TestTxMulti(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -36,6 +45,10 @@ func TestTxMulti(t *testing.T) { } func TestTxMultiNested(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -47,13 +60,17 @@ func TestTxMultiNested(t *testing.T) { } } if _, err := c.Do("multi"); err != nil { - if err.Error() != "ERR MULTI calls can not be nested" { + if err.Error() != errn.ErrMultiNested.Error() { t.Fatal(err) } } } func TestTxPrepareExecNoWatch(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -88,20 +105,23 @@ func TestTxPrepareExecNoWatch(t *testing.T) { } } - if res, err := redis.Values(c.Do("exec")); err != nil { + if res, err := redis.ByteSlices(c.Do("exec")); err != nil { t.Fatal(err) } else { if len(res) != 1 { t.Fatal("res len != 1", len(res)) } - getv := res[0].([]byte) - if !bytes.Equal(getv, []byte(val)) { - t.Fatalf("res actual:%s expect:%s", getv, val) + if !bytes.Equal(res[0], []byte(val)) { + t.Fatalf("res actual:%s expect:%s", res[0], val) } } } func TestTxPrepareDiscardNoWatch(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -146,6 +166,10 @@ func TestTxPrepareDiscardNoWatch(t *testing.T) { } func 
TestTxPrepareExecWatchNoChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -184,20 +208,23 @@ func TestTxPrepareExecWatchNoChange(t *testing.T) { } } - if res, err := redis.Values(c.Do("exec")); err != nil { + if res, err := redis.ByteSlices(c.Do("exec")); err != nil { t.Fatal(err) } else { if len(res) != 1 { t.Fatal("res len != 1", len(res)) } - getv := res[0].([]byte) - if !bytes.Equal(getv, []byte(val)) { - t.Fatalf("res actual:%s expect:%s", getv, val) + if !bytes.Equal(res[0], []byte(val)) { + t.Fatalf("res actual:%s expect:%s", res[0], val) } } } func TestTxWatch(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -242,20 +269,23 @@ func TestTxWatch(t *testing.T) { } } - if res, err := redis.Values(c.Do("exec")); err != nil { + if res, err := redis.ByteSlices(c.Do("exec")); err != nil { t.Fatal(err) } else { if len(res) != 1 { t.Fatal("res len != 1", len(res)) } - getv := res[0].([]byte) - if !bytes.Equal(getv, []byte(val)) { - t.Fatalf("res actual:%s expect:%s", getv, val) + if !bytes.Equal(res[0], []byte(val)) { + t.Fatalf("res actual:%s expect:%s", res[0], val) } } } func TestTxWatchInMulti(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -282,6 +312,10 @@ func TestTxWatchInMulti(t *testing.T) { } func TestTxUnwatchInMulti(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -308,6 +342,10 @@ func TestTxUnwatchInMulti(t *testing.T) { } func TestTxUnwatchFirst(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -338,6 +376,10 @@ func TestTxUnwatchFirst(t *testing.T) { } func TestTxUnwatchBeforeMulti(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -380,20 +422,23 @@ func TestTxUnwatchBeforeMulti(t *testing.T) { } } - if res, err := redis.Values(c.Do("exec")); err != nil { + if res, err := 
redis.ByteSlices(c.Do("exec")); err != nil { t.Fatal(err) } else { if len(res) != 1 { t.Fatal("res len != 1", len(res)) } - getv := res[0].([]byte) - if !bytes.Equal(getv, []byte(val)) { - t.Fatalf("res actual:%s expect:%s", getv, val) + if !bytes.Equal(res[0], []byte(val)) { + t.Fatalf("res actual:%s expect:%s", res[0], val) } } } func TestTxWatchAndSet(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -450,16 +495,22 @@ func TestTxWatchAndSet(t *testing.T) { } if res, err := redis.Values(c.Do("exec")); err != nil { - t.Fatal(err) + t.Fatal(res, err) } else { if len(res) != 3 { t.Fatal("res len != 3", len(res)) } - getv := res[0].([]byte) + getv, ok := res[0].([]byte) + if !ok { + t.Fatal(res[0]) + } if !bytes.Equal(getv, []byte(val)) { t.Fatalf("res actual:%s expect:%s", getv, val) } - getv = res[2].([]byte) + getv, ok = res[2].([]byte) + if !ok { + t.Fatal(res[2]) + } if !bytes.Equal(getv, []byte(newVal)) { t.Fatalf("res actual:%s expect:%s", getv, newVal) } @@ -467,6 +518,10 @@ func TestTxWatchAndSet(t *testing.T) { } func TestTxPrepareWatchChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -487,13 +542,17 @@ func TestTxPrepareWatchChange(t *testing.T) { } } if _, err := c.Do("prepare"); err != nil { - if err.Error() != "Err watch key changed" { + if err.Error() != errn.ErrWatchKeyChanged.Error() { t.Fatal(err) } } } func TestTxPrepare3KeyNoChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() c2 := getTestConn() @@ -544,6 +603,10 @@ func TestTxPrepare3KeyNoChange(t *testing.T) { } func TestTxPrepare3KeyOtherChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() c2 := getTestConn() @@ -599,6 +662,10 @@ func TestTxPrepare3KeyOtherChange(t *testing.T) { } func TestTxPrepareDeadlock(t *testing.T) { + if isSkipTestTx() { + return + } + c1 := getTestConn() defer c1.Close() c2 := getTestConn() @@ 
-643,7 +710,7 @@ func TestTxPrepareDeadlock(t *testing.T) { } } if _, err := c2.Do("prepare"); err != nil { - if err.Error() != "Err prepare lock fail" { + if err.Error() != errn.ErrPrepareLockFail.Error() { t.Fatal(err) } } @@ -658,6 +725,10 @@ func TestTxPrepareDeadlock(t *testing.T) { } func TestTxReWatchAndChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -681,13 +752,17 @@ func TestTxReWatchAndChange(t *testing.T) { } } if _, err := c.Do("prepare"); err != nil { - if err.Error() != "Err watch key changed" { + if err.Error() != errn.ErrWatchKeyChanged.Error() { t.Fatal(err) } } } func TestTxDiscard(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -743,6 +818,10 @@ func TestTxDiscard(t *testing.T) { } func TestTxModifyByOtherClient(t *testing.T) { + if isSkipTestTx() { + return + } + c1 := getTestConn() defer c1.Close() @@ -778,12 +857,12 @@ func TestTxModifyByOtherClient(t *testing.T) { t.Fatal(err) } if _, err := c1.Do("prepare"); err != nil { - if err.Error() != "Err watch key changed" { + if err.Error() != errn.ErrWatchKeyChanged.Error() { t.Fatal(err) } } if _, err := c1.Do("discard"); err != nil { - if err.Error() != "ERR DISCARD without MULTI" { + if err.Error() != errn.ErrDiscardNoMulti.Error() { t.Fatal(err) } } @@ -798,6 +877,10 @@ func TestTxModifyByOtherClient(t *testing.T) { } func TestTxCloseClient(t *testing.T) { + if isSkipTestTx() { + return + } + tryNum := 5 c1 := getTestConn() @@ -853,6 +936,10 @@ func TestTxCloseClient(t *testing.T) { } func TestTxPrepareNested(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -867,24 +954,32 @@ func TestTxPrepareNested(t *testing.T) { t.Fatal(err) } if _, err := c.Do("prepare"); err != nil { - if err.Error() != "ERR PREPARE calls can not be nested" { + if err.Error() != errn.ErrPrepareNested.Error() { t.Fatal(err) } } } func TestTxPrepareWithoutMulti(t *testing.T) { + if 
isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() if _, err := c.Do("prepare"); err != nil { - if err.Error() != "ERR PREPARE without MULTI" { + if err.Error() != errn.ErrPrepareNoMulti.Error() { t.Fatal(err) } } } func TestTxMultiNoCommand(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -904,28 +999,96 @@ func TestTxMultiNoCommand(t *testing.T) { } } - if res, err := c.Do("exec"); err != nil { + if res, err := redis.String(c.Do("exec")); err != nil { t.Fatal(res, err) } else { - r := res.(string) - if r != "(empty array)" { + if res != "(empty array)" { t.Fatal("res expect:empty array", res) } } } +func TestTxMultiCommand(t *testing.T) { + if isSkipTestTx() { + return + } + + c := getTestConn() + defer c.Close() + + if _, err := c.Do("set", "a", "a"); err != nil { + t.Fatal(err) + } + if _, err := c.Do("set", "b", "b"); err != nil { + t.Fatal(err) + } + + if res, err := redis.String(c.Do("multi")); err != nil { + t.Fatal(err) + } else { + if res != "OK" { + t.Fatal("res is not ok", res) + } + } + + if res, err := redis.String(c.Do("get", "a")); err != nil { + t.Fatal(err) + } else { + if res != "QUEUED" { + t.Fatal(res) + } + } + if res, err := redis.String(c.Do("get", "b")); err != nil { + t.Fatal(err) + } else { + if res != "QUEUED" { + t.Fatal(res) + } + } + + if res, err := redis.String(c.Do("prepare")); err != nil { + t.Fatal(err) + } else { + if res != "OK" { + t.Fatal("prepare not ok", res) + } + } + + if res, err := redis.Strings(c.Do("exec")); err != nil { + t.Fatal(res, err) + } else { + if len(res) != 2 { + t.Fatal("len err", len(res)) + } + if res[0] != "a" { + t.Fatal(res[0]) + } + if res[1] != "b" { + t.Fatal(res[1]) + } + } +} + func TestTxDiscardOnly(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() if _, err := c.Do("discard"); err != nil { - if err.Error() != "ERR DISCARD without MULTI" { + if err.Error() != errn.ErrDiscardNoMulti.Error() { 
t.Fatal(err) } } } func TestTxDiscardWatch(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -938,13 +1101,17 @@ func TestTxDiscardWatch(t *testing.T) { } } if _, err := c.Do("discard"); err != nil { - if err.Error() != "ERR DISCARD without MULTI" { + if err.Error() != errn.ErrDiscardNoMulti.Error() { t.Fatal(err) } } } func TestTxDiscardMulti(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -973,6 +1140,10 @@ func TestTxDiscardMulti(t *testing.T) { } func TestTxDiscardPrepare(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -1011,6 +1182,10 @@ func TestTxDiscardPrepare(t *testing.T) { } func TestTxDiscardPrepareWatchChanged(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() @@ -1041,18 +1216,22 @@ func TestTxDiscardPrepareWatchChanged(t *testing.T) { t.Fatal(err) } if _, err := c.Do("prepare"); err != nil { - if err.Error() != "Err watch key changed" { + if err.Error() != errn.ErrWatchKeyChanged.Error() { t.Fatal(err) } } if _, err := c.Do("discard"); err != nil { - if err.Error() != "ERR DISCARD without MULTI" { + if err.Error() != errn.ErrDiscardNoMulti.Error() { t.Fatal(err) } } } func TestTxDiscard3KeyNoChange(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() c2 := getTestConn() @@ -1112,6 +1291,10 @@ func TestTxDiscard3KeyNoChange(t *testing.T) { } func TestTxDiscard3KeyUnlockTimeout(t *testing.T) { + if isSkipTestTx() { + return + } + c := getTestConn() defer c.Close() c2 := getTestConn() @@ -1163,7 +1346,7 @@ func TestTxDiscard3KeyUnlockTimeout(t *testing.T) { } time.Sleep(5 * time.Second) if res, err := c.Do("discard"); err != nil { - if err.Error() != "ERR DISCARD without MULTI" { + if err.Error() != errn.ErrDiscardNoMulti.Error() { t.Fatal(err) } } else { diff --git a/stored/server/cmd_test/define_test.go b/stored/server/cmd_test/define_test.go index 
7c7aa57..e08ec5f 100644 --- a/stored/server/cmd_test/define_test.go +++ b/stored/server/cmd_test/define_test.go @@ -27,8 +27,10 @@ func init() { if cacheEable { readNum = 2 } + skipTx = true } +var skipTx bool var readNum int = 1 var redisPool *redis.Pool diff --git a/stored/server/cmd_tx.go b/stored/server/cmd_tx.go index d77c66a..1521cf2 100644 --- a/stored/server/cmd_tx.go +++ b/stored/server/cmd_tx.go @@ -19,7 +19,6 @@ import ( "github.com/zuoyebang/bitalostored/butils/hash" "github.com/zuoyebang/bitalostored/butils/unsafe2" - "github.com/zuoyebang/bitalostored/stored/internal/config" "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" @@ -42,7 +41,7 @@ func watchCommand(c *Client) error { } args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.WATCH) + return errn.CmdParamsErr(resp.WATCH) } if !c.server.IsMaster() { return errn.ErrTxNotInMaster @@ -59,7 +58,7 @@ func watchCommand(c *Client) error { } c.addWatchKey(c.server.txLocks.GetTxLock(khash), args[i], c.QueryStartTime) } - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } @@ -68,13 +67,13 @@ func unwatchCommand(c *Client) error { return errn.ErrTxDisable } if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.WATCH) + return errn.CmdParamsErr(resp.WATCH) } if c.txState&TxStateWatch != 0 { c.txState &= ^(TxStateWatch) c.unwatchKey() } - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } @@ -83,7 +82,7 @@ func multiCommand(c *Client) error { return errn.ErrTxDisable } if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.MULTI) + return errn.CmdParamsErr(resp.MULTI) } if c.txState&TxStateMulti != 0 { return errn.ErrMultiNested @@ -98,7 +97,7 @@ func multiCommand(c *Client) error { c.txState |= TxStateMulti c.enableCommandQueued() c.server.txParallelCounter.Add(1) - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return 
nil } @@ -107,7 +106,7 @@ func prepareCommand(c *Client) error { return errn.ErrTxDisable } if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.PREPARE) + return errn.CmdParamsErr(resp.PREPARE) } if c.txState&TxStateMulti == 0 { return errn.ErrPrepareNoMulti @@ -124,7 +123,7 @@ func prepareCommand(c *Client) error { } c.txState |= TxStatePrepare - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } @@ -266,14 +265,14 @@ func execCommand(c *Client) (cerr error) { return errn.ErrTxDisable } if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.EXEC) + return errn.CmdParamsErr(resp.EXEC) } if c.txState&TxStatePrepare == 0 { return errn.ErrExecNotPrepared } prepareState := c.prepareState.Load() if prepareState == PrepareStateKeyModified || prepareState == PrepareStateLockFail { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) return nil } @@ -293,7 +292,7 @@ func execCommand(c *Client) (cerr error) { defer releaseLock() if len(c.commandQueue) == 0 { - c.RespWriter.WriteStatus("(empty array)") + c.Writer.WriteStatus("(empty array)") return nil } @@ -308,12 +307,12 @@ func execCommand(c *Client) (cerr error) { } c.disableCommandQueued() - c.RespWriter.SetCached() + c.Writer.SetCached() for _, command := range c.commandQueue { - c.HandleRequest(config.GlobalConfig.Plugin.OpenRaft, command, false) + c.HandleRequest(command, false) } - c.RespWriter.UnsetCached() - c.RespWriter.FlushCached() + c.Writer.UnsetCached() + c.Writer.FlushCached() return nil } @@ -322,13 +321,13 @@ func discardCommand(c *Client) error { return errn.ErrTxDisable } if len(c.Args) != 0 { - return resp.CmdParamsErr(resp.DISCARD) + return errn.CmdParamsErr(resp.DISCARD) } if c.txState&TxStateMulti == 0 { return errn.ErrDiscardNoMulti } c.discard() - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } diff --git a/stored/server/cmd_zset.go b/stored/server/cmd_zset.go index 446179b..24d8f85 100644 --- a/stored/server/cmd_zset.go +++ 
b/stored/server/cmd_zset.go @@ -16,7 +16,6 @@ package server import ( "bytes" - "errors" "math" "strconv" "strings" @@ -29,8 +28,6 @@ import ( "github.com/zuoyebang/bitalostored/stored/internal/utils" ) -var errScoreOverflow = errors.New("zset score overflow") - func init() { AddCommand(map[string]*Cmd{ resp.ZADD: {Sync: resp.IsWriteCmd(resp.ZADD), Handler: zaddCommand}, @@ -50,24 +47,23 @@ func init() { resp.ZLEXCOUNT: {Sync: resp.IsWriteCmd(resp.ZLEXCOUNT), Handler: zlexcountCommand}, resp.ZCOUNT: {Sync: resp.IsWriteCmd(resp.ZCOUNT), Handler: zcountCommand}, resp.ZCARD: {Sync: resp.IsWriteCmd(resp.ZCARD), Handler: zcardCommand}, - - resp.ZCLEAR: {Sync: resp.IsWriteCmd(resp.ZCLEAR), Handler: zclearCommand, KeySkip: 1}, - resp.ZKEYEXISTS: {Sync: resp.IsWriteCmd(resp.ZKEYEXISTS), Handler: zkeyexistsCommand}, - resp.ZEXPIRE: {Sync: resp.IsWriteCmd(resp.ZEXPIRE), Handler: zexpireCommand}, - resp.ZEXPIREAT: {Sync: resp.IsWriteCmd(resp.ZEXPIREAT), Handler: zexpireAtCommand}, - resp.ZTTL: {Sync: resp.IsWriteCmd(resp.ZTTL), Handler: zttlCommand}, - resp.ZPERSIST: {Sync: resp.IsWriteCmd(resp.ZPERSIST), Handler: zpersistCommand}, + resp.ZCLEAR: {Sync: resp.IsWriteCmd(resp.ZCLEAR), Handler: zclearCommand, KeySkip: 1}, + resp.ZKEYEXISTS: {Sync: resp.IsWriteCmd(resp.ZKEYEXISTS), Handler: zkeyexistsCommand}, + resp.ZEXPIRE: {Sync: resp.IsWriteCmd(resp.ZEXPIRE), Handler: zexpireCommand}, + resp.ZEXPIREAT: {Sync: resp.IsWriteCmd(resp.ZEXPIREAT), Handler: zexpireAtCommand}, + resp.ZTTL: {Sync: resp.IsWriteCmd(resp.ZTTL), Handler: zttlCommand}, + resp.ZPERSIST: {Sync: resp.IsWriteCmd(resp.ZPERSIST), Handler: zpersistCommand}, }) } func zaddCommand(c *Client) error { args := c.Args if len(args) < 3 { - return resp.CmdParamsErr(resp.ZADD) + return errn.CmdParamsErr(resp.ZADD) } if len(args[1:])&1 != 0 { - return resp.CmdParamsErr(resp.ZADD) + return errn.CmdParamsErr(resp.ZADD) } key := args[0] @@ -78,7 +74,7 @@ func zaddCommand(c *Client) error { score, err := 
extend.ParseFloat64(unsafe2.String(args[2*i])) if err != nil || score < float64(math.MinInt64) || score > float64(math.MaxInt64) { - return resp.ErrValue + return errn.ErrValue } params[i].Score = score @@ -88,7 +84,7 @@ func zaddCommand(c *Client) error { n, err := c.DB.ZAdd(key, c.KeyHash, params...) if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -97,12 +93,12 @@ func zaddCommand(c *Client) error { func zincrbyCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZINCRBY) + return errn.CmdParamsErr(resp.ZINCRBY) } delta, err := extend.ParseFloat64(unsafe2.String(args[1])) if err != nil { - return resp.ErrValue + return errn.ErrValue } key := args[0] @@ -110,7 +106,7 @@ func zincrbyCommand(c *Client) error { v, err := c.DB.ZIncrBy(key, c.KeyHash, delta, args[2]) if err == nil { - c.RespWriter.WriteBulk(extend.FormatFloat64ToSlice(v)) + c.Writer.WriteBulk(extend.FormatFloat64ToSlice(v)) } return err @@ -119,13 +115,13 @@ func zincrbyCommand(c *Client) error { func zremCommand(c *Client) error { args := c.Args if len(args) < 2 { - return resp.CmdParamsErr(resp.ZREM) + return errn.CmdParamsErr(resp.ZREM) } n, err := c.DB.ZRem(args[0], c.KeyHash, args[1:]...) 
if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -134,7 +130,7 @@ func zremCommand(c *Client) error { func zremrangebyscoreCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZREMRANGEBYSCORE) + return errn.CmdParamsErr(resp.ZREMRANGEBYSCORE) } min, max, leftClose, rightClose, err := zparseScoreRange(args[1], args[2]) @@ -147,7 +143,7 @@ func zremrangebyscoreCommand(c *Client) error { n, err := c.DB.ZRemRangeByScore(key, c.KeyHash, min, max, leftClose, rightClose) if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -156,19 +152,19 @@ func zremrangebyscoreCommand(c *Client) error { func zremrangebyrankCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZREMRANGEBYRANK) + return errn.CmdParamsErr(resp.ZREMRANGEBYRANK) } start, stop, err := zparseRange(args[1], args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } key := args[0] n, err := c.DB.ZRemRangeByRank(key, c.KeyHash, start, stop) if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -177,7 +173,7 @@ func zremrangebyrankCommand(c *Client) error { func zremrangebylexCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZREMRANGEBYLEX) + return errn.CmdParamsErr(resp.ZREMRANGEBYLEX) } min, max, leftClose, rightClose, err := zparseLexMemberRange(args[1], args[2]) @@ -190,7 +186,7 @@ func zremrangebylexCommand(c *Client) error { if n, err := c.DB.ZRemRangeByLex(key, c.KeyHash, min, max, leftClose, rightClose); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -211,14 +207,14 @@ func zparseRange(a1 []byte, a2 []byte) (start int64, stop int64, err error) { func zrangeGeneric(c *Client, reverse bool, cmd string) error { args := c.Args if len(args) < 3 { - return resp.CmdParamsErr(resp.ZRANGE) + return 
errn.CmdParamsErr(resp.ZRANGE) } key := args[0] start, stop, err := zparseRange(args[1], args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } args = args[3:] @@ -226,19 +222,19 @@ func zrangeGeneric(c *Client, reverse bool, cmd string) error { if len(args) > 0 { if len(args) != 1 { - return resp.CmdParamsErr(cmd) + return errn.CmdParamsErr(cmd) } if strings.ToLower(unsafe2.String(args[0])) == "withscores" { withScores = true } else { - return resp.ErrSyntax + return errn.ErrSyntax } } if datas, err := c.DB.ZRangeGeneric(key, c.KeyHash, start, stop, reverse); err != nil { return err } else { - c.RespWriter.WriteScorePairArray(datas, withScores) + c.Writer.WriteScorePairArray(datas, withScores) } return nil } @@ -254,7 +250,7 @@ func zrevrangeCommand(c *Client) error { func zrangebylexCommand(c *Client) error { args := c.Args if len(args) != 3 && len(args) != 6 { - return resp.CmdParamsErr(resp.ZRANGEBYLEX) + return errn.CmdParamsErr(resp.ZRANGEBYLEX) } min, max, leftClose, rightClose, err := zparseLexMemberRange(args[1], args[2]) @@ -267,20 +263,20 @@ func zrangebylexCommand(c *Client) error { if len(args) == 6 { if strings.ToLower(unsafe2.String(args[3])) != "limit" { - return resp.ErrSyntax + return errn.ErrSyntax } if offset, err = strconv.Atoi(unsafe2.String(args[4])); err != nil { - return resp.ErrValue + return errn.ErrValue } if offset < 0 { - c.RespWriter.WriteSliceArray(make([][]byte, 0, 4)) + c.Writer.WriteSliceArray(make([][]byte, 0, 4)) return nil } if count, err = strconv.Atoi(unsafe2.String(args[5])); err != nil { - return resp.ErrValue + return errn.ErrValue } } @@ -289,7 +285,7 @@ func zrangebylexCommand(c *Client) error { if ay, err := c.DB.ZRangeByLex(key, c.KeyHash, min, max, leftClose, rightClose, offset, count); err != nil { return err } else { - c.RespWriter.WriteSliceArray(ay) + c.Writer.WriteSliceArray(ay) } return nil @@ -298,7 +294,7 @@ func zrangebylexCommand(c *Client) error { func zrangebyscoreGeneric(c *Client, 
reverse bool) error { args := c.Args if len(args) < 3 { - return resp.CmdParamsErr(resp.ZRANGEBYSCORE) + return errn.CmdParamsErr(resp.ZRANGEBYSCORE) } key := args[0] @@ -333,19 +329,19 @@ func zrangebyscoreGeneric(c *Client, reverse bool) error { if len(args) > 0 { if len(args) < 3 { - return resp.CmdParamsErr(resp.ZRANGEBYSCORE) + return errn.CmdParamsErr(resp.ZRANGEBYSCORE) } if strings.ToLower(unsafe2.String(args[0])) != "limit" { - return resp.ErrSyntax + return errn.ErrSyntax } if offset, err = strconv.Atoi(unsafe2.String(args[1])); err != nil { - return resp.ErrValue + return errn.ErrValue } if count, err = strconv.Atoi(unsafe2.String(args[2])); err != nil { - return resp.ErrValue + return errn.ErrValue } if len(args) == 4 { @@ -356,14 +352,14 @@ func zrangebyscoreGeneric(c *Client, reverse bool) error { } if offset < 0 { - c.RespWriter.WriteArray([]interface{}{}) + c.Writer.WriteArray([]interface{}{}) return nil } if datas, err := c.DB.ZRangeByScoreGeneric(key, c.KeyHash, min, max, leftClose, rightClose, offset, count, reverse); err != nil { return err } else { - c.RespWriter.WriteScorePairArray(datas, withScores) + c.Writer.WriteScorePairArray(datas, withScores) } return nil @@ -380,18 +376,18 @@ func zrevrangebyscoreCommand(c *Client) error { func zrankCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.ZRANK) + return errn.CmdParamsErr(resp.ZRANK) } if n, err := c.DB.ZRank(args[0], c.KeyHash, args[1]); err != nil { if err == errn.ErrZsetMemberNil { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } else { return err } } else if n == -1 { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -400,19 +396,19 @@ func zrankCommand(c *Client) error { func zrevrankCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.ZREVRANK) + return errn.CmdParamsErr(resp.ZREVRANK) } if n, err := 
c.DB.ZRevRank(args[0], c.KeyHash, args[1]); err != nil { if err == errn.ErrZsetMemberNil { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } else { return err } } else if n == -1 { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -421,17 +417,17 @@ func zrevrankCommand(c *Client) error { func zscoreCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.ZSCORE) + return errn.CmdParamsErr(resp.ZSCORE) } if s, err := c.DB.ZScore(args[0], c.KeyHash, args[1]); err != nil { if err == errn.ErrZsetMemberNil { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } else { return err } } else { - c.RespWriter.WriteBulk(extend.FormatFloat64ToSlice(s)) + c.Writer.WriteBulk(extend.FormatFloat64ToSlice(s)) } return nil @@ -440,7 +436,7 @@ func zscoreCommand(c *Client) error { func zlexcountCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZLEXCOUNT) + return errn.CmdParamsErr(resp.ZLEXCOUNT) } min, max, leftClose, rightClose, err := zparseLexMemberRange(args[1], args[2]) @@ -453,7 +449,7 @@ func zlexcountCommand(c *Client) error { if n, err := c.DB.ZLexCount(key, c.KeyHash, min, max, leftClose, rightClose); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -462,24 +458,24 @@ func zlexcountCommand(c *Client) error { func zcountCommand(c *Client) error { args := c.Args if len(args) != 3 { - return resp.CmdParamsErr(resp.ZCOUNT) + return errn.CmdParamsErr(resp.ZCOUNT) } min, max, leftClose, rightClose, err := zparseScoreRange(args[1], args[2]) if err != nil { - return resp.ErrValue + return errn.ErrValue } if min > max { - c.RespWriter.WriteInteger(0) + c.Writer.WriteInteger(0) return nil } if n, err := c.DB.ZCount(args[0], c.KeyHash, min, max, leftClose, rightClose); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + 
c.Writer.WriteInteger(n) } return nil @@ -488,13 +484,13 @@ func zcountCommand(c *Client) error { func zcardCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.ZCARD) + return errn.CmdParamsErr(resp.ZCARD) } if n, err := c.DB.ZCard(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil @@ -503,13 +499,13 @@ func zcardCommand(c *Client) error { func zkeyexistsCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.ZKEYEXISTS) + return errn.CmdParamsErr(resp.ZKEYEXISTS) } if n, err := c.DB.Exists(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return nil } @@ -517,13 +513,13 @@ func zkeyexistsCommand(c *Client) error { func zclearCommand(c *Client) error { args := c.Args if len(args) < 1 { - return resp.CmdParamsErr(resp.ZCLEAR) + return errn.CmdParamsErr(resp.ZCLEAR) } n, err := c.DB.ZClear(c.KeyHash, args...) 
if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -532,12 +528,12 @@ func zclearCommand(c *Client) error { func zexpireCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.ZEXPIRE) + return errn.CmdParamsErr(resp.ZEXPIRE) } duration, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -545,19 +541,19 @@ func zexpireCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func zexpireAtCommand(c *Client) error { args := c.Args if len(args) != 2 { - return resp.CmdParamsErr(resp.ZEXPIREAT) + return errn.CmdParamsErr(resp.ZEXPIREAT) } when, err := utils.ByteToInt64(args[1]) if err != nil { - return resp.ErrValue + return errn.ErrValue } var n int64 @@ -565,20 +561,20 @@ func zexpireAtCommand(c *Client) error { if err != nil { return err } - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) return nil } func zttlCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.ZTTL) + return errn.CmdParamsErr(resp.ZTTL) } if v, err := c.DB.TTl(args[0], c.KeyHash); err != nil { return err } else { - c.RespWriter.WriteInteger(v) + c.Writer.WriteInteger(v) } return nil @@ -587,13 +583,13 @@ func zttlCommand(c *Client) error { func zpersistCommand(c *Client) error { args := c.Args if len(args) != 1 { - return resp.CmdParamsErr(resp.ZPERSIST) + return errn.CmdParamsErr(resp.ZPERSIST) } n, err := c.DB.Persist(args[0], c.KeyHash) if err == nil { - c.RespWriter.WriteInteger(n) + c.Writer.WriteInteger(n) } return err @@ -604,7 +600,7 @@ func zparseLexMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, min = minBuf } else { if len(minBuf) == 0 { - err = resp.ErrInvalidRangeItem + err = errn.ErrInvalidRangeItem return } @@ -614,7 +610,7 @@ func zparseLexMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, } 
else if minBuf[0] == '[' { min = minBuf[1:] } else { - err = resp.ErrInvalidRangeItem + err = errn.ErrInvalidRangeItem return } } @@ -623,7 +619,7 @@ func zparseLexMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, max = maxBuf } else { if len(maxBuf) == 0 { - err = resp.ErrInvalidRangeItem + err = errn.ErrInvalidRangeItem return } if maxBuf[0] == '(' { @@ -632,7 +628,7 @@ func zparseLexMemberRange(minBuf []byte, maxBuf []byte) (min []byte, max []byte, } else if maxBuf[0] == '[' { max = maxBuf[1:] } else { - err = resp.ErrInvalidRangeItem + err = errn.ErrInvalidRangeItem return } } diff --git a/stored/server/cpu_adjust.go b/stored/server/cpu_adjust.go index 63e6232..13c4d50 100644 --- a/stored/server/cpu_adjust.go +++ b/stored/server/cpu_adjust.go @@ -16,55 +16,81 @@ package server import ( "bytes" + "fmt" "os" "path/filepath" "runtime" "strconv" "time" + "github.com/zuoyebang/bitalostored/stored/internal/config" "github.com/zuoyebang/bitalostored/stored/internal/log" + "github.com/zuoyebang/bitalostored/stored/internal/trycatch" ) -const cpuProcMax = 32 - type cpuAdjust struct { - path string periodPath string quotaPath string - lastCpuNum int + lastCores int + optCores int } -func NewCpuAdjust(path string, lastCpuNum int) *cpuAdjust { - c := &cpuAdjust{} +func RunCpuAdjuster(s *Server) { + var addr string + if len(s.laddr) > 1 { + addr = s.laddr[1:] + } + path := fmt.Sprintf("/sys/fs/cgroup/cpu/stored/server_%s_%s", config.GlobalConfig.Server.ProductName, addr) + log.Infof("cpu cgroup base path %s", path) - log.Infof("cpu cgroup base path: %s", path) - c.periodPath = filepath.Join(path, "cpu.cfs_period_us") - c.quotaPath = filepath.Join(path, "cpu.cfs_quota_us") - c.lastCpuNum = lastCpuNum + c := &cpuAdjust{ + periodPath: filepath.Join(path, "cpu.cfs_period_us"), + quotaPath: filepath.Join(path, "cpu.cfs_quota_us"), + } - return c -} + if config.GlobalConfig.Server.Maxprocs > 1 { + c.optCores = config.GlobalConfig.Server.Maxprocs / 2 + } + + 
c.setGoMaxProcs() -func (c *cpuAdjust) Run(s *Server) { - var cpuNum int go func() { for { - cpuNum = c.getCpuNum() - if cpuNum > cpuProcMax { - log.Warnf("cpu procs exceed limit. num: %d", cpuNum) - cpuNum = cpuProcMax + if s.IsClosed() { + return } - if cpuNum != c.lastCpuNum && cpuNum > 0 { - runtime.GOMAXPROCS(cpuNum) - log.Infof("cpu procs change. %d => %d", c.lastCpuNum, cpuNum) - c.lastCpuNum = cpuNum - } - s.Info.RuntimeStats.NumProcs = cpuNum + + c.setGoMaxProcs() + s.Info.RuntimeStats.NumProcs = c.lastCores * 2 time.Sleep(60 * time.Second) } }() } +func (c *cpuAdjust) setGoMaxProcs() { + defer func() { + trycatch.Panic("cpuAdjust", recover()) + }() + + cores := c.getCpuNum() + if cores == 0 && c.optCores > 0 { + cores = c.optCores + } + if cores < config.MinCores { + log.Warnf("cpu procs less than(%d). num: %d", config.MinCores, cores) + cores = config.MinCores + } + if cores > config.MaxCores { + log.Warnf("cpu procs exceed limit(%d). num: %d", config.MaxCores, cores) + cores = config.MaxCores + } + if cores != c.lastCores && cores > 0 { + runtime.GOMAXPROCS(cores * 2) + log.Infof("cpu procs change: %d => %d, GOMAXPROCS: %d => %d", c.lastCores, cores, c.lastCores*2, cores*2) + c.lastCores = cores + } +} + func (c *cpuAdjust) getCpuNum() int { periodInt, _ := readCpuInfo(c.periodPath) quotaInt, _ := readCpuInfo(c.quotaPath) diff --git a/stored/server/info.go b/stored/server/info.go index 488bef4..865e9ed 100644 --- a/stored/server/info.go +++ b/stored/server/info.go @@ -15,17 +15,18 @@ package server import ( + "math" "runtime" "sync" "sync/atomic" "time" + "github.com/zuoyebang/bitalostored/butils" "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb" "github.com/zuoyebang/bitalostored/stored/internal/bytepools" "github.com/zuoyebang/bitalostored/stored/internal/config" + "github.com/zuoyebang/bitalostored/stored/internal/trycatch" "github.com/zuoyebang/bitalostored/stored/internal/utils" - - "github.com/zuoyebang/bitalostored/butils" ) type 
SInfo struct { @@ -51,20 +52,6 @@ func (sinfo *SInfo) Marshal() ([]byte, func()) { return buf[:pos], closer } -func NewSinfo() *SInfo { - sinfo := &SInfo{ - Server: SinfoServer{cache: make([]byte, 0, 2048)}, - Client: SinfoClient{cache: make([]byte, 0, 256)}, - Cluster: SinfoCluster{cache: make([]byte, 0, 2048)}, - Stats: SinfoStats{cache: make([]byte, 0, 2048)}, - Data: SinfoData{cache: make([]byte, 0, 1024)}, - RuntimeStats: SRuntimeStats{cache: make([]byte, 0, 3072)}, - BitalosdbUsage: bitsdb.NewBitsUsage(), - } - - return sinfo -} - type SinfoCluster struct { StartModel ModelType `json:"start_model"` Status bool `json:"status"` @@ -118,7 +105,6 @@ func (sc *SinfoCluster) UpdateCache() { } type SinfoServer struct { - MaxProcs int `json:"maxprocs"` ProcessId int `json:"process_id"` StartTime string `json:"start_time"` ServerAddress string `json:"server_address"` @@ -127,6 +113,7 @@ type SinfoServer struct { GitVersion string `json:"git_version"` Compile string `json:"compile"` ConfigFile string `json:"config_file"` + AutoCompact bool `json:"auto_compact"` mutex sync.RWMutex cache []byte @@ -155,7 +142,6 @@ func (ss *SinfoServer) UpdateCache() { ss.cache = ss.cache[:0] ss.cache = append(ss.cache, []byte("# Server\n")...) 
- ss.cache = utils.AppendInfoInt(ss.cache, "maxprocs:", int64(ss.MaxProcs)) ss.cache = utils.AppendInfoInt(ss.cache, "process_id:", int64(ss.ProcessId)) ss.cache = utils.AppendInfoString(ss.cache, "start_time:", ss.StartTime) ss.cache = utils.AppendInfoInt(ss.cache, "max_client:", ss.MaxClient) @@ -164,6 +150,7 @@ func (ss *SinfoServer) UpdateCache() { ss.cache = utils.AppendInfoString(ss.cache, "git_version:", ss.GitVersion) ss.cache = utils.AppendInfoString(ss.cache, "compile:", ss.Compile) ss.cache = utils.AppendInfoString(ss.cache, "config_file:", ss.ConfigFile) + ss.cache = utils.AppendInfoString(ss.cache, "auto_compact:", utils.BoolToString(ss.AutoCompact)) ss.cache = append(ss.cache, '\n') } @@ -491,3 +478,71 @@ func (srs *SRuntimeStats) Samples() { srs.UpdateCache() } + +const ( + infoRuntimeInterval = 4 + infoClientInterval = 16 + infoDiskInterval = 120 +) + +func RunInfoCollection(s *Server) { + go func() { + dataInterval := 60 + collectInfo := func() { + defer func() { + trycatch.Panic("plugin doinfo", recover()) + }() + + start := time.Now() + total := s.Info.Stats.TotolCmd.Load() + + time.Sleep(time.Second) + + delta := s.Info.Stats.TotolCmd.Load() - total + normalized := math.Max(0, float64(delta)) * float64(time.Second) / float64(time.Since(start)) + qps := uint64(normalized + 0.5) + s.Info.Stats.QPS.Store(qps) + db := s.GetDB() + if db != nil { + db.SetQPS(qps) + s.Info.Stats.RaftLogIndex = db.Meta.GetUpdateIndex() + if db.Migrate != nil { + s.Info.Stats.IsMigrate.Store(db.Migrate.IsMigrate.Load()) + } + s.Info.Stats.IsDelExpire = db.GetIsDelExpire() + } + + singleDegradeChange := s.Info.Server.SingleDegrade != config.GlobalConfig.Server.DegradeSingleNode + s.Info.Server.SingleDegrade = config.GlobalConfig.Server.DegradeSingleNode + if singleDegradeChange { + s.Info.Server.UpdateCache() + } + + if dataInterval%infoRuntimeInterval == 0 { + s.Info.Stats.UpdateCache() + s.Info.RuntimeStats.Samples() + } + + if dataInterval%infoClientInterval == 0 { + 
s.Info.Client.UpdateCache() + } + + if dataInterval%infoDiskInterval == 0 { + s.Info.Data.Samples() + if db != nil { + db.BitalosdbUsage(s.Info.BitalosdbUsage) + } + } + + dataInterval++ + } + + for { + if s.IsClosed() { + return + } + + collectInfo() + } + }() +} diff --git a/stored/server/lua.go b/stored/server/lua.go index c96e3f0..767ad70 100644 --- a/stored/server/lua.go +++ b/stored/server/lua.go @@ -22,12 +22,9 @@ import ( "strings" "sync" - "github.com/zuoyebang/bitalostored/stored/internal/config" + lua "github.com/yuin/gopher-lua" "github.com/zuoyebang/bitalostored/stored/internal/luajson" - "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/utils" - - lua "github.com/yuin/gopher-lua" ) var luaClientPool sync.Pool @@ -119,10 +116,10 @@ func MkLuaFuncs(srv *Server) map[string]lua.LGFunction { reqData := utils.StringSliceToByteSlice(args) vmClient := GetVmFromPool(srv) defer PutRaftClientToPool(vmClient) - isPlugin := config.GlobalConfig.Plugin.OpenRaft - _ = vmClient.HandleRequest(isPlugin, reqData, true) - buf := bytes.NewBuffer(vmClient.RespWriter.FlushToBytes()) - res, err := resp.ParseReply(bufio.NewReader(buf)) + _ = vmClient.HandleRequest(reqData, true) + buf := bytes.NewBuffer(vmClient.Writer.Bytes()) + defer vmClient.Writer.Reset() + res, err := ParseReply(bufio.NewReader(buf)) if err != nil { if failFast { if strings.Contains(err.Error(), "empty command") { @@ -158,6 +155,7 @@ func MkLuaFuncs(srv *Server) map[string]lua.LGFunction { panic(fmt.Sprintf("type not handled (%T)", r)) } } + return 1 } } @@ -224,31 +222,31 @@ func ConvertLuaTable(l *lua.LState, value lua.LValue) []string { func LuaToRedis(l *lua.LState, c *Client, value lua.LValue) { if value == nil { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) return } switch t := value.(type) { case *lua.LNilType: - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) case lua.LBool: if lua.LVAsBool(value) { - 
c.RespWriter.WriteInteger(1) + c.Writer.WriteInteger(1) } else { - c.RespWriter.WriteBulk(nil) + c.Writer.WriteBulk(nil) } case lua.LNumber: - c.RespWriter.WriteInteger(int64(lua.LVAsNumber(value))) + c.Writer.WriteInteger(int64(lua.LVAsNumber(value))) case lua.LString: s := lua.LVAsString(value) - c.RespWriter.WriteBulk([]byte(s)) + c.Writer.WriteBulk([]byte(s)) case *lua.LTable: if s := t.RawGetString("err"); s.Type() != lua.LTNil { - c.RespWriter.WriteError(errors.New(s.String())) + c.Writer.WriteError(errors.New(s.String())) return } if s := t.RawGetString("ok"); s.Type() != lua.LTNil { - c.RespWriter.WriteStatus(s.String()) + c.Writer.WriteStatus(s.String()) return } @@ -267,7 +265,7 @@ func LuaToRedis(l *lua.LState, c *Client, value lua.LValue) { result = append(result, val) } - c.RespWriter.WriteLen(len(result)) + c.Writer.WriteLen(len(result)) for _, r := range result { LuaToRedis(l, c, r) } diff --git a/stored/server/migrate.go b/stored/server/migrate.go index e0d87b0..9b3e898 100644 --- a/stored/server/migrate.go +++ b/stored/server/migrate.go @@ -19,14 +19,12 @@ import ( "strconv" "github.com/zuoyebang/bitalostored/stored/internal/errn" - "github.com/zuoyebang/bitalostored/stored/internal/resp" - "github.com/zuoyebang/bitalostored/stored/internal/log" ) func migrateSlots(c *Client) error { if len(c.Args) < 3 { - return resp.CmdParamsErr("migrateslots") + return errn.CmdParamsErr("migrateslots") } slot, e := strconv.ParseUint(string(c.Args[2]), 10, 32) if e != nil { @@ -34,12 +32,12 @@ func migrateSlots(c *Client) error { } host := fmt.Sprintf("%s:%s", string(c.Args[0]), string(c.Args[1])) - if _, e := c.DB.MigrateStart(c.server.address, host, uint32(slot), c.server.IsMaster, c.server.MigrateDelToSlave); e != nil { + if _, e := c.DB.MigrateStart(c.server.laddr, host, uint32(slot), c.server.IsMaster, c.server.MigrateDelToSlave); e != nil { log.Warn("migrate error tohost: ", host, " slots: ", slot, " error: ", e) return e } - c.RespWriter.WriteStatus("OK") 
+ c.Writer.WriteStatus("OK") return nil } @@ -53,16 +51,16 @@ func migrateStatus(c *Client) error { } if c.DB.Migrate != nil { - c.RespWriter.WriteStatus(c.DB.Migrate.Info()) + c.Writer.WriteStatus(c.DB.Migrate.Info()) } else { - c.RespWriter.WriteStatus("{}") + c.Writer.WriteStatus("{}") } return nil } func migrateEnd(c *Client) error { if len(c.Args) < 1 { - return resp.CmdParamsErr("migrateend") + return errn.CmdParamsErr("migrateend") } slot, e := strconv.ParseUint(string(c.Args[0]), 10, 32) if e != nil { @@ -73,7 +71,7 @@ func migrateEnd(c *Client) error { return e } - c.RespWriter.WriteStatus("OK") + c.Writer.WriteStatus("OK") return nil } diff --git a/stored/server/proc.go b/stored/server/proc.go deleted file mode 100644 index ad5f6b1..0000000 --- a/stored/server/proc.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package server - -import ( - "errors" - - "github.com/zuoyebang/bitalostored/stored/internal/log" -) - -type Proc struct { - Name string - - Start func(*Server) - Stop func(*Server, interface{}) - - Connect func(*Server, *Client) - Disconn func(*Server, *Client, interface{}) - Prepare func(*Client, *Cmd, string) bool - Handled func(*Client, *Cmd, string) - - DoRaftSync func(*Client, *Cmd, string) error -} - -var plugins = []*Proc{} -var raftplugin *Proc - -func AddPlugin(p *Proc) { - if p.Name == "" { - p.Name = log.FileLine(2, 3) - } - plugins = append(plugins, p) -} - -func AddRaftPlugin(p *Proc) { - raftplugin = p -} - -func runPluginStart(s *Server) { - for _, p := range plugins { - if p.Start != nil { - p.Start(s) - } - } -} -func runPluginStop(s *Server, e interface{}) { - for _, p := range plugins { - if p.Stop != nil { - p.Stop(s, e) - } - } -} - -func runPluginConnect(s *Server, c *Client) { - for _, p := range plugins { - if p.Connect != nil { - p.Connect(s, c) - } - } -} -func runPluginDisconn(s *Server, c *Client, e interface{}) { - for _, p := range plugins { - if p.Disconn != nil { - p.Disconn(s, c, e) - } - } -} -func runPluginRaft(c *Client, cmd *Cmd, key string) error { - if raftplugin == nil || raftplugin.DoRaftSync == nil { - return errors.New("no raft plugin") - } - return raftplugin.DoRaftSync(c, cmd, key) - -} - -func runPluginHandled(c *Client, cmd *Cmd, key string) { - for _, p := range plugins { - if p.Handled != nil { - p.Handled(c, cmd, key) - } - } -} diff --git a/stored/server/server.go b/stored/server/server.go index 49c3f8a..0f0ff9c 100644 --- a/stored/server/server.go +++ b/stored/server/server.go @@ -15,59 +15,119 @@ package server import ( + "context" "fmt" - "net" "os" "sync" "sync/atomic" "github.com/cockroachdb/errors" + "github.com/panjf2000/gnet/v2" "github.com/zuoyebang/bitalostored/stored/engine" + "github.com/zuoyebang/bitalostored/stored/engine/bitsdb/bitsdb" 
"github.com/zuoyebang/bitalostored/stored/engine/bitsdb/btools" "github.com/zuoyebang/bitalostored/stored/internal/config" + "github.com/zuoyebang/bitalostored/stored/internal/errn" "github.com/zuoyebang/bitalostored/stored/internal/log" + "github.com/zuoyebang/bitalostored/stored/internal/resp" "github.com/zuoyebang/bitalostored/stored/internal/slowshield" + "github.com/zuoyebang/bitalostored/stored/internal/trycatch" "github.com/zuoyebang/bitalostored/stored/internal/utils" - "golang.org/x/net/netutil" ) -const ( - StatusPrepare = iota - StatusStart - StatusRunning - StatusClose - StatusExited -) +const errorReadEOF = "read: EOF" type Server struct { - closed atomic.Bool - status int - quit chan struct{} - isDebug bool - address string - listener net.Listener - dbSyncListener net.Listener - connWait sync.WaitGroup - rcm sync.RWMutex - rcs map[*Client]struct{} - db *engine.Bitalos - slowQuery *slowshield.SlowShield - recoverLock sync.Mutex - syncDataDoing atomic.Int32 - dbSyncing atomic.Int32 - luaMu []*sync.Mutex - expireClosedCh chan struct{} - expireWg sync.WaitGroup - + *gnet.BuiltinEventEngine + eng gnet.Engine Info *SInfo IsMaster func() bool MigrateDelToSlave func(keyHash uint32, data [][]byte) error IsWitness bool - + DoRaftSync func(keyHash uint32, data [][]byte) ([]byte, error) + DoRaftStop func() + laddr string + db *engine.Bitalos + closed atomic.Bool + quit chan struct{} + isDebug bool + isOpenRaft bool + slowQuery *slowshield.SlowShield + recoverLock sync.Mutex + syncDataDoing atomic.Int32 + dbSyncing atomic.Int32 + luaMu []*sync.Mutex + expireClosedCh chan struct{} + expireWg sync.WaitGroup openDistributedTx bool txLocks *TxShardLocker txParallelCounter atomic.Int32 txPrepareWg sync.WaitGroup + cpu *cpuAdjust +} + +func NewServer() (*Server, error) { + s := &Server{ + laddr: config.GlobalConfig.Server.Address, + isDebug: config.GlobalConfig.Log.IsDebug, + slowQuery: slowshield.NewSlowShield(), + quit: make(chan struct{}), + recoverLock: 
sync.Mutex{}, + expireClosedCh: make(chan struct{}), + openDistributedTx: config.GlobalConfig.Server.OpenDistributedTx, + isOpenRaft: config.GlobalConfig.Plugin.OpenRaft, + IsWitness: config.GlobalConfig.RaftCluster.IsWitness, + } + s.Info = &SInfo{ + Client: SinfoClient{cache: make([]byte, 0, 256)}, + Cluster: SinfoCluster{cache: make([]byte, 0, 2048)}, + Stats: SinfoStats{cache: make([]byte, 0, 2048)}, + Data: SinfoData{cache: make([]byte, 0, 1024)}, + RuntimeStats: SRuntimeStats{cache: make([]byte, 0, 3072)}, + BitalosdbUsage: bitsdb.NewBitsUsage(), + Server: SinfoServer{ + cache: make([]byte, 0, 2048), + AutoCompact: true, + ConfigFile: config.GlobalConfig.Server.ConfigFile, + StartTime: utils.GetCurrentTimeString(), + ServerAddress: s.laddr, + GitVersion: utils.Version, + Compile: utils.Compile, + MaxClient: config.GlobalConfig.Server.Maxclient, + ProcessId: os.Getpid(), + }, + } + s.Info.Server.UpdateCache() + + RunCpuAdjuster(s) + + if s.IsWitness { + return s, nil + } + + if s.openDistributedTx { + s.txLocks = NewTxLockers(200) + } + + luaMux := make([]*sync.Mutex, LuaShardCount) + for i := uint32(0); i < LuaShardCount; i++ { + luaMux[i] = &sync.Mutex{} + } + s.luaMu = luaMux + + if err := os.MkdirAll(config.GetBitalosSnapshotPath(), 0755); err != nil { + return nil, errors.Wrap(err, "mkdir snapshot err") + } + + db, err := engine.NewBitalos(config.GetBitalosDbDataPath()) + if err != nil { + return nil, errors.Wrap(err, "new bitalos err") + } + + s.db = db + s.RunDeleteExpireDataTask() + + return s, nil } func (s *Server) GetDB() *engine.Bitalos { @@ -88,150 +148,137 @@ func (s *Server) FlushCallback(compactIndex uint64) { db.Flush(btools.FlushTypeRemoveLog, compactIndex) } -func (s *Server) addRespClient(c *Client) { - s.rcm.Lock() - s.Info.Client.ClientTotal.Add(1) - s.Info.Client.ClientAlive.Add(1) - s.rcs[c] = struct{}{} - s.rcm.Unlock() -} +func (s *Server) Close() { + if !s.closed.CompareAndSwap(false, true) { + return + } -func (s *Server) 
delRespClient(c *Client) { - s.rcm.Lock() - s.Info.Client.ClientAlive.Add(-1) - delete(s.rcs, c) - s.rcm.Unlock() -} + close(s.quit) + close(s.expireClosedCh) -func (s *Server) closeAllRespClients() { - s.rcm.Lock() - for c := range s.rcs { - c.Close() + if s.eng.Validate() == nil { + if err := s.eng.Stop(context.TODO()); err != nil { + log.Errorf("server gnet stop error %s", err) + } } - s.rcm.Unlock() + s.txPrepareWg.Wait() -} + s.DoRaftStop() -func (s *Server) Run() { - l, err := net.Listen("tcp", s.address) - if err != nil { - log.Errorf("net listen fail err:%s", err.Error()) - return + if !s.IsWitness { + s.expireWg.Wait() + s.GetDB().Close() } +} - s.Info.Server.ConfigFile = config.GlobalConfig.Server.ConfigFile - s.Info.Server.StartTime = utils.GetCurrentTimeString() - s.Info.Server.ServerAddress = s.address - s.Info.Server.GitVersion = utils.Version - s.Info.Server.Compile = utils.Compile - s.Info.Server.MaxClient = config.GlobalConfig.Server.Maxclient - s.Info.Server.MaxProcs = config.GlobalConfig.Server.Maxprocs - s.Info.Server.ProcessId = os.Getpid() - s.Info.Server.UpdateCache() +func (s *Server) IsClosed() bool { + return s.closed.Load() +} - productName := config.GlobalConfig.Server.ProductName - var addr string - if len(config.GlobalConfig.Server.Address) > 1 { - addr = config.GlobalConfig.Server.Address[1:] +func (s *Server) ListenAndServe() { + gnetOptions := gnet.Options{ + Logger: log.GetLogger(), + Multicore: true, + ReusePort: true, + ReuseAddr: true, + EdgeTriggeredIO: config.GlobalConfig.Server.DisableEdgeTriggered, } - cpuCgroupPath := fmt.Sprintf("/sys/fs/cgroup/cpu/stored/server_%s_%s", productName, addr) - cpuAdjuster := NewCpuAdjust(cpuCgroupPath, config.GlobalConfig.Server.Maxprocs) - cpuAdjuster.Run(s) - maxClientNum := int(config.GlobalConfig.Server.Maxclient) - s.listener = netutil.LimitListener(l, maxClientNum) + if config.GlobalConfig.Server.NetEventLoopNum > 0 { + gnetOptions.NumEventLoop = 
config.GlobalConfig.Server.NetEventLoopNum + } - log.Infof("listen:%s maxClientNum:%d", s.address, maxClientNum) - s.status = StatusStart - runPluginStart(s) - s.status = StatusRunning + if config.GlobalConfig.Server.NetWriteBuffer > 0 { + gnetOptions.WriteBufferCap = config.GlobalConfig.Server.NetWriteBuffer.AsInt() + } - defer func() { - s.status = StatusClose - }() + log.Infof("server gnet options NumEventLoop:%d EdgeTriggeredIO:%v WriteBufferCap:%d", + gnetOptions.NumEventLoop, gnetOptions.EdgeTriggeredIO, gnetOptions.WriteBufferCap) - for { - select { - case <-s.quit: - log.Info("bitalos server receive quit signal") - return - default: - if c, e := s.listener.Accept(); e != nil { - log.Errorf("accept err:%s", e.Error()) - continue - } else { - go NewClientRESP(c, s).run() - } - } + if err := gnet.Run(s, fmt.Sprintf("tcp://%s", s.laddr), gnet.WithOptions(gnetOptions)); err != nil { + log.Errorf("server gnet run error %s", err) } } -func (s *Server) Close() { - if s.closed.Load() { - return - } +func (s *Server) OnBoot(eng gnet.Engine) (action gnet.Action) { + s.eng = eng + return gnet.None +} - close(s.quit) - close(s.expireClosedCh) +func (s *Server) OnOpen(conn gnet.Conn) (out []byte, action gnet.Action) { + client := newConnClient(s, conn.RemoteAddr().String()) + conn.SetContext(client) + return +} - s.listener.Close() - s.closeAllRespClients() - s.connWait.Wait() - runPluginStop(s, recover()) +func (s *Server) OnClose(conn gnet.Conn, err error) (action gnet.Action) { + if client, ok := conn.Context().(*Client); ok { + client.Close() + } - if !s.IsWitness { - s.expireWg.Wait() - s.GetDB().Close() + if err != nil && err.Error() != errorReadEOF { + log.Errorf("conn OnClose error %s", err) } - s.closed.Store(true) - s.status = StatusExited + return gnet.None } -func (s *Server) GetIsClosed() bool { - return s.closed.Load() -} +func (s *Server) OnTraffic(conn gnet.Conn) (action gnet.Action) { + defer func() { + trycatch.Panic("conn OnTraffic", recover()) + }() 
-func NewServer() (*Server, error) { - s := &Server{ - address: config.GlobalConfig.Server.Address, - isDebug: config.GlobalConfig.Log.IsDebug, - slowQuery: slowshield.NewSlowShield(), - quit: make(chan struct{}), - rcm: sync.RWMutex{}, - rcs: make(map[*Client]struct{}, 128), - recoverLock: sync.Mutex{}, - expireClosedCh: make(chan struct{}), - openDistributedTx: config.GlobalConfig.Server.OpenDistributedTx, - IsWitness: config.GlobalConfig.RaftCluster.IsWitness, - Info: NewSinfo(), + client, ok := conn.Context().(*Client) + if !ok { + log.Error("conn OnTraffic get Client fail") + return gnet.Close } - if s.IsWitness { - return s, nil + dbSyncStatus := client.server.Info.Stats.DbSyncStatus + if dbSyncStatus == DB_SYNC_RECVING_FAIL || dbSyncStatus == DB_SYNC_RECVING { + client.Writer.WriteError(errn.ErrDbSyncFailRefuse) + client.Writer.FlushToWriterIO(conn) + log.Errorf("conn OnTraffic error %s", errn.ErrDbSyncFailRefuse) + return gnet.Close } - if s.openDistributedTx { - s.txLocks = NewTxLockers(200) + readBuf, _ := conn.Next(-1) + if client.Reader.Len() > 0 { + client.Reader.Write(readBuf) + readBuf = client.Reader.Bytes() } - luaMux := make([]*sync.Mutex, LuaShardCount) - for i := uint32(0); i < LuaShardCount; i++ { - luaMux[i] = &sync.Mutex{} + cmds, writeBackBytes, err := resp.ParseCommands(readBuf[client.Reader.Offset:], client.ParseMarks[:0]) + if err != nil { + client.Writer.WriteError(err) + client.Writer.FlushToWriterIO(conn) + log.Errorf("conn OnTraffic parse commands error %s", err) + return gnet.Close } - s.luaMu = luaMux - if err := os.MkdirAll(config.GetBitalosSnapshotPath(), 0755); err != nil { - return nil, errors.Wrap(err, "mkdir snapshot err") + for i := range cmds { + if err = client.HandleRequest(cmds[i].Args, false); err != nil { + log.Errorf("conn OnTraffic handle request error %s", err) + } + + if _, err = client.Writer.FlushToWriterIO(conn); err != nil { + log.Errorf("conn OnTraffic write error %s", err) + } } - db, err := 
engine.NewBitalos(config.GetBitalosDbDataPath()) - if err != nil { - return nil, errors.Wrap(err, "new bitalos err") + writeBackBytesLen := len(writeBackBytes) + if writeBackBytesLen > 0 && client.Reader.Len() == 0 { + client.Reader.Write(writeBackBytes) } - s.db = db - s.RunDeleteExpireDataTask() + if cmds != nil { + client.Reader.Offset = client.Reader.Len() - writeBackBytesLen + } - return s, nil + if writeBackBytesLen == 0 { + client.Reader.Reset() + client.Reader.Offset = 0 + } + + return gnet.None } diff --git a/stored/server/sync_data.go b/stored/server/sync_data.go deleted file mode 100644 index 19938c7..0000000 --- a/stored/server/sync_data.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2019-2024 Xu Ruibo (hustxurb@163.com) and Contributors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package server - -import ( - "errors" - "fmt" - "io" - "net" - "sync/atomic" - - "github.com/zuoyebang/bitalostored/stored/internal/log" -) - -var buildConn atomic.Int32 - -var dbSyncRunningErr = errors.New("db sync is running") - -func (s *Server) buildDbSyncListener() (string, error) { - if buildConn.CompareAndSwap(0, 1) { - defer buildConn.Store(0) - if s.dbSyncing.Load() == 1 { - return "", errors.New("db syncing conflicts") - } - - if s.dbSyncListener == nil { - address, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("%s:0", "0.0.0.0")) - if err != nil { - return "", err - } - s.dbSyncListener, err = net.ListenTCP("tcp", address) - if err != nil { - return "", err - } - go func() { - defer func() { - s.dbSyncListener.Close() - s.dbSyncListener = nil - }() - - if conn, err := s.dbSyncListener.Accept(); err != nil { - s.Info.Stats.DbSyncErr = err.Error() - s.Info.Stats.DbSyncStatus = DB_SYNC_CONN_FAIL - log.Error("accept dbsync conn err: ", err) - } else { - s.Info.Stats.DbSyncRunning.Store(1) - if err := s.sendEngineData(conn); err != nil { - log.Error("send engine conn err: ", err) - } - conn.Close() - s.Info.Stats.DbSyncRunning.Store(0) - } - }() - } - return s.dbSyncListener.Addr().String(), nil - } - return "", errors.New("current is build db sync") -} - -func (s *Server) buildDbAsyncConn(address string) error { - if buildConn.CompareAndSwap(0, 1) { - go func() { - defer buildConn.Store(0) - s.Info.Stats.DbSyncStatus = DB_SYNC_CONN_SUCC - tcpAddr, err := net.ResolveTCPAddr("tcp", address) - if err != nil { - s.Info.Stats.DbSyncErr = err.Error() - s.Info.Stats.DbSyncStatus = DB_SYNC_CONN_FAIL - log.Errorf("resolve tcp address : %s err :%s", tcpAddr, err.Error()) - return - } - conn, err := net.DialTCP("tcp", nil, tcpAddr) - if conn != nil { - defer conn.Close() - } - - if err != nil { - s.Info.Stats.DbSyncErr = err.Error() - s.Info.Stats.DbSyncStatus = DB_SYNC_CONN_FAIL - log.Errorf("build err conn err :%s", err.Error()) - return - } - if err := 
s.RecoverFromSnapshot(conn, nil); err != nil { - s.Info.Stats.DbSyncErr = err.Error() - s.Info.Stats.DbSyncStatus = DB_SYNC_CONN_FAIL - log.Errorf("build conn recover from snapshot err : %v", err) - return - } - }() - return nil - } else { - return dbSyncRunningErr - } -} - -func (s *Server) sendEngineData(w io.Writer) error { - ls, err := s.PrepareSnapshot() - if err != nil { - log.Error("dbsync prepare snapshot", err) - return err - } - - if err = s.SaveSnapshot(ls, w, nil); err != nil { - return err - } - return nil -} diff --git a/stored/internal/resp/util.go b/stored/server/utils.go similarity index 97% rename from stored/internal/resp/util.go rename to stored/server/utils.go index 67fbba7..98140ce 100644 --- a/stored/internal/resp/util.go +++ b/stored/server/utils.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package resp +package server import ( "bufio" @@ -20,9 +20,8 @@ import ( "strconv" "strings" - "github.com/zuoyebang/bitalostored/stored/internal/errn" - "github.com/zuoyebang/bitalostored/butils/unsafe2" + "github.com/zuoyebang/bitalostored/stored/internal/errn" ) const ( @@ -55,7 +54,7 @@ func ParseSetArgs(args [][]byte) (e ExpireType, t int64, c SetCondition, err err switch strings.ToUpper(unsafe2.String(args[i])) { case "EX": if i+1 >= len(args) { - err = ErrSyntax + err = errn.ErrSyntax return } @@ -67,7 +66,7 @@ func ParseSetArgs(args [][]byte) (e ExpireType, t int64, c SetCondition, err err i++ case "PX": if i+1 >= len(args) { - err = ErrSyntax + err = errn.ErrSyntax return } @@ -82,7 +81,7 @@ func ParseSetArgs(args [][]byte) (e ExpireType, t int64, c SetCondition, err err case "XX": c = XX default: - err = ErrSyntax + err = errn.ErrSyntax return } i++