diff --git a/Makefile b/Makefile index 6d19c8fc..05561595 100644 --- a/Makefile +++ b/Makefile @@ -34,7 +34,7 @@ generate: ## Generate protobuf stubs. .PHONY: build build: $(BUILD_DIR) ## Build go binary. - go build -ldflags "-s -w -X \"github.com/maticnetwork/polygon-cli/cmd/version.Version=dev ($(GIT_SHA))\"" -o $(BUILD_DIR)/$(BIN_NAME) main.go + go build -ldflags "-w -X \"github.com/maticnetwork/polygon-cli/cmd/version.Version=dev ($(GIT_SHA))\"" -o $(BUILD_DIR)/$(BIN_NAME) main.go .PHONY: install install: build ## Install the go binary. diff --git a/README.md b/README.md index 3f02db42..08ebe9b7 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,8 @@ Note: Do not modify this section! It is auto-generated by `cobra` using `make ge - [polycli hash](doc/polycli_hash.md) - Provide common crypto hashing functions. +- [polycli leveldbbench](doc/polycli_leveldbbench.md) - Perform a level db benchmark + - [polycli loadtest](doc/polycli_loadtest.md) - Run a generic load test against an Eth/EVM style JSON-RPC endpoint. - [polycli metrics-to-dash](doc/polycli_metrics-to-dash.md) - Create a dashboard from an Openmetrics / Prometheus response. diff --git a/cmd/dumpblocks/dumpblocks.go b/cmd/dumpblocks/dumpblocks.go index 7a615f0c..30bdc72a 100644 --- a/cmd/dumpblocks/dumpblocks.go +++ b/cmd/dumpblocks/dumpblocks.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package dumpblocks import ( diff --git a/cmd/forge/forge.go b/cmd/forge/forge.go index 8eb8ec6d..dd384b9f 100644 --- a/cmd/forge/forge.go +++ b/cmd/forge/forge.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package forge import ( diff --git a/cmd/hash/hash.go b/cmd/hash/hash.go index 94fdac26..8b576273 100644 --- a/cmd/hash/hash.go +++ b/cmd/hash/hash.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. 
- -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package hash import ( diff --git a/cmd/leveldbbench/leveldbbench.go b/cmd/leveldbbench/leveldbbench.go new file mode 100644 index 00000000..fe8f5bb0 --- /dev/null +++ b/cmd/leveldbbench/leveldbbench.go @@ -0,0 +1,622 @@ +package leveldbbench + +import ( + "context" + "crypto/sha512" + _ "embed" + "encoding/binary" + "encoding/hex" + "encoding/json" + "fmt" + "github.com/rs/zerolog/log" + progressbar "github.com/schollz/progressbar/v3" + "github.com/spf13/cobra" + leveldb "github.com/syndtr/goleveldb/leveldb" + "github.com/syndtr/goleveldb/leveldb/filter" + "github.com/syndtr/goleveldb/leveldb/iterator" + "github.com/syndtr/goleveldb/leveldb/opt" + "github.com/syndtr/goleveldb/leveldb/util" + "math" + "math/bits" + "math/rand" + "os" + "regexp" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +var ( + //go:embed usage.md + usage string + + randSrc *rand.Rand + randSrcMutex sync.Mutex + writeLimit *uint64 + noWriteMerge *bool + syncWrites *bool + dontFillCache *bool + readStrict *bool + keySize *uint64 + degreeOfParallelism *uint8 + readLimit *uint64 + rawSizeDistribution *string + sizeDistribution *IODistribution + overwriteCount *uint64 + sequentialReads *bool + sequentialWrites *bool + nilReadOptions *bool + cacheSize *int + openFilesCacheCapacity *int + writeZero *bool + readOnly *bool + dbPath *string + fullScan *bool +) + +const ( + // This data was obtained by running a full scan on bor level db to get a sense how the key values are distributed + // | Bucket | Min Size | Max | Count | + // |--------+-----------+------------+---------------| + // | 0 | 0 | 1 | 2,347,864 | + // | 1 | 2 | 3 | 804,394,856 | + // | 2 | 4 | 7 | 541,267,689 | + // | 3 | 8 | 15 | 738,828,593 | + // | 4 | 16 | 31 | 261,122,372 | + // | 5 | 32 | 63 | 1,063,470,933 | + // | 6 | 64 | 127 | 3,584,745,195 | + // | 7 | 128 | 255 | 1,605,760,137 | + // | 8 | 256 | 511 | 316,074,206 | + // | 9 | 512 | 1,023 | 312,887,514 | + // | 10 | 1,024 | 2,047 | 328,894,149 | + // | 11 | 2,048 | 4,095 | 141,180 | + // | 12 | 4,096 | 8,191 | 92,789 | + // | 13 | 8,192 | 16,383 | 256,060 | + // | 14 | 16,384 | 32,767 | 261,806 | + // | 15 | 32,768 | 65,535 | 191,032 | + // | 16 | 65,536 | 131,071 | 99,715 | + // | 17 | 131,072 | 262,143 | 73,782 | + // | 18 | 262,144 | 524,287 | 17,552 | + // | 19 | 524,288 | 1,048,575 | 717 | + // | 20 | 1,048,576 | 2,097,151 | 995 | + // | 21 | 2,097,152 | 4,194,303 | 1 | + // | 22 | 4,194,304 | 8,388,607 | 0 | + // | 23 | 8,388,608 | 16,777,215 | 1 | + borDistribution = "0-1:2347864,2-3:804394856,4-7:541267689,8-15:738828593,16-31:261122372,32-63:1063470933,64-127:3584745195,128-255:1605760137,256-511:316074206,512-1023:312887514,1024-2047:328894149,2048-4095:141180,4096-8191:92789,8192-16383:256060,16384-32767:261806,32768-65535:191032,65536-131071:99715,131072-262143:73782,262144-524287:17552,524288-1048575:717,1048576-2097151:995,2097152-4194303:1,8388608-16777215:1" +) + +type ( + LoadTestOperation int + TestResult struct { + StartTime time.Time + EndTime time.Time + TestDuration time.Duration + Description string + OpCount uint64 + Stats *leveldb.DBStats + OpRate float64 + ValueDist []uint64 + } + RandomKeySeeker struct { + db *leveldb.DB + iterator iterator.Iterator + iteratorMutex sync.Mutex + firstKey []byte + } + IORange struct { + StartRange int + EndRange int + Frequency int + } + IODistribution struct { + ranges []IORange + totalFrequency int + } +) + +func 
NewTestResult(startTime, endTime time.Time, desc string, opCount uint64, db *leveldb.DB) *TestResult { + tr := new(TestResult) + s := new(leveldb.DBStats) + err := db.Stats(s) + if err != nil { + log.Error().Err(err).Msg("Unable to retrieve db stats") + } + tr.Stats = s + tr.StartTime = startTime + tr.EndTime = endTime + tr.TestDuration = endTime.Sub(startTime) + tr.Description = desc + tr.OpCount = opCount + tr.OpRate = float64(opCount) / tr.TestDuration.Seconds() + + log.Info().Dur("testDuration", tr.TestDuration).Str("desc", tr.Description).Msg("recorded result") + log.Debug().Interface("result", tr).Msg("recorded result") + return tr +} + +var LevelDBBenchCmd = &cobra.Command{ + Use: "leveldbbench [flags]", + Short: "Perform a level db benchmark", + Long: usage, + RunE: func(cmd *cobra.Command, args []string) error { + log.Info().Msg("Starting level db test") + db, err := leveldb.OpenFile(*dbPath, &opt.Options{ + Filter: filter.NewBloomFilter(10), + DisableSeeksCompaction: true, + OpenFilesCacheCapacity: *openFilesCacheCapacity, + BlockCacheCapacity: *cacheSize / 2 * opt.MiB, + WriteBuffer: *cacheSize / 4 * opt.MiB, + // if we've disabled writes, or we're doing a full scan, we should open the database in read only mode + ReadOnly: *readOnly || *fullScan, + }) + if err != nil { + return err + } + + ctx := context.Background() + wo := opt.WriteOptions{ + NoWriteMerge: *noWriteMerge, + Sync: *syncWrites, + } + ro := &opt.ReadOptions{ + DontFillCache: *dontFillCache, + } + if *readStrict { + ro.Strict = opt.StrictAll + } else { + ro.Strict = opt.DefaultStrict + } + if *nilReadOptions { + ro = nil + } + var start time.Time + trs := make([]*TestResult, 0) + + sequentialWritesDesc := "random" + if *sequentialWrites { + sequentialWritesDesc = "sequential" + } + sequentialReadsDesc := "random" + if *sequentialReads { + sequentialReadsDesc = "sequential" + } + + if *fullScan { + start = time.Now() + opCount, valueDist := runFullScan(ctx, db, &wo, ro) + tr := NewTestResult(start, time.Now(), "full scan", opCount, db) + tr.ValueDist = valueDist + trs = append(trs, tr) + return printSummary(trs) + } + + // in read-only mode, we assume the database has already been populated in a previous run or we're using some other database + if !*readOnly { + start = time.Now() + writeData(ctx, db, &wo, 0, *writeLimit, *sequentialWrites) + trs = append(trs, NewTestResult(start, time.Now(), fmt.Sprintf("initial %s write", sequentialWritesDesc), *writeLimit, db)) + + for i := 0; i < int(*overwriteCount); i += 1 { + start = time.Now() + writeData(ctx, db, &wo, 0, *writeLimit, *sequentialWrites) + trs = append(trs, NewTestResult(start, time.Now(), fmt.Sprintf("%s overwrite %d", sequentialWritesDesc, i), *writeLimit, db)) + } + + start = time.Now() + runFullCompact(ctx, db, &wo) + trs = append(trs, NewTestResult(start, time.Now(), "compaction", 1, db)) + } + + if *sequentialReads { + start = time.Now() + readSeq(ctx, db, &wo, *readLimit) + trs = append(trs, NewTestResult(start, time.Now(), fmt.Sprintf("%s read", sequentialReadsDesc), *readLimit, db)) + } else { + start = time.Now() + readRandom(ctx, db, ro, *readLimit) + trs = append(trs, NewTestResult(start, time.Now(), fmt.Sprintf("%s read", sequentialReadsDesc), *readLimit, db)) + } + + log.Info().Msg("Close DB") + err = db.Close() + if err != nil { + log.Error().Err(err).Msg("error while closing db") + } + + return printSummary(trs) + }, + Args: func(cmd *cobra.Command, args []string) error { + var err error + sizeDistribution, err =
parseRawSizeDistribution(*rawSizeDistribution) + if err != nil { + return err + } + if *keySize > 64 { + return fmt.Errorf(" max supported key size is 64 bytes. %d is too big", *keySize) + } + return nil + }, +} + +func printSummary(trs []*TestResult) error { + jsonResults, err := json.Marshal(trs) + if err != nil { + return err + } + fmt.Println(string(jsonResults)) + return nil +} + +func runFullCompact(ctx context.Context, db *leveldb.DB, wo *opt.WriteOptions) { + err := db.CompactRange(util.Range{Start: nil, Limit: nil}) + if err != nil { + log.Fatal().Err(err).Msg("error compacting data") + } +} +func runFullScan(ctx context.Context, db *leveldb.DB, wo *opt.WriteOptions, ro *opt.ReadOptions) (uint64, []uint64) { + pool := make(chan bool, *degreeOfParallelism) + var wg sync.WaitGroup + // 32 should be safe here. That would correspond to a single value that's 4.2 GB + buckets := make([]uint64, 32) + var bucketsMutex sync.Mutex + iter := db.NewIterator(nil, nil) + var opCount uint64 = 0 + for iter.Next() { + pool <- true + wg.Add(1) + go func(i iterator.Iterator) { + opCount += 1 + k := i.Key() + v := i.Value() + + bucket := bits.Len(uint(len(v))) + bucketsMutex.Lock() + buckets[bucket] += 1 + bucketsMutex.Unlock() + + if bucket >= 22 { + // 9:19PM INF encountered giant value currentKey=536e617073686f744a6f75726e616c + log.Info().Str("currentKey", hex.EncodeToString(k)).Int("bytes", len(v)).Msg("encountered giant value") + } + + if opCount%1000000 == 0 { + log.Debug().Uint64("opCount", opCount).Str("currentKey", hex.EncodeToString(k)).Msg("continuing full scan") + } + wg.Done() + <-pool + }(iter) + } + iter.Release() + err := iter.Error() + if err != nil { + log.Fatal().Err(err).Msg("Error running full scan") + } + + wg.Wait() + + for k, v := range buckets { + if v == 0 { + continue + } + start := math.Exp2(float64(k)) + end := math.Exp2(float64(k+1)) - 1 + if k == 0 { + start = 0 + } + log.Debug(). + Int("bucket", k). + Float64("start", start). + Float64("end", end). 
+ Uint64("count", v).Msg("buckets") + } + return opCount, buckets +} +func writeData(ctx context.Context, db *leveldb.DB, wo *opt.WriteOptions, startIndex, writeLimit uint64, sequential bool) { + var i uint64 = startIndex + var wg sync.WaitGroup + pool := make(chan bool, *degreeOfParallelism) + bar := getNewProgressBar(int64(writeLimit), "Writing data") + lim := writeLimit + startIndex + for ; i < lim; i = i + 1 { + pool <- true + wg.Add(1) + go func(i uint64) { + _ = bar.Add(1) + k, v := makeKV(i, sizeDistribution.GetSizeSample(), sequential) + err := db.Put(k, v, wo) + if err != nil { + log.Fatal().Err(err).Msg("Failed to put value") + } + wg.Done() + <-pool + }(i) + } + wg.Wait() + _ = bar.Finish() +} + +func readSeq(ctx context.Context, db *leveldb.DB, wo *opt.WriteOptions, limit uint64) { + pb := getNewProgressBar(int64(limit), "sequential reads") + var rCount uint64 = 0 + pool := make(chan bool, *degreeOfParallelism) + var wg sync.WaitGroup +benchLoop: + for { + iter := db.NewIterator(nil, nil) + for iter.Next() { + rCount += 1 + _ = pb.Add(1) + pool <- true + wg.Add(1) + go func(i iterator.Iterator) { + _ = i.Key() + _ = i.Value() + wg.Done() + <-pool + }(iter) + + if rCount >= limit { + iter.Release() + break benchLoop + } + } + iter.Release() + err := iter.Error() + if err != nil { + log.Fatal().Err(err).Msg("Error reading sequentially") + } + } + wg.Wait() + _ = pb.Finish() +} +func readRandom(ctx context.Context, db *leveldb.DB, ro *opt.ReadOptions, limit uint64) { + pb := getNewProgressBar(int64(limit), "random reads") + var rCount uint64 = 0 + pool := make(chan bool, *degreeOfParallelism) + var wg sync.WaitGroup + rks := NewRandomKeySeeker(db) + +benchLoop: + for { + for { + pool <- true + wg.Add(1) + go func() { + rCount += 1 + _ = pb.Add(1) + + _, err := db.Get(rks.Key(), ro) + if err != nil { + log.Error().Err(err).Msg("level db random read error") + } + wg.Done() + <-pool + }() + if rCount >= limit { + break benchLoop + } + } + } + wg.Wait() + _ = pb.Finish() +} + +func NewRandomKeySeeker(db *leveldb.DB) *RandomKeySeeker { + rks := new(RandomKeySeeker) + rks.db = db + rks.iterator = db.NewIterator(nil, nil) + rks.firstKey = rks.iterator.Key() + return rks +} +func (r *RandomKeySeeker) Key() []byte { + seekKey := make([]byte, 8) + randSrcMutex.Lock() + randSrc.Read(seekKey) + randSrcMutex.Unlock() + + log.Trace().Str("seekKey", hex.EncodeToString(seekKey)).Msg("searching for key") + + r.iteratorMutex.Lock() + defer r.iteratorMutex.Unlock() + // first try to just get a random key + exists := r.iterator.Seek(seekKey) + + // if that key doesn't exist exactly advance to the next key + if !exists { + exists = r.iterator.Next() + } + // if there is no next key, to back to the beginning + if !exists { + r.iterator.First() + r.iterator.Next() + } + if err := r.iterator.Error(); err != nil { + log.Error().Err(err).Msg("issue getting random key") + } + resultKey := r.iterator.Key() + log.Trace().Str("seekKey", hex.EncodeToString(seekKey)).Str("resultKey", hex.EncodeToString(resultKey)).Msg("found random key") + return resultKey +} + +func getNewProgressBar(max int64, description string) *progressbar.ProgressBar { + pb := progressbar.NewOptions64(max, + progressbar.OptionEnableColorCodes(false), + progressbar.OptionSetDescription(description), + progressbar.OptionSetElapsedTime(true), + progressbar.OptionSetItsString("iop"), + progressbar.OptionSetRenderBlankState(true), + progressbar.OptionShowCount(), + progressbar.OptionShowIts(), + progressbar.OptionShowElapsedTimeOnFinish(), + 
progressbar.OptionUseANSICodes(true), + progressbar.OptionThrottle(1*time.Second), + progressbar.OptionSetWriter(os.Stderr), + progressbar.OptionOnCompletion(func() { + _, _ = fmt.Fprintln(os.Stderr) + }), + progressbar.OptionSetTheme(progressbar.Theme{ + Saucer: "=", + SaucerHead: ">", + SaucerPadding: " ", + BarStart: "[", + BarEnd: "]", + }), + progressbar.OptionSetWidth(10), + progressbar.OptionFullWidth(), + ) + return pb +} + +func makeKV(seed, valueSize uint64, sequential bool) ([]byte, []byte) { + tmpKey := make([]byte, *keySize) + binary.LittleEndian.PutUint64(tmpKey, seed) + hashedKey := sha512.Sum512(tmpKey) + tmpKey = hashedKey[0:*keySize] + if sequential { + // binary.BigEndian.PutUint64(tmpKey, seed) + binary.BigEndian.PutUint64(tmpKey, seed) + } + + log.Trace().Str("tmpKey", hex.EncodeToString(tmpKey)).Uint64("valueSize", valueSize).Uint64("seed", seed).Msg("Generated key") + + tmpValue := make([]byte, valueSize) + if !*writeZero { + // Assuming we're not in zero mode, we'll fill the data with random data + randSrcMutex.Lock() + randSrc.Read(tmpValue) + randSrcMutex.Unlock() + } + return tmpKey, tmpValue +} + +func (i *IORange) Validate() error { + if i.EndRange < i.StartRange { + return fmt.Errorf("the end of the range %d is less than the start of the range %d", i.EndRange, i.StartRange) + } + if i.EndRange <= 0 { + return fmt.Errorf("the provided end range %d is less than 0", i.EndRange) + } + if i.StartRange < 0 { + return fmt.Errorf("the provided start range %d is less than 0", i.StartRange) + } + if i.Frequency <= 0 { + return fmt.Errorf("the relative frequency must be greater than 0, but got %d", i.Frequency) + } + return nil +} +func NewIODistribution(ranges []IORange) (*IODistribution, error) { + iod := new(IODistribution) + sort.Slice(ranges, func(i, j int) bool { + return ranges[i].StartRange < ranges[j].StartRange + }) + + for i := 0; i < len(ranges)-1; i++ { + if ranges[i].EndRange >= ranges[i+1].StartRange { + return nil, fmt.Errorf("overlap found between ranges: %v and %v", ranges[i], ranges[i+1]) + } + } + + iod.ranges = ranges + f := 0 + for _, v := range ranges { + f += v.Frequency + } + iod.totalFrequency = f + return iod, nil +} + +// GetSizeSample will return an IO size in accordance with the probability distribution +func (i *IODistribution) GetSizeSample() uint64 { + randSrcMutex.Lock() + randFreq := randSrc.Intn(i.totalFrequency) + randSrcMutex.Unlock() + + log.Trace().Int("randFreq", randFreq).Int("totalFreq", i.totalFrequency).Msg("Getting Size Sample") + var selectedRange *IORange + currentFreq := 0 + for k, v := range i.ranges { + currentFreq += v.Frequency + if randFreq <= currentFreq { + selectedRange = &i.ranges[k] + break + } + } + if selectedRange == nil { + log.Fatal().Int("randFreq", randFreq).Int("totalFreq", i.totalFrequency).Msg("Potential off by 1 error in random sample") + return 0 // lint + } + randRange := selectedRange.EndRange - selectedRange.StartRange + randSrcMutex.Lock() + randSize := randSrc.Intn(randRange + 1) + randSrcMutex.Unlock() + return uint64(randSize + selectedRange.StartRange) +} + +func parseRawSizeDistribution(dist string) (*IODistribution, error) { + buckets := strings.Split(dist, ",") + if len(buckets) == 0 { + return nil, fmt.Errorf("at least one size bucket must be provided") + } + ioDist := make([]IORange, 0) + bucketRegEx := regexp.MustCompile(`^(\d*)-(\d*):(\d*)$`) + for _, r := range buckets { + matches := bucketRegEx.FindAllStringSubmatch(r, -1) + if len(matches) != 1 { + return nil, fmt.Errorf("the bucket 
%s did not match expected format of start-end:ratio", r) + } + if len(matches[0]) != 4 { + return nil, fmt.Errorf("the bucket %s didn't match expected number of sub groups", r) + } + startRange, err := strconv.Atoi(matches[0][1]) + if err != nil { + return nil, err + } + endRange, err := strconv.Atoi(matches[0][2]) + if err != nil { + return nil, err + } + frequency, err := strconv.Atoi(matches[0][3]) + if err != nil { + return nil, err + } + ioRange := new(IORange) + ioRange.StartRange = startRange + ioRange.EndRange = endRange + ioRange.Frequency = frequency + err = ioRange.Validate() + if err != nil { + return nil, err + } + ioDist = append(ioDist, *ioRange) + } + return NewIODistribution(ioDist) +} + +func init() { + flagSet := LevelDBBenchCmd.PersistentFlags() + writeLimit = flagSet.Uint64("write-limit", 1000000, "The number of entries to write in the db") + readLimit = flagSet.Uint64("read-limit", 10000000, "the number of reads we'll attempt to complete in a given test") + overwriteCount = flagSet.Uint64("overwrite-count", 5, "the number of times to overwrite the data") + sequentialReads = flagSet.Bool("sequential-reads", false, "if true we'll perform reads sequentially") + sequentialWrites = flagSet.Bool("sequential-writes", false, "if true we'll perform writes in a somewhat sequential manner") + keySize = flagSet.Uint64("key-size", 32, "The byte length of the keys that we'll use") + degreeOfParallelism = flagSet.Uint8("degree-of-parallelism", 2, "The number of concurrent goroutines we'll use") + rawSizeDistribution = flagSet.String("size-distribution", borDistribution, "the size distribution to use while testing") + nilReadOptions = flagSet.Bool("nil-read-opts", false, "if true we'll use nil read opt (this is what geth/bor does)") + dontFillCache = flagSet.Bool("dont-fill-read-cache", false, "if false, then random reads will be cached") + readStrict = flagSet.Bool("read-strict", false, "if true the random reads will be made in strict mode") + noWriteMerge = flagSet.Bool("no-merge-write", false, "allows disabling write merge") + syncWrites = flagSet.Bool("sync-writes", false, "sync each write") + // https://github.com/maticnetwork/bor/blob/eedeaed1fb17d73dd46d8999644d5035e176e22a/eth/backend.go#L141 + // https://github.com/maticnetwork/bor/blob/eedeaed1fb17d73dd46d8999644d5035e176e22a/eth/ethconfig/config.go#L86C2-L86C15 + cacheSize = flagSet.Int("cache-size", 512, "the number of megabytes to use as our internal cache size") + openFilesCacheCapacity = flagSet.Int("handles", 500, "defines the capacity of the open files caching. Use -1 for zero; this has the same effect as specifying NoCacher to OpenFilesCacher.") + writeZero = flagSet.Bool("write-zero", false, "if true, we'll write 0s rather than random data") + readOnly = flagSet.Bool("read-only", false, "if true, we'll skip all the write operations and open the DB in read only mode") + dbPath = flagSet.String("db-path", "_benchmark_db", "the path of the database that we'll use for testing") + fullScan = flagSet.Bool("full-scan-mode", false, "if true, the application will scan the full database as fast as possible and print a summary") + + randSrc = rand.New(rand.NewSource(1)) +} diff --git a/cmd/leveldbbench/usage.md b/cmd/leveldbbench/usage.md new file mode 100644 index 00000000..0bbc5b33 --- /dev/null +++ b/cmd/leveldbbench/usage.md @@ -0,0 +1,115 @@ +This command is meant to give us a sense of the system level +performance for leveldb: + +```bash +go run main.go leveldbbench --degree-of-parallelism 2 | jq '.'
> result.json +``` + +In many cases, we'll want to emulate the performance characteristics +of `bor` or `geth`. This is the basic IO pattern when `bor` is in sync: + +```text +Process Name = bor + Kbytes : count distribution + 0 -> 1 : 0 | | + 2 -> 3 : 0 | | + 4 -> 7 : 10239 |**************** | + 8 -> 15 : 25370 |****************************************| + 16 -> 31 : 7082 |*********** | + 32 -> 63 : 1241 |* | + 64 -> 127 : 58 | | + 128 -> 255 : 11 | | +``` + +This is the IO pattern when `bor` is getting in sync. + +```text +Process Name = bor + Kbytes : count distribution + 0 -> 1 : 0 | | + 2 -> 3 : 0 | | + 4 -> 7 : 23089 |************* | + 8 -> 15 : 70350 |****************************************| + 16 -> 31 : 11790 |****** | + 32 -> 63 : 1193 | | + 64 -> 127 : 204 | | + 128 -> 255 : 271 | | + 256 -> 511 : 1381 | | +``` + +This gives us a sense of the relative size of the IOPs. We'd also want +to get a sense of the read/write ratio. This is some sample data from +bor while syncing: + +```text +12:48:08 loadavg: 5.86 6.22 7.13 16/451 56297 + +READS WRITES R_Kb W_Kb PATH +307558 1277 4339783 30488 /var/lib/bor/data/bor/chaindata/ + +12:48:38 loadavg: 6.46 6.32 7.14 3/452 56298 + +READS WRITES R_Kb W_Kb PATH +309904 946 4399349 26051 /var/lib/bor/data/bor/chaindata/ + +``` + +During the same period of time this is what the IO looks like from a +node that's in sync. + +```text +12:48:05 loadavg: 1.55 1.85 2.03 18/416 88371 + +READS WRITES R_Kb W_Kb PATH +124530 488 1437436 12165 /var/lib/bor/data/bor/chaindata/ + +12:48:35 loadavg: 4.14 2.44 2.22 1/416 88371 + +READS WRITES R_Kb W_Kb PATH +81282 215 823530 4610 /var/lib/bor/data/bor/chaindata/ + +``` + +If we want to simulate `bor` behavior, we can leverage this data to +configure the leveldb benchmark tool. + + +| Syncing | Reads | Writes | Read (kb) | Write (kb) | RW Ratio | kb/r | kb/w | +|---------|---------|--------|-----------|------------|----------|------|------| +| TRUE | 307,558 | 1,277 | 4,339,783 | 30,488 | 241 | 14.1 | 23.9 | +| TRUE | 309,904 | 946 | 7,399,349 | 26,051 | 328 | 23.9 | 27.5 | +| FALSE | 124,530 | 488 | 1,437,436 | 12,165 | 255 | 11.5 | 24.9 | +| FALSE | 51,282 | 215 | 823,530 | 4,610 | 239 | 16.1 | 21.4 | + +The number of IOps while syncing is a lot higher. The only other +obvious difference is that the IOp size is a bit larger while syncing +as well. 
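As a rough sketch of how the profile above could be fed back into this tool (the flag values below are illustrative guesses rather than measured settings), a synced-style workload with roughly 250 reads per write and mostly ~10kb values plus an occasional ~32kb value might be approximated like this:

```bash
# Illustrative only: ~250:1 read/write ratio, with values mostly in the
# 8-16kb bucket and a smaller share in the 16-32kb bucket.
go run main.go leveldbbench \
  --degree-of-parallelism 2 \
  --write-limit 40000 \
  --read-limit 10000000 \
  --size-distribution "8192-16383:10,16384-32767:1" | jq '.' > result.json
```

The `--size-distribution` buckets use the same `start-end:frequency` format as the built-in bor distribution, so any of the ratios observed above can be encoded directly.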
+ +- Syncing + - Read Write Ratio - 275:1 + - Small IOp - 10kb + - Large IOp - 256kb + - Small Large Ratio - 10:1 +- Synced + - Read Write Ratio - 250:1 + - Small IOp - 10kb + - Larg IOp - 32kb + - Small Large Ratio - 10:1 + +```text +7:58PM DBG buckets bucket=0 count=9559791821 end=1 start=0 +7:58PM DBG buckets bucket=1 count=141033 end=3 start=2 +7:58PM DBG buckets bucket=2 count=92899 end=7 start=4 +7:58PM DBG buckets bucket=3 count=256655 end=15 start=8 +7:58PM DBG buckets bucket=4 count=262589 end=31 start=16 +7:58PM DBG buckets bucket=5 count=191353 end=63 start=32 +7:58PM DBG buckets bucket=6 count=99519 end=127 start=64 +7:58PM DBG buckets bucket=7 count=74161 end=255 start=128 +7:58PM DBG buckets bucket=8 count=17426 end=511 start=256 +7:58PM DBG buckets bucket=9 count=692 end=1023 start=512 +7:58PM DBG buckets bucket=10 count=989 end=2047 start=1024 +7:58PM DBG buckets bucket=13 count=1 end=16383 start=8192 +7:58PM INF recorded result desc="full scan" testDuration=10381196.479925 +7:58PM DBG recorded result result={"Description":"full scan","EndTime":"2023-07-17T19:58:05.396257711Z","OpCount":9557081144,"OpRate":920614.609547304,"StartTime":"2023-07-17T17:05:04.199777776Z","Stats":{"AliveIterators":0,"AliveSnapshots":0,"BlockCache":{"Buckets":2048,"DelCount":259134854,"GrowCount":9,"HitCount":4,"MissCount":262147633,"Nodes":33294,"SetCount":259168148,"ShrinkCount":2,"Size":268427343},"BlockCacheSize":268427343,"FileCache":{"Buckets":16,"DelCount":536037,"GrowCount":0,"HitCount":2,"MissCount":536537,"Nodes":500,"SetCount":536537,"ShrinkCount":0,"Size":500},"IORead":1092651461848,"IOWrite":13032122717,"Level0Comp":0,"LevelDurations":[0,0,546151937,15675194130,100457643600,40581548153,0],"LevelRead":[0,0,45189458,1233235440,8351239571,3376108236,0],"LevelSizes":[0,103263963,1048356844,10484866671,104856767171,180600915234,797187827055],"LevelTablesCounts":[0,51,665,7066,53522,95777,371946],"LevelWrite":[0,0,45159786,1230799439,8328970986,3371359447,0],"MemComp":0,"NonLevel0Comp":1433,"OpenedTablesCount":500,"SeekComp":0,"WriteDelayCount":0,"WriteDelayDuration":0,"WritePaused":false},"TestDuration":10381196479925,"ValueDist":null} + +``` diff --git a/cmd/loadtest/app.go b/cmd/loadtest/app.go new file mode 100644 index 00000000..98b1476d --- /dev/null +++ b/cmd/loadtest/app.go @@ -0,0 +1,252 @@ +package loadtest + +import ( + "crypto/ecdsa" + _ "embed" + "fmt" + "math/big" + "math/rand" + "net/url" + "regexp" + "strings" + "sync" + "time" + + gssignature "github.com/centrifuge/go-substrate-rpc-client/v4/signature" + gstypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/maticnetwork/polygon-cli/rpctypes" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/spf13/cobra" +) + +type ( + blockSummary struct { + Block *rpctypes.RawBlockResponse + Receipts map[ethcommon.Hash]rpctypes.RawTxReceipt + Latencies map[uint64]time.Duration + } + hexwordReader struct { + } + loadTestSample struct { + GoRoutineID int64 + RequestID int64 + RequestTime time.Time + WaitTime time.Duration + Receipt string + IsError bool + Nonce uint64 + } + loadTestParams struct { + // inputs + Requests *int64 + Concurrency *int64 + BatchSize *uint64 + TimeLimit *int64 + ToRandom *bool + CallOnly *bool + URL *url.URL + ChainID *uint64 + PrivateKey *string + ToAddress *string + HexSendAmount *string + RateLimit *float64 + AdaptiveRateLimit *bool + SteadyStateTxPoolSize *uint64 + AdaptiveRateLimitIncrement *uint64 + 
AdaptiveCycleDuration *uint64 + AdaptiveBackoffFactor *float64 + Mode *string + Function *uint64 + Iterations *uint64 + ByteCount *uint64 + Seed *int64 + IsAvail *bool + LtAddress *string + ERC20Address *string + ERC721Address *string + DelAddress *string + ContractCallNumberOfBlocksToWaitFor *uint64 + ContractCallBlockInterval *uint64 + ForceContractDeploy *bool + ForceGasLimit *uint64 + ForceGasPrice *uint64 + ForcePriorityGasPrice *uint64 + ShouldProduceSummary *bool + SummaryOutputMode *string + LegacyTransactionMode *bool + RecallLength *uint64 + + // Computed + CurrentGasPrice *big.Int + CurrentGasTipCap *big.Int + CurrentNonce *uint64 + ECDSAPrivateKey *ecdsa.PrivateKey + FromETHAddress *ethcommon.Address + ToETHAddress *ethcommon.Address + SendAmount *big.Int + CurrentBaseFee *big.Int + ChainSupportBaseFee bool + + ToAvailAddress *gstypes.MultiAddress + FromAvailAddress *gssignature.KeyringPair + AvailRuntime *gstypes.RuntimeVersion + } + + txpoolStatus struct { + Pending any `json:"pending"` + Queued any `json:"queued"` + } +) + +var ( + //go:embed usage.md + usage string + inputLoadTestParams loadTestParams + loadTestResults []loadTestSample + loadTestResutsMutex sync.RWMutex + + hexwords = []byte{ + 0x00, 0x0F, 0xF1, 0xCE, + 0x00, 0xBA, 0xB1, 0x0C, + 0x1B, 0xAD, 0xB0, 0x02, + 0x8B, 0xAD, 0xF0, 0x0D, + 0xAB, 0xAD, 0xBA, 0xBE, + 0xB1, 0x05, 0xF0, 0x0D, + 0xB1, 0x6B, 0x00, 0xB5, + 0x0B, 0x00, 0xB1, 0x35, + 0xBA, 0xAA, 0xAA, 0xAD, + 0xBA, 0xAD, 0xF0, 0x0D, + 0xBA, 0xD2, 0x22, 0x22, + 0xBA, 0xDD, 0xCA, 0xFE, + 0xCA, 0xFE, 0xB0, 0xBA, + 0xB0, 0xBA, 0xBA, 0xBE, + 0xBE, 0xEF, 0xBA, 0xBE, + 0xC0, 0x00, 0x10, 0xFF, + 0xCA, 0xFE, 0xBA, 0xBE, + 0xCA, 0xFE, 0xD0, 0x0D, + 0xCE, 0xFA, 0xED, 0xFE, + 0x0D, 0x15, 0xEA, 0x5E, + 0xDA, 0xBB, 0xAD, 0x00, + 0xDE, 0xAD, 0x2B, 0xAD, + 0xDE, 0xAD, 0xBA, 0xAD, + 0xDE, 0xAD, 0xBA, 0xBE, + 0xDE, 0xAD, 0xBE, 0xAF, + 0xDE, 0xAD, 0xBE, 0xEF, + 0xDE, 0xAD, 0xC0, 0xDE, + 0xDE, 0xAD, 0xDE, 0xAD, + 0xDE, 0xAD, 0xD0, 0x0D, + 0xDE, 0xAD, 0xFA, 0x11, + 0xDE, 0xAD, 0x10, 0xCC, + 0xDE, 0xAD, 0xFE, 0xED, + 0xDE, 0xCA, 0xFB, 0xAD, + 0xDE, 0xFE, 0xC8, 0xED, + 0xD0, 0xD0, 0xCA, 0xCA, + 0xE0, 0x11, 0xCF, 0xD0, + 0xFA, 0xCE, 0xFE, 0xED, + 0xFB, 0xAD, 0xBE, 0xEF, + 0xFE, 0xE1, 0xDE, 0xAD, + 0xFE, 0xED, 0xBA, 0xBE, + 0xFE, 0xED, 0xC0, 0xDE, + 0xFF, 0xBA, 0xDD, 0x11, + 0xF0, 0x0D, 0xBA, 0xBE, + } + + randSrc *rand.Rand +) + +// LoadtestCmd represents the loadtest command +var LoadtestCmd = &cobra.Command{ + Use: "loadtest url", + Short: "Run a generic load test against an Eth/EVM style JSON-RPC endpoint.", + Long: usage, + RunE: func(cmd *cobra.Command, args []string) error { + err := runLoadTest(cmd.Context()) + if err != nil { + return err + } + return nil + }, + Args: func(cmd *cobra.Command, args []string) error { + zerolog.DurationFieldUnit = time.Second + zerolog.DurationFieldInteger = true + + if len(args) != 1 { + return fmt.Errorf("expected exactly one argument") + } + url, err := url.Parse(args[0]) + if err != nil { + log.Error().Err(err).Msg("Unable to parse url input error") + return err + } + if url.Scheme != "http" && url.Scheme != "https" && url.Scheme != "ws" && url.Scheme != "wss" { + return fmt.Errorf("the scheme %s is not supported", url.Scheme) + } + inputLoadTestParams.URL = url + r := regexp.MustCompile(fmt.Sprintf("^[%s]+$", strings.Join(validLoadTestModes, ""))) + if !r.MatchString(*inputLoadTestParams.Mode) { + return fmt.Errorf("the mode %s is not recognized", *inputLoadTestParams.Mode) + } + if *inputLoadTestParams.AdaptiveBackoffFactor <= 0.0 { + 
return fmt.Errorf("the backoff factor must be a positive number") + } + return nil + }, +} + +func init() { + ltp := new(loadTestParams) + + ltp.Requests = LoadtestCmd.PersistentFlags().Int64P("requests", "n", 1, "Number of requests to perform for the benchmarking session. The default is to just perform a single request which usually leads to non-representative benchmarking results.") + ltp.Concurrency = LoadtestCmd.PersistentFlags().Int64P("concurrency", "c", 1, "Number of requests to perform concurrently. Default is one request at a time.") + ltp.TimeLimit = LoadtestCmd.PersistentFlags().Int64P("time-limit", "t", -1, "Maximum number of seconds to spend for benchmarking. Use this to benchmark within a fixed total amount of time. By default there is no time limit.") + ltp.PrivateKey = LoadtestCmd.PersistentFlags().String("private-key", codeQualityPrivateKey, "The hex encoded private key that we'll use to send transactions") + ltp.ChainID = LoadtestCmd.PersistentFlags().Uint64("chain-id", 0, "The chain id for the transactions.") + ltp.ToAddress = LoadtestCmd.PersistentFlags().String("to-address", "0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF", "The address that we're going to send to") + ltp.ToRandom = LoadtestCmd.PersistentFlags().Bool("to-random", false, "When doing a transfer test, should we send to random addresses rather than DEADBEEFx5") + ltp.CallOnly = LoadtestCmd.PersistentFlags().Bool("call-only", false, "When using this mode, rather than sending a transaction, we'll just call. This mode is incompatible with adaptive rate limiting, summarization, and a few other features.") + ltp.HexSendAmount = LoadtestCmd.PersistentFlags().String("send-amount", "0x38D7EA4C68000", "The amount of wei that we'll send every transaction") + ltp.RateLimit = LoadtestCmd.PersistentFlags().Float64("rate-limit", 4, "An overall limit to the number of requests per second. Give a number less than zero to remove this limit altogether") + ltp.AdaptiveRateLimit = LoadtestCmd.PersistentFlags().Bool("adaptive-rate-limit", false, "Enable AIMD-style congestion control to automatically adjust request rate") + ltp.SteadyStateTxPoolSize = LoadtestCmd.PersistentFlags().Uint64("steady-state-tx-pool-size", 1000, "When using adaptive rate limiting, this value sets the target queue size. If the queue is smaller than this value, we'll speed up. If the queue is larger than this value, we'll back off.") + ltp.AdaptiveRateLimitIncrement = LoadtestCmd.PersistentFlags().Uint64("adaptive-rate-limit-increment", 50, "When using adaptive rate limiting, this flag controls the size of the additive increases.") + ltp.AdaptiveCycleDuration = LoadtestCmd.PersistentFlags().Uint64("adaptive-cycle-duration-seconds", 10, "When using adaptive rate limiting, this flag controls how often we check the queue size and adjust the rates") + ltp.AdaptiveBackoffFactor = LoadtestCmd.PersistentFlags().Float64("adaptive-backoff-factor", 2, "When using adaptive rate limiting, this flag controls our multiplicative decrease value.") + ltp.Mode = LoadtestCmd.PersistentFlags().StringP("mode", "m", "t", `The testing mode to use.
It can be multiple like: "tcdf" +t - sending transactions +d - deploy contract +c - call random contract functions +f - call specific contract function +p - call random precompiled contracts +a - call a specific precompiled contract address +s - store mode +r - random modes +2 - ERC20 Transfers +7 - ERC721 Mints +R - total recall`) + ltp.Function = LoadtestCmd.PersistentFlags().Uint64P("function", "f", 1, "A specific function to be called if running with `--mode f` or a specific precompiled contract when running with `--mode a`") + ltp.Iterations = LoadtestCmd.PersistentFlags().Uint64P("iterations", "i", 1, "If we're making contract calls, this controls how many times the contract will execute the instruction in a loop. If we are making ERC721 Mints, this indicates the minting batch size") + ltp.ByteCount = LoadtestCmd.PersistentFlags().Uint64P("byte-count", "b", 1024, "If we're in store mode, this controls how many bytes we'll try to store in our contract") + ltp.Seed = LoadtestCmd.PersistentFlags().Int64("seed", 123456, "A seed for generating random values and addresses") + ltp.IsAvail = LoadtestCmd.PersistentFlags().Bool("data-avail", false, "[DEPRECATED] Enables Avail load testing") + ltp.LtAddress = LoadtestCmd.PersistentFlags().String("lt-address", "", "The address of a pre-deployed load test contract") + ltp.ERC20Address = LoadtestCmd.PersistentFlags().String("erc20-address", "", "The address of a pre-deployed erc 20 contract") + ltp.ERC721Address = LoadtestCmd.PersistentFlags().String("erc721-address", "", "The address of a pre-deployed erc 721 contract") + ltp.ContractCallNumberOfBlocksToWaitFor = LoadtestCmd.PersistentFlags().Uint64("contract-call-nb-blocks-to-wait-for", 30, "The number of blocks to wait for before giving up on a contract deployment") + ltp.ContractCallBlockInterval = LoadtestCmd.PersistentFlags().Uint64("contract-call-block-interval", 1, "During deployment, this flag controls if we should check every block, every other block, or every nth block to determine that the contract has been deployed") + ltp.ForceContractDeploy = LoadtestCmd.PersistentFlags().Bool("force-contract-deploy", false, "Some load test modes don't require a contract deployment. Set this flag to true to force contract deployments. This will still respect the --lt-address flags.") + ltp.ForceGasLimit = LoadtestCmd.PersistentFlags().Uint64("gas-limit", 0, "In environments where the gas limit can't be computed on the fly, we can specify it manually. This can also be used to avoid eth_estimateGas") + ltp.ForceGasPrice = LoadtestCmd.PersistentFlags().Uint64("gas-price", 0, "In environments where the gas price can't be determined automatically, we can specify it manually") + ltp.ForcePriorityGasPrice = LoadtestCmd.PersistentFlags().Uint64("priority-gas-price", 0, "Specify Gas Tip Price in the case of EIP-1559") + ltp.ShouldProduceSummary = LoadtestCmd.PersistentFlags().Bool("summarize", false, "Should we produce an execution summary after the load test has finished. If you're running a large load test, this can take a long time") + ltp.BatchSize = LoadtestCmd.PersistentFlags().Uint64("batch-size", 999, "Number of batches to perform at a time for receipt fetching. 
Default is 999 requests at a time.") + ltp.SummaryOutputMode = LoadtestCmd.PersistentFlags().String("output-mode", "text", "Format mode for summary output (json | text)") + ltp.LegacyTransactionMode = LoadtestCmd.PersistentFlags().Bool("legacy", false, "Send a legacy transaction instead of an EIP1559 transaction.") + ltp.RecallLength = LoadtestCmd.PersistentFlags().Uint64("recall-blocks", 50, "The number of blocks that we'll attempt to fetch for recall") + inputLoadTestParams = *ltp + + // TODO batch size + // TODO Compression + // TODO array of RPC endpoints to round robin? +} diff --git a/cmd/loadtest/avail.go b/cmd/loadtest/avail.go new file mode 100644 index 00000000..7bc44847 --- /dev/null +++ b/cmd/loadtest/avail.go @@ -0,0 +1,259 @@ +package loadtest + +import ( + "context" + "fmt" + "math/big" + "sync" + "time" + + _ "embed" + gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" + gssignature "github.com/centrifuge/go-substrate-rpc-client/v4/signature" + gstypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/rs/zerolog/log" + "golang.org/x/time/rate" +) + +func availLoop(ctx context.Context, c *gsrpc.SubstrateAPI) error { + var err error + + ltp := inputLoadTestParams + log.Trace().Interface("Input Params", ltp).Msg("Params") + + routines := *ltp.Concurrency + requests := *ltp.Requests + currentNonce := uint64(0) // *ltp.CurrentNonce + chainID := new(big.Int).SetUint64(*ltp.ChainID) + privateKey := ltp.ECDSAPrivateKey + mode := *ltp.Mode + + _ = chainID + _ = privateKey + + meta, err := c.RPC.State.GetMetadataLatest() + if err != nil { + return err + } + + genesisHash, err := c.RPC.Chain.GetBlockHash(0) + if err != nil { + return err + } + + key, err := gstypes.CreateStorageKey(meta, "System", "Account", ltp.FromAvailAddress.PublicKey, nil) + if err != nil { + log.Error().Err(err).Msg("Could not create storage key") + return err + } + + var accountInfo gstypes.AccountInfo + ok, err := c.RPC.State.GetStorageLatest(key, &accountInfo) + if err != nil { + log.Error().Err(err).Msg("Could not load storage") + return err + } + if !ok { + err = fmt.Errorf("loaded storage is not okay") + log.Error().Err(err).Msg("Loaded storage is not okay") + return err + } + + currentNonce = uint64(accountInfo.Nonce) + + rl := rate.NewLimiter(rate.Limit(*ltp.RateLimit), 1) + if *ltp.RateLimit <= 0.0 { + rl = nil + } + + var currentNonceMutex sync.Mutex + + var i int64 + + var wg sync.WaitGroup + for i = 0; i < routines; i = i + 1 { + log.Trace().Int64("routine", i).Msg("Starting Thread") + wg.Add(1) + go func(i int64) { + var j int64 + var startReq time.Time + var endReq time.Time + + for j = 0; j < requests; j = j + 1 { + + if rl != nil { + err = rl.Wait(ctx) + if err != nil { + log.Error().Err(err).Msg("Encountered a rate limiting error") + } + } + currentNonceMutex.Lock() + myNonceValue := currentNonce + currentNonce = currentNonce + 1 + currentNonceMutex.Unlock() + + localMode := mode + // if there are multiple modes, iterate through them, 'r' mode is supported here + if len(mode) > 1 { + localMode = string(mode[int(i+j)%(len(mode))]) + } + // if we're doing random, we'll just pick one based on the current index + if localMode == loadTestModeRandom { + localMode = validLoadTestModes[int(i+j)%(len(validLoadTestModes)-1)] + } + // this function should probably be abstracted + switch localMode { + case loadTestModeTransaction: + startReq, endReq, err = loadTestAvailTransfer(ctx, c, myNonceValue, meta, genesisHash) + case loadTestModeDeploy: + startReq, endReq, err = 
loadTestNotImplemented(ctx, c, myNonceValue) + case loadTestModeCall: + startReq, endReq, err = loadTestNotImplemented(ctx, c, myNonceValue) + case loadTestModeFunction: + startReq, endReq, err = loadTestNotImplemented(ctx, c, myNonceValue) + case loadTestModeInc: + startReq, endReq, err = loadTestNotImplemented(ctx, c, myNonceValue) + case loadTestModeStore: + startReq, endReq, err = loadTestAvailStore(ctx, c, myNonceValue, meta, genesisHash) + default: + log.Error().Str("mode", mode).Msg("We've arrived at a load test mode that we don't recognize") + } + recordSample(i, j, err, startReq, endReq, myNonceValue) + if err != nil { + log.Trace().Err(err).Msg("Recorded an error while sending transactions") + } + + log.Trace().Int64("routine", i).Str("mode", localMode).Int64("request", j).Msg("Request") + } + wg.Done() + }(i) + + } + log.Trace().Msg("Finished starting go routines. Waiting..") + wg.Wait() + return nil + +} + +func initAvailTestParams(ctx context.Context, c *gsrpc.SubstrateAPI) error { + toAddr, err := gstypes.NewMultiAddressFromHexAccountID(*inputLoadTestParams.ToAddress) + if err != nil { + log.Error().Err(err).Msg("Unable to create new multi address") + return err + } + + if *inputLoadTestParams.PrivateKey == codeQualityPrivateKey { + // Avail keys can use the same seed but the way the key is derived is different + *inputLoadTestParams.PrivateKey = codeQualitySeed + } + + kp, err := gssignature.KeyringPairFromSecret(*inputLoadTestParams.PrivateKey, uint8(*inputLoadTestParams.ChainID)) + if err != nil { + log.Error().Err(err).Msg("Could not create key pair") + return err + } + + amt, err := hexToBigInt(*inputLoadTestParams.HexSendAmount) + if err != nil { + log.Error().Err(err).Msg("Couldn't parse send amount") + return err + } + + rv, err := c.RPC.State.GetRuntimeVersionLatest() + if err != nil { + log.Error().Err(err).Msg("Couldn't get runtime version") + return err + } + + inputLoadTestParams.AvailRuntime = rv + inputLoadTestParams.SendAmount = amt + inputLoadTestParams.FromAvailAddress = &kp + inputLoadTestParams.ToAvailAddress = &toAddr + return nil +} + +func loadTestAvailTransfer(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64, meta *gstypes.Metadata, genesisHash gstypes.Hash) (t1 time.Time, t2 time.Time, err error) { + ltp := inputLoadTestParams + + toAddr := *ltp.ToAvailAddress + if *ltp.ToRandom { + pk := make([]byte, 32) + _, err = randSrc.Read(pk) + if err != nil { + // For some reason weren't able to read the random data + log.Error().Msg("Sending to random is not implemented for substrate yet") + } else { + toAddr = gstypes.NewMultiAddressFromAccountID(pk) + } + + } + + gsCall, err := gstypes.NewCall(meta, "Balances.transfer", toAddr, gstypes.NewUCompact(ltp.SendAmount)) + if err != nil { + return + } + + ext := gstypes.NewExtrinsic(gsCall) + rv := ltp.AvailRuntime + kp := *inputLoadTestParams.FromAvailAddress + + o := gstypes.SignatureOptions{ + BlockHash: genesisHash, + Era: gstypes.ExtrinsicEra{IsMortalEra: false, IsImmortalEra: true}, + GenesisHash: genesisHash, + Nonce: gstypes.NewUCompactFromUInt(uint64(nonce)), + SpecVersion: rv.SpecVersion, + Tip: gstypes.NewUCompactFromUInt(0), + TransactionVersion: rv.TransactionVersion, + } + + err = ext.Sign(kp, o) + if err != nil { + return + } + + t1 = time.Now() + defer func() { t2 = time.Now() }() + _, err = c.RPC.Author.SubmitExtrinsic(ext) + return +} + +func loadTestAvailStore(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64, meta *gstypes.Metadata, genesisHash gstypes.Hash) (t1 time.Time, t2 
time.Time, err error) { + ltp := inputLoadTestParams + + inputData := make([]byte, *ltp.ByteCount) + _, _ = hexwordRead(inputData) + + gsCall, err := gstypes.NewCall(meta, "DataAvailability.submit_data", gstypes.NewBytes([]byte(inputData))) + if err != nil { + return + } + + // Create the extrinsic + ext := gstypes.NewExtrinsic(gsCall) + + rv := ltp.AvailRuntime + + kp := *inputLoadTestParams.FromAvailAddress + + o := gstypes.SignatureOptions{ + BlockHash: genesisHash, + Era: gstypes.ExtrinsicEra{IsMortalEra: false, IsImmortalEra: true}, + GenesisHash: genesisHash, + Nonce: gstypes.NewUCompactFromUInt(uint64(nonce)), + SpecVersion: rv.SpecVersion, + Tip: gstypes.NewUCompactFromUInt(100), + TransactionVersion: rv.TransactionVersion, + } + // Sign the extrinsic with the keyring pair configured for this load test + err = ext.Sign(kp, o) + if err != nil { + return + } + + // Send the extrinsic + t1 = time.Now() + defer func() { t2 = time.Now() }() + _, err = c.RPC.Author.SubmitExtrinsic(ext) + return +} diff --git a/cmd/loadtest/loadtest.go b/cmd/loadtest/loadtest.go index 1c73e51b..b8aaaf6f 100644 --- a/cmd/loadtest/loadtest.go +++ b/cmd/loadtest/loadtest.go @@ -1,56 +1,26 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see .
-*/ package loadtest import ( "context" - "crypto/ecdsa" "encoding/hex" - "encoding/json" "errors" "fmt" + "github.com/maticnetwork/polygon-cli/rpctypes" "io" - "math" "math/big" "math/rand" - "net/url" "os" "os/signal" - "regexp" - "sort" "strconv" "strings" "sync" "time" - "github.com/maticnetwork/polygon-cli/metrics" - "github.com/maticnetwork/polygon-cli/rpctypes" - "github.com/maticnetwork/polygon-cli/util" - "golang.org/x/exp/constraints" - "golang.org/x/text/language" - "golang.org/x/text/message" - "golang.org/x/text/number" - _ "embed" - gsrpc "github.com/centrifuge/go-substrate-rpc-client/v4" - gssignature "github.com/centrifuge/go-substrate-rpc-client/v4/signature" - gstypes "github.com/centrifuge/go-substrate-rpc-client/v4/types" + "github.com/maticnetwork/polygon-cli/metrics" + ethereum "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/accounts/abi/bind" ethcommon "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" @@ -59,9 +29,7 @@ import ( ethrpc "github.com/ethereum/go-ethereum/rpc" "github.com/maticnetwork/polygon-cli/contracts" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "github.com/spf13/cobra" "golang.org/x/time/rate" ) @@ -73,256 +41,34 @@ const ( loadTestModeInc = "i" loadTestModeRandom = "r" loadTestModeStore = "s" - loadTestModeLong = "l" loadTestModeERC20 = "2" loadTestModeERC721 = "7" loadTestModePrecompiledContracts = "p" loadTestModePrecompiledContract = "a" + loadTestModeRecall = "R" codeQualitySeed = "code code code code code code code code code code code quality" codeQualityPrivateKey = "42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa" ) var ( - //go:embed usage.md - usage string - inputLoadTestParams loadTestParams - loadTestResults []loadTestSample - loadTestResutsMutex sync.RWMutex - validLoadTestModes = []string{ + validLoadTestModes = []string{ loadTestModeTransaction, loadTestModeDeploy, loadTestModeCall, loadTestModeFunction, loadTestModeInc, loadTestModeStore, - loadTestModeLong, loadTestModeERC20, loadTestModeERC721, loadTestModePrecompiledContracts, loadTestModePrecompiledContract, + loadTestModeRecall, // r should be last to exclude it from random mode selection loadTestModeRandom, } - - hexwords = []byte{ - 0x00, 0x0F, 0xF1, 0xCE, - 0x00, 0xBA, 0xB1, 0x0C, - 0x1B, 0xAD, 0xB0, 0x02, - 0x8B, 0xAD, 0xF0, 0x0D, - 0xAB, 0xAD, 0xBA, 0xBE, - 0xB1, 0x05, 0xF0, 0x0D, - 0xB1, 0x6B, 0x00, 0xB5, - 0x0B, 0x00, 0xB1, 0x35, - 0xBA, 0xAA, 0xAA, 0xAD, - 0xBA, 0xAD, 0xF0, 0x0D, - 0xBA, 0xD2, 0x22, 0x22, - 0xBA, 0xDD, 0xCA, 0xFE, - 0xCA, 0xFE, 0xB0, 0xBA, - 0xB0, 0xBA, 0xBA, 0xBE, - 0xBE, 0xEF, 0xBA, 0xBE, - 0xC0, 0x00, 0x10, 0xFF, - 0xCA, 0xFE, 0xBA, 0xBE, - 0xCA, 0xFE, 0xD0, 0x0D, - 0xCE, 0xFA, 0xED, 0xFE, - 0x0D, 0x15, 0xEA, 0x5E, - 0xDA, 0xBB, 0xAD, 0x00, - 0xDE, 0xAD, 0x2B, 0xAD, - 0xDE, 0xAD, 0xBA, 0xAD, - 0xDE, 0xAD, 0xBA, 0xBE, - 0xDE, 0xAD, 0xBE, 0xAF, - 0xDE, 0xAD, 0xBE, 0xEF, - 0xDE, 0xAD, 0xC0, 0xDE, - 0xDE, 0xAD, 0xDE, 0xAD, - 0xDE, 0xAD, 0xD0, 0x0D, - 0xDE, 0xAD, 0xFA, 0x11, - 0xDE, 0xAD, 0x10, 0xCC, - 0xDE, 0xAD, 0xFE, 0xED, - 0xDE, 0xCA, 0xFB, 0xAD, - 0xDE, 0xFE, 0xC8, 0xED, - 0xD0, 0xD0, 0xCA, 0xCA, - 0xE0, 0x11, 0xCF, 0xD0, - 0xFA, 0xCE, 0xFE, 0xED, - 0xFB, 0xAD, 0xBE, 0xEF, - 0xFE, 0xE1, 0xDE, 0xAD, - 0xFE, 0xED, 0xBA, 0xBE, - 0xFE, 0xED, 0xC0, 0xDE, - 0xFF, 0xBA, 0xDD, 0x11, - 0xF0, 0x0D, 0xBA, 0xBE, - } - - randSrc *rand.Rand -) - -// LoadtestCmd represents the loadtest command -var LoadtestCmd = &cobra.Command{ - Use: "loadtest url", - Short: "Run a generic 
load test against an Eth/EVM style JSON-RPC endpoint.", - Long: usage, - RunE: func(cmd *cobra.Command, args []string) error { - err := runLoadTest(cmd.Context()) - if err != nil { - return err - } - return nil - }, - Args: func(cmd *cobra.Command, args []string) error { - zerolog.DurationFieldUnit = time.Second - zerolog.DurationFieldInteger = true - - if len(args) != 1 { - return fmt.Errorf("expected exactly one argument") - } - url, err := url.Parse(args[0]) - if err != nil { - log.Error().Err(err).Msg("Unable to parse url input error") - return err - } - if url.Scheme != "http" && url.Scheme != "https" && url.Scheme != "ws" && url.Scheme != "wss" { - return fmt.Errorf("the scheme %s is not supported", url.Scheme) - } - inputLoadTestParams.URL = url - r := regexp.MustCompile(fmt.Sprintf("^[%s]+$", strings.Join(validLoadTestModes, ""))) - if !r.MatchString(*inputLoadTestParams.Mode) { - return fmt.Errorf("the mode %s is not recognized", *inputLoadTestParams.Mode) - } - if *inputLoadTestParams.AdaptiveBackoffFactor <= 0.0 { - return fmt.Errorf("the backoff factor needs to be non-zero positive") - } - return nil - }, -} - -type ( - blockSummary struct { - Block *rpctypes.RawBlockResponse - Receipts map[ethcommon.Hash]rpctypes.RawTxReceipt - Latencies map[uint64]time.Duration - } - hexwordReader struct { - } - loadTestSample struct { - GoRoutineID int64 - RequestID int64 - RequestTime time.Time - WaitTime time.Duration - Receipt string - IsError bool - Nonce uint64 - } - loadTestParams struct { - // inputs - Requests *int64 - Concurrency *int64 - BatchSize *uint64 - TimeLimit *int64 - ToRandom *bool - URL *url.URL - ChainID *uint64 - PrivateKey *string - ToAddress *string - HexSendAmount *string - RateLimit *float64 - AdaptiveRateLimit *bool - SteadyStateTxPoolSize *uint64 - AdaptiveRateLimitIncrement *uint64 - AdaptiveCycleDuration *uint64 - AdaptiveBackoffFactor *float64 - Mode *string - Function *uint64 - Iterations *uint64 - ByteCount *uint64 - Seed *int64 - IsAvail *bool - LtAddress *string - DelAddress *string - ContractCallNumberOfBlocksToWaitFor *uint64 - ContractCallBlockInterval *uint64 - ForceContractDeploy *bool - ForceGasLimit *uint64 - ForceGasPrice *uint64 - ForcePriorityGasPrice *uint64 - ShouldProduceSummary *bool - SummaryOutputMode *string - LegacyTransactionMode *bool - - // Computed - CurrentGas *big.Int - CurrentGasTipCap *big.Int - CurrentNonce *uint64 - ECDSAPrivateKey *ecdsa.PrivateKey - FromETHAddress *ethcommon.Address - ToETHAddress *ethcommon.Address - SendAmount *big.Int - BaseFee *big.Int - - ToAvailAddress *gstypes.MultiAddress - FromAvailAddress *gssignature.KeyringPair - AvailRuntime *gstypes.RuntimeVersion - } - - txpoolStatus struct { - Pending any `json:"pending"` - Queued any `json:"queued"` - } ) -func init() { - ltp := new(loadTestParams) - // Apache Bench Parameters - ltp.Requests = LoadtestCmd.PersistentFlags().Int64P("requests", "n", 1, "Number of requests to perform for the benchmarking session. The default is to just perform a single request which usually leads to non-representative benchmarking results.") - ltp.Concurrency = LoadtestCmd.PersistentFlags().Int64P("concurrency", "c", 1, "Number of multiple requests to perform at a time. Default is one request at a time.") - ltp.TimeLimit = LoadtestCmd.PersistentFlags().Int64P("time-limit", "t", -1, "Maximum number of seconds to spend for benchmarking. Use this to benchmark within a fixed total amount of time. 
Per default there is no timelimit.") - // https://logging.apache.org/log4j/2.x/manual/customloglevels.html - - // extended parameters - ltp.PrivateKey = LoadtestCmd.PersistentFlags().String("private-key", codeQualityPrivateKey, "The hex encoded private key that we'll use to sending transactions") - ltp.ChainID = LoadtestCmd.PersistentFlags().Uint64("chain-id", 0, "The chain id for the transactions that we're going to send") - ltp.ToAddress = LoadtestCmd.PersistentFlags().String("to-address", "0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF", "The address that we're going to send to") - ltp.ToRandom = LoadtestCmd.PersistentFlags().Bool("to-random", false, "When doing a transfer test, should we send to random addresses rather than DEADBEEFx5") - ltp.HexSendAmount = LoadtestCmd.PersistentFlags().String("send-amount", "0x38D7EA4C68000", "The amount of wei that we'll send every transaction") - ltp.RateLimit = LoadtestCmd.PersistentFlags().Float64("rate-limit", 4, "An overall limit to the number of requests per second. Give a number less than zero to remove this limit all together") - ltp.AdaptiveRateLimit = LoadtestCmd.PersistentFlags().Bool("adaptive-rate-limit", false, "Loadtest automatically adjusts request rate to maximize utilization but prevent congestion") - ltp.SteadyStateTxPoolSize = LoadtestCmd.PersistentFlags().Uint64("steady-state-tx-pool-size", 1000, "Transaction Pool queue size which we use to either increase/decrease requests per second") - ltp.AdaptiveRateLimitIncrement = LoadtestCmd.PersistentFlags().Uint64("adaptive-rate-limit-increment", 50, "Additive increment to rate of requests if txpool below steady state size") - ltp.AdaptiveCycleDuration = LoadtestCmd.PersistentFlags().Uint64("adaptive-cycle-duration-seconds", 10, "Duration in seconds that adaptive load test will review txpool and determine whether to increase/decrease rate limit") - ltp.AdaptiveBackoffFactor = LoadtestCmd.PersistentFlags().Float64("adaptive-backoff-factor", 2, "When we detect congestion we will use this factor to determine how much we slow down") - ltp.Mode = LoadtestCmd.PersistentFlags().StringP("mode", "m", "t", `The testing mode to use. It can be multiple like: "tcdf" -t - sending transactions -d - deploy contract -c - call random contract functions -f - call specific contract function -p - call random precompiled contracts -a - call a specific precompiled contract address -s - store mode -l - long running mode -r - random modes -2 - ERC20 Transfers -7 - ERC721 Mints`) - ltp.Function = LoadtestCmd.PersistentFlags().Uint64P("function", "f", 1, "A specific function to be called if running with `--mode f` or a specific precompiled contract when running with `--mode a`") - ltp.Iterations = LoadtestCmd.PersistentFlags().Uint64P("iterations", "i", 100, "If we're making contract calls, this controls how many times the contract will execute the instruction in a loop. 
If we are making ERC721 Mints, this indicated the minting batch size") - ltp.ByteCount = LoadtestCmd.PersistentFlags().Uint64P("byte-count", "b", 1024, "If we're in store mode, this controls how many bytes we'll try to store in our contract") - ltp.Seed = LoadtestCmd.PersistentFlags().Int64("seed", 123456, "A seed for generating random values and addresses") - ltp.IsAvail = LoadtestCmd.PersistentFlags().Bool("data-avail", false, "Is this a test of avail rather than an EVM / Geth Chain") - ltp.LtAddress = LoadtestCmd.PersistentFlags().String("lt-address", "", "A pre-deployed load test contract address") - ltp.DelAddress = LoadtestCmd.PersistentFlags().String("del-address", "", "A pre-deployed delegator contract address") - ltp.ContractCallNumberOfBlocksToWaitFor = LoadtestCmd.PersistentFlags().Uint64("contract-call-nb-blocks-to-wait-for", 30, "The number of blocks to wait for before giving up on a contract call") - ltp.ContractCallBlockInterval = LoadtestCmd.PersistentFlags().Uint64("contract-call-block-interval", 1, "The number of blocks to wait between contract calls") - ltp.ForceContractDeploy = LoadtestCmd.PersistentFlags().Bool("force-contract-deploy", false, "Some loadtest modes don't require a contract deployment. Set this flag to true to force contract deployments. This will still respect the --del-address and --il-address flags.") - ltp.ForceGasLimit = LoadtestCmd.PersistentFlags().Uint64("gas-limit", 0, "In environments where the gas limit can't be computed on the fly, we can specify it manually") - ltp.ForceGasPrice = LoadtestCmd.PersistentFlags().Uint64("gas-price", 0, "In environments where the gas price can't be estimated, we can specify it manually") - ltp.ForcePriorityGasPrice = LoadtestCmd.PersistentFlags().Uint64("priority-gas-price", 0, "Specify Gas Tip Price in the case of EIP-1559") - ltp.ShouldProduceSummary = LoadtestCmd.PersistentFlags().Bool("summarize", false, "Should we produce an execution summary after the load test has finished. If you're running a large loadtest, this can take a long time") - ltp.BatchSize = LoadtestCmd.PersistentFlags().Uint64("batch-size", 999, "Number of batches to perform at a time for receipt fetching. Default is 999 requests at a time.") - ltp.SummaryOutputMode = LoadtestCmd.PersistentFlags().String("output-mode", "text", "Format mode for summary output (json | text)") - ltp.LegacyTransactionMode = LoadtestCmd.PersistentFlags().Bool("legacy", false, "Send a legacy transaction instead of an EIP1559 transaction.") - inputLoadTestParams = *ltp - - // TODO batch size - // TODO Compression - // TODO array of RPC endpoints to round robin? 
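// Editor's note (illustrative sketch, not part of the patch): the pflag helpers
// used for the flag definitions above return pointers that are only populated
// once cobra has parsed the command line, which is why loadTestParams stores
// *int64/*uint64/*string fields and why every reader dereferences them at run
// time. A minimal, self-contained example of the same pattern (the "demo"
// command and its flag are hypothetical):
//
//	package main
//
//	import (
//		"fmt"
//
//		"github.com/spf13/cobra"
//	)
//
//	func main() {
//		demoCmd := &cobra.Command{Use: "demo"}
//		// The pointer is returned immediately, but the value it points at is
//		// only meaningful after Execute() has parsed the arguments.
//		requests := demoCmd.PersistentFlags().Int64P("requests", "n", 1, "number of requests to perform")
//		demoCmd.RunE = func(cmd *cobra.Command, args []string) error {
//			fmt.Println("requests:", *requests)
//			return nil
//		}
//		_ = demoCmd.Execute()
//	}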
-} - func initializeLoadTestParams(ctx context.Context, c *ethclient.Client) error { log.Info().Msg("Connecting with RPC endpoint to initialize load test parameters") gas, err := c.SuggestGasPrice(ctx) @@ -383,6 +129,10 @@ func initializeLoadTestParams(ctx context.Context, c *ethclient.Client) error { log.Error().Err(err).Msg("Unable to get header") return err } + if header.BaseFee != nil { + inputLoadTestParams.ChainSupportBaseFee = true + log.Debug().Msg("eip-1559 support detected") + } chainID, err := c.ChainID(ctx) if err != nil { @@ -395,20 +145,23 @@ func initializeLoadTestParams(ctx context.Context, c *ethclient.Client) error { log.Warn().Msg("Cannot set priority gas price in legacy mode") } if *inputLoadTestParams.ForceGasPrice < *inputLoadTestParams.ForcePriorityGasPrice { - log.Error().Msg("Max priority fee per gas higher than max fee per gas") return errors.New("max priority fee per gas higher than max fee per gas") } + if *inputLoadTestParams.AdaptiveRateLimit && *inputLoadTestParams.CallOnly { + return errors.New("the adaptive rate limit is based on the pending transaction pool. It doesn't use this feature while also using call only") + } + inputLoadTestParams.ToETHAddress = &toAddr inputLoadTestParams.SendAmount = amt - inputLoadTestParams.CurrentGas = gas + inputLoadTestParams.CurrentGasPrice = gas inputLoadTestParams.CurrentNonce = &nonce inputLoadTestParams.ECDSAPrivateKey = privateKey inputLoadTestParams.FromETHAddress = ðAddress if *inputLoadTestParams.ChainID == 0 { *inputLoadTestParams.ChainID = chainID.Uint64() } - inputLoadTestParams.BaseFee = header.BaseFee + inputLoadTestParams.CurrentBaseFee = header.BaseFee randSrc = rand.New(rand.NewSource(*inputLoadTestParams.Seed)) @@ -509,46 +262,10 @@ func runLoadTest(ctx context.Context) error { return nil } - // TODO this doesn't make sense for avail - ptc, err := ec.PendingTransactionCount(ctx) - if err != nil { - log.Debug().Err(err).Msg("Unable to get the number of pending transactions before closing") - } else if ptc > 0 { - log.Info().Uint("pending", ptc).Msg("There are still outstanding transactions. 
There might be issues restarting with the same sending key until those transactions clear") - } log.Info().Msg("Finished") return nil } -func printResults(lts []loadTestSample) { - if len(lts) == 0 { - log.Error().Msg("No results recorded") - return - } - - log.Info().Msg("* Results") - log.Info().Int("samples", len(lts)).Msg("Samples") - - var startTime = lts[0].RequestTime - var endTime = lts[len(lts)-1].RequestTime - var meanWait float64 - var totalWait float64 = 0 - var numErrors uint64 = 0 - - for _, s := range lts { - if s.IsError { - numErrors += 1 - } - totalWait = float64(s.WaitTime.Seconds()) + totalWait - } - meanWait = totalWait / float64(len(lts)) - - log.Info().Time("startTime", startTime).Msg("Start") - log.Info().Time("endTime", endTime).Msg("End") - log.Info().Float64("meanWait", meanWait).Msg("Mean Wait") - log.Info().Uint64("numErrors", numErrors).Msg("Num errors") -} - func convHexToUint64(hexString string) (uint64, error) { hexString = strings.TrimPrefix(hexString, "0x") if len(hexString)%2 != 0 { @@ -626,7 +343,6 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro routines := *ltp.Concurrency requests := *ltp.Requests - currentNonce := *ltp.CurrentNonce chainID := new(big.Int).SetUint64(*ltp.ChainID) privateKey := ltp.ECDSAPrivateKey mode := *ltp.Mode @@ -645,7 +361,11 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) tops = configureTransactOpts(tops) - tops.GasLimit = 10000000 + // configureTransactOpts will set some paramters meant for load testing that could interfere with the deployment of our contracts + tops.GasLimit = 0 + tops.GasPrice = nil + tops.GasFeeCap = nil + tops.GasTipCap = nil if err != nil { log.Error().Err(err).Msg("Unable create transaction signer") @@ -656,154 +376,44 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro // deploy and instantiate the load tester contract var ltAddr ethcommon.Address var ltContract *contracts.LoadTester - numberOfBlocksToWaitFor := *inputLoadTestParams.ContractCallNumberOfBlocksToWaitFor - blockInterval := *inputLoadTestParams.ContractCallBlockInterval - if strings.ContainsAny(mode, "rcfislpas") || *inputLoadTestParams.ForceContractDeploy { - if *inputLoadTestParams.LtAddress == "" { - ltAddr, _, _, err = contracts.DeployLoadTester(tops, c) - if err != nil { - log.Error().Err(err).Msg("Failed to create the load testing contract. Do you have the right chain id? 
Do you have enough funds?") - return err - } - } else { - ltAddr = ethcommon.HexToAddress(*inputLoadTestParams.LtAddress) - } - log.Trace().Interface("contractaddress", ltAddr).Msg("Load test contract address") - // bump the nonce since deploying a contract should cause it to increase - currentNonce = currentNonce + 1 - - ltContract, err = contracts.NewLoadTester(ltAddr, c) - if err != nil { - log.Error().Err(err).Msg("Unable to instantiate new contract") - return err - } - err = blockUntilSuccessful(ctx, c, func() error { - _, err = ltContract.GetCallCounter(cops) - return err - }, numberOfBlocksToWaitFor, blockInterval) - + if strings.ContainsAny(mode, "rcfispas") || *inputLoadTestParams.ForceContractDeploy { + ltAddr, ltContract, err = getLoadTestContract(ctx, c, tops, cops) if err != nil { return err } + log.Debug().Str("ltAddr", ltAddr.String()).Msg("Obtained load test contract address") } var erc20Addr ethcommon.Address var erc20Contract *contracts.ERC20 if mode == loadTestModeERC20 || mode == loadTestModeRandom { - erc20Addr, _, _, err = contracts.DeployERC20(tops, c) - if err != nil { - log.Error().Err(err).Msg("Unable to deploy ERC20 contract") - return err - } - log.Trace().Interface("contractaddress", erc20Addr).Msg("ERC20 contract address") - - erc20Contract, err = contracts.NewERC20(erc20Addr, c) - if err != nil { - log.Error().Err(err).Msg("Unable to instantiate new erc20 contract") - return err - } - currentNonce = currentNonce + 1 - err = blockUntilSuccessful(ctx, c, func() error { - _, err = erc20Contract.BalanceOf(cops, *ltp.FromETHAddress) - return err - }, numberOfBlocksToWaitFor, blockInterval) - if err != nil { - return err - } - - tops.Nonce = new(big.Int).SetUint64(currentNonce) - - _, err = erc20Contract.Mint(tops, metrics.UnitMegaether) - if err != nil { - log.Error().Err(err).Msg("There was an error minting ERC20") - return err - } - - currentNonce = currentNonce + 1 - err = blockUntilSuccessful(ctx, c, func() error { - var balance *big.Int - balance, err = erc20Contract.BalanceOf(cops, *ltp.FromETHAddress) - if err != nil { - return err - } - if balance.Uint64() == 0 { - err = fmt.Errorf("ERC20 Balance is Zero") - return err - } - return nil - }, numberOfBlocksToWaitFor, blockInterval) + erc20Addr, erc20Contract, err = getERC20Contract(ctx, c, tops, cops) if err != nil { return err } + log.Debug().Str("erc20Addr", erc20Addr.String()).Msg("Obtained erc 20 contract address") } var erc721Addr ethcommon.Address var erc721Contract *contracts.ERC721 if mode == loadTestModeERC721 || mode == loadTestModeRandom { - erc721Addr, _, _, err = contracts.DeployERC721(tops, c) - if err != nil { - log.Error().Err(err).Msg("Unable to deploy ERC721 contract") - return err - } - log.Trace().Interface("contractaddress", erc721Addr).Msg("ERC721 contract address") - - erc721Contract, err = contracts.NewERC721(erc721Addr, c) - if err != nil { - log.Error().Err(err).Msg("Unable to instantiate new erc20 contract") - return err - } - currentNonce = currentNonce + 1 - - err = blockUntilSuccessful(ctx, c, func() error { - _, err = erc721Contract.BalanceOf(cops, *ltp.FromETHAddress) - return err - }, numberOfBlocksToWaitFor, blockInterval) - if err != nil { - return err - } - - tops.Nonce = new(big.Int).SetUint64(currentNonce) - - err = blockUntilSuccessful(ctx, c, func() error { - _, err = erc721Contract.MintBatch(tops, *ltp.FromETHAddress, new(big.Int).SetUint64(1)) - return err - }, numberOfBlocksToWaitFor, blockInterval) + erc721Addr, erc721Contract, err = getERC721Contract(ctx, c, tops, 
cops) if err != nil { return err } - currentNonce = currentNonce + 1 + log.Debug().Str("erc721Addr", erc721Addr.String()).Msg("Obtained erc 721 contract address") } - // deploy and instantiate the delegator contract - var delegatorContract *contracts.Delegator - if strings.ContainsAny(mode, "rl") || *inputLoadTestParams.ForceContractDeploy { - var delegatorAddr ethcommon.Address - if *inputLoadTestParams.DelAddress == "" { - delegatorAddr, _, _, err = contracts.DeployDelegator(tops, c) - if err != nil { - log.Error().Err(err).Msg("Failed to create the load testing contract. Do you have the right chain id? Do you have enough funds?") - return err - } - } else { - delegatorAddr = ethcommon.HexToAddress(*inputLoadTestParams.DelAddress) - } - log.Trace().Interface("contractaddress", delegatorAddr).Msg("Delegator contract address") - currentNonce = currentNonce + 1 - - delegatorContract, err = contracts.NewDelegator(delegatorAddr, c) + var recallTransactions []rpctypes.PolyTransaction + if mode == loadTestModeRecall || mode == loadTestModeRandom { + recallTransactions, err = getRecallTransactions(ctx, c, rpc) if err != nil { - log.Error().Err(err).Msg("Unable to instantiate new contract") return err } - - err = blockUntilSuccessful(ctx, c, func() error { - _, err = delegatorContract.Call(tops, ltAddr, []byte{0x12, 0x87, 0xa6, 0x8c}) - return err - }, numberOfBlocksToWaitFor, blockInterval) - if err != nil { - return err + if len(recallTransactions) == 0 { + return fmt.Errorf("We weren't able to fetch any recall transactions") } - currentNonce = currentNonce + 1 + log.Debug().Int("txs", len(recallTransactions)).Msg("retrieved transactions for total recall") } var currentNonceMutex sync.Mutex @@ -813,8 +423,15 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro log.Error().Err(err).Msg("Failed to get current block number") return err } + + currentNonce, err := c.NonceAt(ctx, *ltp.FromETHAddress, new(big.Int).SetUint64(startBlockNumber)) + if err != nil { + log.Error().Err(err).Msg("Unable to get account nonce") + return err + } + startNonce := currentNonce - log.Debug().Uint64("currentNonce", currentNonce).Msg("Starting main loadtest loop") + log.Debug().Uint64("currentNonce", currentNonce).Msg("Starting main load test loop") var wg sync.WaitGroup for i = 0; i < routines; i = i + 1 { log.Trace().Int64("routine", i).Msg("Starting Thread") @@ -854,34 +471,35 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro } switch localMode { case loadTestModeTransaction: - startReq, endReq, err = loadtestTransaction(ctx, c, myNonceValue) + startReq, endReq, err = loadTestTransaction(ctx, c, myNonceValue) case loadTestModeDeploy: - startReq, endReq, err = loadtestDeploy(ctx, c, myNonceValue) - case loadTestModeCall: - startReq, endReq, err = loadtestCall(ctx, c, myNonceValue, ltContract) - case loadTestModeFunction: - startReq, endReq, err = loadtestFunction(ctx, c, myNonceValue, ltContract) + startReq, endReq, err = loadTestDeploy(ctx, c, myNonceValue) + case loadTestModeFunction, loadTestModeCall: + startReq, endReq, err = loadTestFunction(ctx, c, myNonceValue, ltContract) case loadTestModeInc: - startReq, endReq, err = loadtestInc(ctx, c, myNonceValue, ltContract) + startReq, endReq, err = loadTestInc(ctx, c, myNonceValue, ltContract) case loadTestModeStore: - startReq, endReq, err = loadtestStore(ctx, c, myNonceValue, ltContract) - case loadTestModeLong: - startReq, endReq, err = loadtestLong(ctx, c, myNonceValue, delegatorContract, ltAddr) + 
startReq, endReq, err = loadTestStore(ctx, c, myNonceValue, ltContract) case loadTestModeERC20: - startReq, endReq, err = loadtestERC20(ctx, c, myNonceValue, erc20Contract, ltAddr) + startReq, endReq, err = loadTestERC20(ctx, c, myNonceValue, erc20Contract, ltAddr) case loadTestModeERC721: - startReq, endReq, err = loadtestERC721(ctx, c, myNonceValue, erc721Contract, ltAddr) + startReq, endReq, err = loadTestERC721(ctx, c, myNonceValue, erc721Contract, ltAddr) case loadTestModePrecompiledContract: - startReq, endReq, err = loadtestCallPrecompiledContracts(ctx, c, myNonceValue, ltContract, true) + startReq, endReq, err = loadTestCallPrecompiledContracts(ctx, c, myNonceValue, ltContract, true) case loadTestModePrecompiledContracts: - startReq, endReq, err = loadtestCallPrecompiledContracts(ctx, c, myNonceValue, ltContract, false) + startReq, endReq, err = loadTestCallPrecompiledContracts(ctx, c, myNonceValue, ltContract, false) + case loadTestModeRecall: + startReq, endReq, err = loadTestRecall(ctx, c, myNonceValue, recallTransactions[int(currentNonce)%len(recallTransactions)]) default: log.Error().Str("mode", mode).Msg("We've arrived at a load test mode that we don't recognize") } recordSample(i, j, err, startReq, endReq, myNonceValue) if err != nil { log.Error().Err(err).Uint64("nonce", myNonceValue).Msg("Recorded an error while sending transactions") - retryForNonce = true + // The nonce is used to index the recalled transactions in call-only mode. We don't want to retry a transaction if it legit failed on the chain + if !*ltp.CallOnly { + retryForNonce = true + } } log.Trace().Uint64("nonce", myNonceValue).Int64("routine", i).Str("mode", localMode).Int64("request", j).Msg("Request") @@ -892,8 +510,11 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro log.Trace().Msg("Finished starting go routines. Waiting..") wg.Wait() cancel() - log.Debug().Uint64("currentNonce", currentNonce).Msg("Finished main loadtest loop") + log.Debug().Uint64("currentNonce", currentNonce).Msg("Finished main load test loop") log.Debug().Msg("Waiting for transactions to actually be mined") + if *ltp.CallOnly { + return nil + } finalBlockNumber, err := waitForFinalBlock(ctx, c, rpc, startBlockNumber, startNonce, currentNonce) if err != nil { log.Error().Err(err).Msg("there was an issue waiting for all transactions to be mined") @@ -909,34 +530,121 @@ func mainLoop(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) erro return nil } -func lightSummary(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client, startBlockNumber, startNonce, endBlockNumber, endNonce uint64, rl *rate.Limiter) { - startBlock, err := c.BlockByNumber(ctx, new(big.Int).SetUint64(startBlockNumber)) +func getLoadTestContract(ctx context.Context, c *ethclient.Client, tops *bind.TransactOpts, cops *bind.CallOpts) (ltAddr ethcommon.Address, ltContract *contracts.LoadTester, err error) { + ltAddr = ethcommon.HexToAddress(*inputLoadTestParams.LtAddress) + + if *inputLoadTestParams.LtAddress == "" { + ltAddr, _, _, err = contracts.DeployLoadTester(tops, c) + if err != nil { + log.Error().Err(err).Msg("Failed to create the load testing contract. Do you have the right chain id? 
Do you have enough funds?") + return + } + } + log.Trace().Interface("contractaddress", ltAddr).Msg("Load test contract address") + + ltContract, err = contracts.NewLoadTester(ltAddr, c) + if err != nil { + log.Error().Err(err).Msg("Unable to instantiate new contract") + return + } + err = blockUntilSuccessful(ctx, c, func() error { + _, err = ltContract.GetCallCounter(cops) + return err + }) + + return +} +func getERC20Contract(ctx context.Context, c *ethclient.Client, tops *bind.TransactOpts, cops *bind.CallOpts) (erc20Addr ethcommon.Address, erc20Contract *contracts.ERC20, err error) { + erc20Addr = ethcommon.HexToAddress(*inputLoadTestParams.ERC20Address) + shouldMint := true + if *inputLoadTestParams.ERC20Address == "" { + erc20Addr, _, _, err = contracts.DeployERC20(tops, c) + if err != nil { + log.Error().Err(err).Msg("Unable to deploy ERC20 contract") + return + } + shouldMint = false + } + log.Trace().Interface("contractaddress", erc20Addr).Msg("ERC20 contract address") + + erc20Contract, err = contracts.NewERC20(erc20Addr, c) + if err != nil { + log.Error().Err(err).Msg("Unable to instantiate new erc20 contract") + return + } + + err = blockUntilSuccessful(ctx, c, func() error { + _, err = erc20Contract.BalanceOf(cops, *inputLoadTestParams.FromETHAddress) + return err + }) + if err != nil { + return + } + + if !shouldMint { + return + } + _, err = erc20Contract.Mint(tops, metrics.UnitMegaether) if err != nil { - log.Error().Err(err).Msg("unable to get start block for light summary") + log.Error().Err(err).Msg("There was an error minting ERC20") return } - endBlock, err := c.BlockByNumber(ctx, new(big.Int).SetUint64(endBlockNumber)) + + err = blockUntilSuccessful(ctx, c, func() error { + var balance *big.Int + balance, err = erc20Contract.BalanceOf(cops, *inputLoadTestParams.FromETHAddress) + if err != nil { + return err + } + if balance.Uint64() == 0 { + err = fmt.Errorf("ERC20 Balance is Zero") + return err + } + return nil + }) + + return +} +func getERC721Contract(ctx context.Context, c *ethclient.Client, tops *bind.TransactOpts, cops *bind.CallOpts) (erc721Addr ethcommon.Address, erc721Contract *contracts.ERC721, err error) { + erc721Addr = ethcommon.HexToAddress(*inputLoadTestParams.ERC721Address) + shouldMint := true + if *inputLoadTestParams.ERC721Address == "" { + erc721Addr, _, _, err = contracts.DeployERC721(tops, c) + if err != nil { + log.Error().Err(err).Msg("Unable to deploy ERC721 contract") + return + } + shouldMint = false + } + log.Trace().Interface("contractaddress", erc721Addr).Msg("ERC721 contract address") + + erc721Contract, err = contracts.NewERC721(erc721Addr, c) if err != nil { - log.Error().Err(err).Msg("unable to get end block for light summary") + log.Error().Err(err).Msg("Unable to instantiate new erc20 contract") return } - endTime := time.Unix(int64(endBlock.Time()), 0) - startTime := time.Unix(int64(startBlock.Time()), 0) - testDuration := endTime.Sub(startTime) - tps := float64(len(loadTestResults)) / testDuration.Seconds() + err = blockUntilSuccessful(ctx, c, func() error { + _, err = erc721Contract.BalanceOf(cops, *inputLoadTestParams.FromETHAddress) + return err + }) + if err != nil { + return + } + if !shouldMint { + return + } - log.Info(). - Time("firstBlockTime", startTime). - Time("lastBlockTime", endTime). - Int("transactionCount", len(loadTestResults)). - Float64("testDuration", testDuration.Seconds()). - Float64("tps", tps). - Float64("final rate limit", float64(rl.Limit())). 
- Msg("rough test summary (ignores errors)") + err = blockUntilSuccessful(ctx, c, func() error { + _, err = erc721Contract.MintBatch(tops, *inputLoadTestParams.FromETHAddress, new(big.Int).SetUint64(1)) + return err + }) + return } -func blockUntilSuccessful(ctx context.Context, c *ethclient.Client, f func() error, numberOfBlocksToWaitFor, blockInterval uint64) error { +func blockUntilSuccessful(ctx context.Context, c *ethclient.Client, f func() error) error { + numberOfBlocksToWaitFor := *inputLoadTestParams.ContractCallNumberOfBlocksToWaitFor + blockInterval := *inputLoadTestParams.ContractCallBlockInterval start := time.Now() startBlockNumber, err := c.BlockNumber(ctx) if err != nil { @@ -990,7 +698,7 @@ func blockUntilSuccessful(ctx context.Context, c *ethclient.Client, f func() err } } -func loadtestTransaction(ctx context.Context, c *ethclient.Client, nonce uint64) (t1 time.Time, t2 time.Time, err error) { +func loadTestTransaction(ctx context.Context, c *ethclient.Client, nonce uint64) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams to := ltp.ToETHAddress @@ -1009,19 +717,25 @@ func loadtestTransaction(ctx context.Context, c *ethclient.Client, nonce uint64) } tops.GasLimit = uint64(21000) tops = configureTransactOpts(tops) + gasPrice, gasTipCap := getSuggestedGasPrices(ctx, c) var tx *ethtypes.Transaction if *ltp.LegacyTransactionMode { - tx = ethtypes.NewTransaction(nonce, *to, amount, tops.GasLimit, tops.GasPrice, nil) + tx = ethtypes.NewTx(ðtypes.LegacyTx{ + Nonce: nonce, + To: to, + Value: amount, + Gas: tops.GasLimit, + GasPrice: gasPrice, + Data: nil, + }) } else { - gasTipCap := tops.GasTipCap - gasFeeCap := new(big.Int).Add(gasTipCap, ltp.BaseFee) dynamicFeeTx := ðtypes.DynamicFeeTx{ ChainID: chainID, Nonce: nonce, To: to, Gas: tops.GasLimit, - GasFeeCap: gasFeeCap, + GasFeeCap: gasPrice, GasTipCap: gasTipCap, Data: nil, Value: amount, @@ -1036,38 +750,77 @@ func loadtestTransaction(ctx context.Context, c *ethclient.Client, nonce uint64) } t1 = time.Now() - err = c.SendTransaction(ctx, stx) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + _, err = c.CallContract(ctx, txToCallMsg(stx), nil) + } else { + err = c.SendTransaction(ctx, stx) + } return } -func loadtestDeploy(ctx context.Context, c *ethclient.Client, nonce uint64) (t1 time.Time, t2 time.Time, err error) { - ltp := inputLoadTestParams - - chainID := new(big.Int).SetUint64(*ltp.ChainID) - privateKey := ltp.ECDSAPrivateKey +var ( + cachedBlockNumber uint64 + cachedGasPriceLock sync.Mutex + cachedGasPrice *big.Int + cachedGasTipCap *big.Int +) - tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) +func getSuggestedGasPrices(ctx context.Context, c *ethclient.Client) (*big.Int, *big.Int) { + // this should be one of the fastest RPC calls, so hopefully there isn't too much overhead calling this + bn, err := c.BlockNumber(ctx) if err != nil { - log.Error().Err(err).Msg("Unable create transaction signer") - return + log.Error().Err(err).Msg("Unable to get block number while checking gas prices") + return nil, nil + } + isDynamic := inputLoadTestParams.ChainSupportBaseFee + + cachedGasPriceLock.Lock() + defer cachedGasPriceLock.Unlock() + if bn <= cachedBlockNumber { + return cachedGasPrice, cachedGasTipCap + } + gp, pErr := c.SuggestGasPrice(ctx) + gt, tErr := c.SuggestGasTipCap(ctx) + if pErr == nil && (tErr == nil || !isDynamic) { + cachedBlockNumber = bn + cachedGasPrice = gp + cachedGasTipCap = gt + if inputLoadTestParams.ForceGasPrice != nil && 
*inputLoadTestParams.ForcePriorityGasPrice != 0 { + cachedGasPrice = new(big.Int).SetUint64(*inputLoadTestParams.ForcePriorityGasPrice) + } + if inputLoadTestParams.ForcePriorityGasPrice != nil && *inputLoadTestParams.ForcePriorityGasPrice != 0 { + cachedGasTipCap = new(big.Int).SetUint64(*inputLoadTestParams.ForcePriorityGasPrice) + } + l := log.Debug().Uint64("cachedBlockNumber", bn).Uint64("cachedgasPrice", cachedGasPrice.Uint64()) + if cachedGasTipCap != nil { + l = l.Uint64("cachedGasTipCap", cachedGasTipCap.Uint64()) + } + l.Msg("Updating gas prices") + + return cachedGasPrice, cachedGasTipCap } - tops.Nonce = new(big.Int).SetUint64(nonce) - tops = configureTransactOpts(tops) - t1 = time.Now() - _, _, _, err = contracts.DeployLoadTester(tops, c) - t2 = time.Now() - return + // Something went wrong + if pErr != nil { + log.Error().Err(pErr).Msg("Unable to suggest gas price") + return cachedGasPrice, cachedGasTipCap + } + if tErr != nil && isDynamic { + log.Error().Err(tErr).Msg("Unable to suggest gas tip cap") + return cachedGasPrice, cachedGasTipCap + } + log.Error().Err(tErr).Msg("This error should not have happened. We got a gas tip price error in an environment that is not dynamic") + return cachedGasPrice, cachedGasTipCap + } -func loadtestFunction(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { +// TODO - in the future it might be more interesting if this mode takes input or random contracts to be deployed +func loadTestDeploy(ctx context.Context, c *ethclient.Client, nonce uint64) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams chainID := new(big.Int).SetUint64(*ltp.ChainID) privateKey := ltp.ECDSAPrivateKey - iterations := ltp.Iterations - f := ltp.Function tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) if err != nil { @@ -1078,18 +831,34 @@ func loadtestFunction(ctx context.Context, c *ethclient.Client, nonce uint64, lt tops = configureTransactOpts(tops) t1 = time.Now() - _, err = contracts.CallLoadTestFunctionByOpCode(*f, ltContract, tops, *iterations) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + msg := transactOptsToCallMsg(tops) + msg.Data = ethcommon.FromHex(contracts.LoadTesterMetaData.Bin) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, _, _, err = contracts.DeployLoadTester(tops, c) + } return } -func loadtestCall(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { +// getCurrentLoadTestFunction is meant to handle the business logic +// around deciding which function to execute. When we're in function +// mode where the user has provided a specific function to execute, we +// should use that function. Otherwise, we'll select random functions. 
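// (Editor's note, illustrative only and not part of the patch.) Concretely,
// `--mode f` returns the value of the --function flag on every call, while any
// other mode that reaches this helper (e.g. `--mode c`) falls back to a random
// opcode, so the merged call/function case in the main loop reduces to roughly:
//
//	f := getCurrentLoadTestFunction()
//	_, err = contracts.CallLoadTestFunctionByOpCode(f, ltContract, tops, *iterations)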
+func getCurrentLoadTestFunction() uint64 { + if loadTestModeFunction == *inputLoadTestParams.Mode { + return *inputLoadTestParams.Function + } + return contracts.GetRandomOPCode() +} +func loadTestFunction(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams chainID := new(big.Int).SetUint64(*ltp.ChainID) privateKey := ltp.ECDSAPrivateKey iterations := ltp.Iterations - f := contracts.GetRandomOPCode() + f := getCurrentLoadTestFunction() tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) if err != nil { @@ -1100,12 +869,23 @@ func loadtestCall(ctx context.Context, c *ethclient.Client, nonce uint64, ltCont tops = configureTransactOpts(tops) t1 = time.Now() - _, err = contracts.CallLoadTestFunctionByOpCode(f, ltContract, tops, *iterations) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = contracts.CallLoadTestFunctionByOpCode(f, ltContract, tops, *iterations) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, err = contracts.CallLoadTestFunctionByOpCode(f, ltContract, tops, *iterations) + } return } -func loadtestCallPrecompiledContracts(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester, useSelectedAddress bool) (t1 time.Time, t2 time.Time, err error) { +func loadTestCallPrecompiledContracts(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester, useSelectedAddress bool) (t1 time.Time, t2 time.Time, err error) { var f int ltp := inputLoadTestParams @@ -1127,12 +907,23 @@ func loadtestCallPrecompiledContracts(ctx context.Context, c *ethclient.Client, tops = configureTransactOpts(tops) t1 = time.Now() - _, err = contracts.CallPrecompiledContracts(f, ltContract, tops, *iterations, privateKey) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = contracts.CallPrecompiledContracts(f, ltContract, tops, *iterations, privateKey) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, err = contracts.CallPrecompiledContracts(f, ltContract, tops, *iterations, privateKey) + } return } -func loadtestInc(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { +func loadTestInc(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams chainID := new(big.Int).SetUint64(*ltp.ChainID) @@ -1147,12 +938,23 @@ func loadtestInc(ctx context.Context, c *ethclient.Client, nonce uint64, ltContr tops = configureTransactOpts(tops) t1 = time.Now() - _, err = ltContract.Inc(tops) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = ltContract.Inc(tops) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, err = ltContract.Inc(tops) + } return } -func loadtestStore(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 time.Time, err error) { +func loadTestStore(ctx context.Context, c *ethclient.Client, nonce uint64, ltContract *contracts.LoadTester) (t1 time.Time, t2 
time.Time, err error) { ltp := inputLoadTestParams chainID := new(big.Int).SetUint64(*ltp.ChainID) @@ -1169,36 +971,23 @@ func loadtestStore(ctx context.Context, c *ethclient.Client, nonce uint64, ltCon inputData := make([]byte, *ltp.ByteCount) _, _ = hexwordRead(inputData) t1 = time.Now() - _, err = ltContract.Store(tops, inputData) - t2 = time.Now() - return -} - -func loadtestLong(ctx context.Context, c *ethclient.Client, nonce uint64, delegatorContract *contracts.Delegator, ltAddress ethcommon.Address) (t1 time.Time, t2 time.Time, err error) { - ltp := inputLoadTestParams - - chainID := new(big.Int).SetUint64(*ltp.ChainID) - privateKey := ltp.ECDSAPrivateKey - - tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) - if err != nil { - log.Error().Err(err).Msg("Unable create transaction signer") - return + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = ltContract.Store(tops, inputData) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, err = ltContract.Store(tops, inputData) } - tops.Nonce = new(big.Int).SetUint64(nonce) - tops = configureTransactOpts(tops) - - // TODO the delegated call should be a parameter - t1 = time.Now() - // loopBlockHashUntilLimit (verify here https://abi.hashex.org/) - _, err = delegatorContract.LoopDelegateCall(tops, ltAddress, []byte{0xa2, 0x71, 0xb7, 0x21}) - // loopUntilLimit - // _, err = delegatorContract.LoopDelegateCall(tops, ltAddress, []byte{0x65, 0x9b, 0xbb, 0x4f}) - t2 = time.Now() return } -func loadtestERC20(ctx context.Context, c *ethclient.Client, nonce uint64, erc20Contract *contracts.ERC20, ltAddress ethcommon.Address) (t1 time.Time, t2 time.Time, err error) { +func loadTestERC20(ctx context.Context, c *ethclient.Client, nonce uint64, erc20Contract *contracts.ERC20, ltAddress ethcommon.Address) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams to := ltp.ToETHAddress @@ -1219,12 +1008,24 @@ func loadtestERC20(ctx context.Context, c *ethclient.Client, nonce uint64, erc20 tops = configureTransactOpts(tops) t1 = time.Now() - _, err = erc20Contract.Transfer(tops, *to, amount) - t2 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = erc20Contract.Transfer(tops, *to, amount) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, msg, nil) + } else { + _, err = erc20Contract.Transfer(tops, *to, amount) + } + return } -func loadtestERC721(ctx context.Context, c *ethclient.Client, nonce uint64, erc721Contract *contracts.ERC721, ltAddress ethcommon.Address) (t1 time.Time, t2 time.Time, err error) { +func loadTestERC721(ctx context.Context, c *ethclient.Client, nonce uint64, erc721Contract *contracts.ERC721, ltAddress ethcommon.Address) (t1 time.Time, t2 time.Time, err error) { ltp := inputLoadTestParams iterations := ltp.Iterations @@ -1245,10 +1046,63 @@ func loadtestERC721(ctx context.Context, c *ethclient.Client, nonce uint64, erc7 tops = configureTransactOpts(tops) t1 = time.Now() - _, err = erc721Contract.MintBatch(tops, *to, new(big.Int).SetUint64(*iterations)) - t2 = time.Now() - return -} + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + tops.NoSend = true + var tx *ethtypes.Transaction + tx, err = erc721Contract.MintBatch(tops, *to, new(big.Int).SetUint64(*iterations)) + if err != nil { + return + } + msg := txToCallMsg(tx) + _, err = c.CallContract(ctx, 
msg, nil) + } else { + _, err = erc721Contract.MintBatch(tops, *to, new(big.Int).SetUint64(*iterations)) + } + + return +} + +func loadTestRecall(ctx context.Context, c *ethclient.Client, nonce uint64, originalTx rpctypes.PolyTransaction) (t1 time.Time, t2 time.Time, err error) { + ltp := inputLoadTestParams + + chainID := new(big.Int).SetUint64(*ltp.ChainID) + privateKey := ltp.ECDSAPrivateKey + + tops, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID) + if err != nil { + log.Error().Err(err).Msg("Unable create transaction signer") + return + } + gasPrice, gasTipCap := getSuggestedGasPrices(ctx, c) + tx := rawTransactionToNewTx(originalTx, nonce, gasPrice, gasTipCap) + tops = configureTransactOpts(tops) + + stx, err := tops.Signer(*ltp.FromETHAddress, tx) + if err != nil { + log.Error().Err(err).Msg("Unable to sign transaction") + return + } + + t1 = time.Now() + defer func() { t2 = time.Now() }() + if *ltp.CallOnly { + callMsg := txToCallMsg(stx) + callMsg.From = originalTx.From() + callMsg.Gas = originalTx.Gas() + _, err = c.CallContract(ctx, callMsg, originalTx.BlockNumber()) + } else { + err = c.SendTransaction(ctx, stx) + } + return +} + +func loadTestNotImplemented(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64) (t1 time.Time, t2 time.Time, err error) { + t1 = time.Now() + t2 = time.Now() + err = fmt.Errorf("this method is not implemented") + return +} func recordSample(goRoutineID, requestID int64, err error, start, end time.Time, nonce uint64) { s := loadTestSample{} @@ -1292,290 +1146,32 @@ func getRandomAddress() *ethcommon.Address { return &realAddr } -func availLoop(ctx context.Context, c *gsrpc.SubstrateAPI) error { - var err error - - ltp := inputLoadTestParams - log.Trace().Interface("Input Params", ltp).Msg("Params") - - routines := *ltp.Concurrency - requests := *ltp.Requests - currentNonce := uint64(0) // *ltp.CurrentNonce - chainID := new(big.Int).SetUint64(*ltp.ChainID) - privateKey := ltp.ECDSAPrivateKey - mode := *ltp.Mode - - _ = chainID - _ = privateKey - - meta, err := c.RPC.State.GetMetadataLatest() - if err != nil { - return err - } - - genesisHash, err := c.RPC.Chain.GetBlockHash(0) - if err != nil { - return err - } - - key, err := gstypes.CreateStorageKey(meta, "System", "Account", ltp.FromAvailAddress.PublicKey, nil) - if err != nil { - log.Error().Err(err).Msg("Could not create storage key") - return err - } - - var accountInfo gstypes.AccountInfo - ok, err := c.RPC.State.GetStorageLatest(key, &accountInfo) - if err != nil { - log.Error().Err(err).Msg("Could not load storage") - return err - } - if !ok { - err = fmt.Errorf("loaded storage is not okay") - log.Error().Err(err).Msg("Loaded storage is not okay") - return err - } - - currentNonce = uint64(accountInfo.Nonce) - - rl := rate.NewLimiter(rate.Limit(*ltp.RateLimit), 1) - if *ltp.RateLimit <= 0.0 { - rl = nil - } - - var currentNonceMutex sync.Mutex - - var i int64 - - var wg sync.WaitGroup - for i = 0; i < routines; i = i + 1 { - log.Trace().Int64("routine", i).Msg("Starting Thread") - wg.Add(1) - go func(i int64) { - var j int64 - var startReq time.Time - var endReq time.Time - - for j = 0; j < requests; j = j + 1 { - - if rl != nil { - err = rl.Wait(ctx) - if err != nil { - log.Error().Err(err).Msg("Encountered a rate limiting error") - } - } - currentNonceMutex.Lock() - myNonceValue := currentNonce - currentNonce = currentNonce + 1 - currentNonceMutex.Unlock() - - localMode := mode - // if there are multiple modes, iterate through them, 'r' mode is supported here - if 
len(mode) > 1 { - localMode = string(mode[int(i+j)%(len(mode))]) - } - // if we're doing random, we'll just pick one based on the current index - if localMode == loadTestModeRandom { - localMode = validLoadTestModes[int(i+j)%(len(validLoadTestModes)-1)] - } - // this function should probably be abstracted - switch localMode { - case loadTestModeTransaction: - startReq, endReq, err = loadtestAvailTransfer(ctx, c, myNonceValue, meta, genesisHash) - case loadTestModeDeploy: - startReq, endReq, err = loadtestNotImplemented(ctx, c, myNonceValue) - case loadTestModeCall: - startReq, endReq, err = loadtestNotImplemented(ctx, c, myNonceValue) - case loadTestModeFunction: - startReq, endReq, err = loadtestNotImplemented(ctx, c, myNonceValue) - case loadTestModeInc: - startReq, endReq, err = loadtestNotImplemented(ctx, c, myNonceValue) - case loadTestModeStore: - startReq, endReq, err = loadtestAvailStore(ctx, c, myNonceValue, meta, genesisHash) - case loadTestModeLong: - startReq, endReq, err = loadtestNotImplemented(ctx, c, myNonceValue) - default: - log.Error().Str("mode", mode).Msg("We've arrived at a load test mode that we don't recognize") - } - recordSample(i, j, err, startReq, endReq, myNonceValue) - if err != nil { - log.Trace().Err(err).Msg("Recorded an error while sending transactions") - } - - log.Trace().Int64("routine", i).Str("mode", localMode).Int64("request", j).Msg("Request") - } - wg.Done() - }(i) - - } - log.Trace().Msg("Finished starting go routines. Waiting..") - wg.Wait() - return nil - -} - -func loadtestNotImplemented(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64) (t1 time.Time, t2 time.Time, err error) { - t1 = time.Now() - t2 = time.Now() - err = fmt.Errorf("this method is not implemented") - return -} - -func initAvailTestParams(ctx context.Context, c *gsrpc.SubstrateAPI) error { - toAddr, err := gstypes.NewMultiAddressFromHexAccountID(*inputLoadTestParams.ToAddress) - if err != nil { - log.Error().Err(err).Msg("Unable to create new multi address") - return err - } - - if *inputLoadTestParams.PrivateKey == codeQualityPrivateKey { - // Avail keys can use the same seed but the way the key is derived is different - *inputLoadTestParams.PrivateKey = codeQualitySeed - } - - kp, err := gssignature.KeyringPairFromSecret(*inputLoadTestParams.PrivateKey, uint8(*inputLoadTestParams.ChainID)) - if err != nil { - log.Error().Err(err).Msg("Could not create key pair") - return err - } - - amt, err := hexToBigInt(*inputLoadTestParams.HexSendAmount) - if err != nil { - log.Error().Err(err).Msg("Couldn't parse send amount") - return err - } - - rv, err := c.RPC.State.GetRuntimeVersionLatest() - if err != nil { - log.Error().Err(err).Msg("Couldn't get runtime version") - return err - } - - inputLoadTestParams.AvailRuntime = rv - inputLoadTestParams.SendAmount = amt - inputLoadTestParams.FromAvailAddress = &kp - inputLoadTestParams.ToAvailAddress = &toAddr - return nil -} - -func loadtestAvailTransfer(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64, meta *gstypes.Metadata, genesisHash gstypes.Hash) (t1 time.Time, t2 time.Time, err error) { +func configureTransactOpts(tops *bind.TransactOpts) *bind.TransactOpts { ltp := inputLoadTestParams - toAddr := *ltp.ToAvailAddress - if *ltp.ToRandom { - pk := make([]byte, 32) - _, err = randSrc.Read(pk) - if err != nil { - // For some reason weren't able to read the random data - log.Error().Msg("Sending to random is not implemented for substrate yet") - } else { - toAddr = gstypes.NewMultiAddressFromAccountID(pk) - } - - } - - 
gsCall, err := gstypes.NewCall(meta, "Balances.transfer", toAddr, gstypes.NewUCompact(ltp.SendAmount)) - if err != nil { - return - } - - ext := gstypes.NewExtrinsic(gsCall) - rv := ltp.AvailRuntime - kp := *inputLoadTestParams.FromAvailAddress - - o := gstypes.SignatureOptions{ - BlockHash: genesisHash, - Era: gstypes.ExtrinsicEra{IsMortalEra: false, IsImmortalEra: true}, - GenesisHash: genesisHash, - Nonce: gstypes.NewUCompactFromUInt(uint64(nonce)), - SpecVersion: rv.SpecVersion, - Tip: gstypes.NewUCompactFromUInt(0), - TransactionVersion: rv.TransactionVersion, + if ltp.ForceGasPrice != nil && *ltp.ForceGasPrice != 0 { + tops.GasPrice = big.NewInt(0).SetUint64(*ltp.ForceGasPrice) } - - err = ext.Sign(kp, o) - if err != nil { - return + if ltp.ForceGasLimit != nil && *ltp.ForceGasLimit != 0 { + tops.GasLimit = *ltp.ForceGasLimit } - t1 = time.Now() - _, err = c.RPC.Author.SubmitExtrinsic(ext) - t2 = time.Now() - if err != nil { - return + // if we're in legacy mode, there's no point doing anything else in this function + if *ltp.LegacyTransactionMode { + return tops } - return -} - -func loadtestAvailStore(ctx context.Context, c *gsrpc.SubstrateAPI, nonce uint64, meta *gstypes.Metadata, genesisHash gstypes.Hash) (t1 time.Time, t2 time.Time, err error) { - ltp := inputLoadTestParams - inputData := make([]byte, *ltp.ByteCount) - _, _ = hexwordRead(inputData) - - gsCall, err := gstypes.NewCall(meta, "DataAvailability.submit_data", gstypes.NewBytes([]byte(inputData))) - if err != nil { - return + if ltp.ForcePriorityGasPrice != nil && *ltp.ForcePriorityGasPrice != 0 { + tops.GasTipCap = big.NewInt(0).SetUint64(*ltp.ForcePriorityGasPrice) } - // Create the extrinsic - ext := gstypes.NewExtrinsic(gsCall) - - rv := ltp.AvailRuntime - - kp := *inputLoadTestParams.FromAvailAddress - - o := gstypes.SignatureOptions{ - BlockHash: genesisHash, - Era: gstypes.ExtrinsicEra{IsMortalEra: false, IsImmortalEra: true}, - GenesisHash: genesisHash, - Nonce: gstypes.NewUCompactFromUInt(uint64(nonce)), - SpecVersion: rv.SpecVersion, - Tip: gstypes.NewUCompactFromUInt(100), - TransactionVersion: rv.TransactionVersion, - } - // Sign the transaction using Alice's default account - err = ext.Sign(kp, o) - if err != nil { - return + if ltp.CurrentBaseFee == nil { + log.Fatal().Msg("EIP-1559 not activated. Please use --legacy") } - // Send the extrinsic - t1 = time.Now() - _, err = c.RPC.Author.SubmitExtrinsic(ext) - t2 = time.Now() - if err != nil { - return - } - return -} + tops.GasPrice = nil + tops.GasFeeCap = big.NewInt(0).Add(ltp.CurrentBaseFee, ltp.CurrentGasTipCap) -func configureTransactOpts(tops *bind.TransactOpts) *bind.TransactOpts { - ltp := inputLoadTestParams - if ltp.ForceGasPrice != nil && *ltp.ForceGasPrice != 0 { - tops.GasPrice = big.NewInt(0).SetUint64(*ltp.ForceGasPrice) - } else { - tops.GasPrice = ltp.CurrentGas - } - if !*ltp.LegacyTransactionMode { - if ltp.ForceGasPrice != nil && *ltp.ForceGasPrice != 0 { - tops.GasPrice = big.NewInt(0).SetUint64(*ltp.ForceGasPrice) - } else { - if ltp.BaseFee != nil { - tops.GasPrice = big.NewInt(0).Add(ltp.BaseFee, ltp.CurrentGasTipCap) - } else { - log.Fatal().Msg("EIP-1559 not activated. 
Please use --legacy") - } - } - if ltp.ForcePriorityGasPrice != nil && *ltp.ForcePriorityGasPrice != 0 { - tops.GasTipCap = big.NewInt(0).SetUint64(*ltp.ForcePriorityGasPrice) - } else { - tops.GasTipCap = ltp.CurrentGasTipCap - } - } - if ltp.ForceGasLimit != nil && *ltp.ForceGasLimit != 0 { - tops.GasLimit = *ltp.ForceGasLimit - } return tops } @@ -1611,382 +1207,29 @@ func waitForFinalBlock(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Cli return lastBlockNumber, nil } -func summarizeTransactions(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client, startBlockNumber, startNonce, lastBlockNumber, endNonce uint64) error { - ltp := inputLoadTestParams - var err error - - log.Trace().Msg("Starting block range capture") - // confirm start block number is ok - _, err = c.BlockByNumber(ctx, new(big.Int).SetUint64(startBlockNumber)) - if err != nil { - return err - } - rawBlocks, err := util.GetBlockRange(ctx, startBlockNumber, lastBlockNumber, rpc) - if err != nil { - return err - } - // TODO: Add some kind of decimation to avoid summarizing for 10 minutes? - batchSize := *ltp.BatchSize - goRoutineLimit := *ltp.Concurrency - var txGroup sync.WaitGroup - threadPool := make(chan bool, goRoutineLimit) - log.Trace().Msg("Starting tx receipt capture") - rawTxReceipts := make([]*json.RawMessage, 0) - var rawTxReceiptsLock sync.Mutex - var txGroupErr error - - startReceipt := time.Now() - for k := range rawBlocks { - threadPool <- true - txGroup.Add(1) - go func(b *json.RawMessage) { - var receipt []*json.RawMessage - receipt, err = util.GetReceipts(ctx, []*json.RawMessage{b}, rpc, batchSize) - if err != nil { - txGroupErr = err - return - } - rawTxReceiptsLock.Lock() - rawTxReceipts = append(rawTxReceipts, receipt...) - rawTxReceiptsLock.Unlock() - <-threadPool - txGroup.Done() - }(rawBlocks[k]) - } - - endReceipt := time.Now() - txGroup.Wait() - if txGroupErr != nil { - log.Error().Err(err).Msg("One of the threads fetching tx receipts failed") - return err - } - - blocks := make([]rpctypes.RawBlockResponse, 0) - for _, b := range rawBlocks { - var block rpctypes.RawBlockResponse - err = json.Unmarshal(*b, &block) - if err != nil { - log.Error().Err(err).Msg("Error decoding block response") - return err - } - blocks = append(blocks, block) - } - log.Info().Int("len", len(blocks)).Msg("Block summary") +func transactOptsToCallMsg(tops *bind.TransactOpts) ethereum.CallMsg { + cm := new(ethereum.CallMsg) + cm.From = *inputLoadTestParams.FromETHAddress - txReceipts := make([]rpctypes.RawTxReceipt, 0) - log.Trace().Int("len", len(rawTxReceipts)).Msg("Raw receipts") - for _, r := range rawTxReceipts { - if isEmptyJSONResponse(r) { - continue - } - var receipt rpctypes.RawTxReceipt - err = json.Unmarshal(*r, &receipt) - if err != nil { - log.Error().Err(err).Msg("Error decoding tx receipt response") - return err - } - txReceipts = append(txReceipts, receipt) - } - log.Info().Int("len", len(txReceipts)).Msg("Receipt summary") - - blockData := make(map[uint64]blockSummary, 0) - for k, b := range blocks { - bs := blockSummary{} - bs.Block = &blocks[k] - bs.Receipts = make(map[ethcommon.Hash]rpctypes.RawTxReceipt, 0) - bs.Latencies = make(map[uint64]time.Duration, 0) - blockData[b.Number.ToUint64()] = bs - } - - for _, r := range txReceipts { - bn := r.BlockNumber.ToUint64() - bs := blockData[bn] - if bs.Receipts == nil { - log.Error().Uint64("blocknumber", bn).Msg("Block number from receipts does not exist in block data") - } - bs.Receipts[r.TransactionHash.ToHash()] = r - blockData[bn] = bs - 
} - - nonceTimes := make(map[uint64]time.Time, 0) - for _, ltr := range loadTestResults { - nonceTimes[ltr.Nonce] = ltr.RequestTime - } - - minLatency := time.Millisecond * 100 - for _, bs := range blockData { - for _, tx := range bs.Block.Transactions { - // TODO: What happens when the system clock of the load tester isn't in sync with the system clock of the miner? - // TODO: the timestamp in the chain only has granularity down to the second. How to deal with this - mineTime := time.Unix(bs.Block.Timestamp.ToInt64(), 0) - requestTime := nonceTimes[tx.Nonce.ToUint64()] - txLatency := mineTime.Sub(requestTime) - if txLatency.Hours() > 2 { - log.Debug().Float64("txHours", txLatency.Hours()).Uint64("nonce", tx.Nonce.ToUint64()).Uint64("blockNumber", bs.Block.Number.ToUint64()).Time("mineTime", mineTime).Time("requestTime", requestTime).Msg("Encountered transaction with more than 2 hours latency") - } - bs.Latencies[tx.Nonce.ToUint64()] = txLatency - if txLatency < minLatency { - minLatency = txLatency - } - } - } - // TODO this might be a hack, but not sure what's a better way to deal with time discrepancies - if minLatency < time.Millisecond*100 { - log.Trace().Str("minLatency", minLatency.String()).Msg("Minimum latency is below expected threshold") - shiftSize := ((time.Millisecond * 100) - minLatency) + time.Millisecond + 100 - for _, bs := range blockData { - for _, tx := range bs.Block.Transactions { - bs.Latencies[tx.Nonce.ToUint64()] += shiftSize - } - } - } - - printBlockSummary(c, blockData, startNonce, endNonce) - - log.Trace().Str("summaryTime", (endReceipt.Sub(startReceipt)).String()).Msg("Total Summary Time") - return nil - -} - -func isEmptyJSONResponse(r *json.RawMessage) bool { - rawJson := []byte(*r) - return len(rawJson) == 0 -} - -type Latency struct { - Min float64 - Median float64 - Max float64 -} - -type Summary struct { - BlockNumber uint64 - Time time.Time - GasLimit uint64 - GasUsed uint64 - NumTx int - Utilization float64 - Latencies Latency -} - -type SummaryOutput struct { - Summaries []Summary - SuccessfulTx int64 - TotalTx int64 - TotalMiningTime time.Duration - TotalGasUsed uint64 - TransactionsPerSec float64 - GasPerSecond float64 - Latencies Latency -} - -func printBlockSummary(c *ethclient.Client, bs map[uint64]blockSummary, startNonce, endNonce uint64) { - filterBlockSummary(bs, startNonce, endNonce) - mapKeys := getSortedMapKeys(bs) - if len(mapKeys) == 0 { - return - } - - var totalTransactions uint64 = 0 - var totalGasUsed uint64 = 0 - p := message.NewPrinter(language.English) - - allLatencies := make([]time.Duration, 0) - summaryOutputMode := *inputLoadTestParams.SummaryOutputMode - jsonSummaryList := []Summary{} - for _, v := range mapKeys { - summary := bs[v] - gasUsed := getTotalGasUsed(summary.Receipts) - blockLatencies := getMapValues(summary.Latencies) - minLatency, medianLatency, maxLatency := getMinMedianMax(blockLatencies) - allLatencies = append(allLatencies, blockLatencies...) 
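// (Editor's note, illustrative only and not part of the patch.) The summary
// helpers being deleted here are re-added essentially verbatim in the new
// cmd/loadtest/output.go later in this diff. For reference, getMinMedianMax
// sorts its input and averages the two middle samples when the count is even:
//
//	latencies := []time.Duration{400 * time.Millisecond, 100 * time.Millisecond, 300 * time.Millisecond, 200 * time.Millisecond}
//	min, median, max := getMinMedianMax(latencies)
//	// min = 100ms, max = 400ms, median = (200ms + 300ms) / 2 = 250ms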
- blockUtilization := float64(gasUsed) / summary.Block.GasLimit.ToFloat64() - if gasUsed == 0 { - blockUtilization = 0 - } - // if we're at trace, debug, or info level we'll output the block level metrics - if zerolog.GlobalLevel() <= zerolog.InfoLevel { - if summaryOutputMode == "text" { - _, _ = p.Printf("Block number: %v\tTime: %s\tGas Limit: %v\tGas Used: %v\tNum Tx: %v\tUtilization %v\tLatencies: %v\t%v\t%v\n", - number.Decimal(summary.Block.Number.ToUint64()), - time.Unix(summary.Block.Timestamp.ToInt64(), 0), - number.Decimal(summary.Block.GasLimit.ToUint64()), - number.Decimal(gasUsed), - number.Decimal(len(summary.Block.Transactions)), - number.Percent(blockUtilization), - number.Decimal(minLatency.Seconds()), - number.Decimal(medianLatency.Seconds()), - number.Decimal(maxLatency.Seconds())) - } else if summaryOutputMode == "json" { - jsonSummary := Summary{} - jsonSummary.BlockNumber = summary.Block.Number.ToUint64() - jsonSummary.Time = time.Unix(summary.Block.Timestamp.ToInt64(), 0) - jsonSummary.GasLimit = summary.Block.GasLimit.ToUint64() - jsonSummary.GasUsed = gasUsed - jsonSummary.NumTx = len(summary.Block.Transactions) - jsonSummary.Utilization = blockUtilization - latencies := Latency{} - latencies.Min = minLatency.Seconds() - latencies.Median = medianLatency.Seconds() - latencies.Max = maxLatency.Seconds() - jsonSummary.Latencies = latencies - jsonSummaryList = append(jsonSummaryList, jsonSummary) - } else { - log.Error().Str("mode", summaryOutputMode).Msg("Invalid mode for summary output") - } - } - totalTransactions += uint64(len(summary.Block.Transactions)) - totalGasUsed += gasUsed - } - parentOfFirstBlock, _ := c.BlockByNumber(context.Background(), big.NewInt(bs[mapKeys[0]].Block.Number.ToInt64()-1)) - lastBlock := bs[mapKeys[len(mapKeys)-1]].Block - totalMiningTime := time.Duration(lastBlock.Timestamp.ToUint64()-parentOfFirstBlock.Time()) * time.Second - tps := float64(totalTransactions) / totalMiningTime.Seconds() - gaspersec := float64(totalGasUsed) / totalMiningTime.Seconds() - minLatency, medianLatency, maxLatency := getMinMedianMax(allLatencies) - successfulTx, totalTx := getSuccessfulTransactionCount(bs) - - if summaryOutputMode == "text" { - p.Printf("Successful Tx: %v\tTotal Tx: %v\n", number.Decimal(successfulTx), number.Decimal(totalTx)) - p.Printf("Total Mining Time: %s\n", totalMiningTime) - p.Printf("Total Transactions: %v\n", number.Decimal(totalTransactions)) - p.Printf("Total Gas Used: %v\n", number.Decimal(totalGasUsed)) - p.Printf("Transactions per sec: %v\n", number.Decimal(tps)) - p.Printf("Gas Per Second: %v\n", number.Decimal(gaspersec)) - p.Printf("Latencies - Min: %v\tMedian: %v\tMax: %v\n", number.Decimal(minLatency.Seconds()), number.Decimal(medianLatency.Seconds()), number.Decimal(maxLatency.Seconds())) - // TODO: Add some kind of indication of block time variance - } else if summaryOutputMode == "json" { - summaryOutput := SummaryOutput{} - summaryOutput.Summaries = jsonSummaryList - summaryOutput.SuccessfulTx = successfulTx - summaryOutput.TotalTx = totalTx - summaryOutput.TotalMiningTime = totalMiningTime - summaryOutput.TotalGasUsed = totalGasUsed - summaryOutput.TransactionsPerSec = tps - summaryOutput.GasPerSecond = gaspersec - - latencies := Latency{} - latencies.Min = minLatency.Seconds() - latencies.Median = medianLatency.Seconds() - latencies.Max = maxLatency.Seconds() - summaryOutput.Latencies = latencies - - val, _ := json.MarshalIndent(summaryOutput, "", " ") - p.Println(string(val)) - } else { - log.Error().Str("mode", 
summaryOutputMode).Msg("Invalid mode for summary output") - } + cm.Gas = tops.GasLimit + cm.GasPrice = tops.GasPrice + cm.GasFeeCap = tops.GasFeeCap + cm.GasTipCap = tops.GasTipCap + cm.Value = tops.Value + return *cm } -func getSuccessfulTransactionCount(bs map[uint64]blockSummary) (successful, total int64) { - for _, block := range bs { - total += int64(len(block.Receipts)) - for _, receipt := range block.Receipts { - successful += receipt.Status.ToInt64() - } - } - return -} - -func getTotalGasUsed(receipts map[ethcommon.Hash]rpctypes.RawTxReceipt) uint64 { - var totalGasUsed uint64 = 0 - for _, receipt := range receipts { - totalGasUsed += receipt.GasUsed.ToUint64() - } - return totalGasUsed -} - -func getMapValues[K constraints.Ordered, V any](m map[K]V) []V { - newSlice := make([]V, 0) - for _, val := range m { - newSlice = append(newSlice, val) - } - return newSlice -} - -func getMinMedianMax[V constraints.Float | constraints.Integer](values []V) (V, V, V) { - if len(values) == 0 { - return 0, 0, 0 - } - sort.Slice(values, func(i, j int) bool { - return values[i] < values[j] - }) - half := len(values) / 2 - median := values[half] - if len(values)%2 == 0 { - median = (median + values[half-1]) / V(2) - } - var min V - var max V - for k, v := range values { - if k == 0 { - min = v - max = v - continue - } - if v < min { - min = v - } - if v > max { - max = v - } - } - return min, median, max -} - -func filterBlockSummary(blockSummaries map[uint64]blockSummary, startNonce, endNonce uint64) { - validTx := make(map[ethcommon.Hash]struct{}, 0) - var minBlock uint64 = math.MaxUint64 - var maxBlock uint64 = 0 - for _, bs := range blockSummaries { - for _, tx := range bs.Block.Transactions { - if tx.Nonce.ToUint64() >= startNonce && tx.Nonce.ToUint64() <= endNonce { - validTx[tx.Hash.ToHash()] = struct{}{} - if tx.BlockNumber.ToUint64() < minBlock { - minBlock = tx.BlockNumber.ToUint64() - } - if tx.BlockNumber.ToUint64() > maxBlock { - maxBlock = tx.BlockNumber.ToUint64() - } - } - } - } - keys := getSortedMapKeys(blockSummaries) - for _, k := range keys { - if k < minBlock { - delete(blockSummaries, k) - } - if k > maxBlock { - delete(blockSummaries, k) - } - } - - for _, bs := range blockSummaries { - filteredTransactions := make([]rpctypes.RawTransactionResponse, 0) - for txKey, tx := range bs.Block.Transactions { - if _, hasKey := validTx[tx.Hash.ToHash()]; hasKey { - filteredTransactions = append(filteredTransactions, bs.Block.Transactions[txKey]) - } - } - bs.Block.Transactions = filteredTransactions - filteredReceipts := make(map[ethcommon.Hash]rpctypes.RawTxReceipt, 0) - for receiptKey, receipt := range bs.Receipts { - if _, hasKey := validTx[receipt.TransactionHash.ToHash()]; hasKey { - filteredReceipts[receipt.TransactionHash.ToHash()] = bs.Receipts[receiptKey] - } - } - bs.Receipts = filteredReceipts - - } -} - -func getSortedMapKeys[V any, K constraints.Ordered](m map[K]V) []K { - keys := make([]K, 0) - for k := range m { - keys = append(keys, k) - } - sort.Slice(keys, func(i, j int) bool { - return keys[i] < keys[j] - }) - return keys +func txToCallMsg(tx *ethtypes.Transaction) ethereum.CallMsg { + cm := new(ethereum.CallMsg) + cm.From = *inputLoadTestParams.FromETHAddress + cm.To = tx.To() + cm.Gas = tx.Gas() + cm.GasPrice = tx.GasPrice() + cm.GasFeeCap = tx.GasFeeCap() + cm.GasTipCap = tx.GasTipCap() + cm.Value = tx.Value() + cm.Data = tx.Data() + + cm.AccessList = tx.AccessList() + return *cm } diff --git a/cmd/loadtest/output.go b/cmd/loadtest/output.go new file mode 100644 
index 00000000..ce5d24e5 --- /dev/null +++ b/cmd/loadtest/output.go @@ -0,0 +1,462 @@ +package loadtest + +import ( + "context" + "encoding/json" + "golang.org/x/time/rate" + "math" + "math/big" + "sort" + "sync" + "time" + + "github.com/maticnetwork/polygon-cli/rpctypes" + "github.com/maticnetwork/polygon-cli/util" + "golang.org/x/exp/constraints" + "golang.org/x/text/language" + "golang.org/x/text/message" + "golang.org/x/text/number" + + _ "embed" + + ethcommon "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + ethrpc "github.com/ethereum/go-ethereum/rpc" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func printBlockSummary(c *ethclient.Client, bs map[uint64]blockSummary, startNonce, endNonce uint64) { + filterBlockSummary(bs, startNonce, endNonce) + mapKeys := getSortedMapKeys(bs) + if len(mapKeys) == 0 { + return + } + + var totalTransactions uint64 = 0 + var totalGasUsed uint64 = 0 + p := message.NewPrinter(language.English) + + allLatencies := make([]time.Duration, 0) + summaryOutputMode := *inputLoadTestParams.SummaryOutputMode + jsonSummaryList := []Summary{} + for _, v := range mapKeys { + summary := bs[v] + gasUsed := getTotalGasUsed(summary.Receipts) + blockLatencies := getMapValues(summary.Latencies) + minLatency, medianLatency, maxLatency := getMinMedianMax(blockLatencies) + allLatencies = append(allLatencies, blockLatencies...) + blockUtilization := float64(gasUsed) / summary.Block.GasLimit.ToFloat64() + if gasUsed == 0 { + blockUtilization = 0 + } + // if we're at trace, debug, or info level we'll output the block level metrics + if zerolog.GlobalLevel() <= zerolog.InfoLevel { + if summaryOutputMode == "text" { + _, _ = p.Printf("Block number: %v\tTime: %s\tGas Limit: %v\tGas Used: %v\tNum Tx: %v\tUtilization %v\tLatencies: %v\t%v\t%v\n", + number.Decimal(summary.Block.Number.ToUint64()), + time.Unix(summary.Block.Timestamp.ToInt64(), 0), + number.Decimal(summary.Block.GasLimit.ToUint64()), + number.Decimal(gasUsed), + number.Decimal(len(summary.Block.Transactions)), + number.Percent(blockUtilization), + number.Decimal(minLatency.Seconds()), + number.Decimal(medianLatency.Seconds()), + number.Decimal(maxLatency.Seconds())) + } else if summaryOutputMode == "json" { + jsonSummary := Summary{} + jsonSummary.BlockNumber = summary.Block.Number.ToUint64() + jsonSummary.Time = time.Unix(summary.Block.Timestamp.ToInt64(), 0) + jsonSummary.GasLimit = summary.Block.GasLimit.ToUint64() + jsonSummary.GasUsed = gasUsed + jsonSummary.NumTx = len(summary.Block.Transactions) + jsonSummary.Utilization = blockUtilization + latencies := Latency{} + latencies.Min = minLatency.Seconds() + latencies.Median = medianLatency.Seconds() + latencies.Max = maxLatency.Seconds() + jsonSummary.Latencies = latencies + jsonSummaryList = append(jsonSummaryList, jsonSummary) + } else { + log.Error().Str("mode", summaryOutputMode).Msg("Invalid mode for summary output") + } + } + totalTransactions += uint64(len(summary.Block.Transactions)) + totalGasUsed += gasUsed + } + parentOfFirstBlock, _ := c.BlockByNumber(context.Background(), big.NewInt(bs[mapKeys[0]].Block.Number.ToInt64()-1)) + lastBlock := bs[mapKeys[len(mapKeys)-1]].Block + totalMiningTime := time.Duration(lastBlock.Timestamp.ToUint64()-parentOfFirstBlock.Time()) * time.Second + tps := float64(totalTransactions) / totalMiningTime.Seconds() + gaspersec := float64(totalGasUsed) / totalMiningTime.Seconds() + minLatency, medianLatency, maxLatency := getMinMedianMax(allLatencies) + successfulTx, 
totalTx := getSuccessfulTransactionCount(bs) + + if summaryOutputMode == "text" { + p.Printf("Successful Tx: %v\tTotal Tx: %v\n", number.Decimal(successfulTx), number.Decimal(totalTx)) + p.Printf("Total Mining Time: %s\n", totalMiningTime) + p.Printf("Total Transactions: %v\n", number.Decimal(totalTransactions)) + p.Printf("Total Gas Used: %v\n", number.Decimal(totalGasUsed)) + p.Printf("Transactions per sec: %v\n", number.Decimal(tps)) + p.Printf("Gas Per Second: %v\n", number.Decimal(gaspersec)) + p.Printf("Latencies - Min: %v\tMedian: %v\tMax: %v\n", number.Decimal(minLatency.Seconds()), number.Decimal(medianLatency.Seconds()), number.Decimal(maxLatency.Seconds())) + // TODO: Add some kind of indication of block time variance + } else if summaryOutputMode == "json" { + summaryOutput := SummaryOutput{} + summaryOutput.Summaries = jsonSummaryList + summaryOutput.SuccessfulTx = successfulTx + summaryOutput.TotalTx = totalTx + summaryOutput.TotalMiningTime = totalMiningTime + summaryOutput.TotalGasUsed = totalGasUsed + summaryOutput.TransactionsPerSec = tps + summaryOutput.GasPerSecond = gaspersec + + latencies := Latency{} + latencies.Min = minLatency.Seconds() + latencies.Median = medianLatency.Seconds() + latencies.Max = maxLatency.Seconds() + summaryOutput.Latencies = latencies + + val, _ := json.MarshalIndent(summaryOutput, "", " ") + p.Println(string(val)) + } else { + log.Error().Str("mode", summaryOutputMode).Msg("Invalid mode for summary output") + } +} +func filterBlockSummary(blockSummaries map[uint64]blockSummary, startNonce, endNonce uint64) { + validTx := make(map[ethcommon.Hash]struct{}, 0) + var minBlock uint64 = math.MaxUint64 + var maxBlock uint64 = 0 + for _, bs := range blockSummaries { + for _, tx := range bs.Block.Transactions { + if tx.Nonce.ToUint64() >= startNonce && tx.Nonce.ToUint64() <= endNonce { + validTx[tx.Hash.ToHash()] = struct{}{} + if tx.BlockNumber.ToUint64() < minBlock { + minBlock = tx.BlockNumber.ToUint64() + } + if tx.BlockNumber.ToUint64() > maxBlock { + maxBlock = tx.BlockNumber.ToUint64() + } + } + } + } + keys := getSortedMapKeys(blockSummaries) + for _, k := range keys { + if k < minBlock { + delete(blockSummaries, k) + } + if k > maxBlock { + delete(blockSummaries, k) + } + } + + for _, bs := range blockSummaries { + filteredTransactions := make([]rpctypes.RawTransactionResponse, 0) + for txKey, tx := range bs.Block.Transactions { + if _, hasKey := validTx[tx.Hash.ToHash()]; hasKey { + filteredTransactions = append(filteredTransactions, bs.Block.Transactions[txKey]) + } + } + bs.Block.Transactions = filteredTransactions + filteredReceipts := make(map[ethcommon.Hash]rpctypes.RawTxReceipt, 0) + for receiptKey, receipt := range bs.Receipts { + if _, hasKey := validTx[receipt.TransactionHash.ToHash()]; hasKey { + filteredReceipts[receipt.TransactionHash.ToHash()] = bs.Receipts[receiptKey] + } + } + bs.Receipts = filteredReceipts + + } +} +func getMapValues[K constraints.Ordered, V any](m map[K]V) []V { + newSlice := make([]V, 0) + for _, val := range m { + newSlice = append(newSlice, val) + } + return newSlice +} + +func getMinMedianMax[V constraints.Float | constraints.Integer](values []V) (V, V, V) { + if len(values) == 0 { + return 0, 0, 0 + } + sort.Slice(values, func(i, j int) bool { + return values[i] < values[j] + }) + half := len(values) / 2 + median := values[half] + if len(values)%2 == 0 { + median = (median + values[half-1]) / V(2) + } + var min V + var max V + for k, v := range values { + if k == 0 { + min = v + max = v + continue + } + 
if v < min { + min = v + } + if v > max { + max = v + } + } + return min, median, max +} + +func getSortedMapKeys[V any, K constraints.Ordered](m map[K]V) []K { + keys := make([]K, 0) + for k := range m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { + return keys[i] < keys[j] + }) + return keys +} + +func getSuccessfulTransactionCount(bs map[uint64]blockSummary) (successful, total int64) { + for _, block := range bs { + total += int64(len(block.Receipts)) + for _, receipt := range block.Receipts { + successful += receipt.Status.ToInt64() + } + } + return +} + +func getTotalGasUsed(receipts map[ethcommon.Hash]rpctypes.RawTxReceipt) uint64 { + var totalGasUsed uint64 = 0 + for _, receipt := range receipts { + totalGasUsed += receipt.GasUsed.ToUint64() + } + return totalGasUsed +} + +type Latency struct { + Min float64 + Median float64 + Max float64 +} + +type Summary struct { + BlockNumber uint64 + Time time.Time + GasLimit uint64 + GasUsed uint64 + NumTx int + Utilization float64 + Latencies Latency +} + +type SummaryOutput struct { + Summaries []Summary + SuccessfulTx int64 + TotalTx int64 + TotalMiningTime time.Duration + TotalGasUsed uint64 + TransactionsPerSec float64 + GasPerSecond float64 + Latencies Latency +} + +func summarizeTransactions(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client, startBlockNumber, startNonce, lastBlockNumber, endNonce uint64) error { + ltp := inputLoadTestParams + var err error + + log.Trace().Msg("Starting block range capture") + // confirm start block number is ok + _, err = c.BlockByNumber(ctx, new(big.Int).SetUint64(startBlockNumber)) + if err != nil { + return err + } + rawBlocks, err := util.GetBlockRange(ctx, startBlockNumber, lastBlockNumber, rpc) + if err != nil { + return err + } + // TODO: Add some kind of decimation to avoid summarizing for 10 minutes? + batchSize := *ltp.BatchSize + goRoutineLimit := *ltp.Concurrency + var txGroup sync.WaitGroup + threadPool := make(chan bool, goRoutineLimit) + log.Trace().Msg("Starting tx receipt capture") + rawTxReceipts := make([]*json.RawMessage, 0) + var rawTxReceiptsLock sync.Mutex + var txGroupErr error + + startReceipt := time.Now() + for k := range rawBlocks { + threadPool <- true + txGroup.Add(1) + go func(b *json.RawMessage) { + var receipt []*json.RawMessage + receipt, err = util.GetReceipts(ctx, []*json.RawMessage{b}, rpc, batchSize) + if err != nil { + txGroupErr = err + return + } + rawTxReceiptsLock.Lock() + rawTxReceipts = append(rawTxReceipts, receipt...) 
+ rawTxReceiptsLock.Unlock() + <-threadPool + txGroup.Done() + }(rawBlocks[k]) + } + + endReceipt := time.Now() + txGroup.Wait() + if txGroupErr != nil { + log.Error().Err(err).Msg("One of the threads fetching tx receipts failed") + return err + } + + blocks := make([]rpctypes.RawBlockResponse, 0) + for _, b := range rawBlocks { + var block rpctypes.RawBlockResponse + err = json.Unmarshal(*b, &block) + if err != nil { + log.Error().Err(err).Msg("Error decoding block response") + return err + } + blocks = append(blocks, block) + } + log.Info().Int("len", len(blocks)).Msg("Block summary") + + txReceipts := make([]rpctypes.RawTxReceipt, 0) + log.Trace().Int("len", len(rawTxReceipts)).Msg("Raw receipts") + for _, r := range rawTxReceipts { + if isEmptyJSONResponse(r) { + continue + } + var receipt rpctypes.RawTxReceipt + err = json.Unmarshal(*r, &receipt) + if err != nil { + log.Error().Err(err).Msg("Error decoding tx receipt response") + return err + } + txReceipts = append(txReceipts, receipt) + } + log.Info().Int("len", len(txReceipts)).Msg("Receipt summary") + + blockData := make(map[uint64]blockSummary, 0) + for k, b := range blocks { + bs := blockSummary{} + bs.Block = &blocks[k] + bs.Receipts = make(map[ethcommon.Hash]rpctypes.RawTxReceipt, 0) + bs.Latencies = make(map[uint64]time.Duration, 0) + blockData[b.Number.ToUint64()] = bs + } + + for _, r := range txReceipts { + bn := r.BlockNumber.ToUint64() + bs := blockData[bn] + if bs.Receipts == nil { + log.Error().Uint64("blocknumber", bn).Msg("Block number from receipts does not exist in block data") + } + bs.Receipts[r.TransactionHash.ToHash()] = r + blockData[bn] = bs + } + + nonceTimes := make(map[uint64]time.Time, 0) + for _, ltr := range loadTestResults { + nonceTimes[ltr.Nonce] = ltr.RequestTime + } + + minLatency := time.Millisecond * 100 + for _, bs := range blockData { + for _, tx := range bs.Block.Transactions { + // TODO: What happens when the system clock of the load tester isn't in sync with the system clock of the miner? + // TODO: the timestamp in the chain only has granularity down to the second. 
How to deal with this + mineTime := time.Unix(bs.Block.Timestamp.ToInt64(), 0) + requestTime := nonceTimes[tx.Nonce.ToUint64()] + txLatency := mineTime.Sub(requestTime) + if txLatency.Hours() > 2 { + log.Debug().Float64("txHours", txLatency.Hours()).Uint64("nonce", tx.Nonce.ToUint64()).Uint64("blockNumber", bs.Block.Number.ToUint64()).Time("mineTime", mineTime).Time("requestTime", requestTime).Msg("Encountered transaction with more than 2 hours latency") + } + bs.Latencies[tx.Nonce.ToUint64()] = txLatency + if txLatency < minLatency { + minLatency = txLatency + } + } + } + // TODO this might be a hack, but not sure what's a better way to deal with time discrepancies + if minLatency < time.Millisecond*100 { + log.Trace().Str("minLatency", minLatency.String()).Msg("Minimum latency is below expected threshold") + shiftSize := ((time.Millisecond * 100) - minLatency) + time.Millisecond + 100 + for _, bs := range blockData { + for _, tx := range bs.Block.Transactions { + bs.Latencies[tx.Nonce.ToUint64()] += shiftSize + } + } + } + + printBlockSummary(c, blockData, startNonce, endNonce) + + log.Trace().Str("summaryTime", (endReceipt.Sub(startReceipt)).String()).Msg("Total Summary Time") + return nil + +} + +func isEmptyJSONResponse(r *json.RawMessage) bool { + rawJson := []byte(*r) + return len(rawJson) == 0 +} + +func printResults(lts []loadTestSample) { + if len(lts) == 0 { + log.Error().Msg("No results recorded") + return + } + + log.Info().Msg("* Results") + log.Info().Int("samples", len(lts)).Msg("Samples") + + var startTime = lts[0].RequestTime + var endTime = lts[len(lts)-1].RequestTime + var meanWait float64 + var totalWait float64 = 0 + var numErrors uint64 = 0 + + for _, s := range lts { + if s.IsError { + numErrors += 1 + } + totalWait = float64(s.WaitTime.Seconds()) + totalWait + } + meanWait = totalWait / float64(len(lts)) + + log.Info().Time("startTime", startTime).Msg("Start") + log.Info().Time("endTime", endTime).Msg("End") + log.Info().Float64("meanWait", meanWait).Msg("Mean Wait") + log.Info().Uint64("numErrors", numErrors).Msg("Num errors") +} + +func lightSummary(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client, startBlockNumber, startNonce, endBlockNumber, endNonce uint64, rl *rate.Limiter) { + startBlock, err := c.BlockByNumber(ctx, new(big.Int).SetUint64(startBlockNumber)) + if err != nil { + log.Error().Err(err).Msg("unable to get start block for light summary") + return + } + endBlock, err := c.BlockByNumber(ctx, new(big.Int).SetUint64(endBlockNumber)) + if err != nil { + log.Error().Err(err).Msg("unable to get end block for light summary") + return + } + endTime := time.Unix(int64(endBlock.Time()), 0) + startTime := time.Unix(int64(startBlock.Time()), 0) + + testDuration := endTime.Sub(startTime) + tps := float64(len(loadTestResults)) / testDuration.Seconds() + + log.Info(). + Time("firstBlockTime", startTime). + Time("lastBlockTime", endTime). + Int("transactionCount", len(loadTestResults)). + Float64("testDuration", testDuration.Seconds()). + Float64("tps", tps). + Float64("final rate limit", float64(rl.Limit())). 
+ Msg("rough test summary (ignores errors)") +} diff --git a/cmd/loadtest/recall.go b/cmd/loadtest/recall.go new file mode 100644 index 00000000..f1bdfdf6 --- /dev/null +++ b/cmd/loadtest/recall.go @@ -0,0 +1,77 @@ +package loadtest + +import ( + "context" + "encoding/json" + ethtypes "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/ethclient" + ethrpc "github.com/ethereum/go-ethereum/rpc" + "github.com/maticnetwork/polygon-cli/rpctypes" + "github.com/maticnetwork/polygon-cli/util" + "math/big" +) + +// TODO allow this to be pre-specified with an input file +func getRecentBlocks(ctx context.Context, ec *ethclient.Client, c *ethrpc.Client) ([]*json.RawMessage, error) { + bn, err := ec.BlockNumber(ctx) + if err != nil { + return nil, err + } + rawBlocks, err := util.GetBlockRange(ctx, bn-*inputLoadTestParams.RecallLength, bn, c) + return rawBlocks, err +} + +func getRecallTransactions(ctx context.Context, c *ethclient.Client, rpc *ethrpc.Client) ([]rpctypes.PolyTransaction, error) { + rb, err := getRecentBlocks(ctx, c, rpc) + if err != nil { + return nil, err + } + txs := make([]rpctypes.PolyTransaction, 0) + for _, v := range rb { + pb := new(rpctypes.RawBlockResponse) + err := json.Unmarshal(*v, pb) + if err != nil { + return nil, err + } + for _, t := range pb.Transactions { + pt := rpctypes.NewPolyTransaction(&t) + txs = append(txs, pt) + } + } + return txs, nil +} + +func rawTransactionToNewTx(pt rpctypes.PolyTransaction, nonce uint64, price, tipCap *big.Int) *ethtypes.Transaction { + if pt.MaxFeePerGas() != 0 || pt.ChainID() != 0 { + return rawTransactionToDynamicFeeTx(pt, nonce, price, tipCap) + } + return rawTransactionToLegacyTx(pt, nonce, price) +} +func rawTransactionToDynamicFeeTx(pt rpctypes.PolyTransaction, nonce uint64, price, tipCap *big.Int) *ethtypes.Transaction { + toAddr := pt.To() + chainId := new(big.Int).SetUint64(pt.ChainID()) + dynamicFeeTx := ðtypes.DynamicFeeTx{ + ChainID: chainId, + To: &toAddr, + Data: pt.Data(), + Value: pt.Value(), + Gas: pt.Gas(), + GasFeeCap: price, + GasTipCap: tipCap, + Nonce: nonce, + } + tx := ethtypes.NewTx(dynamicFeeTx) + return tx +} +func rawTransactionToLegacyTx(pt rpctypes.PolyTransaction, nonce uint64, price *big.Int) *ethtypes.Transaction { + toAddr := pt.To() + tx := ethtypes.NewTx(ðtypes.LegacyTx{ + To: &toAddr, + Value: pt.Value(), + Data: pt.Data(), + Gas: pt.Gas(), + Nonce: nonce, + GasPrice: price, + }) + return tx +} diff --git a/cmd/metricsToDash/metricsToDash.go b/cmd/metricsToDash/metricsToDash.go index 31f25357..21dd5501 100644 --- a/cmd/metricsToDash/metricsToDash.go +++ b/cmd/metricsToDash/metricsToDash.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . 
-*/ package metricsToDash import ( diff --git a/cmd/mnemonic/mnemonic.go b/cmd/mnemonic/mnemonic.go index 3377a6d7..7b37310c 100644 --- a/cmd/mnemonic/mnemonic.go +++ b/cmd/mnemonic/mnemonic.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package mnemonic import ( @@ -59,13 +43,4 @@ var MnemonicCmd = &cobra.Command{ func init() { inputMnemonicWords = MnemonicCmd.PersistentFlags().Int("words", 24, "The number of words to use in the mnemonic") inputMnemonicLang = MnemonicCmd.PersistentFlags().String("language", "english", "Which language to use [ChineseSimplified, ChineseTraditional, Czech, English, French, Italian, Japanese, Korean, Spanish]") - // Here you will define your flags and configuration settings. - - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // mnemonicCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // mnemonicCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") } diff --git a/cmd/monitor/monitor.go b/cmd/monitor/monitor.go index b1f7cdfa..a4533181 100644 --- a/cmd/monitor/monitor.go +++ b/cmd/monitor/monitor.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package monitor import ( diff --git a/cmd/nodekey/nodekey.go b/cmd/nodekey/nodekey.go index 0327e552..bbf9aec4 100644 --- a/cmd/nodekey/nodekey.go +++ b/cmd/nodekey/nodekey.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . 
-*/ package nodekey import ( diff --git a/cmd/root.go b/cmd/root.go index 73c4f836..b380e5af 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package cmd import ( @@ -33,6 +17,7 @@ import ( "github.com/maticnetwork/polygon-cli/cmd/dumpblocks" "github.com/maticnetwork/polygon-cli/cmd/forge" "github.com/maticnetwork/polygon-cli/cmd/hash" + "github.com/maticnetwork/polygon-cli/cmd/leveldbbench" "github.com/maticnetwork/polygon-cli/cmd/loadtest" "github.com/maticnetwork/polygon-cli/cmd/metricsToDash" "github.com/maticnetwork/polygon-cli/cmd/mnemonic" @@ -119,6 +104,7 @@ func NewPolycliCommand() *cobra.Command { forge.ForgeCmd, fork.ForkCmd, hash.HashCmd, + leveldbbench.LevelDBBenchCmd, loadtest.LoadtestCmd, metricsToDash.MetricsToDashCmd, mnemonic.MnemonicCmd, @@ -135,6 +121,7 @@ func NewPolycliCommand() *cobra.Command { } // setLogLevel sets the log level based on the flags. +// https://logging.apache.org/log4j/2.x/manual/customloglevels.html func setLogLevel(verbosity int, pretty bool) { if verbosity < 100 { zerolog.SetGlobalLevel(zerolog.PanicLevel) diff --git a/cmd/rpc/rpc.go b/cmd/rpc/rpc.go index 78c6c6c9..b64c845a 100644 --- a/cmd/rpc/rpc.go +++ b/cmd/rpc/rpc.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package rpc import ( diff --git a/cmd/version/version.go b/cmd/version/version.go index 04a79ae9..c670f66b 100644 --- a/cmd/version/version.go +++ b/cmd/version/version.go @@ -1,20 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . 
-*/ - package version import ( @@ -37,15 +20,3 @@ var VersionCmd = &cobra.Command{ cmd.Printf("Polygon CLI Version %s\n", Version) }, } - -func init() { - // Here you will define your flags and configuration settings. - - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // versionCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // versionCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") -} diff --git a/cmd/wallet/wallet.go b/cmd/wallet/wallet.go index de65ef4e..868552d3 100644 --- a/cmd/wallet/wallet.go +++ b/cmd/wallet/wallet.go @@ -1,19 +1,3 @@ -/* -Copyright © 2022 Polygon - -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU Lesser General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public License -along with this program. If not, see . -*/ package wallet import ( diff --git a/contracts/Delegator.abi b/contracts/Delegator.abi deleted file mode 100644 index a0bd75df..00000000 --- a/contracts/Delegator.abi +++ /dev/null @@ -1 +0,0 @@ -[{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"},{"internalType":"bytes","name":"packedCall","type":"bytes"}],"name":"call","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"},{"internalType":"bytes","name":"packedCall","type":"bytes"}],"name":"delegateCall","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"},{"internalType":"bytes","name":"packedCall","type":"bytes"}],"name":"loopCall","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"contractAddress","type":"address"},{"internalType":"bytes","name":"packedCall","type":"bytes"}],"name":"loopDelegateCall","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}] \ No newline at end of file diff --git a/contracts/Delegator.bin b/contracts/Delegator.bin deleted file mode 100644 index 891b1e69..00000000 --- a/contracts/Delegator.bin +++ /dev/null @@ -1 +0,0 @@ 
-608060405234801561001057600080fd5b5061052a806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80631b8b921d1461005157806356e7b7aa1461008157806362a1ffbd146100b1578063757bd1e5146100e1575b600080fd5b61006b60048036038101906100669190610406565b610111565b6040516100789190610481565b60405180910390f35b61009b60048036038101906100969190610406565b610194565b6040516100a89190610481565b60405180910390f35b6100cb60048036038101906100c69190610406565b610215565b6040516100d89190610481565b60405180910390f35b6100fb60048036038101906100f69190610406565b6102a8565b6040516101089190610481565b60405180910390f35b60008060608573ffffffffffffffffffffffffffffffffffffffff16858560405161013d9291906104db565b6000604051808303816000865af19150503d806000811461017a576040519150601f19603f3d011682016040523d82523d6000602084013e61017f565b606091505b50809250819350505081925050509392505050565b60008060608573ffffffffffffffffffffffffffffffffffffffff1685856040516101c09291906104db565b600060405180830381855af49150503d80600081146101fb576040519150601f19603f3d011682016040523d82523d6000602084013e610200565b606091505b50809250819350505081925050509392505050565b60008060605b6103e85a111561029c578573ffffffffffffffffffffffffffffffffffffffff16858560405161024c9291906104db565b6000604051808303816000865af19150503d8060008114610289576040519150601f19603f3d011682016040523d82523d6000602084013e61028e565b606091505b50809250819350505061021b565b81925050509392505050565b60008060605b6103e85a111561032d578573ffffffffffffffffffffffffffffffffffffffff1685856040516102df9291906104db565b600060405180830381855af49150503d806000811461031a576040519150601f19603f3d011682016040523d82523d6000602084013e61031f565b606091505b5080925081935050506102ae565b81925050509392505050565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061036e82610343565b9050919050565b61037e81610363565b811461038957600080fd5b50565b60008135905061039b81610375565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126103c6576103c56103a1565b5b8235905067ffffffffffffffff8111156103e3576103e26103a6565b5b6020830191508360018202830111156103ff576103fe6103ab565b5b9250929050565b60008060006040848603121561041f5761041e610339565b5b600061042d8682870161038c565b935050602084013567ffffffffffffffff81111561044e5761044d61033e565b5b61045a868287016103b0565b92509250509250925092565b60008115159050919050565b61047b81610466565b82525050565b60006020820190506104966000830184610472565b92915050565b600081905092915050565b82818337600083830152505050565b60006104c2838561049c565b93506104cf8385846104a7565b82840190509392505050565b60006104e88284866104b6565b9150819050939250505056fea264697066735822122026359e91fa0fb5826a461a3e171ba836040a8f6089b79691f9cdfe45f6bc99e264736f6c634300080f0033 \ No newline at end of file diff --git a/contracts/Delegator.sol b/contracts/Delegator.sol deleted file mode 100644 index b0e90a56..00000000 --- a/contracts/Delegator.sol +++ /dev/null @@ -1,34 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0 -pragma solidity ^0.8.4; - -contract Delegator { - function call(address contractAddress, bytes calldata packedCall) public returns(bool){ - bool success; - bytes memory data; - (success, data) = contractAddress.call(packedCall); - return success; - } - function delegateCall(address contractAddress, bytes calldata packedCall) public returns(bool){ - bool success; - bytes memory data; - (success, data) = contractAddress.delegatecall(packedCall); - return success; - } - - function loopCall(address contractAddress, bytes calldata packedCall) public returns(bool){ - bool success; - bytes memory data; - 
while(gasleft() > 1000) { - (success, data) = contractAddress.call(packedCall); - } - return success; - } - function loopDelegateCall(address contractAddress, bytes calldata packedCall) public returns(bool){ - bool success; - bytes memory data; - while(gasleft() > 1000) { - (success, data) = contractAddress.delegatecall(packedCall); - } - return success; - } -} diff --git a/contracts/delegator.go b/contracts/delegator.go deleted file mode 100644 index 94625b7c..00000000 --- a/contracts/delegator.go +++ /dev/null @@ -1,286 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// DelegatorMetaData contains all meta data concerning the Delegator contract. -var DelegatorMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"packedCall\",\"type\":\"bytes\"}],\"name\":\"call\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"packedCall\",\"type\":\"bytes\"}],\"name\":\"delegateCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"packedCall\",\"type\":\"bytes\"}],\"name\":\"loopCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"contractAddress\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"packedCall\",\"type\":\"bytes\"}],\"name\":\"loopDelegateCall\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: 
"0x608060405234801561001057600080fd5b5061052a806100206000396000f3fe608060405234801561001057600080fd5b506004361061004c5760003560e01c80631b8b921d1461005157806356e7b7aa1461008157806362a1ffbd146100b1578063757bd1e5146100e1575b600080fd5b61006b60048036038101906100669190610406565b610111565b6040516100789190610481565b60405180910390f35b61009b60048036038101906100969190610406565b610194565b6040516100a89190610481565b60405180910390f35b6100cb60048036038101906100c69190610406565b610215565b6040516100d89190610481565b60405180910390f35b6100fb60048036038101906100f69190610406565b6102a8565b6040516101089190610481565b60405180910390f35b60008060608573ffffffffffffffffffffffffffffffffffffffff16858560405161013d9291906104db565b6000604051808303816000865af19150503d806000811461017a576040519150601f19603f3d011682016040523d82523d6000602084013e61017f565b606091505b50809250819350505081925050509392505050565b60008060608573ffffffffffffffffffffffffffffffffffffffff1685856040516101c09291906104db565b600060405180830381855af49150503d80600081146101fb576040519150601f19603f3d011682016040523d82523d6000602084013e610200565b606091505b50809250819350505081925050509392505050565b60008060605b6103e85a111561029c578573ffffffffffffffffffffffffffffffffffffffff16858560405161024c9291906104db565b6000604051808303816000865af19150503d8060008114610289576040519150601f19603f3d011682016040523d82523d6000602084013e61028e565b606091505b50809250819350505061021b565b81925050509392505050565b60008060605b6103e85a111561032d578573ffffffffffffffffffffffffffffffffffffffff1685856040516102df9291906104db565b600060405180830381855af49150503d806000811461031a576040519150601f19603f3d011682016040523d82523d6000602084013e61031f565b606091505b5080925081935050506102ae565b81925050509392505050565b600080fd5b600080fd5b600073ffffffffffffffffffffffffffffffffffffffff82169050919050565b600061036e82610343565b9050919050565b61037e81610363565b811461038957600080fd5b50565b60008135905061039b81610375565b92915050565b600080fd5b600080fd5b600080fd5b60008083601f8401126103c6576103c56103a1565b5b8235905067ffffffffffffffff8111156103e3576103e26103a6565b5b6020830191508360018202830111156103ff576103fe6103ab565b5b9250929050565b60008060006040848603121561041f5761041e610339565b5b600061042d8682870161038c565b935050602084013567ffffffffffffffff81111561044e5761044d61033e565b5b61045a868287016103b0565b92509250509250925092565b60008115159050919050565b61047b81610466565b82525050565b60006020820190506104966000830184610472565b92915050565b600081905092915050565b82818337600083830152505050565b60006104c2838561049c565b93506104cf8385846104a7565b82840190509392505050565b60006104e88284866104b6565b9150819050939250505056fea264697066735822122026359e91fa0fb5826a461a3e171ba836040a8f6089b79691f9cdfe45f6bc99e264736f6c634300080f0033", -} - -// DelegatorABI is the input ABI used to generate the binding from. -// Deprecated: Use DelegatorMetaData.ABI instead. -var DelegatorABI = DelegatorMetaData.ABI - -// DelegatorBin is the compiled bytecode used for deploying new contracts. -// Deprecated: Use DelegatorMetaData.Bin instead. -var DelegatorBin = DelegatorMetaData.Bin - -// DeployDelegator deploys a new Ethereum contract, binding an instance of Delegator to it. 
-func DeployDelegator(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Delegator, error) { - parsed, err := DelegatorMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(DelegatorBin), backend) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Delegator{DelegatorCaller: DelegatorCaller{contract: contract}, DelegatorTransactor: DelegatorTransactor{contract: contract}, DelegatorFilterer: DelegatorFilterer{contract: contract}}, nil -} - -// Delegator is an auto generated Go binding around an Ethereum contract. -type Delegator struct { - DelegatorCaller // Read-only binding to the contract - DelegatorTransactor // Write-only binding to the contract - DelegatorFilterer // Log filterer for contract events -} - -// DelegatorCaller is an auto generated read-only Go binding around an Ethereum contract. -type DelegatorCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelegatorTransactor is an auto generated write-only Go binding around an Ethereum contract. -type DelegatorTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelegatorFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type DelegatorFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// DelegatorSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type DelegatorSession struct { - Contract *Delegator // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelegatorCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type DelegatorCallerSession struct { - Contract *DelegatorCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// DelegatorTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type DelegatorTransactorSession struct { - Contract *DelegatorTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// DelegatorRaw is an auto generated low-level Go binding around an Ethereum contract. -type DelegatorRaw struct { - Contract *Delegator // Generic contract binding to access the raw methods on -} - -// DelegatorCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type DelegatorCallerRaw struct { - Contract *DelegatorCaller // Generic read-only contract binding to access the raw methods on -} - -// DelegatorTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type DelegatorTransactorRaw struct { - Contract *DelegatorTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewDelegator creates a new instance of Delegator, bound to a specific deployed contract. 
-func NewDelegator(address common.Address, backend bind.ContractBackend) (*Delegator, error) { - contract, err := bindDelegator(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Delegator{DelegatorCaller: DelegatorCaller{contract: contract}, DelegatorTransactor: DelegatorTransactor{contract: contract}, DelegatorFilterer: DelegatorFilterer{contract: contract}}, nil -} - -// NewDelegatorCaller creates a new read-only instance of Delegator, bound to a specific deployed contract. -func NewDelegatorCaller(address common.Address, caller bind.ContractCaller) (*DelegatorCaller, error) { - contract, err := bindDelegator(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &DelegatorCaller{contract: contract}, nil -} - -// NewDelegatorTransactor creates a new write-only instance of Delegator, bound to a specific deployed contract. -func NewDelegatorTransactor(address common.Address, transactor bind.ContractTransactor) (*DelegatorTransactor, error) { - contract, err := bindDelegator(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &DelegatorTransactor{contract: contract}, nil -} - -// NewDelegatorFilterer creates a new log filterer instance of Delegator, bound to a specific deployed contract. -func NewDelegatorFilterer(address common.Address, filterer bind.ContractFilterer) (*DelegatorFilterer, error) { - contract, err := bindDelegator(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &DelegatorFilterer{contract: contract}, nil -} - -// bindDelegator binds a generic wrapper to an already deployed contract. -func bindDelegator(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(DelegatorABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Delegator *DelegatorRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Delegator.Contract.DelegatorCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Delegator *DelegatorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Delegator.Contract.DelegatorTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Delegator *DelegatorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Delegator.Contract.DelegatorTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Delegator *DelegatorCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Delegator.Contract.contract.Call(opts, result, method, params...) 
-} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Delegator *DelegatorTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Delegator.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Delegator *DelegatorTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Delegator.Contract.contract.Transact(opts, method, params...) -} - -// Call is a paid mutator transaction binding the contract method 0x1b8b921d. -// -// Solidity: function call(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactor) Call(opts *bind.TransactOpts, contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.contract.Transact(opts, "call", contractAddress, packedCall) -} - -// Call is a paid mutator transaction binding the contract method 0x1b8b921d. -// -// Solidity: function call(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorSession) Call(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.Call(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// Call is a paid mutator transaction binding the contract method 0x1b8b921d. -// -// Solidity: function call(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactorSession) Call(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.Call(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// DelegateCall is a paid mutator transaction binding the contract method 0x56e7b7aa. -// -// Solidity: function delegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactor) DelegateCall(opts *bind.TransactOpts, contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.contract.Transact(opts, "delegateCall", contractAddress, packedCall) -} - -// DelegateCall is a paid mutator transaction binding the contract method 0x56e7b7aa. -// -// Solidity: function delegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorSession) DelegateCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.DelegateCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// DelegateCall is a paid mutator transaction binding the contract method 0x56e7b7aa. -// -// Solidity: function delegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactorSession) DelegateCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.DelegateCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// LoopCall is a paid mutator transaction binding the contract method 0x62a1ffbd. 
-// -// Solidity: function loopCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactor) LoopCall(opts *bind.TransactOpts, contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.contract.Transact(opts, "loopCall", contractAddress, packedCall) -} - -// LoopCall is a paid mutator transaction binding the contract method 0x62a1ffbd. -// -// Solidity: function loopCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorSession) LoopCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.LoopCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// LoopCall is a paid mutator transaction binding the contract method 0x62a1ffbd. -// -// Solidity: function loopCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactorSession) LoopCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.LoopCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// LoopDelegateCall is a paid mutator transaction binding the contract method 0x757bd1e5. -// -// Solidity: function loopDelegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactor) LoopDelegateCall(opts *bind.TransactOpts, contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.contract.Transact(opts, "loopDelegateCall", contractAddress, packedCall) -} - -// LoopDelegateCall is a paid mutator transaction binding the contract method 0x757bd1e5. -// -// Solidity: function loopDelegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorSession) LoopDelegateCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.LoopDelegateCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} - -// LoopDelegateCall is a paid mutator transaction binding the contract method 0x757bd1e5. -// -// Solidity: function loopDelegateCall(address contractAddress, bytes packedCall) returns(bool) -func (_Delegator *DelegatorTransactorSession) LoopDelegateCall(contractAddress common.Address, packedCall []byte) (*types.Transaction, error) { - return _Delegator.Contract.LoopDelegateCall(&_Delegator.TransactOpts, contractAddress, packedCall) -} diff --git a/doc/polycli.md b/doc/polycli.md index ec61e9f7..292687e7 100644 --- a/doc/polycli.md +++ b/doc/polycli.md @@ -44,6 +44,8 @@ Polycli is a collection of tools that are meant to be useful while building, tes - [polycli hash](polycli_hash.md) - Provide common crypto hashing functions. +- [polycli leveldbbench](polycli_leveldbbench.md) - Perform a level db benchmark + - [polycli loadtest](polycli_loadtest.md) - Run a generic load test against an Eth/EVM style JSON-RPC endpoint. - [polycli metrics-to-dash](polycli_metrics-to-dash.md) - Create a dashboard from an Openmetrics / Prometheus response. diff --git a/doc/polycli_leveldbbench.md b/doc/polycli_leveldbbench.md new file mode 100644 index 00000000..48635cb6 --- /dev/null +++ b/doc/polycli_leveldbbench.md @@ -0,0 +1,179 @@ +# `polycli leveldbbench` + +> Auto-generated documentation. 
+ +## Table of Contents + +- [Description](#description) +- [Usage](#usage) +- [Flags](#flags) +- [See Also](#see-also) + +## Description + +Perform a level db benchmark + +```bash +polycli leveldbbench [flags] +``` + +## Usage + +This command is meant to give us a sense of the system level +performance for leveldb: + +```bash +go run main.go leveldbbench --degree-of-parallelism 2 | jq '.' > result.json +``` + +In many cases, we'll want to emulate the performance characteristics +of `bor` or `geth`. This is the basic IO pattern when `bor` is in sync: + +```text +Process Name = bor + Kbytes : count distribution + 0 -> 1 : 0 | | + 2 -> 3 : 0 | | + 4 -> 7 : 10239 |**************** | + 8 -> 15 : 25370 |****************************************| + 16 -> 31 : 7082 |*********** | + 32 -> 63 : 1241 |* | + 64 -> 127 : 58 | | + 128 -> 255 : 11 | | +``` + +This is the IO pattern when `bor` is getting in sync. + +```text +Process Name = bor + Kbytes : count distribution + 0 -> 1 : 0 | | + 2 -> 3 : 0 | | + 4 -> 7 : 23089 |************* | + 8 -> 15 : 70350 |****************************************| + 16 -> 31 : 11790 |****** | + 32 -> 63 : 1193 | | + 64 -> 127 : 204 | | + 128 -> 255 : 271 | | + 256 -> 511 : 1381 | | +``` + +This gives us a sense of the relative size of the IOPs. We'd also want +to get a sense of the read/write ratio. This is some sample data from +bor while syncing: + +```text +12:48:08 loadavg: 5.86 6.22 7.13 16/451 56297 + +READS WRITES R_Kb W_Kb PATH +307558 1277 4339783 30488 /var/lib/bor/data/bor/chaindata/ + +12:48:38 loadavg: 6.46 6.32 7.14 3/452 56298 + +READS WRITES R_Kb W_Kb PATH +309904 946 4399349 26051 /var/lib/bor/data/bor/chaindata/ + +``` + +During the same period of time this is what the IO looks like from a +node that's in sync. + +```text +12:48:05 loadavg: 1.55 1.85 2.03 18/416 88371 + +READS WRITES R_Kb W_Kb PATH +124530 488 1437436 12165 /var/lib/bor/data/bor/chaindata/ + +12:48:35 loadavg: 4.14 2.44 2.22 1/416 88371 + +READS WRITES R_Kb W_Kb PATH +81282 215 823530 4610 /var/lib/bor/data/bor/chaindata/ + +``` + +If we want to simulate `bor` behavior, we can leverage this data to +configure the leveldb benchmark tool. + + +| Syncing | Reads | Writes | Read (kb) | Write (kb) | RW Ratio | kb/r | kb/w | +|---------|---------|--------|-----------|------------|----------|------|------| +| TRUE | 307,558 | 1,277 | 4,339,783 | 30,488 | 241 | 14.1 | 23.9 | +| TRUE | 309,904 | 946 | 7,399,349 | 26,051 | 328 | 23.9 | 27.5 | +| FALSE | 124,530 | 488 | 1,437,436 | 12,165 | 255 | 11.5 | 24.9 | +| FALSE | 51,282 | 215 | 823,530 | 4,610 | 239 | 16.1 | 21.4 | + +The number of IOps while syncing is a lot higher. The only other +obvious difference is that the IOp size is a bit larger while syncing +as well. 
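As a concrete illustration of how these measurements could be mapped onto the benchmark's flags, here is one possible invocation; the specific limits are hypothetical and simply chosen to mirror the roughly 250:1 read/write ratio measured on a synced node (the flags themselves are documented in the Flags section below):

```bash
# Sketch: approximate a synced bor node's ~250:1 read/write ratio by issuing
# 250 reads for every entry written. The exact counts and output file name are
# illustrative, not a recommended configuration.
go run main.go leveldbbench \
  --write-limit 100000 \
  --read-limit 25000000 \
  --degree-of-parallelism 2 | jq '.' > synced-profile.json
```

The `--size-distribution` flag could similarly be derived from the bucket counts shown above to approximate the value sizes seen in practice.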
+ +- Syncing + - Read Write Ratio - 275:1 + - Small IOp - 10kb + - Large IOp - 256kb + - Small Large Ratio - 10:1 +- Synced + - Read Write Ratio - 250:1 + - Small IOp - 10kb + - Large IOp - 32kb + - Small Large Ratio - 10:1 + +```text +7:58PM DBG buckets bucket=0 count=9559791821 end=1 start=0 +7:58PM DBG buckets bucket=1 count=141033 end=3 start=2 +7:58PM DBG buckets bucket=2 count=92899 end=7 start=4 +7:58PM DBG buckets bucket=3 count=256655 end=15 start=8 +7:58PM DBG buckets bucket=4 count=262589 end=31 start=16 +7:58PM DBG buckets bucket=5 count=191353 end=63 start=32 +7:58PM DBG buckets bucket=6 count=99519 end=127 start=64 +7:58PM DBG buckets bucket=7 count=74161 end=255 start=128 +7:58PM DBG buckets bucket=8 count=17426 end=511 start=256 +7:58PM DBG buckets bucket=9 count=692 end=1023 start=512 +7:58PM DBG buckets bucket=10 count=989 end=2047 start=1024 +7:58PM DBG buckets bucket=13 count=1 end=16383 start=8192 +7:58PM INF recorded result desc="full scan" testDuration=10381196.479925 +7:58PM DBG recorded result result={"Description":"full scan","EndTime":"2023-07-17T19:58:05.396257711Z","OpCount":9557081144,"OpRate":920614.609547304,"StartTime":"2023-07-17T17:05:04.199777776Z","Stats":{"AliveIterators":0,"AliveSnapshots":0,"BlockCache":{"Buckets":2048,"DelCount":259134854,"GrowCount":9,"HitCount":4,"MissCount":262147633,"Nodes":33294,"SetCount":259168148,"ShrinkCount":2,"Size":268427343},"BlockCacheSize":268427343,"FileCache":{"Buckets":16,"DelCount":536037,"GrowCount":0,"HitCount":2,"MissCount":536537,"Nodes":500,"SetCount":536537,"ShrinkCount":0,"Size":500},"IORead":1092651461848,"IOWrite":13032122717,"Level0Comp":0,"LevelDurations":[0,0,546151937,15675194130,100457643600,40581548153,0],"LevelRead":[0,0,45189458,1233235440,8351239571,3376108236,0],"LevelSizes":[0,103263963,1048356844,10484866671,104856767171,180600915234,797187827055],"LevelTablesCounts":[0,51,665,7066,53522,95777,371946],"LevelWrite":[0,0,45159786,1230799439,8328970986,3371359447,0],"MemComp":0,"NonLevel0Comp":1433,"OpenedTablesCount":500,"SeekComp":0,"WriteDelayCount":0,"WriteDelayDuration":0,"WritePaused":false},"TestDuration":10381196479925,"ValueDist":null} + +``` + +## Flags + +```bash + --cache-size int the number of megabytes to use as our internal cache size (default 512) + --db-path string the path of the database that we'll use for testing (default "_benchmark_db") + --degree-of-parallelism uint8 The number of concurrent goroutines we'll use (default 2) + --dont-fill-read-cache if false, then random reads will be cached + --full-scan-mode if true, the application will scan the full database as fast as possible and print a summary + --handles int defines the capacity of the open files caching. Use -1 for zero, this has the same effect as specifying NoCacher to OpenFilesCacher.
(default 500) + -h, --help help for leveldbbench + --key-size uint The byte length of the keys that we'll use (default 32) + --nil-read-opts if true we'll use nil read opt (this is what geth/bor does) + --no-merge-write allows disabling write merge + --overwrite-count uint the number of times to overwrite the data (default 5) + --read-limit uint the number of reads will attempt to complete in a given test (default 10000000) + --read-only if true, we'll skip all the write operations and open the DB in read only mode + --read-strict if true the rand reads will be made in strict mode + --sequential-reads if true we'll perform reads sequentially + --sequential-writes if true we'll perform writes in somewhat sequential manner + --size-distribution string the size distribution to use while testing (default "0-1:2347864,2-3:804394856,4-7:541267689,8-15:738828593,16-31:261122372,32-63:1063470933,64-127:3584745195,128-255:1605760137,256-511:316074206,512-1023:312887514,1024-2047:328894149,2048-4095:141180,4096-8191:92789,8192-16383:256060,16384-32767:261806,32768-65535:191032,65536-131071:99715,131072-262143:73782,262144-524287:17552,524288-1048575:717,1048576-2097151:995,2097152-4194303:1,8388608-16777215:1") + --sync-writes sync each write + --write-limit uint The number of entries to write in the db (default 1000000) + --write-zero if true, we'll write 0s rather than random data +``` + +The command also inherits flags from parent commands. + +```bash + --config string config file (default is $HOME/.polygon-cli.yaml) + --pretty-logs Should logs be in pretty format or JSON (default true) + -v, --verbosity int 0 - Silent + 100 Fatal + 200 Error + 300 Warning + 400 Info + 500 Debug + 600 Trace (default 400) +``` + +## See also + +- [polycli](polycli.md) - A Swiss Army knife of blockchain tools. diff --git a/doc/polycli_loadtest.md b/doc/polycli_loadtest.md index dbc0243c..162a91b8 100644 --- a/doc/polycli_loadtest.md +++ b/doc/polycli_loadtest.md @@ -81,26 +81,28 @@ $ polycli loadtest --app-id 0 --data-avail --verbosity 700 --chain-id 42 --concu ## Flags ```bash - --adaptive-backoff-factor float When we detect congestion we will use this factor to determine how much we slow down (default 2) - --adaptive-cycle-duration-seconds uint Duration in seconds that adaptive load test will review txpool and determine whether to increase/decrease rate limit (default 10) - --adaptive-rate-limit Loadtest automatically adjusts request rate to maximize utilization but prevent congestion - --adaptive-rate-limit-increment uint Additive increment to rate of requests if txpool below steady state size (default 50) + --adaptive-backoff-factor float When using adaptive rate limiting, this flag controls our multiplicative decrease value. (default 2) + --adaptive-cycle-duration-seconds uint When using adaptive rate limiting, this flag controls how often we check the queue size and adjust the rates (default 10) + --adaptive-rate-limit Enable AIMD-style congestion control to automatically adjust request rate + --adaptive-rate-limit-increment uint When using adaptive rate limiting, this flag controls the size of the additive increases. (default 50) --batch-size uint Number of batches to perform at a time for receipt fetching. Default is 999 requests at a time. 
  -b, --byte-count uint                            If we're in store mode, this controls how many bytes we'll try to store in our contract (default 1024)
-      --chain-id uint                              The chain id for the transactions that we're going to send
- -c, --concurrency int                            Number of multiple requests to perform at a time. Default is one request at a time. (default 1)
-      --contract-call-block-interval uint          The number of blocks to wait between contract calls (default 1)
-      --contract-call-nb-blocks-to-wait-for uint   The number of blocks to wait for before giving up on a contract call (default 30)
-      --data-avail                                 Is this a test of avail rather than an EVM / Geth Chain
-      --del-address string                         A pre-deployed delegator contract address
-      --force-contract-deploy                      Some loadtest modes don't require a contract deployment. Set this flag to true to force contract deployments. This will still respect the --del-address and --il-address flags.
+      --call-only                                  When using this mode, rather than sending a transaction, we'll just call. This mode is incompatible with adaptive rate limiting, summarization, and a few other features.
+      --chain-id uint                              The chain id for the transactions.
+ -c, --concurrency int                            Number of requests to perform concurrently. Default is one request at a time. (default 1)
+      --contract-call-block-interval uint          During deployment, this flag controls if we should check every block, every other block, or every nth block to determine that the contract has been deployed (default 1)
+      --contract-call-nb-blocks-to-wait-for uint   The number of blocks to wait for before giving up on a contract deployment (default 30)
+      --data-avail                                 [DEPRECATED] Enables Avail load testing
+      --erc20-address string                       The address of a pre-deployed erc 20 contract
+      --erc721-address string                      The address of a pre-deployed erc 721 contract
+      --force-contract-deploy                      Some load test modes don't require a contract deployment. Set this flag to true to force contract deployments. This will still respect the --lt-address flags.
  -f, --function --mode f                          A specific function to be called if running with --mode f or a specific precompiled contract when running with `--mode a` (default 1)
-      --gas-limit uint                             In environments where the gas limit can't be computed on the fly, we can specify it manually
-      --gas-price uint                             In environments where the gas price can't be estimated, we can specify it manually
+      --gas-limit uint                             In environments where the gas limit can't be computed on the fly, we can specify it manually. This can also be used to avoid eth_estimateGas
+      --gas-price uint                             In environments where the gas price can't be determined automatically, we can specify it manually
  -h, --help                                       help for loadtest
- -i, --iterations uint                            If we're making contract calls, this controls how many times the contract will execute the instruction in a loop. If we are making ERC721 Mints, this indicated the minting batch size (default 100)
+ -i, --iterations uint                            If we're making contract calls, this controls how many times the contract will execute the instruction in a loop. If we are making ERC721 Mints, this indicates the minting batch size (default 1)
      --legacy                                     Send a legacy transaction instead of an EIP1559 transaction.
-      --lt-address string                          A pre-deployed load test contract address
+      --lt-address string                          The address of a pre-deployed load test contract
  -m, --mode string                                The testing mode to use. It can be multiple like: "tcdf"
                                                   t - sending transactions
                                                   d - deploy contract
@@ -109,20 +111,21 @@ $ polycli loadtest --app-id 0 --data-avail --verbosity 700 --chain-id 42 --concu
                                                   p - call random precompiled contracts
                                                   a - call a specific precompiled contract address
                                                   s - store mode
-                                                  l - long running mode
                                                   r - random modes
                                                   2 - ERC20 Transfers
-                                                  7 - ERC721 Mints (default "t")
+                                                  7 - ERC721 Mints
+                                                  R - total recall (default "t")
      --output-mode string                         Format mode for summary output (json | text) (default "text")
      --priority-gas-price uint                    Specify Gas Tip Price in the case of EIP-1559
-      --private-key string                         The hex encoded private key that we'll use to sending transactions (default "42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa")
+      --private-key string                         The hex encoded private key that we'll use to send transactions (default "42b6e34dc21598a807dc19d7784c71b2a7a01f6480dc6f58258f78e539f1a1fa")
      --rate-limit float                           An overall limit to the number of requests per second. Give a number less than zero to remove this limit all together (default 4)
+      --recall-blocks uint                         The number of blocks that we'll attempt to fetch for recall (default 50)
  -n, --requests int                               Number of requests to perform for the benchmarking session. The default is to just perform a single request which usually leads to non-representative benchmarking results. (default 1)
      --seed int                                   A seed for generating random values and addresses (default 123456)
      --send-amount string                         The amount of wei that we'll send every transaction (default "0x38D7EA4C68000")
-      --steady-state-tx-pool-size uint             Transaction Pool queue size which we use to either increase/decrease requests per second (default 1000)
-      --summarize                                  Should we produce an execution summary after the load test has finished. If you're running a large loadtest, this can take a long time
- -t, --time-limit int                             Maximum number of seconds to spend for benchmarking. Use this to benchmark within a fixed total amount of time. Per default there is no timelimit. (default -1)
+      --steady-state-tx-pool-size uint             When using adaptive rate limiting, this value sets the target queue size. If the queue is smaller than this value, we'll speed up. If the queue is larger than this value, we'll back off. (default 1000)
+      --summarize                                  Should we produce an execution summary after the load test has finished. If you're running a large load test, this can take a long time
+ -t, --time-limit int                             Maximum number of seconds to spend for benchmarking. Use this to benchmark within a fixed total amount of time. Per default there is no time limit. (default -1)
      --to-address string                          The address that we're going to send to (default "0xDEADBEEFDEADBEEFDEADBEEFDEADBEEFDEADBEEF")
      --to-random                                  When doing a transfer test, should we send to random addresses rather than DEADBEEFx5
 ```
diff --git a/go.mod b/go.mod
index 90c1f8d8..f2cbae0a 100644
--- a/go.mod
+++ b/go.mod
@@ -29,6 +29,7 @@ require (
 require (
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/google/gofuzz v1.2.0
+	github.com/schollz/progressbar/v3 v3.13.1
 	github.com/jedib0t/go-pretty/v6 v6.4.6
 	github.com/xeipuuv/gojsonschema v1.2.0
 )
@@ -112,6 +113,7 @@ require (
 	github.com/miekg/dns v1.1.53 // indirect
 	github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect
 	github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect
+	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
 	github.com/mitchellh/go-homedir v1.1.0 // indirect
 	github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect
 	github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect
@@ -157,6 +159,7 @@ require (
 	golang.org/x/net v0.11.0 // indirect
 	golang.org/x/oauth2 v0.8.0 // indirect
 	golang.org/x/sync v0.3.0 // indirect
+	golang.org/x/term v0.9.0 // indirect
 	golang.org/x/tools v0.10.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/api v0.114.0 // indirect
@@ -207,7 +210,7 @@ require (
 	github.com/magiconair/properties v1.8.6 // indirect
 	github.com/mattn/go-colorable v0.1.12 // indirect
 	github.com/mattn/go-isatty v0.0.19 // indirect
-	github.com/mattn/go-runewidth v0.0.13 // indirect
+	github.com/mattn/go-runewidth v0.0.14 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
 	github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect
 	github.com/minio/sha256-simd v1.0.1 // indirect
@@ -237,7 +240,7 @@ require (
 	github.com/spf13/cast v1.5.0 // indirect
 	github.com/spf13/jwalterweatherman v1.1.0 // indirect
 	github.com/subosito/gotenv v1.3.0 // indirect
-	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a // indirect
+	github.com/syndtr/goleveldb v1.0.1-0.20220614013038-64ee5596c38a
 	github.com/tklauser/go-sysconf v0.3.5 // indirect
 	github.com/tklauser/numcpus v0.2.2 // indirect
 	github.com/vedhavyas/go-subkey v1.0.3 // indirect
diff --git a/go.sum b/go.sum
index dec0125d..4d5a01da 100644
--- a/go.sum
+++ b/go.sum
@@ -482,6 +482,7 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
+github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213/go.mod h1:vNUNkEQ1e29fT/6vq2aBdFsgNPmy8qMdSay1npru+Sw=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
@@ -549,12 +550,13 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
 github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
 github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
 github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
-github.com/mattn/go-runewidth v0.0.13 h1:lTGmDsbAYt5DmK6OnoV7EuIF1wEIFAcxld6ypU4OSgU=
-github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
+github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
+github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -576,6 +578,8 @@ github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
+github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
 github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
@@ -744,6 +748,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
 github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
+github.com/schollz/progressbar/v3 v3.13.1 h1:o8rySDYiQ59Mwzy2FELeHY5ZARXZTVJC7iHD6PEFUiE=
+github.com/schollz/progressbar/v3 v3.13.1/go.mod h1:xvrbki8kfT1fzWzBT/UZd9L6GA+jdL7HAgq2RFnO6fQ=
 github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
 github.com/secure-systems-lab/go-securesystemslib v0.6.0 h1:T65atpAVCJQK14UA57LMdZGpHi4QYSH/9FZyNGqMYIA=
 github.com/secure-systems-lab/go-securesystemslib v0.6.0/go.mod h1:8Mtpo9JKks/qhPG4HGZ2LGMvrPbzuxwfz/f/zLfEWkk=
@@ -1120,6 +1126,7 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1128,6 +1135,9 @@ golang.org/x/sys v0.9.0 h1:KS/R3tvhPqvJvwcKfnBHJwwthS11LRhmM5D59eEXa0s=
 golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U=
+golang.org/x/term v0.9.0 h1:GRRCnKYhdQrD8kfRAdQ6Zcw1P0OcELxGLKJvtjVMZ28=
+golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
diff --git a/main.go b/main.go
index 4e7de1c2..d11a07d0 100644
--- a/main.go
+++ b/main.go
@@ -1,19 +1,3 @@
-/*
-Copyright © 2022 Polygon
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as published by
-the Free Software Foundation, either version 3 of the License, or
-(at your option) any later version.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU Lesser General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this program. If not, see .
-*/
 package main
 
 import "github.com/maticnetwork/polygon-cli/cmd"
diff --git a/rpctypes/rpctypes.go b/rpctypes/rpctypes.go
index 45654197..8f1ee654 100644
--- a/rpctypes/rpctypes.go
+++ b/rpctypes/rpctypes.go
@@ -73,6 +73,10 @@ type (
 
 		// EIP 2718 Type field?
 		Type RawQuantityResponse `json:"type"`
+
+		ChainID RawQuantityResponse `json:"chainId"`
+
+		AccessList []any `json:"accessList"`
 	}
 
 	RawBlockResponse struct {
@@ -224,6 +228,8 @@
 		Type() uint64
 		MaxPriorityFeePerGas() uint64
 		MaxFeePerGas() uint64
+		ChainID() uint64
+		BlockNumber() *big.Int
 		V() *big.Int
 		R() *big.Int
 		S() *big.Int
@@ -348,6 +354,9 @@ func (i *implPolyBlock) MarshalJSON() ([]byte, error) {
 func (i *implPolyTransaction) GasPrice() *big.Int {
 	return i.inner.GasPrice.ToBigInt()
 }
+func (i *implPolyTransaction) BlockNumber() *big.Int {
+	return i.inner.BlockNumber.ToBigInt()
+}
 func (i *implPolyTransaction) Gas() uint64 {
 	return i.inner.Gas.ToUint64()
 }
@@ -360,6 +369,9 @@ func (i *implPolyTransaction) MaxFeePerGas() uint64 {
 func (i *implPolyTransaction) Nonce() uint64 {
 	return i.inner.Nonce.ToUint64()
 }
+func (i *implPolyTransaction) ChainID() uint64 {
+	return i.inner.ChainID.ToUint64()
+}
 func (i *implPolyTransaction) Type() uint64 {
 	return i.inner.Type.ToUint64()
 }
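The full-scan output reproduced in the polycli_leveldbbench.md hunk above reports counts per power-of-two size bucket (0-1, 2-3, 4-7, ..., 8192-16383). As a rough illustration of that bucketing rule, here is a small Go sketch; it assumes only what the logged start/end columns show and is not the benchmark's own code.

```go
package main

// Illustrative sketch of the power-of-two bucketing implied by the full-scan
// summary above: sizes 0-1 land in bucket 0, 2-3 in bucket 1, 4-7 in bucket 2,
// and 8192-16383 in bucket 13. This is not the tool's own implementation.

import (
	"fmt"
	"math/bits"
)

// bucketFor returns the bucket index for a value length in bytes.
func bucketFor(size uint64) int {
	if size <= 1 {
		return 0
	}
	// bits.Len64(2) == 2 -> bucket 1, bits.Len64(4) == 3 -> bucket 2, ...
	return bits.Len64(size) - 1
}

func main() {
	for _, s := range []uint64{0, 1, 3, 7, 10000} {
		fmt.Printf("value of %d bytes -> bucket %d\n", s, bucketFor(s))
	}
}
```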
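The --size-distribution flag documented in the same hunk takes a comma-separated list of start-end:frequency ranges. The following hypothetical Go sketch parses a string in that format and samples a value size in proportion to the listed frequencies; the helper names (parseDistribution, randomSize) and the tiny example distribution are assumptions for illustration, not the tool's implementation.

```go
package main

import (
	"fmt"
	"math/rand"
	"strconv"
	"strings"
)

type sizeRange struct {
	start, end, freq int
}

// parseDistribution splits "0-1:10,2-3:30,..." into ranges and a total frequency.
func parseDistribution(s string) ([]sizeRange, int, error) {
	var ranges []sizeRange
	total := 0
	for _, field := range strings.Split(s, ",") {
		rangePart, freqPart, ok := strings.Cut(field, ":")
		if !ok {
			return nil, 0, fmt.Errorf("missing ':' in %q", field)
		}
		startPart, endPart, ok := strings.Cut(rangePart, "-")
		if !ok {
			return nil, 0, fmt.Errorf("missing '-' in %q", rangePart)
		}
		start, err := strconv.Atoi(startPart)
		if err != nil {
			return nil, 0, err
		}
		end, err := strconv.Atoi(endPart)
		if err != nil {
			return nil, 0, err
		}
		freq, err := strconv.Atoi(freqPart)
		if err != nil {
			return nil, 0, err
		}
		ranges = append(ranges, sizeRange{start: start, end: end, freq: freq})
		total += freq
	}
	return ranges, total, nil
}

// randomSize picks a range with probability proportional to its frequency,
// then a uniform size within that range.
func randomSize(ranges []sizeRange, total int, r *rand.Rand) int {
	n := r.Intn(total)
	for _, sr := range ranges {
		if n < sr.freq {
			return sr.start + r.Intn(sr.end-sr.start+1)
		}
		n -= sr.freq
	}
	return ranges[len(ranges)-1].end // unreachable when total is consistent
}

func main() {
	ranges, total, err := parseDistribution("0-1:10,2-3:30,4-7:60")
	if err != nil {
		panic(err)
	}
	r := rand.New(rand.NewSource(1))
	fmt.Println(randomSize(ranges, total, r))
}
```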