refactor(dashboard): added unit tests for dashboard helpers #1309

Open · wants to merge 1 commit into base: staging
1 change: 1 addition & 0 deletions backend/go.mod
@@ -7,6 +7,7 @@ require (
cloud.google.com/go/secretmanager v1.11.5
firebase.google.com/go/v4 v4.14.1
github.com/ClickHouse/clickhouse-go/v2 v2.30.3
github.com/DATA-DOG/go-sqlmock v1.5.2
github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21
github.com/Tangui-Bitfly/ethsimtracer v0.0.0-20241031103622-e76546c3d9c1
github.com/alexedwards/scs/redisstore v0.0.0-20240316134038-7e11d57e8885
4 changes: 3 additions & 1 deletion backend/go.sum
@@ -28,8 +28,9 @@ github.com/ClickHouse/ch-go v0.64.1 h1:FWpP+QU4KchgzpEekuv8YoI/fUc4H2r6Bwc5Wwrzv
github.com/ClickHouse/ch-go v0.64.1/go.mod h1:RBUynvczWwVzhS6Up9lPKlH1mrk4UAmle6uzCiW4Pkc=
github.com/ClickHouse/clickhouse-go/v2 v2.30.3 h1:m0VZqUNCJ7lOmZfmOE3HZUMixZHftKmZLqcrz2+UVHk=
github.com/ClickHouse/clickhouse-go/v2 v2.30.3/go.mod h1:V1aZaG0ctMbd8KVi+D4loXi97duWYtHiQHMCgipKJcI=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/DataDog/zstd v1.5.5 h1:oWf5W7GtOLgp6bciQYDmhHHjdhYkALu6S/5Ni9ZgSvQ=
github.com/DataDog/zstd v1.5.5/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw=
github.com/Gurpartap/storekit-go v0.0.0-20201205024111-36b6cd5c6a21 h1:HcdvlzaQ4CJfH7xbfJZ3ZHN//BTEpId46iKEMuP3wHE=
@@ -617,6 +618,7 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
219 changes: 219 additions & 0 deletions backend/pkg/api/data_access/vdb_helpers.go
@@ -5,6 +5,7 @@ import (
"database/sql"
"fmt"
"math/big"
"reflect"
"time"

"github.com/doug-martin/goqu/v9"
@@ -14,6 +15,7 @@ import (
"github.com/gobitfly/beaconchain/pkg/commons/cache"
"github.com/gobitfly/beaconchain/pkg/commons/price"
"github.com/gobitfly/beaconchain/pkg/commons/utils"
"github.com/jmoiron/sqlx"
"github.com/lib/pq"
"github.com/pkg/errors"
"github.com/shopspring/decimal"
@@ -400,3 +402,220 @@ func (d *DataAccessService) calculateValidatorDashboardBalance(ctx context.Conte
}
return balances, nil
}

func (d *DataAccessService) GetLatestExportedChartTs(ctx context.Context, aggregation enums.ChartAggregation) (uint64, error) {
view, dateColumn, err := d.getViewAndDateColumn(aggregation)
if err != nil {
return 0, err
}

query := fmt.Sprintf(`SELECT max(%s) FROM %s`, dateColumn, view)
Contributor @LuccaBitfly commented (Feb 7, 2025):
Question: shouldn't we use goqu for all queries?

var ts time.Time
err = d.clickhouseReader.GetContext(ctx, &ts, query)
if err != nil {
return 0, fmt.Errorf("error retrieving latest exported chart timestamp: %w", err)
}

return uint64(ts.Unix()), nil
}
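
A minimal sketch of the goqu variant the comment above asks about, replacing the fmt.Sprintf with a built dataset. The helper name is hypothetical and not part of this PR; it assumes the file's existing goqu import.

// buildLatestExportedChartTsQuery builds SELECT max(<dateColumn>) FROM <view>
// through goqu instead of string formatting. Sketch only.
func buildLatestExportedChartTsQuery(view, dateColumn string) (string, []interface{}, error) {
    return goqu.Dialect("postgres").
        Select(goqu.MAX(goqu.I(dateColumn))). // SELECT max(<dateColumn>)
        From(view).                           // FROM <view>
        Prepared(true).
        ToSQL()
}

GetLatestExportedChartTs could then pass the resulting query and args to d.clickhouseReader.GetContext as it does today.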

// --- Generic Query Execution ---
func executeQuery[T any](ctx context.Context, db *sqlx.DB, ds *goqu.SelectDataset) (T, error) {
query, args, err := ds.Prepared(true).ToSQL()
if err != nil {
var zero T
return zero, fmt.Errorf("error preparing query: %w", err)
}

var result T
resultType := reflect.TypeOf(result)

if resultType != nil && resultType.Kind() == reflect.Slice {
err = db.SelectContext(ctx, &result, query, args...)
} else {
err = db.GetContext(ctx, &result, query, args...)
}
Comment on lines +430 to +437

Contributor:
Issue: I'd do two separate functions for select and get rather than doing a runtime type check for every query, since reflection does have a performance impact.

Contributor:
Also we're gonna need something similar for ExecContext, right? executeQuery as a func name might be a bit misleading then.


if err != nil {
return result, fmt.Errorf("error executing query: %w", err)
}

return result, nil
}
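
Following the two comments above, a sketch of what splitting the helper into dedicated select/get/exec functions might look like. The names selectQuery, getQuery, and execQuery are hypothetical; the snippet assumes the file's existing imports plus database/sql.

// selectQuery scans multiple rows into a slice, avoiding the reflect check.
func selectQuery[T any](ctx context.Context, db *sqlx.DB, ds *goqu.SelectDataset) ([]T, error) {
    query, args, err := ds.Prepared(true).ToSQL()
    if err != nil {
        return nil, fmt.Errorf("error preparing query: %w", err)
    }
    var result []T
    if err := db.SelectContext(ctx, &result, query, args...); err != nil {
        return nil, fmt.Errorf("error executing query: %w", err)
    }
    return result, nil
}

// getQuery scans a single row into a value.
func getQuery[T any](ctx context.Context, db *sqlx.DB, ds *goqu.SelectDataset) (T, error) {
    var result T
    query, args, err := ds.Prepared(true).ToSQL()
    if err != nil {
        return result, fmt.Errorf("error preparing query: %w", err)
    }
    if err := db.GetContext(ctx, &result, query, args...); err != nil {
        return result, fmt.Errorf("error executing query: %w", err)
    }
    return result, nil
}

// execQuery covers statements that return no rows (the ExecContext case).
func execQuery(ctx context.Context, db *sqlx.DB, query string, args ...interface{}) (sql.Result, error) {
    res, err := db.ExecContext(ctx, query, args...)
    if err != nil {
        return nil, fmt.Errorf("error executing statement: %w", err)
    }
    return res, nil
}

Call sites such as executeQuery[[]SyncCommitteeResult](...) would become selectQuery[SyncCommitteeResult](...), and executeQuery[uint64](...) would become getQuery[uint64](...).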

// --- Structs ---
type SyncCommitteeResult struct {
ValidatorIndex uint64 `db:"validatorindex"`
Period uint64 `db:"period"`
}

// --- Sync Committee Functions ---
func (d *DataAccessService) getCurrentAndUpcomingSyncCommittees(ctx context.Context, latestEpoch uint64) (map[uint64]bool, map[uint64]bool, error) {
ds := buildSyncCommitteeQuery(latestEpoch)
queryResult, err := executeQuery[[]SyncCommitteeResult](ctx, d.readerDb, ds)
if err != nil {
return nil, nil, err
}

current, upcoming := processSyncCommitteeResults(queryResult, utils.SyncPeriodOfEpoch(latestEpoch))
return current, upcoming, nil
}

func buildSyncCommitteeQuery(latestEpoch uint64) *goqu.SelectDataset {
currentSyncPeriod := utils.SyncPeriodOfEpoch(latestEpoch)
return goqu.Dialect("postgres").
Select(
goqu.L("validatorindex"),
goqu.L("period"),
).
From("sync_committees").
Where(goqu.L("period IN (?, ?)", currentSyncPeriod, currentSyncPeriod+1))
}

func processSyncCommitteeResults(queryResult []SyncCommitteeResult, currentSyncPeriod uint64) (map[uint64]bool, map[uint64]bool) {
currentSyncCommitteeValidators := make(map[uint64]bool)
upcomingSyncCommitteeValidators := make(map[uint64]bool)

for _, entry := range queryResult {
if entry.Period == currentSyncPeriod {
currentSyncCommitteeValidators[entry.ValidatorIndex] = true
} else {
upcomingSyncCommitteeValidators[entry.ValidatorIndex] = true
}
}

return currentSyncCommitteeValidators, upcomingSyncCommitteeValidators
}
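
Since this PR adds github.com/DATA-DOG/go-sqlmock to go.mod, here is a minimal sketch of how executeQuery could be unit-tested against a mocked *sqlx.DB. The test name, expectations, and the dataaccess package name are assumptions, not the tests actually included in this PR.

package dataaccess // assumption: the actual package name may differ

import (
    "context"
    "testing"

    sqlmock "github.com/DATA-DOG/go-sqlmock"
    "github.com/doug-martin/goqu/v9"
    "github.com/jmoiron/sqlx"
)

func TestExecuteQuerySyncCommittees(t *testing.T) {
    // Back a *sqlx.DB with sqlmock so no real database is needed.
    db, mock, err := sqlmock.New()
    if err != nil {
        t.Fatalf("failed to open sqlmock: %v", err)
    }
    defer db.Close()
    sqlxDB := sqlx.NewDb(db, "sqlmock")

    // Any query touching sync_committees returns two fixed rows.
    rows := sqlmock.NewRows([]string{"validatorindex", "period"}).
        AddRow(int64(1), int64(100)).
        AddRow(int64(2), int64(101))
    mock.ExpectQuery("sync_committees").WillReturnRows(rows)

    ds := goqu.Dialect("postgres").
        Select(goqu.L("validatorindex"), goqu.L("period")).
        From("sync_committees").
        Where(goqu.L("period IN (?, ?)", 100, 101))

    result, err := executeQuery[[]SyncCommitteeResult](context.Background(), sqlxDB, ds)
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if len(result) != 2 {
        t.Fatalf("expected 2 rows, got %d", len(result))
    }
    if err := mock.ExpectationsWereMet(); err != nil {
        t.Errorf("unmet expectations: %v", err)
    }
}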

// --- Epoch Start Functions ---
func (d *DataAccessService) getEpochStart(ctx context.Context, period enums.TimePeriod) (uint64, error) {
ds, err := buildEpochStartQuery(d, period)
if err != nil {
return 0, err
}

epochStart, err := executeQuery[uint64](ctx, d.clickhouseReader, ds)
if err != nil {
return 0, err
}

return epochStart, nil
}

func buildEpochStartQuery(d *DataAccessService, period enums.TimePeriod) (*goqu.SelectDataset, error) {
clickhouseTable, _, err := d.getTablesForPeriod(period)
if err != nil {
return nil, err
}

ds := goqu.Dialect("postgres").
Select(goqu.L("epoch_start")).
From(goqu.L(fmt.Sprintf("%s FINAL", clickhouseTable))).
Order(goqu.L("epoch_start").Asc()).
Limit(1)

return ds, nil
}

// --- Past Sync Committees Functions ---
func (d *DataAccessService) getPastSyncCommittees(ctx context.Context, indices []uint64, epochStart uint64, latestEpoch uint64) (map[uint64]uint64, error) {
ds := buildPastSyncCommitteesQuery(indices, epochStart, latestEpoch)
validatorIndices, err := executeQuery[[]uint64](ctx, d.alloyReader, ds)
if err != nil {
return nil, err
}

validatorCountMap := processPastSyncCommitteesResults(validatorIndices)
return validatorCountMap, nil
Comment on lines +528 to +529

Contributor:
Nitpick (non-blocking):

Suggested change
-	validatorCountMap := processPastSyncCommitteesResults(validatorIndices)
-	return validatorCountMap, nil
+	return processPastSyncCommitteesResults(validatorIndices), nil
}

func buildPastSyncCommitteesQuery(indices []uint64, epochStart, latestEpoch uint64) *goqu.SelectDataset {
pastSyncPeriodCutoff := utils.SyncPeriodOfEpoch(epochStart)
currentSyncPeriod := utils.SyncPeriodOfEpoch(latestEpoch)
Comment on lines +533 to +534

Contributor:
Question: You have a bit of logic in your build query func? Do we want this? Or should we maybe pass this to the builder func?


return goqu.Dialect("postgres").
Select(goqu.L("sc.validatorindex")).
From(goqu.L("sync_committees sc")).
Where(goqu.L("period >= ? AND period < ? AND validatorindex = ANY(?)", pastSyncPeriodCutoff, currentSyncPeriod, pq.Array(indices)))
}
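
On the question above about logic in the builder: a sketch of the alternative where the caller computes the sync periods and the builder only assembles the SQL. The function name is hypothetical.

// buildPastSyncCommitteesQueryForPeriods takes already-computed period bounds
// instead of deriving them from epochs inside the builder.
func buildPastSyncCommitteesQueryForPeriods(pastSyncPeriodCutoff, currentSyncPeriod uint64, indices []uint64) *goqu.SelectDataset {
    return goqu.Dialect("postgres").
        Select(goqu.L("sc.validatorindex")).
        From(goqu.L("sync_committees sc")).
        Where(goqu.L("period >= ? AND period < ? AND validatorindex = ANY(?)", pastSyncPeriodCutoff, currentSyncPeriod, pq.Array(indices)))
}

getPastSyncCommittees would then call utils.SyncPeriodOfEpoch(epochStart) and utils.SyncPeriodOfEpoch(latestEpoch) itself before invoking the builder.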

func processPastSyncCommitteesResults(validatorIndices []uint64) map[uint64]uint64 {
validatorCountMap := make(map[uint64]uint64)
for _, validatorIndex := range validatorIndices {
validatorCountMap[validatorIndex]++
}
return validatorCountMap
}

func (d *DataAccessService) getTablesForPeriod(period enums.TimePeriod) (string, int, error) {
table := ""
hours := 0

switch period {
case enums.TimePeriods.Last1h:
table = "validator_dashboard_data_rolling_1h"
hours = 1
case enums.TimePeriods.Last24h:
table = "validator_dashboard_data_rolling_24h"
hours = 24
case enums.TimePeriods.Last7d:
table = "validator_dashboard_data_rolling_7d"
hours = 7 * 24
case enums.TimePeriods.Last30d:
table = "validator_dashboard_data_rolling_30d"
hours = 30 * 24
case enums.TimePeriods.AllTime:
table = "validator_dashboard_data_rolling_total"
hours = -1
default:
return "", 0, fmt.Errorf("not-implemented time period: %v", period)
}

return table, hours, nil
}

func (d *DataAccessService) getTableAndDateColumn(aggregation enums.ChartAggregation) (string, string, error) {
var table, dateColumn string

switch aggregation {
case enums.IntervalEpoch:
table = "validator_dashboard_data_epoch"
dateColumn = "epoch_timestamp"
case enums.IntervalHourly:
table = "validator_dashboard_data_hourly"
dateColumn = "t"
case enums.IntervalDaily:
table = "validator_dashboard_data_daily"
dateColumn = "t"
case enums.IntervalWeekly:
table = "validator_dashboard_data_weekly"
dateColumn = "t"
default:
return "", "", fmt.Errorf("unexpected aggregation type: %v", aggregation)
}

return table, dateColumn, nil
}

func (d *DataAccessService) getViewAndDateColumn(aggregation enums.ChartAggregation) (string, string, error) {
var view, dateColumn string

switch aggregation {
case enums.IntervalEpoch:
view = "view_validator_dashboard_data_epoch_max_ts"
dateColumn = "t"
case enums.IntervalHourly:
view = "view_validator_dashboard_data_hourly_max_ts"
dateColumn = "t"
case enums.IntervalDaily:
view = "view_validator_dashboard_data_daily_max_ts"
dateColumn = "t"
case enums.IntervalWeekly:
view = "view_validator_dashboard_data_weekly_max_ts"
dateColumn = "t"
default:
return "", "", fmt.Errorf("unexpected aggregation type: %v", aggregation)
}

return view, dateColumn, nil
}