Skip to content

Commit

Permalink
Always create snapshot when cluster version changes
Browse files Browse the repository at this point in the history
When downgrading or performing an offline migration, etcd reads the maximum
version from the WAL files, so as to ensure the operation is
allowed. It also reads the version from the `ClusterVersionSet` request,
so we should create a snapshot when the cluster version changes,
so as not to block the downgrade or migration operations.

Signed-off-by: Benjamin Wang <[email protected]>
  • Loading branch information
ahrtr committed Jan 7, 2025
1 parent d17821f commit 7f7ebde
Show file tree
Hide file tree
Showing 4 changed files with 8 additions and 14 deletions.
4 changes: 2 additions & 2 deletions server/etcdserver/apply/apply.go
Original file line number Diff line number Diff line change
Expand Up @@ -415,7 +415,6 @@ func (a *applierMembership) ClusterVersionSet(r *membershippb.ClusterVersionSetR
prevVersion := a.cluster.Version()
newVersion := semver.Must(semver.NewVersion(r.Ver))
a.cluster.SetVersion(newVersion, api.UpdateCapability, shouldApplyV3)
// Force snapshot after cluster version downgrade.
if prevVersion != nil && newVersion.LessThan(*prevVersion) {
lg := a.lg
if lg != nil {
Expand All @@ -424,8 +423,9 @@ func (a *applierMembership) ClusterVersionSet(r *membershippb.ClusterVersionSetR
zap.String("new-cluster-version", newVersion.String()),
)
}
a.snapshotServer.ForceSnapshot()
}
// Force snapshot when cluster version changes.
a.snapshotServer.ForceSnapshot()
}

func (a *applierMembership) ClusterMemberAttrSet(r *membershippb.ClusterMemberAttrSetRequest, shouldApplyV3 membership.ShouldApplyV3) {
Expand Down
2 changes: 1 addition & 1 deletion server/etcdserver/bootstrap.go
Original file line number Diff line number Diff line change
Expand Up @@ -216,7 +216,7 @@ func bootstrapBackend(cfg config.ServerConfig, haveWAL bool, st v2store.Store, s
return nil, err
}
}
cfg.Logger.Debug("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))
cfg.Logger.Info("restore consistentIndex", zap.Uint64("index", ci.ConsistentIndex()))

// TODO(serathius): Implement schema setup in fresh storage
var snapshot *raftpb.Snapshot
Expand Down
4 changes: 2 additions & 2 deletions tests/e2e/etcd_mix_versions_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -148,8 +148,8 @@ func mixVersionsSnapshotTestByMockPartition(t *testing.T, cfg *e2e.EtcdProcessCl
err = toPartitionedMember.Stop()
require.NoError(t, err)

t.Log("Writing 20 keys to the cluster (more than SnapshotCount entries to trigger at least a snapshot)")
writeKVs(t, epc.Etcdctl(), 0, 20)
t.Log("Writing 30 keys to the cluster (more than SnapshotCount entries to trigger at least a snapshot)")
writeKVs(t, epc.Etcdctl(), 0, 30)

t.Log("Verify logs to check leader has saved snapshot")
leaderEPC := epc.Procs[epc.WaitLeader(t)]
Expand Down
12 changes: 3 additions & 9 deletions tests/e2e/utl_migrate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -90,16 +90,10 @@ func TestEtctlutlMigrate(t *testing.T) {
expectStorageVersion: &version.V3_6,
},
{
name: "Downgrade v3.6 to v3.5 should fail until it's implemented",
name: "Downgrade v3.6 to v3.5 should work",
targetVersion: "3.5",
expectLogsSubString: "cannot downgrade storage, WAL contains newer entries",
expectStorageVersion: &version.V3_6,
},
{
name: "Downgrade v3.6 to v3.5 with force should work",
targetVersion: "3.5",
force: true,
expectLogsSubString: "forcefully cleared storage version",
expectLogsSubString: "updated storage version",
expectStorageVersion: nil, // 3.5 doesn't have the field `storageVersion`, so it returns nil.
},
{
name: "Upgrade v3.6 to v3.7 with force should work",
Expand Down

0 comments on commit 7f7ebde

Please sign in to comment.