From a4b6d80bc318f2d0e8542d192225d3d4ccffb666 Mon Sep 17 00:00:00 2001 From: Chao Chen Date: Mon, 19 Dec 2022 12:52:09 -0800 Subject: [PATCH] tests linearizability: reproduce and prevent 14571 Signed-off-by: Chao Chen --- bill-of-materials.json | 2 +- scripts/install-marker.sh | 2 +- tests/common/auth_in_progress_test.go | 1136 +++++++++++++++++ tests/linearizability/client.go | 28 +- tests/linearizability/failpoints.go | 97 +- tests/linearizability/history.go | 16 +- tests/linearizability/linearizability_test.go | 152 ++- tests/linearizability/traffic.go | 66 +- 8 files changed, 1430 insertions(+), 69 deletions(-) create mode 100644 tests/common/auth_in_progress_test.go diff --git a/bill-of-materials.json b/bill-of-materials.json index 9638e8e54aa2..c0d8f8ed35f6 100644 --- a/bill-of-materials.json +++ b/bill-of-materials.json @@ -396,7 +396,7 @@ ] }, { - "project": "github.com/stretchr/testify/assert", + "project": "github.com/stretchr/testify", "licenses": [ { "type": "MIT License", diff --git a/scripts/install-marker.sh b/scripts/install-marker.sh index 467492666d1f..495b0ec74b26 100755 --- a/scripts/install-marker.sh +++ b/scripts/install-marker.sh @@ -10,7 +10,7 @@ if [ -z "$1" ]; then fi MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-unknown-linux-gnu -if [ ${ARCH} == "darwin" ]; then +if [ "${ARCH}" == "darwin" ]; then MARKER_URL=https://storage.googleapis.com/etcd/test-binaries/marker-v0.4.0-x86_64-apple-darwin fi diff --git a/tests/common/auth_in_progress_test.go b/tests/common/auth_in_progress_test.go new file mode 100644 index 000000000000..0b2f15517760 --- /dev/null +++ b/tests/common/auth_in_progress_test.go @@ -0,0 +1,1136 @@ +//go:build ignore + +package common + +import ( + "context" + "fmt" + "path/filepath" + "strings" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + "go.etcd.io/etcd/client/pkg/v3/testutil" + clientv3 "go.etcd.io/etcd/client/v3" + 
"go.etcd.io/etcd/tests/v3/framework/config" + "go.etcd.io/etcd/tests/v3/framework/interfaces" + "go.etcd.io/etcd/tests/v3/framework/testutils" +) + +var defaultAuthToken = fmt.Sprintf("jwt,pub-key=%s,priv-key=%s,sign-method=RS256,ttl=1s", + mustAbsPath("../fixtures/server.crt"), mustAbsPath("../fixtures/server.key.insecure")) + +func TestAuthWriteKey(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoError(t, cc.Put(ctx, "foo", "a", config.PutOptions{})) + assert.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + + // confirm root role can access to all keys + assert.NoError(t, rootAuthClient.Put(ctx, "foo", "bar", config.PutOptions{})) + resp, err := rootAuthClient.Get(ctx, "foo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs) + } + + // try invalid user + _, err = clus.Client(WithAuth("a", "b")) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrAuthFailed.Error()) { + t.Errorf("want error %s but got %v", rpctypes.ErrAuthFailed.Error(), err) + } + // confirm put failed + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword))) + resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs) + } + // try good user + 
assert.NoError(t, testUserAuthClient.Put(ctx, "foo", "bar2", config.PutOptions{})) + // confirm put succeeded + resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar2" { + t.Fatalf("want key value pair 'foo' 'bar2' but got %+v", resp.Kvs) + } + // try bad password + _, err = clus.Client(WithAuth(testUserName, "badpass")) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrAuthFailed.Error()) { + t.Errorf("want error %s but got %v", rpctypes.ErrAuthFailed.Error(), err) + } + // confirm put failed + resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar2" { + t.Fatalf("want key value pair 'foo' 'bar2' but got %+v", resp.Kvs) + } + }) +} + +func TestAuthRoleUpdate(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoError(t, cc.Put(ctx, "foo", "bar", config.PutOptions{})) + + assert.NoErrorf(t, setupAuth(cc, []authRole{testRole}, []authUser{rootUser, testUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth(rootUserName, rootPassword))) + + // try put to not granted key + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth(testUserName, testPassword))) + putFailPerm(ctx, testUserAuthClient, "hoo", "bar", t) + // grant a new key + _, err := rootAuthClient.RoleGrantPermission(ctx, testRoleName, "hoo", "", clientv3.PermissionType(clientv3.PermReadWrite)) + assert.NoError(t, err) + // try a newly granted key + assert.NoError(t, 
testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{})) + // confirm put succeeded + resp, err := testUserAuthClient.Get(ctx, "hoo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'hoo' 'bar' but got %+v", resp.Kvs) + } + // revoke the newly granted key + _, err = rootAuthClient.RoleRevokePermission(ctx, testRoleName, "hoo", "") + assert.NoError(t, err) + // try put to the revoked key + putFailPerm(ctx, testUserAuthClient, "hoo", "bar", t) + // confirm a key still granted can be accessed + resp, err = testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + assert.NoError(t, err) + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs) + } + }) +} + +func TestAuthUserDeleteDuringOps(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + if err := cc.Put(ctx, "foo", "bar", config.PutOptions{}); err != nil { + t.Fatal(err) + } + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + + // create a key + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + if err := testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}); err != nil { + t.Fatal(err) + } + // confirm put succeeded + resp, err := testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || 
string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs) + } + // delete the user + if _, err = rootAuthClient.UserDelete(ctx, "test-user"); err != nil { + t.Fatal(err) + } + // check the user is deleted + err = testUserAuthClient.Put(ctx, "foo", "baz", config.PutOptions{}) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrAuthFailed.Error()) { + t.Errorf("want error %s but got %v", rpctypes.ErrAuthFailed.Error(), err) + } + }) +} + +func TestAuthRoleRevokeDuringOps(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + if err := cc.Put(ctx, "foo", "bar", config.PutOptions{}); err != nil { + t.Fatal(err) + } + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // create a key + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + if err := testUserAuthClient.Put(ctx, "foo", "bar", config.PutOptions{}); err != nil { + t.Fatal(err) + } + // confirm put succeeded + resp, err := testUserAuthClient.Get(ctx, "foo", config.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "foo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'foo' 'bar' but got %+v", resp.Kvs) + } + // create a new role + if _, err = rootAuthClient.RoleAdd(ctx, "test-role2"); err != nil { + t.Fatal(err) + } + // grant a new key to the new role + if _, err = rootAuthClient.RoleGrantPermission(ctx, "test-role2", "hoo", "", 
clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + t.Fatal(err) + } + // grant the new role to the user + if _, err = rootAuthClient.UserGrantRole(ctx, "test-user", "test-role2"); err != nil { + t.Fatal(err) + } + + // try a newly granted key + if err := testUserAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}); err != nil { + t.Fatal(err) + } + // confirm put succeeded + resp, err = testUserAuthClient.Get(ctx, "hoo", config.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar" { + t.Fatalf("want key value pair 'hoo' 'bar' but got %+v", resp.Kvs) + } + // revoke a role from the user + if _, err = rootAuthClient.UserRevokeRole(ctx, "test-user", "test-role"); err != nil { + t.Fatal(err) + } + // check the role is revoked and permission is lost from the user + putFailPerm(ctx, testUserAuthClient, "foo", "baz", t) + + // try a key that can be accessed from the remaining role + if err := testUserAuthClient.Put(ctx, "hoo", "bar2", config.PutOptions{}); err != nil { + t.Fatal(err) + } + // confirm put succeeded + resp, err = testUserAuthClient.Get(ctx, "hoo", config.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(resp.Kvs) != 1 || string(resp.Kvs[0].Key) != "hoo" || string(resp.Kvs[0].Value) != "bar2" { + t.Fatalf("want key value pair 'hoo' 'bar2' but got %+v", resp.Kvs) + } + }) +} + +// TestAuthEmptyUserGet ensures that a get with an empty user will return an empty user error. 
+func TestAuthEmptyUserGet(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + _, err := cc.Get(ctx, "abc", config.GetOptions{}) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrUserEmpty.Error()) { + t.Errorf("want error %s but got %v", rpctypes.ErrUserEmpty.Error(), err) + } + }) +} + +// TestAuthEmptyUserPut ensures that a put with an empty user will return an empty user error, +// and the consistent_index should be moved forward even the apply-->Put fails. +func TestAuthEmptyUserPut(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, SnapshotCount: 3})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + // The SnapshotCount is 3, so there must be at least 3 new snapshot files being created. + // The VERIFY logic will check whether the consistent_index >= last snapshot index on + // cluster terminating. 
+ for i := 0; i < 10; i++ { + err := cc.Put(ctx, "foo", "bar", config.PutOptions{}) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrUserEmpty.Error()) { + t.Errorf("want error %s but got %v", rpctypes.ErrUserEmpty.Error(), err) + } + } + }) +} + +// TestAuthTokenWithDisable tests that auth won't crash if +// given a valid token when authentication is disabled +func TestAuthTokenWithDisable(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + rctx, cancel := context.WithCancel(context.TODO()) + donec := make(chan struct{}) + go func() { + defer close(donec) + for rctx.Err() == nil { + rootAuthClient.Put(ctx, "abc", "def", config.PutOptions{}) + } + }() + time.Sleep(10 * time.Millisecond) + if err := rootAuthClient.AuthDisable(ctx); err != nil { + t.Fatal(err) + } + time.Sleep(10 * time.Millisecond) + cancel() + <-donec + }) +} + +func TestAuthTxn(t *testing.T) { + tcs := []struct { + name string + cfg config.ClusterConfig + }{ + { + "NoJWT", + config.ClusterConfig{ClusterSize: 1}, + }, + { + "JWT", + config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}, + }, + } + + reqs := []txnReq{ + { + compare: []string{`version("c2") = "1"`}, + ifSuccess: []string{"get s2"}, + ifFail: []string{"get f2"}, + results: []string{"SUCCESS", "s2", "v"}, + }, + // a key of compare case isn't granted + { + compare: []string{`version("c1") = "1"`}, + ifSuccess: []string{"get s2"}, + ifFail: []string{"get f2"}, + results: []string{"etcdserver: permission denied"}, + }, + // a key of success case 
isn't granted + { + compare: []string{`version("c2") = "1"`}, + ifSuccess: []string{"get s1"}, + ifFail: []string{"get f2"}, + results: []string{"etcdserver: permission denied"}, + }, + // a key of failure case isn't granted + { + compare: []string{`version("c2") = "1"`}, + ifSuccess: []string{"get s2"}, + ifFail: []string{"get f1"}, + results: []string{"etcdserver: permission denied"}, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.cfg)) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + // keys with 1 suffix aren't granted to test-user + // keys with 2 suffix are granted to test-user + + keys := []string{"c1", "s1", "f1"} + grantedKeys := []string{"c2", "s2", "f2"} + for _, key := range keys { + if err := cc.Put(ctx, key, "v", config.PutOptions{}); err != nil { + t.Fatal(err) + } + } + for _, key := range grantedKeys { + if err := cc.Put(ctx, key, "v", config.PutOptions{}); err != nil { + t.Fatal(err) + } + } + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // grant keys to test-user + for _, key := range grantedKeys { + if _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", key, "", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + t.Fatal(err) + } + } + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + for _, req := range reqs { + resp, err := testUserAuthClient.Txn(ctx, req.compare, req.ifSuccess, req.ifFail, config.TxnOptions{ + Interactive: true, + }) + if strings.Contains(req.results[0], "denied") { + assert.Contains(t, err.Error(), 
req.results[0]) + } else { + assert.NoError(t, err) + assert.Equal(t, req.results, getRespValues(resp)) + } + } + }) + }) + } +} + +func TestAuthPrefixPerm(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + prefix := "/prefix/" // directory like prefix + // grant keys to test-user + if _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", prefix, clientv3.GetPrefixRangeEnd(prefix), clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + t.Fatal(err) + } + // try a prefix granted permission + testUserClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + for i := 0; i < 10; i++ { + key := fmt.Sprintf("%s%d", prefix, i) + if err := testUserClient.Put(ctx, key, "val", config.PutOptions{}); err != nil { + t.Fatal(err) + } + } + putFailPerm(ctx, testUserClient, clientv3.GetPrefixRangeEnd(prefix), "baz", t) + // grant the prefix2 keys to test-user + prefix2 := "/prefix2/" + if _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", prefix2, clientv3.GetPrefixRangeEnd(prefix2), clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + t.Fatal(err) + } + for i := 0; i < 10; i++ { + key := fmt.Sprintf("%s%d", prefix2, i) + if err := testUserClient.Put(ctx, key, "val", config.PutOptions{}); err != nil { + t.Fatal(err) + } + } + }) +} + +func TestAuthRevokeWithDelete(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus 
:= testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // create a new role + if _, err := rootAuthClient.RoleAdd(ctx, "test-role2"); err != nil { + t.Fatal(err) + } + // grant the new role to the user + if _, err := rootAuthClient.UserGrantRole(ctx, "test-user", "test-role2"); err != nil { + t.Fatal(err) + } + // check the result + resp, err := rootAuthClient.UserGet(ctx, "test-user") + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, resp.Roles, []string{"test-role", "test-role2"}) + // delete the role, test-role2 must be revoked from test-user + if _, err := rootAuthClient.RoleDelete(ctx, "test-role2"); err != nil { + t.Fatal(err) + } + // check the result + resp, err = rootAuthClient.UserGet(ctx, "test-user") + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, resp.Roles, []string{"test-role"}) + }) +} + +func TestAuthInvalidMgmt(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + _, err := rootAuthClient.RoleDelete(ctx, "root") + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrInvalidAuthMgmt.Error()) { + t.Fatalf("want %v error but got %v error", rpctypes.ErrInvalidAuthMgmt, err) + } + _, 
err = rootAuthClient.UserRevokeRole(ctx, "root", "root") + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrInvalidAuthMgmt.Error()) { + t.Fatalf("want %v error but got %v error", rpctypes.ErrInvalidAuthMgmt, err) + } + }) +} + +func TestAuthLeaseTestKeepAlive(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + resp, err := rootAuthClient.Grant(ctx, 10) + if err != nil { + t.Fatal(err) + } + leaseID := resp.ID + if err = rootAuthClient.Put(ctx, "key", "value", config.PutOptions{LeaseID: leaseID}); err != nil { + t.Fatal(err) + } + if _, err = rootAuthClient.KeepAliveOnce(ctx, leaseID); err != nil { + t.Fatal(err) + } + gresp, err := rootAuthClient.Get(ctx, "key", config.GetOptions{}) + if err != nil { + t.Fatal(err) + } + if len(gresp.Kvs) != 1 || string(gresp.Kvs[0].Key) != "key" || string(gresp.Kvs[0].Value) != "value" { + t.Fatalf("want kv pair ('key', 'value') but got %v", gresp.Kvs) + } + }) +} + +func TestAuthLeaseTestTimeToLiveExpired(t *testing.T) { + tcs := []struct { + name string + JWTEnabled bool + }{ + { + name: "JWTEnabled", + JWTEnabled: true, + }, + { + name: "JWTDisabled", + JWTEnabled: false, + }, + } + + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := 
testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + resp, err := rootAuthClient.Grant(ctx, 2) + if err != nil { + t.Fatal(err) + } + leaseID := resp.ID + if err = rootAuthClient.Put(ctx, "key", "val", config.PutOptions{LeaseID: leaseID}); err != nil { + t.Fatal(err) + } + // eliminate false positive + time.Sleep(3 * time.Second) + tresp, err := rootAuthClient.TimeToLive(ctx, leaseID, config.LeaseOption{}) + if err != nil { + t.Fatal(err) + } + if tresp.TTL != -1 { + t.Fatalf("want leaseID %v expired but not", leaseID) + } + gresp, err := rootAuthClient.Get(ctx, "key", config.GetOptions{}) + if err != nil || len(gresp.Kvs) != 0 { + t.Fatalf("want nil err and no kvs but got (%v) error and %d kvs", err, len(gresp.Kvs)) + } + }) + }) + } +} + +func TestAuthLeaseGrantLeases(t *testing.T) { + tcs := []struct { + name string + cfg config.ClusterConfig + }{ + { + "NoJWT", + config.ClusterConfig{ClusterSize: 1}, + }, + { + "JWT", + config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}, + }, + } + for _, tc := range tcs { + t.Run(tc.name, func(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.cfg)) + defer clus.Close() + testutils.ExecuteUntil(ctx, t, func() { + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + resp, err := rootAuthClient.Grant(ctx, 10) + if err != nil { + t.Fatal(err) + } + leaseID := resp.ID + lresp, err := rootAuthClient.Leases(ctx) + if err != nil { + t.Fatal(err) + } + if len(lresp.Leases) != 1 || lresp.Leases[0].ID != leaseID { + t.Fatalf("want %v leaseID but 
got %v leases", leaseID, lresp.Leases) + } + }) + }) + } +} + +func TestAuthLeaseAttach(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + users := []struct { + name string + password string + role string + key string + end string + }{ + { + name: "user1", + password: "user1-123", + role: "role1", + key: "k1", + end: "k3", + }, + { + name: "user2", + password: "user2-123", + role: "role2", + key: "k2", + end: "k4", + }, + } + for _, user := range users { + authSetupTestUser(ctx, cc, user.name, user.password, user.role, user.key, user.end, t) + } + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + user1c := testutils.MustClient(clus.Client(WithAuth("user1", "user1-123"))) + user2c := testutils.MustClient(clus.Client(WithAuth("user2", "user2-123"))) + leaseResp, err := user1c.Grant(ctx, 90) + testutil.AssertNil(t, err) + leaseID := leaseResp.ID + // permission of k2 is also granted to user2 + err = user1c.Put(ctx, "k2", "val", config.PutOptions{LeaseID: leaseID}) + testutil.AssertNil(t, err) + _, err = user2c.Revoke(ctx, leaseID) + testutil.AssertNil(t, err) + + leaseResp, err = user1c.Grant(ctx, 90) + testutil.AssertNil(t, err) + leaseID = leaseResp.ID + // permission of k1 isn't granted to user2 + err = user1c.Put(ctx, "k1", "val", config.PutOptions{LeaseID: leaseID}) + testutil.AssertNil(t, err) + _, err = user2c.Revoke(ctx, leaseID) + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrPermissionDenied.Error()) { + t.Fatalf("want %v error but got %v error", rpctypes.ErrPermissionDenied, err) + } + }) +} + +func TestAuthLeaseRevoke(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := 
context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + testutils.ExecuteUntil(ctx, t, func() { + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // put with TTL 10 seconds and revoke + resp, err := rootAuthClient.Grant(ctx, 10) + if err != nil { + t.Fatal(err) + } + leaseID := resp.ID + if err = rootAuthClient.Put(ctx, "key", "val", config.PutOptions{LeaseID: leaseID}); err != nil { + t.Fatal(err) + } + if _, err = rootAuthClient.Revoke(ctx, leaseID); err != nil { + t.Fatal(err) + } + gresp, err := rootAuthClient.Get(ctx, "key", config.GetOptions{}) + if err != nil || len(gresp.Kvs) != 0 { + t.Fatalf("want nil err and no kvs but got (%v) error and %d kvs", err, len(gresp.Kvs)) + } + }) +} + +func TestAuthRoleGet(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + if _, err := rootAuthClient.RoleGet(ctx, "test-role"); err != nil { + t.Fatal(err) + } + // test-user can get the information of test-role because it belongs to the role + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + if _, err := testUserAuthClient.RoleGet(ctx, "test-role"); err != nil { + t.Fatal(err) + } + // test-user cannot get the information of root because it doesn't belong to the role + _, err := 
testUserAuthClient.RoleGet(ctx, "root") + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrPermissionDenied.Error()) { + t.Fatalf("want %v error but got %v", rpctypes.ErrPermissionDenied, err) + } + }) +} + +func TestAuthUserGet(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + resp, err := rootAuthClient.UserGet(ctx, "test-user") + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, resp.Roles, []string{"test-role"}) + // test-user can get the information of test-user itself + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + resp, err = testUserAuthClient.UserGet(ctx, "test-user") + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, resp.Roles, []string{"test-role"}) + // test-user cannot get the information of root + _, err = testUserAuthClient.UserGet(ctx, "root") + if err == nil || !strings.Contains(err.Error(), rpctypes.ErrPermissionDenied.Error()) { + t.Fatalf("want %v error but got %v", rpctypes.ErrPermissionDenied, err) + } + }) +} + +func TestAuthRoleList(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), 
"failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + resp, err := rootAuthClient.RoleList(ctx) + if err != nil { + t.Fatal(err) + } + assert.ElementsMatch(t, resp.Roles, []string{"test-role"}) + }) +} + +func TestAuthDefrag(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + var kvs = []testutils.KV{{Key: "key", Val: "val1"}, {Key: "key", Val: "val2"}, {Key: "key", Val: "val3"}} + for i := range kvs { + if err := cc.Put(ctx, kvs[i].Key, kvs[i].Val, config.PutOptions{}); err != nil { + t.Fatalf("TestAuthDefrag #%d: put kv error (%v)", i, err) + } + } + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // ordinary user cannot defrag + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + if err := testUserAuthClient.Defragment(ctx, config.DefragOption{Timeout: 5 * time.Second}); err == nil { + t.Fatal("want error but got no error") + } + // root can defrag + if err := rootAuthClient.Defragment(ctx, config.DefragOption{Timeout: 5 * time.Second}); err != nil { + t.Fatal(err) + } + }) +} + +func TestAuthEndpointHealth(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + 
assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + + if err := rootAuthClient.Health(ctx); err != nil { + t.Fatal(err) + } + // health checking with an ordinary user "succeeds" since permission denial goes through consensus + if err := testUserAuthClient.Health(ctx); err != nil { + t.Fatal(err) + } + // succeed if permissions granted for ordinary user + if _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", "health", "", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + t.Fatal(err) + } + if err := testUserAuthClient.Health(ctx); err != nil { + t.Fatal(err) + } + }) +} + +func TestAuthWatch(t *testing.T) { + watchTimeout := 1 * time.Second + tcs := []struct { + name string + cfg config.ClusterConfig + }{ + { + "NoJWT", + config.ClusterConfig{ClusterSize: 1}, + }, + { + "JWT", + config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken}, + }, + } + + tests := []struct { + puts []testutils.KV + watchKey string + opts config.WatchOptions + want bool + wanted []testutils.KV + }{ + { // watch 1 key, should be successful + puts: []testutils.KV{{Key: "key", Val: "value"}}, + watchKey: "key", + opts: config.WatchOptions{Revision: 1}, + want: true, + wanted: []testutils.KV{{Key: "key", Val: "value"}}, + }, + { // watch 3 keys by range, should be successful + puts: []testutils.KV{{Key: "key1", Val: "value1"}, {Key: "key3", Val: "value3"}, {Key: "key2", Val: "value2"}}, + watchKey: "key", + opts: config.WatchOptions{RangeEnd: "key3", Revision: 1}, + want: true, + wanted: []testutils.KV{{Key: "key1", Val: "value1"}, {Key: "key2", Val: "value2"}}, + }, + { // watch 1 key, should not be successful + puts: []testutils.KV{}, + watchKey: "key5", + opts: config.WatchOptions{Revision: 1}, + 
want: false, + wanted: []testutils.KV{}, + }, + { // watch 3 keys by range, should not be successful + puts: []testutils.KV{}, + watchKey: "key", + opts: config.WatchOptions{RangeEnd: "key6", Revision: 1}, + want: false, + wanted: []testutils.KV{}, + }, + } + + for _, tc := range tcs { + for i, tt := range tests { + t.Run(tc.name, func(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(tc.cfg)) + defer clus.Close() + testutils.ExecuteUntil(ctx, t, func() { + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + + _, err := rootAuthClient.RoleGrantPermission(ctx, "test-role", "key", "key4", clientv3.PermissionType(clientv3.PermReadWrite)) + assert.NoError(t, err) + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + + donec := make(chan struct{}) + go func(i int, puts []testutils.KV) { + defer close(donec) + for j := range puts { + if err := testUserAuthClient.Put(ctx, puts[j].Key, puts[j].Val, config.PutOptions{}); err != nil { + t.Errorf("test #%d-%d: put error (%v)", i, j, err) + } + } + }(i, tt.puts) + wCtx, wCancel := context.WithCancel(ctx) + wch := testUserAuthClient.Watch(wCtx, tt.watchKey, tt.opts) + if wch == nil { + t.Fatalf("failed to watch %s", tt.watchKey) + } + kvs, err := testutils.KeyValuesFromWatchChan(wch, len(tt.wanted), watchTimeout) + if err != nil { + wCancel() + assert.False(t, tt.want) + } else { + assert.Equal(t, tt.wanted, kvs) + } + wCancel() + <-donec + }) + }) + } + } +} + +func TestAuthJWTExpire(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1, AuthToken: defaultAuthToken})) + defer 
clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + // try a granted key + if err := rootAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}); err != nil { + t.Error(err) + } + // wait an expiration of my JWT token + <-time.After(3 * time.Second) + if err := rootAuthClient.Put(ctx, "hoo", "bar", config.PutOptions{}); err != nil { + t.Error(err) + } + }) +} + +func TestAuthMemberRemove(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 3})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + authSetupDefaultTestUser(ctx, rootAuthClient, t) + testUserAuthClient := testutils.MustClient(clus.Client(WithAuth("test-user", "pass"))) + + memberList, err := rootAuthClient.MemberList(ctx) + assert.NoError(t, err) + assert.Equal(t, 3, len(memberList.Members), "want 3 member but got %d", len(memberList.Members)) + id := memberList.Members[0].ID + + // 5 seconds is the minimum required amount of time peer is considered active + time.Sleep(5 * time.Second) + if _, err := testUserAuthClient.MemberRemove(ctx, id); err == nil { + t.Fatalf("ordinary user must not be allowed to remove a member") + } + if _, err := rootAuthClient.MemberRemove(ctx, id); err != nil { + t.Fatal(err) + } + }) +} + +// TestAuthRevisionConsistency ensures authRevision is the same after etcd restart +func 
TestAuthRevisionConsistency(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + // add user + if _, err := rootAuthClient.UserAdd(ctx, "test-user", "pass", config.UserAddOptions{}); err != nil { + t.Fatal(err) + } + // delete the same user + if _, err := rootAuthClient.UserDelete(ctx, "test-user"); err != nil { + t.Fatal(err) + } + sresp, err := rootAuthClient.AuthStatus(ctx) + if err != nil { + t.Fatal(err) + } + oldAuthRevision := sresp.AuthRevision + // restart the node + m := clus.Members()[0] + m.Stop() + if err = m.Start(ctx); err != nil { + t.Fatal(err) + } + sresp, err = rootAuthClient.AuthStatus(ctx) + if err != nil { + t.Fatal(err) + } + newAuthRevision := sresp.AuthRevision + // assert AuthRevision equal + if newAuthRevision != oldAuthRevision { + t.Fatalf("auth revison shouldn't change when restarting etcd, expected: %d, got: %d", oldAuthRevision, newAuthRevision) + } + }) +} + +// TestAuthKVRevision ensures kv revision is the same after auth mutating operations +func TestAuthKVRevision(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + err := cc.Put(ctx, "foo", "bar", config.PutOptions{}) + if err != nil { + t.Fatal(err) + } + gresp, err := cc.Get(ctx, "foo", config.GetOptions{}) + if err != nil { 
+ t.Fatal(err) + } + rev := gresp.Header.Revision + aresp, aerr := cc.UserAdd(ctx, "root", "123", config.UserAddOptions{NoPassword: false}) + if aerr != nil { + t.Fatal(err) + } + if aresp.Header.Revision != rev { + t.Fatalf("revision want %d, got %d", rev, aresp.Header.Revision) + } + }) +} + +// TestAuthConcurrent ensures concurrent auth ops don't cause old authRevision errors +func TestAuthRevConcurrent(t *testing.T) { + testRunner.BeforeTest(t) + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) + defer cancel() + clus := testRunner.NewCluster(ctx, t, config.WithClusterConfig(config.ClusterConfig{ClusterSize: 1})) + defer clus.Close() + cc := testutils.MustClient(clus.Client()) + testutils.ExecuteUntil(ctx, t, func() { + assert.NoErrorf(t, setupAuth(cc, []authRole{}, []authUser{rootUser}), "failed to enable auth") + rootAuthClient := testutils.MustClient(clus.Client(WithAuth("root", "root"))) + var wg sync.WaitGroup + f := func(i int) { + defer wg.Done() + role, user := fmt.Sprintf("test-role-%d", i), fmt.Sprintf("test-user-%d", i) + _, err := rootAuthClient.RoleAdd(ctx, role) + testutil.AssertNil(t, err) + _, err = rootAuthClient.RoleGrantPermission(ctx, role, "a", clientv3.GetPrefixRangeEnd("a"), clientv3.PermissionType(clientv3.PermReadWrite)) + testutil.AssertNil(t, err) + _, err = rootAuthClient.UserAdd(ctx, user, "123", config.UserAddOptions{NoPassword: false}) + testutil.AssertNil(t, err) + err = rootAuthClient.Put(ctx, "a", "b", config.PutOptions{}) + testutil.AssertNil(t, err) + } + // needs concurrency to trigger + numRoles := 2 + wg.Add(numRoles) + for i := 0; i < numRoles; i++ { + go f(i) + } + wg.Wait() + }) +} + +func authSetupDefaultTestUser(ctx context.Context, cc interfaces.Client, t *testing.T) { + authSetupTestUser(ctx, cc, "test-user", "pass", "test-role", "foo", "", t) +} + +func authSetupTestUser(ctx context.Context, cc interfaces.Client, userName, password, roleName, key, end string, t *testing.T) { + _, err := 
// mustAbsPath resolves path to an absolute, cleaned path and panics when
// the resolution fails (e.g. the working directory cannot be determined).
func mustAbsPath(path string) string {
	resolved, err := filepath.Abs(path)
	if err != nil {
		panic(err)
	}
	return resolved
}
err) return err } + +type option func(*clientv3.Config) + +func noOpOption() option { return func(*clientv3.Config) {} } + +func withAuth(username, password string) option { + return func(cfg *clientv3.Config) { + cfg.Username = username + cfg.Password = password + } +} + +func clientOption(authEnabled bool, clientIndex int) option { + if !authEnabled { + return noOpOption() + } + return withAuth(rootUserName, rootUserPassword) +} diff --git a/tests/linearizability/failpoints.go b/tests/linearizability/failpoints.go index e40e3df606ea..9a02eb7b3387 100644 --- a/tests/linearizability/failpoints.go +++ b/tests/linearizability/failpoints.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" "go.uber.org/zap" clientv3 "go.etcd.io/etcd/client/v3" @@ -37,6 +38,7 @@ const ( var ( KillFailpoint Failpoint = killFailpoint{} + WaitForSnapshotKillFailpoint Failpoint = killFailpoint{waitForSnapshot: true} DefragBeforeCopyPanic Failpoint = goPanicFailpoint{"defragBeforeCopy", triggerDefrag, AnyMember} DefragBeforeRenamePanic Failpoint = goPanicFailpoint{"defragBeforeRename", triggerDefrag, AnyMember} BeforeCommitPanic Failpoint = goPanicFailpoint{"beforeCommit", nil, AnyMember} @@ -77,13 +79,15 @@ var ( ) type Failpoint interface { - Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster) error + Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster, lg *zap.Logger) error Name() string } -type killFailpoint struct{} +type killFailpoint struct { + waitForSnapshot bool +} -func (f killFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster) error { +func (f killFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster, lg *zap.Logger) error { member := clus.Procs[rand.Int()%len(clus.Procs)] killCtx, cancel := context.WithTimeout(ctx, triggerTimeout) @@ -99,13 +103,92 @@ func (f killFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.Etcd } } - err := 
member.Start(ctx) + // get endpoints excluding the killed member client URLs + endpoints := make([]string, 0, len(clus.EndpointsV3())) + for _, ed := range clus.EndpointsV3() { + if ed != member.EndpointsV3()[0] { + endpoints = append(endpoints, ed) + } + } + + if f.waitForSnapshot { + // wait for potential leader election + time.Sleep(2 * time.Second) + cc, err := clientv3.New(clientv3.Config{ + Endpoints: endpoints, + Logger: zap.NewNop(), + DialKeepAliveTime: 1 * time.Millisecond, + DialKeepAliveTimeout: 5 * time.Millisecond, + Username: rootUserName, + Password: rootUserPassword, + }) + require.NoError(t, err) + require.NoError(t, addTestUserAuth(ctx, cc)) + // 5000 is the default number of snapshot count catch up entries + // DefaultSnapshotCatchUpEntries = 5000 + // Try manipulating e2e etcd test binary to 100 locally and snapshot-count here to speed up test + require.NoError(t, waitForSnapshot(ctx, cc, 5000, lg)) + require.NoError(t, cc.Close()) + } + + return member.Start(ctx) +} + +func addTestUserAuth(ctx context.Context, cc *clientv3.Client) (err error) { + if _, err := cc.UserAdd(ctx, testUserName, testUserPassword); err != nil { + return err + } + if _, err := cc.RoleAdd(ctx, testRoleName); err != nil { + return err + } + if _, err := cc.UserGrantRole(ctx, testUserName, testRoleName); err != nil { + return err + } + if _, err := cc.RoleGrantPermission(ctx, testRoleName, "key", "key0", clientv3.PermissionType(clientv3.PermReadWrite)); err != nil { + return err + } + return +} + +func waitForSnapshot(ctx context.Context, cc *clientv3.Client, snapshotCount int64, lg *zap.Logger) error { + initialRevision, err := getRevision(ctx, cc) if err != nil { return err } + lg.Info("got revision", + zap.Int64("initial-revision", initialRevision), + zap.Int64("snapshot-catchup-count", snapshotCount), + ) + for { + time.Sleep(time.Second) + rev, err := getRevision(ctx, cc) + if err != nil { + lg.Warn("failed to get revision", zap.Error(err)) + return err + } + 
lg.Info("got revision", + zap.Int64("initial-revision", initialRevision), + zap.Int64("current-revision", rev), + zap.Int64("snapshot-catchup-count", snapshotCount), + ) + if rev >= initialRevision+snapshotCount { + break + } + } return nil } +func getRevision(ctx context.Context, client *clientv3.Client) (revision int64, err error) { + if err != nil { + return 0, fmt.Errorf("failed creating client: %w", err) + } + resp, err := client.Get(ctx, "/") + if err != nil { + return 0, err + } + return resp.Header.Revision, nil +} + func (f killFailpoint) Name() string { return "Kill" } @@ -123,7 +206,7 @@ const ( Leader failpointTarget = "Leader" ) -func (f goPanicFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster) error { +func (f goPanicFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster, lg *zap.Logger) error { member := f.pickMember(t, clus) address := fmt.Sprintf("127.0.0.1:%d", member.Config().GoFailPort) @@ -238,10 +321,10 @@ type randomFailpoint struct { failpoints []Failpoint } -func (f randomFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster) error { +func (f randomFailpoint) Trigger(t *testing.T, ctx context.Context, clus *e2e.EtcdProcessCluster, lg *zap.Logger) error { failpoint := f.failpoints[rand.Int()%len(f.failpoints)] t.Logf("Triggering %v failpoint\n", failpoint.Name()) - return failpoint.Trigger(t, ctx, clus) + return failpoint.Trigger(t, ctx, clus, lg) } func (f randomFailpoint) Name() string { diff --git a/tests/linearizability/history.go b/tests/linearizability/history.go index 0dfb0d394efb..8687e1186803 100644 --- a/tests/linearizability/history.go +++ b/tests/linearizability/history.go @@ -121,6 +121,7 @@ func (h *appendableHistory) appendFailed(request EtcdRequest, start time.Time, e Output: EtcdResponse{Err: err}, Return: 0, // For failed writes we don't know when request has really finished. }) + // Operations of single client needs to be sequential. 
// As we don't know return time of failed operations, all new writes need to be done with new client id. h.id = h.idProvider.ClientId() @@ -133,19 +134,12 @@ type history struct { failed []porcupine.Operation } -func (h history) Merge(h2 history) history { - result := history{ - successful: make([]porcupine.Operation, 0, len(h.successful)+len(h2.successful)), - failed: make([]porcupine.Operation, 0, len(h.failed)+len(h2.failed)), - } - result.successful = append(result.successful, h.successful...) - result.successful = append(result.successful, h2.successful...) - result.failed = append(result.failed, h.failed...) - result.failed = append(result.failed, h2.failed...) - return result +func (h *history) Merge(h2 history) { + h.successful = append(h.successful, h2.successful...) + h.failed = append(h.failed, h2.failed...) } -func (h history) Operations() []porcupine.Operation { +func (h *history) Operations() []porcupine.Operation { operations := make([]porcupine.Operation, 0, len(h.successful)+len(h.failed)) var maxTime int64 for _, op := range h.successful { diff --git a/tests/linearizability/linearizability_test.go b/tests/linearizability/linearizability_test.go index 78780e700c8f..ae6421e9da4b 100644 --- a/tests/linearizability/linearizability_test.go +++ b/tests/linearizability/linearizability_test.go @@ -25,7 +25,10 @@ import ( "time" "github.com/anishathalye/porcupine" + "github.com/stretchr/testify/require" "go.etcd.io/etcd/tests/v3/framework/e2e" + "go.uber.org/zap" + "go.uber.org/zap/zaptest" "golang.org/x/time/rate" ) @@ -41,34 +44,52 @@ const ( func TestLinearizability(t *testing.T) { testRunner.BeforeTest(t) tcs := []struct { - name string - failpoint Failpoint - config e2e.EtcdProcessClusterConfig + name string + failpoint Failpoint + config e2e.EtcdProcessClusterConfig + traffic Traffic + clientCount int }{ + //{ + // name: "ClusterOfSize1", + // failpoint: RandomFailpoint, + // config: *e2e.NewConfig( + // e2e.WithClusterSize(1), + // 
e2e.WithGoFailEnabled(true), + // e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints + // ), + // traffic: DefaultTraffic, + // clientCount: 8, + //}, + //{ + // name: "ClusterOfSize3", + // failpoint: RandomFailpoint, + // config: *e2e.NewConfig( + // e2e.WithGoFailEnabled(true), + // e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints + // ), + // traffic: DefaultTraffic, + // clientCount: 8, + //}, + //{ + // name: "Issue14370", + // failpoint: RaftBeforeSavePanic, + // config: *e2e.NewConfig( + // e2e.WithClusterSize(1), + // e2e.WithGoFailEnabled(true), + // ), + // traffic: DefaultTraffic, + // clientCount: 8, + //}, { - name: "ClusterOfSize1", - failpoint: RandomFailpoint, + name: "Issue14571", + failpoint: WaitForSnapshotKillFailpoint, config: *e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithGoFailEnabled(true), - e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints - ), - }, - { - name: "ClusterOfSize3", - failpoint: RandomFailpoint, - config: *e2e.NewConfig( - e2e.WithGoFailEnabled(true), - e2e.WithCompactionBatchLimit(100), // required for compactBeforeCommitBatch and compactAfterCommitBatch failpoints - ), - }, - { - name: "Issue14370", - failpoint: RaftBeforeSavePanic, - config: *e2e.NewConfig( - e2e.WithClusterSize(1), - e2e.WithGoFailEnabled(true), + e2e.WithClusterSize(3), + e2e.WithSnapshotCount(2), ), + traffic: DefaultTrafficWithAuth, + clientCount: 3, // actual client count = 9; 3 + (2 test user * 3 endpoints) = 9 clients }, } for _, tc := range tcs { @@ -79,18 +100,19 @@ func TestLinearizability(t *testing.T) { retries: 3, waitBetweenTriggers: waitBetweenFailpointTriggers, } - traffic := trafficConfig{ + trafficCfg := trafficConfig{ minimalQPS: minimalQPS, maximalQPS: maximalQPS, - clientCount: 8, - traffic: DefaultTraffic, + clientCount: tc.clientCount, + 
traffic: tc.traffic, } - testLinearizability(context.Background(), t, tc.config, failpoint, traffic) + lg := zaptest.NewLogger(t, zaptest.WrapOptions(zap.AddCaller())).Named(tc.name) + testLinearizability(context.Background(), t, tc.config, failpoint, trafficCfg, lg) }) } } -func testLinearizability(ctx context.Context, t *testing.T, config e2e.EtcdProcessClusterConfig, failpoint FailpointConfig, traffic trafficConfig) { +func testLinearizability(ctx context.Context, t *testing.T, config e2e.EtcdProcessClusterConfig, failpoint FailpointConfig, traffic trafficConfig, lg *zap.Logger) { clus, err := e2e.NewEtcdProcessCluster(ctx, t, e2e.WithConfig(&config)) if err != nil { t.Fatal(err) @@ -99,26 +121,26 @@ func testLinearizability(ctx context.Context, t *testing.T, config e2e.EtcdProce ctx, cancel := context.WithCancel(ctx) go func() { defer cancel() - err := triggerFailpoints(ctx, t, clus, failpoint) + err := triggerFailpoints(ctx, t, clus, failpoint, lg) if err != nil { t.Error(err) } }() - operations := simulateTraffic(ctx, t, clus, traffic) + operations := simulateTraffic(ctx, t, clus, traffic, lg) err = clus.Stop() if err != nil { t.Error(err) } - checkOperationsAndPersistResults(t, operations, clus) + checkOperationsAndPersistResults(t, operations, clus, lg) } -func triggerFailpoints(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, config FailpointConfig) error { +func triggerFailpoints(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, config FailpointConfig, lg *zap.Logger) error { var err error successes := 0 failures := 0 for successes < config.count && failures < config.retries { time.Sleep(config.waitBetweenTriggers) - err = config.failpoint.Trigger(t, ctx, clus) + err = config.failpoint.Trigger(t, ctx, clus, lg) if err != nil { t.Logf("Failed to trigger failpoint %q, err: %v\n", config.failpoint.Name(), err) failures++ @@ -140,20 +162,25 @@ type FailpointConfig struct { waitBetweenTriggers time.Duration } -func 
simulateTraffic(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, config trafficConfig) []porcupine.Operation { +func simulateTraffic(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessCluster, config trafficConfig, lg *zap.Logger) []porcupine.Operation { + require.NoError(t, config.traffic.PreRun(ctx, clus.Client(), lg)) + mux := sync.Mutex{} endpoints := clus.EndpointsV3() ids := newIdProvider() - h := history{} + h := &history{} limiter := rate.NewLimiter(rate.Limit(config.maximalQPS), 200) startTime := time.Now() wg := sync.WaitGroup{} + for i := 0; i < config.clientCount; i++ { + i := i + wg.Add(1) endpoints := []string{endpoints[i%len(endpoints)]} - c, err := NewClient(endpoints, ids) + c, err := NewClient(endpoints, ids, clientOption(config.traffic.AuthEnabled(), i)) if err != nil { t.Fatal(err) } @@ -161,25 +188,61 @@ func simulateTraffic(ctx context.Context, t *testing.T, clus *e2e.EtcdProcessClu defer wg.Done() defer c.Close() - config.traffic.Run(ctx, c, limiter, ids) + config.traffic.Run(ctx, c, limiter, ids, lg) mux.Lock() - h = h.Merge(c.history.history) + h.Merge(c.history.history) mux.Unlock() }(c) } + + simulatePostFailpointTraffic(ctx, &wg, endpoints, ids, h, &mux, config, limiter, lg) + wg.Wait() endTime := time.Now() operations := h.Operations() - t.Logf("Recorded %d operations", len(operations)) + lg.Info("Recorded operations", zap.Int("num-of-operation", len(operations))) qps := float64(len(operations)) / float64(endTime.Sub(startTime)) * float64(time.Second) - t.Logf("Average traffic: %f qps", qps) + lg.Info("Average traffic", zap.Float64("qps", qps)) if qps < config.minimalQPS { t.Errorf("Requiring minimal %f qps for test results to be reliable, got %f qps", config.minimalQPS, qps) } return operations } +func simulatePostFailpointTraffic(ctx context.Context, wg *sync.WaitGroup, endpoints []string, ids idProvider, h *history, mux *sync.Mutex, config trafficConfig, limiter *rate.Limiter, lg *zap.Logger) { + if 
!config.traffic.AuthEnabled() { + return + } + + // each endpoint has a client with each auth user + for _, ep := range endpoints { + eps := []string{ep} + for _, user := range users { + user := user + wg.Add(1) + go func() { + defer wg.Done() + select { + case <-ctx.Done(): + // Trigger failpoint is finished. + } + c, err := NewClient(eps, ids, withAuth(user.userName, user.userPassword)) + if err != nil { + panic(err) + } + defer c.Close() + cctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + config.traffic.Run(cctx, c, limiter, ids, lg) + mux.Lock() + h.Merge(c.history.history) + mux.Unlock() + }() + } + } +} + type trafficConfig struct { minimalQPS float64 maximalQPS float64 @@ -187,16 +250,21 @@ type trafficConfig struct { traffic Traffic } -func checkOperationsAndPersistResults(t *testing.T, operations []porcupine.Operation, clus *e2e.EtcdProcessCluster) { +func checkOperationsAndPersistResults(t *testing.T, operations []porcupine.Operation, clus *e2e.EtcdProcessCluster, lg *zap.Logger) { path, err := testResultsDirectory(t) if err != nil { t.Error(err) } + lg.Info("start evaluating operations", zap.Int("num-of-operations", len(operations))) + start := time.Now() + linearizable, info := porcupine.CheckOperationsVerbose(etcdModel, operations, 0) if linearizable != porcupine.Ok { t.Error("Model is not linearizable") persistMemberDataDir(t, clus, path) + } else { + lg.Info("operations is evaluated. 
Model is linearizable", zap.Duration("took", time.Since(start))) } visualizationPath := filepath.Join(path, "history.html") diff --git a/tests/linearizability/traffic.go b/tests/linearizability/traffic.go index 590511e48cb4..0baafb9f4f00 100644 --- a/tests/linearizability/traffic.go +++ b/tests/linearizability/traffic.go @@ -21,20 +21,29 @@ import ( "time" "go.etcd.io/etcd/api/v3/mvccpb" + "go.etcd.io/etcd/tests/v3/framework/config" + "go.etcd.io/etcd/tests/v3/framework/interfaces" + "go.uber.org/zap" "golang.org/x/time/rate" ) var ( - DefaultTraffic Traffic = readWriteSingleKey{key: "key", writes: []opChance{{operation: Put, chance: 90}, {operation: Delete, chance: 5}, {operation: Txn, chance: 5}}} + DefaultTraffic Traffic = readWriteSingleKey{key: "key", writes: []opChance{{operation: Put, chance: 90}, {operation: Delete, chance: 5}, {operation: Txn, chance: 5}}} + DefaultTrafficWithAuth Traffic = readWriteSingleKey{key: "key", writes: []opChance{{operation: Put, chance: 90}, {operation: Delete, chance: 5}, {operation: Txn, chance: 5}}, authEnabled: true} ) type Traffic interface { - Run(ctx context.Context, c *recordingClient, limiter *rate.Limiter, ids idProvider) + PreRun(ctx context.Context, c interfaces.Client, lg *zap.Logger) error + Run(ctx context.Context, c *recordingClient, limiter *rate.Limiter, ids idProvider, lg *zap.Logger) + + AuthEnabled() bool } type readWriteSingleKey struct { key string writes []opChance + + authEnabled bool } type opChance struct { @@ -42,8 +51,15 @@ type opChance struct { chance int } -func (t readWriteSingleKey) Run(ctx context.Context, c *recordingClient, limiter *rate.Limiter, ids idProvider) { +func (t readWriteSingleKey) PreRun(ctx context.Context, c interfaces.Client, lg *zap.Logger) error { + if t.AuthEnabled() { + lg.Info("set up auth") + return setupAuth(ctx, c) + } + return nil +} +func (t readWriteSingleKey) Run(ctx context.Context, c *recordingClient, limiter *rate.Limiter, ids idProvider, lg *zap.Logger) { for { 
select { case <-ctx.Done(): @@ -56,10 +72,17 @@ func (t readWriteSingleKey) Run(ctx context.Context, c *recordingClient, limiter continue } // Provide each write with unique id to make it easier to validate operation history. - t.Write(ctx, c, limiter, ids.RequestId(), resp) + err = t.Write(ctx, c, limiter, ids.RequestId(), resp) + if err != nil { + lg.Info("failed to write with error", zap.Error(err)) + } } } +func (t readWriteSingleKey) AuthEnabled() bool { + return t.authEnabled +} + func (t readWriteSingleKey) Read(ctx context.Context, c *recordingClient, limiter *rate.Limiter) ([]*mvccpb.KeyValue, error) { getCtx, cancel := context.WithTimeout(ctx, 20*time.Millisecond) resp, err := c.Get(getCtx, t.key) @@ -109,3 +132,38 @@ func (t readWriteSingleKey) pickWriteOperation() Operation { } panic("unexpected") } + +var ( + users = []struct { + userName string + userPassword string + }{ + {rootUserName, rootUserPassword}, + {testUserName, testUserPassword}, + } +) + +const ( + rootUserName = "root" + rootRoleName = "root" + rootUserPassword = "123" + testUserName = "test-user" + testRoleName = "test-role" + testUserPassword = "abc" +) + +func setupAuth(ctx context.Context, c interfaces.Client) error { + if _, err := c.UserAdd(ctx, rootUserName, rootUserPassword, config.UserAddOptions{}); err != nil { + return err + } + if _, err := c.RoleAdd(ctx, rootRoleName); err != nil { + return err + } + if _, err := c.UserGrantRole(ctx, rootUserName, rootRoleName); err != nil { + return err + } + if err := c.AuthEnable(ctx); err != nil { + return err + } + return nil +}