diff --git a/downstreamadapter/sink/eventrouter/topic/expression_test.go b/downstreamadapter/sink/eventrouter/topic/expression_test.go
index 82c75f265a..30a835d1b8 100644
--- a/downstreamadapter/sink/eventrouter/topic/expression_test.go
+++ b/downstreamadapter/sink/eventrouter/topic/expression_test.go
@@ -17,7 +17,7 @@ import (
 	"fmt"
 	"testing"
 
-	"github.com/pingcap/tiflow/pkg/errors"
+	"github.com/pingcap/ticdc/pkg/errors"
 	"github.com/stretchr/testify/require"
 )
 
@@ -274,10 +274,10 @@ func TestInvalidExpression(t *testing.T) {
 	require.ErrorContains(t, err, invalidExpr)
 }
 
-// cmd: go test -run='^$' -bench '^(BenchmarkSubstitute)$' github.com/pingcap/tiflow/cdc/sink/dispatcher/topic
+// cmd: go test -run='^$' -bench '^(BenchmarkSubstitute)$' github.com/pingcap/ticdc/downstreamadapter/sink/eventrouter/topic
 // goos: linux
 // goarch: amd64
-// pkg: github.com/pingcap/tiflow/cdc/sink/dispatcher
+// pkg: github.com/pingcap/ticdc/cdc/sink/dispatcher
 // cpu: Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz
 // BenchmarkSubstitute/schema_substitution-40	199372	6477 ns/op
 // BenchmarkSubstitute/schema_table_substitution-40	110752	13637 ns/op
diff --git a/pkg/api/internal/rest/client_test.go b/pkg/api/internal/rest/client_test.go
new file mode 100644
index 0000000000..cb7310447d
--- /dev/null
+++ b/pkg/api/internal/rest/client_test.go
@@ -0,0 +1,103 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package rest + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/require" +) + +func restClient(testServer *httptest.Server) (*CDCRESTClient, error) { + c, err := CDCRESTClientFromConfig(&Config{ + Host: testServer.URL, + APIPath: "/api", + Version: "v1", + }) + return c, err +} + +func TestRestRequestSuccess(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.Header().Set("Content-Type", "application/json") + rw.WriteHeader(http.StatusOK) + if r.URL.Path == "/api/v1/test" { + _, _ = rw.Write([]byte(`{"cdc": "hello world"}`)) + } + })) + defer testServer.Close() + + c, err := restClient(testServer) + require.Nil(t, err) + body, err := c.Get().WithPrefix("test").Do(context.Background()).Raw() + require.Equal(t, `{"cdc": "hello world"}`, string(body)) + require.NoError(t, err) +} + +func TestRestRequestFailed(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(`{ + "error_msg": "test rest request failed", + "error_code": "test rest request failed" + }`)) + })) + defer testServer.Close() + + c, err := restClient(testServer) + require.Nil(t, err) + err = c.Get().WithMaxRetries(1).Do(context.Background()).Error() + require.NotNil(t, err) +} + +func TestRestRawRequestFailed(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusNotFound) + _, _ = rw.Write([]byte(`{ + "error_msg": "test rest request failed", + "error_code": "test rest request failed" + }`)) + })) + defer testServer.Close() + + c, err := restClient(testServer) + require.Nil(t, err) + body, err := c.Get().WithMaxRetries(1).Do(context.Background()).Raw() + require.NotNil(t, body) + require.NotNil(t, err) +} + +func TestHTTPMethods(t *testing.T) { + testServer := 
httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + rw.WriteHeader(http.StatusOK) + })) + defer testServer.Close() + + c, _ := restClient(testServer) + + req := c.Post() + require.NotNil(t, req) + + req = c.Get() + require.NotNil(t, req) + + req = c.Put() + require.NotNil(t, req) + + req = c.Delete() + require.NotNil(t, req) +} diff --git a/pkg/api/internal/rest/config_test.go b/pkg/api/internal/rest/config_test.go new file mode 100644 index 0000000000..fcfeaafb97 --- /dev/null +++ b/pkg/api/internal/rest/config_test.go @@ -0,0 +1,91 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package rest + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/security" + "github.com/stretchr/testify/require" +) + +func TestCDCRESTClientCommonConfigs(t *testing.T) { + _, err := CDCRESTClientFromConfig(&Config{Host: "127.0.0.1"}) + require.NotNil(t, err) + + _, err = CDCRESTClientFromConfig(&Config{Host: "127.0.0.1", Version: "v1"}) + require.NotNil(t, err) + + _, err = CDCRESTClientFromConfig(&Config{Host: "127.0.0.1", APIPath: "/api"}) + require.NotNil(t, err) + + _, err = CDCRESTClientFromConfig(&Config{Host: "http://127.0.0.1:2379", APIPath: "/api", Version: "v1"}) + require.Nil(t, err) + + _, err = CDCRESTClientFromConfig(&Config{Host: "127.0.0.1:2379", APIPath: "/api", Version: "v2"}) + require.Nil(t, err) +} + +func checkTLS(config *Config) bool { + baseURL, _, err := defaultServerURLFromConfig(config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +func TestCDCRESTClientUsingTLS(t *testing.T) { + testCases := []struct { + Config *Config + UsingTLS bool + }{ + { + Config: &Config{}, + UsingTLS: false, + }, + { + Config: &Config{ + Host: "https://127.0.0.1", + }, + UsingTLS: true, + }, + { + Config: &Config{ + Host: "127.0.0.1", + Credential: &security.Credential{ + CAPath: "foo", + CertPath: "bar", + KeyPath: "test", + }, + }, + UsingTLS: true, + }, + { + Config: &Config{ + Host: "///:://127.0.0.1", + Credential: &security.Credential{ + CAPath: "foo", + CertPath: "bar", + KeyPath: "test", + }, + }, + UsingTLS: false, + }, + } + + for _, tc := range testCases { + usingTLS := checkTLS(tc.Config) + require.Equal(t, usingTLS, tc.UsingTLS) + } +} diff --git a/pkg/api/internal/rest/request_test.go b/pkg/api/internal/rest/request_test.go new file mode 100644 index 0000000000..9f8566656e --- /dev/null +++ b/pkg/api/internal/rest/request_test.go @@ -0,0 +1,171 @@ +// Copyright 2021 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package rest + +import ( + "bytes" + "context" + "errors" + "io" + "net/http" + "net/http/httptest" + "net/url" + "strings" + "testing" + "time" + + "github.com/pingcap/ticdc/pkg/httputil" + "github.com/stretchr/testify/require" +) + +func TestRequestParams(t *testing.T) { + req := (&Request{}).WithParam("foo", "bar") + require.Equal(t, req.params, url.Values{"foo": []string{"bar"}}) + + req.WithParam("hello", "world") + require.Equal(t, req.params, url.Values{"foo": []string{"bar"}, "hello": []string{"world"}}) +} + +func TestRequestURI(t *testing.T) { + req := (&Request{}).WithParam("foo", "bar").WithPrefix("test") + req.WithURI("/production?foo=hello&val=1024") + require.Equal(t, req.pathPrefix, "test/production") + require.Equal(t, req.params, url.Values{"foo": []string{"hello"}, "val": []string{"1024"}}) +} + +type testStruct struct { + Foo string `json:"foo"` + Bar int `json:"bar"` +} + +func TestRequestBody(t *testing.T) { + // test unsupported data type + req := (&Request{}).WithBody(func() {}) + require.NotNil(t, req.err) + require.Nil(t, req.body) + + // test data type which can be json marshalled + p := &testStruct{Foo: "hello", Bar: 10} + req = (&Request{}).WithBody(p) + require.Nil(t, req.err) + require.NotNil(t, req.body) + + // test data type io.Reader + req = (&Request{}).WithBody(bytes.NewReader([]byte(`{"hello": "world"}`))) + require.Nil(t, req.err) + require.NotNil(t, req.body) +} + +type clientFunc func(req *http.Request) (*http.Response, error) + +func (f 
clientFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} + +func TestRequestHeader(t *testing.T) { + cli := httputil.NewTestClient(clientFunc(func(req *http.Request) (*http.Response, error) { + require.Equal(t, req.Header.Get("signature"), "test-header1") + + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(bytes.NewReader([]byte{})), + }, nil + })) + req := newRequestWithClient(&url.URL{Path: "/test"}, "", nil).WithMethod(HTTPMethodGet) + req.WithHeader("signature", "test-header2") + req.WithHeader("signature", "test-header1") + req.c.Client = cli + + _ = req.Do(context.Background()) +} + +func TestRequestDoContext(t *testing.T) { + received := make(chan struct{}) + blocked := make(chan struct{}) + testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + close(received) + <-blocked + rw.WriteHeader(http.StatusOK) + })) + defer testServer.Close() + defer close(blocked) + + ctx, cancel := context.WithCancel(context.Background()) + + go func() { + <-received + cancel() + }() + c, err := CDCRESTClientFromConfig(&Config{ + Host: testServer.URL, + APIPath: "/api", + Version: "v1", + }) + require.Nil(t, err) + err = c.Get(). + WithPrefix("/test"). + WithTimeout(time.Second). + Do(ctx). + Error() + require.NotNil(t, err) +} + +func TestRequestDoContextTimeout(t *testing.T) { + testServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { + time.Sleep(200 * time.Millisecond) + rw.WriteHeader(http.StatusOK) + })) + defer testServer.Close() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + c, err := CDCRESTClientFromConfig(&Config{ + Host: testServer.URL, + APIPath: "/api", + Version: "v1", + }) + require.Nil(t, err) + err = c.Get(). + WithPrefix("/test"). + WithTimeout(50 * time.Millisecond). + Do(ctx). 
+ Error() + require.NotNil(t, err) +} + +func TestResultIntoError(t *testing.T) { + result := Result{err: errors.New("test-error")} + err := result.Into(&testStruct{}) + require.Equal(t, result.err, err) + + result = Result{ + body: []byte(`{"foo": "hello", "bar": 10}`), + } + + var res testStruct + err = result.Into(&res) + require.Nil(t, err) + require.Equal(t, res.Foo, "hello") + require.Equal(t, res.Bar, 10) +} + +func TestResultZeroLengthBody(t *testing.T) { + result := Result{ + body: []byte{}, + } + err := result.Into(&testStruct{}) + require.NotNil(t, err) + require.Equal(t, strings.Contains(err.Error(), "0-length"), true) +} diff --git a/pkg/config/large_message_test.go b/pkg/config/large_message_test.go new file mode 100644 index 0000000000..ad3345643e --- /dev/null +++ b/pkg/config/large_message_test.go @@ -0,0 +1,203 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package config
+
+import (
+	"testing"
+
+	"github.com/pingcap/ticdc/pkg/compression"
+	cerror "github.com/pingcap/ticdc/pkg/errors"
+	"github.com/stretchr/testify/require"
+)
+
+func TestLargeMessageHandle4Compression(t *testing.T) {
+	t.Parallel()
+
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	// unsupported compression, return error
+	largeMessageHandle.LargeMessageHandleCompression = "zstd"
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.ErrorIs(t, err, cerror.ErrInvalidReplicaConfig)
+
+	largeMessageHandle.LargeMessageHandleCompression = compression.LZ4
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.NoError(t, err)
+
+	largeMessageHandle.LargeMessageHandleCompression = compression.Snappy
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.NoError(t, err)
+
+	largeMessageHandle.LargeMessageHandleCompression = compression.None
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.NoError(t, err)
+}
+
+func TestLargeMessageHandle4NotSupportedProtocol(t *testing.T) {
+	t.Parallel()
+
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolCanal, true)
+	require.NoError(t, err)
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionHandleKeyOnly
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanal, true)
+	require.ErrorIs(t, err, cerror.ErrInvalidReplicaConfig)
+}
+
+func TestHandleKeyOnly4CanalJSON(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionHandleKeyOnly
+
+	// `enable-tidb-extension` is false, return error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.ErrorIs(t, err, cerror.ErrInvalidReplicaConfig)
+
+	// `enable-tidb-extension` is true, no error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, true)
+	require.NoError(t, err)
+	require.Equal(t, LargeMessageHandleOptionHandleKeyOnly, largeMessageHandle.LargeMessageHandleOption)
+}
+
+func TestClaimCheck4CanalJSON(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionClaimCheck
+	largeMessageHandle.ClaimCheckStorageURI = "file:///tmp/claim-check"
+
+	for _, rawValue := range []bool{false, true} {
+		largeMessageHandle.ClaimCheckRawValue = rawValue
+		// `enable-tidb-extension` is false, return error
+		err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, false)
+		require.ErrorIs(t, err, cerror.ErrInvalidReplicaConfig)
+
+		// `enable-tidb-extension` is true, no error
+		err = largeMessageHandle.AdjustAndValidate(ProtocolCanalJSON, true)
+		require.NoError(t, err)
+		require.Equal(t, LargeMessageHandleOptionClaimCheck, largeMessageHandle.LargeMessageHandleOption)
+	}
+}
+
+func TestHandleKeyOnly4OpenProtocol(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolOpen, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionHandleKeyOnly
+	// `enable-tidb-extension` is false, no error: open protocol does not require it
+	err = largeMessageHandle.AdjustAndValidate(ProtocolOpen, false)
+	require.NoError(t, err)
+
+	// `enable-tidb-extension` is true, no error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolOpen, true)
+	require.NoError(t, err)
+	require.Equal(t, LargeMessageHandleOptionHandleKeyOnly, largeMessageHandle.LargeMessageHandleOption)
+}
+
+func TestClaimCheck4OpenProtocol(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolOpen, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionClaimCheck
+	largeMessageHandle.ClaimCheckStorageURI = "file:///tmp/claim-check"
+
+	// `enable-tidb-extension` is false, no error: open protocol does not require it
+	err = largeMessageHandle.AdjustAndValidate(ProtocolOpen, false)
+	require.NoError(t, err)
+
+	// `enable-tidb-extension` is true, no error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolOpen, true)
+	require.NoError(t, err)
+	require.Equal(t, LargeMessageHandleOptionClaimCheck, largeMessageHandle.LargeMessageHandleOption)
+
+	largeMessageHandle.ClaimCheckRawValue = true
+	err = largeMessageHandle.AdjustAndValidate(ProtocolOpen, true)
+	require.ErrorIs(t, err, cerror.ErrInvalidReplicaConfig)
+}
+
+func TestHandleKeyOnly4SimpleProtocol(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolSimple, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionHandleKeyOnly
+	// `enable-tidb-extension` is false, no error: simple protocol does not require it
+	err = largeMessageHandle.AdjustAndValidate(ProtocolSimple, false)
+	require.NoError(t, err)
+
+	// `enable-tidb-extension` is true, no error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolSimple, true)
+	require.NoError(t, err)
+	require.Equal(t, LargeMessageHandleOptionHandleKeyOnly, largeMessageHandle.LargeMessageHandleOption)
+}
+
+func TestClaimCheck4SimpleProtocol(t *testing.T) {
+	t.Parallel()
+
+	// large-message-handle not set, always no error
+	largeMessageHandle := NewDefaultLargeMessageHandleConfig()
+
+	err := largeMessageHandle.AdjustAndValidate(ProtocolSimple, false)
+	require.NoError(t, err)
+	require.True(t, largeMessageHandle.Disabled())
+
+	largeMessageHandle.LargeMessageHandleOption = LargeMessageHandleOptionClaimCheck
+	largeMessageHandle.ClaimCheckStorageURI = "file:///tmp/claim-check"
+
+	// `enable-tidb-extension` is false, no error: simple protocol does not require it
+	err = largeMessageHandle.AdjustAndValidate(ProtocolSimple, false)
+	require.NoError(t, err)
+
+	// `enable-tidb-extension` is true, no error
+	err = largeMessageHandle.AdjustAndValidate(ProtocolSimple, true)
+	require.NoError(t, err)
+	require.Equal(t, LargeMessageHandleOptionClaimCheck, largeMessageHandle.LargeMessageHandleOption)
+
+	largeMessageHandle.ClaimCheckRawValue = true
+	err = largeMessageHandle.AdjustAndValidate(ProtocolSimple, true)
+	require.NoError(t, err)
+}
diff --git a/pkg/config/main_test.go b/pkg/config/main_test.go
new file mode 100644
index 0000000000..7dc1da8e83
--- /dev/null
+++ b/pkg/config/main_test.go
@@ -0,0 +1,24 @@
+// Copyright 2021 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package config + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/config/messages_test.go b/pkg/config/messages_test.go new file mode 100644 index 0000000000..53d3f068d0 --- /dev/null +++ b/pkg/config/messages_test.go @@ -0,0 +1,83 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestDefaultMessageServerConfig(t *testing.T) { + // This test case does a sanity check on default config. + // In case there is any unsatisfied condition, the MessageServer will not work properly. 
+ serverConfig := defaultMessageConfig.ToMessageServerConfig() + require.Greater(t, serverConfig.MaxPendingMessageCountPerTopic, 0) + require.Greater(t, serverConfig.MaxPendingTaskCount, 0) + require.Greater(t, serverConfig.SendChannelSize, 0) + require.Greater(t, serverConfig.AckInterval, time.Duration(0)) + require.Less(t, serverConfig.AckInterval, 10*time.Second) + require.Greater(t, serverConfig.WorkerPoolSize, 0) + require.Greater(t, serverConfig.SendRateLimitPerStream, 0.1) + require.Greater(t, serverConfig.MaxPeerCount, 0) + require.Greater(t, serverConfig.WaitUnregisterHandleTimeoutThreshold, time.Duration(0)) + require.EqualValues(t, serverConfig.MaxRecvMsgSize, defaultMaxRecvMsgSize) +} + +func TestDefaultMessageClientConfig(t *testing.T) { + clientConfig := defaultMessageConfig.ToMessageClientConfig() + require.Greater(t, clientConfig.SendChannelSize, 0) + require.Greater(t, clientConfig.BatchSendInterval, time.Duration(0)) + require.Greater(t, clientConfig.MaxBatchBytes, 0) + require.Greater(t, clientConfig.MaxBatchCount, 0) + require.Greater(t, clientConfig.RetryRateLimitPerSecond, 0.1) + require.Greater(t, clientConfig.DialTimeout, time.Duration(0)) + require.EqualValues(t, clientConfig.MaxRecvMsgSize, defaultMaxRecvMsgSize) +} + +func TestMessagesConfigClone(t *testing.T) { + config := defaultMessageConfig.Clone() + require.Equal(t, defaultMessageConfig, config) +} + +func TestMessagesConfigValidateAndAdjust(t *testing.T) { + emptyConfig := &MessagesConfig{} + err := emptyConfig.ValidateAndAdjust() + require.NoError(t, err) + require.Equal(t, defaultMessageConfig, emptyConfig) + + illegalConfig := defaultMessageConfig.Clone() + illegalConfig.ServerAckInterval = TomlDuration(time.Second * 20) + err = illegalConfig.ValidateAndAdjust() + require.Error(t, err) + require.Regexp(t, ".*ErrInvalidServerOption.*", err.Error()) + + illegalConfig = defaultMessageConfig.Clone() + illegalConfig.ClientMaxBatchInterval = TomlDuration(time.Second * 20) + err = 
illegalConfig.ValidateAndAdjust() + require.Error(t, err) + require.Regexp(t, ".*ErrInvalidServerOption.*", err.Error()) + + illegalConfig = defaultMessageConfig.Clone() + illegalConfig.ServerWorkerPoolSize = 64 + err = illegalConfig.ValidateAndAdjust() + require.Error(t, err) + require.Regexp(t, ".*ErrInvalidServerOption.*", err.Error()) + + illegalConfig = defaultMessageConfig.Clone() + illegalConfig.MaxRecvMsgSize = -1 + err = illegalConfig.ValidateAndAdjust() + require.Error(t, err) +} diff --git a/pkg/config/server_config_test.go b/pkg/config/server_config_test.go new file mode 100644 index 0000000000..f3f1abe485 --- /dev/null +++ b/pkg/config/server_config_test.go @@ -0,0 +1,157 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestServerConfigMarshal(t *testing.T) { + t.Parallel() + conf := GetDefaultServerConfig() + conf.Addr = "192.155.22.33:8887" + b, err := conf.Marshal() + require.NoError(t, err) + conf2 := new(ServerConfig) + err = conf2.Unmarshal([]byte(b)) + require.NoError(t, err) + require.Equal(t, conf, conf2) +} + +func TestServerConfigClone(t *testing.T) { + t.Parallel() + conf := GetDefaultServerConfig() + conf.Addr = "192.155.22.33:8887" + conf.Sorter.SortDir = "/tmp" + conf2 := conf.Clone() + require.Equal(t, conf, conf2) + conf2.Sorter.SortDir = "/tmp/sorter" + require.Equal(t, "/tmp", conf.Sorter.SortDir) +} + +func TestServerConfigValidateAndAdjust(t *testing.T) { + t.Parallel() + conf := new(ServerConfig) + + require.Regexp(t, ".*bad cluster-id.*", conf.ValidateAndAdjust()) + conf.ClusterID = "__backup__" + require.Regexp(t, ".*bad cluster-id.*", conf.ValidateAndAdjust()) + conf.ClusterID = "default" + require.Regexp(t, ".*empty address", conf.ValidateAndAdjust()) + conf.Addr = "cdc:1234" + require.Regexp(t, ".*empty GC TTL is not allowed", conf.ValidateAndAdjust()) + conf.GcTTL = 60 + require.Nil(t, conf.ValidateAndAdjust()) + require.Equal(t, conf.Addr, conf.AdvertiseAddr) + conf.AdvertiseAddr = "advertise:1234" + require.Nil(t, conf.ValidateAndAdjust()) + require.Equal(t, "cdc:1234", conf.Addr) + require.Equal(t, "advertise:1234", conf.AdvertiseAddr) + conf.AdvertiseAddr = "0.0.0.0:1234" + require.Regexp(t, ".*must be specified.*", conf.ValidateAndAdjust()) + conf.Addr = "0.0.0.0:1234" + require.Regexp(t, ".*must be specified.*", conf.ValidateAndAdjust()) + conf.AdvertiseAddr = "advertise" + require.Regexp(t, ".*does not contain a port", conf.ValidateAndAdjust()) + conf.AdvertiseAddr = "advertise:1234" + conf.Debug.Messages.ServerWorkerPoolSize = 0 + require.Nil(t, conf.ValidateAndAdjust()) + require.EqualValues(t, 
GetDefaultServerConfig().Debug.Messages.ServerWorkerPoolSize, conf.Debug.Messages.ServerWorkerPoolSize) +} + +func TestDBConfigValidateAndAdjust(t *testing.T) { + t.Parallel() + conf := GetDefaultServerConfig().Clone().Debug.DB + + require.Nil(t, conf.ValidateAndAdjust()) + conf.Compression = "none" + require.Nil(t, conf.ValidateAndAdjust()) + conf.Compression = "snappy" + require.Nil(t, conf.ValidateAndAdjust()) + conf.Compression = "invalid" + require.Error(t, conf.ValidateAndAdjust()) +} + +func TestKVClientConfigValidateAndAdjust(t *testing.T) { + t.Parallel() + conf := GetDefaultServerConfig().Clone().KVClient + + require.Nil(t, conf.ValidateAndAdjust()) + conf.RegionRetryDuration = TomlDuration(time.Second) + require.Nil(t, conf.ValidateAndAdjust()) + conf.RegionRetryDuration = -TomlDuration(time.Second) + require.Error(t, conf.ValidateAndAdjust()) +} + +func TestSchedulerConfigValidateAndAdjust(t *testing.T) { + t.Parallel() + conf := GetDefaultServerConfig().Clone().Debug.Scheduler + require.Nil(t, conf.ValidateAndAdjust()) + + conf = GetDefaultServerConfig().Clone().Debug.Scheduler + conf.HeartbeatTick = -1 + require.Error(t, conf.ValidateAndAdjust()) + conf.HeartbeatTick = 0 + require.Error(t, conf.ValidateAndAdjust()) + + conf = GetDefaultServerConfig().Clone().Debug.Scheduler + conf.CollectStatsTick = -1 + require.Error(t, conf.ValidateAndAdjust()) + conf.CollectStatsTick = 0 + require.Error(t, conf.ValidateAndAdjust()) + + conf = GetDefaultServerConfig().Clone().Debug.Scheduler + conf.MaxTaskConcurrency = -1 + require.Error(t, conf.ValidateAndAdjust()) + conf.MaxTaskConcurrency = 0 + require.Error(t, conf.ValidateAndAdjust()) + + conf = GetDefaultServerConfig().Clone().Debug.Scheduler + conf.CheckBalanceInterval = -1 + require.Error(t, conf.ValidateAndAdjust()) + conf.CheckBalanceInterval = TomlDuration(time.Second) + require.Error(t, conf.ValidateAndAdjust()) + + conf = GetDefaultServerConfig().Clone().Debug.Scheduler + conf.AddTableBatchSize = 0 + 
require.Error(t, conf.ValidateAndAdjust()) +} + +func TestIsValidClusterID(t *testing.T) { + cases := []struct { + id string + valid bool + }{ + {"owner", false}, + {"capture", false}, + {"task", false}, + {"changefeed", false}, + {"job", false}, + {"meta", false}, + {"__backup__", false}, + {"", false}, + {"12345678901234567890123456789012345678901234567890123456789012345678901234567890" + + "1234567890123456789012345678901234567890123456789", false}, + {"12345678901234567890123456789012345678901234567890123456789012345678901234567890" + + "123456789012345678901234567890123456789012345678", true}, + {"default", true}, + } + for _, c := range cases { + t.Log(c.id) + require.Equal(t, c.valid, isValidClusterID(c.id)) + } +} diff --git a/pkg/config/sink_protocol_test.go b/pkg/config/sink_protocol_test.go new file mode 100644 index 0000000000..27e7106b3c --- /dev/null +++ b/pkg/config/sink_protocol_test.go @@ -0,0 +1,159 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseSinkProtocolFromString(t *testing.T) { + t.Parallel() + testCases := []struct { + protocol string + expectedProtocolEnum Protocol + expectedErr string + }{ + { + protocol: "random", + expectedErr: ".*unknown 'random' message protocol for sink.*", + }, + { + protocol: "default", + expectedProtocolEnum: ProtocolOpen, + }, + { + protocol: "canal", + expectedProtocolEnum: ProtocolCanal, + }, + { + protocol: "canal-json", + expectedProtocolEnum: ProtocolCanalJSON, + }, + { + protocol: "maxwell", + expectedProtocolEnum: ProtocolMaxwell, + }, + { + protocol: "avro", + expectedProtocolEnum: ProtocolAvro, + }, + { + protocol: "flat-avro", + expectedProtocolEnum: ProtocolAvro, + }, + { + protocol: "craft", + expectedProtocolEnum: ProtocolCraft, + }, + { + protocol: "open-protocol", + expectedProtocolEnum: ProtocolOpen, + }, + } + + for _, tc := range testCases { + protocol, err := ParseSinkProtocolFromString(tc.protocol) + if tc.expectedErr != "" { + require.Regexp(t, tc.expectedErr, err) + } else { + require.Equal(t, tc.expectedProtocolEnum, protocol) + } + } +} + +func TestString(t *testing.T) { + t.Parallel() + + testCases := []struct { + protocolEnum Protocol + expectedProtocol string + }{ + { + protocolEnum: ProtocolDefault, + expectedProtocol: "default", + }, + { + protocolEnum: ProtocolCanal, + expectedProtocol: "canal", + }, + { + protocolEnum: ProtocolCanalJSON, + expectedProtocol: "canal-json", + }, + { + protocolEnum: ProtocolMaxwell, + expectedProtocol: "maxwell", + }, + { + protocolEnum: ProtocolAvro, + expectedProtocol: "avro", + }, + { + protocolEnum: ProtocolCraft, + expectedProtocol: "craft", + }, + { + protocolEnum: ProtocolOpen, + expectedProtocol: "open-protocol", + }, + } + + for _, tc := range testCases { + require.Equal(t, tc.expectedProtocol, tc.protocolEnum.String()) + } +} + +func TestIsBatchEncoder(t *testing.T) { + t.Parallel() + + testCases := 
[]struct { + protocolEnum Protocol + expect bool + }{ + { + protocolEnum: ProtocolDefault, + expect: false, + }, + { + protocolEnum: ProtocolCanal, + expect: true, + }, + { + protocolEnum: ProtocolCanalJSON, + expect: false, + }, + { + protocolEnum: ProtocolMaxwell, + expect: true, + }, + { + protocolEnum: ProtocolAvro, + expect: false, + }, + { + protocolEnum: ProtocolCraft, + expect: true, + }, + { + protocolEnum: ProtocolOpen, + expect: true, + }, + } + + for _, tc := range testCases { + require.Equal(t, tc.expect, tc.protocolEnum.IsBatchEncode()) + } +} diff --git a/pkg/config/sink_test.go b/pkg/config/sink_test.go new file mode 100644 index 0000000000..20c0d9075c --- /dev/null +++ b/pkg/config/sink_test.go @@ -0,0 +1,465 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package config + +import ( + "net/url" + "testing" + + "github.com/pingcap/ticdc/pkg/util" + "github.com/stretchr/testify/require" +) + +func TestValidateTxnAtomicity(t *testing.T) { + t.Parallel() + testCases := []struct { + sinkURI string + expectedErr string + shouldSplitTxn bool + }{ + { + sinkURI: "mysql://normal:123456@127.0.0.1:3306", + expectedErr: "", + shouldSplitTxn: true, + }, + { + sinkURI: "mysql://normal:123456@127.0.0.1:3306?transaction-atomicity=table", + expectedErr: "", + shouldSplitTxn: false, + }, + { + sinkURI: "mysql://normal:123456@127.0.0.1:3306?transaction-atomicity=none", + expectedErr: "", + shouldSplitTxn: true, + }, + { + sinkURI: "mysql://normal:123456@127.0.0.1:3306?transaction-atomicity=global", + expectedErr: "global level atomicity is not supported by.*", + }, + { + sinkURI: "tidb://normal:123456@127.0.0.1:3306?protocol=canal", + expectedErr: ".*protocol canal is incompatible with tidb scheme.*", + }, + { + sinkURI: "tidb://normal:123456@127.0.0.1:3306?protocol=default", + expectedErr: ".*protocol default is incompatible with tidb scheme.*", + }, + { + sinkURI: "tidb://normal:123456@127.0.0.1:3306?protocol=random", + expectedErr: ".*protocol .* is incompatible with tidb scheme.*", + }, + { + sinkURI: "blackhole://normal:123456@127.0.0.1:3306?transaction-atomicity=none", + expectedErr: "", + shouldSplitTxn: true, + }, + { + sinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=none" + + "&protocol=open-protocol", + expectedErr: "", + shouldSplitTxn: true, + }, + { + sinkURI: "kafka://127.0.0.1:9092?protocol=default", + expectedErr: "", + shouldSplitTxn: true, + }, + { + sinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=none", + expectedErr: ".*unknown .* message protocol for sink.*", + }, + { + sinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=table" + + "&protocol=open-protocol", + expectedErr: "table level atomicity is not supported by kafka scheme", + }, + { + sinkURI: 
"kafka://127.0.0.1:9092?transaction-atomicity=invalid" + + "&protocol=open-protocol", + expectedErr: "invalid level atomicity is not supported by kafka scheme", + }, + { + sinkURI: "pulsar://127.0.0.1:6550?transaction-atomicity=invalid" + + "&protocol=open-protocol", + expectedErr: "invalid level atomicity is not supported by pulsar scheme", + }, + { + sinkURI: "pulsar://127.0.0.1:6550/test?protocol=canal-json", + shouldSplitTxn: true, + }, + } + + for _, tc := range testCases { + cfg := SinkConfig{} + parsedSinkURI, err := url.Parse(tc.sinkURI) + require.Nil(t, err) + if tc.expectedErr == "" { + require.Nil(t, cfg.validateAndAdjust(parsedSinkURI)) + require.Equal(t, tc.shouldSplitTxn, util.GetOrZero(cfg.TxnAtomicity).ShouldSplitTxn()) + } else { + require.Regexp(t, tc.expectedErr, cfg.validateAndAdjust(parsedSinkURI)) + } + } +} + +func TestValidateProtocol(t *testing.T) { + t.Parallel() + testCases := []struct { + sinkConfig *SinkConfig + sinkURI string + result string + }{ + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + }, + sinkURI: "kafka://127.0.0.1:9092?protocol=whatever", + result: "whatever", + }, + { + sinkConfig: &SinkConfig{}, + sinkURI: "kafka://127.0.0.1:9092?protocol=default", + result: "default", + }, + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + }, + sinkURI: "kafka://127.0.0.1:9092", + result: "default", + }, + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + }, + sinkURI: "pulsar://127.0.0.1:6650", + result: "default", + }, + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("canal-json"), + }, + sinkURI: "pulsar://127.0.0.1:6650/test?protocol=canal-json", + result: "canal-json", + }, + } + for _, c := range testCases { + parsedSinkURI, err := url.Parse(c.sinkURI) + require.Nil(t, err) + c.sinkConfig.validateAndAdjustSinkURI(parsedSinkURI) + require.Equal(t, c.result, util.GetOrZero(c.sinkConfig.Protocol)) + } +} + +func TestApplyParameterBySinkURI(t *testing.T) { + 
t.Parallel() + kafkaURI := "kafka://127.0.0.1:9092?protocol=whatever&transaction-atomicity=none" + testCases := []struct { + sinkConfig *SinkConfig + sinkURI string + expectedErr string + expectedProtocol string + expectedTxnAtomicity AtomicityLevel + }{ + // test only config file + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + TxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + sinkURI: "kafka://127.0.0.1:9092", + expectedProtocol: "default", + expectedTxnAtomicity: noneTxnAtomicity, + }, + // test only sink uri + { + sinkConfig: &SinkConfig{}, + sinkURI: kafkaURI, + expectedProtocol: "whatever", + expectedTxnAtomicity: noneTxnAtomicity, + }, + // test conflict scenarios + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + TxnAtomicity: util.AddressOf(tableTxnAtomicity), + }, + sinkURI: kafkaURI, + expectedProtocol: "whatever", + expectedTxnAtomicity: noneTxnAtomicity, + expectedErr: "incompatible configuration in sink uri", + }, + { + sinkConfig: &SinkConfig{ + Protocol: util.AddressOf("default"), + TxnAtomicity: util.AddressOf(unknownTxnAtomicity), + }, + sinkURI: kafkaURI, + expectedProtocol: "whatever", + expectedTxnAtomicity: noneTxnAtomicity, + expectedErr: "incompatible configuration in sink uri", + }, + } + for _, tc := range testCases { + parsedSinkURI, err := url.Parse(tc.sinkURI) + require.Nil(t, err) + err = tc.sinkConfig.applyParameterBySinkURI(parsedSinkURI) + + require.Equal(t, util.AddressOf(tc.expectedProtocol), tc.sinkConfig.Protocol) + require.Equal(t, util.AddressOf(tc.expectedTxnAtomicity), tc.sinkConfig.TxnAtomicity) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tc.expectedErr) + } + } +} + +func TestCheckCompatibilityWithSinkURI(t *testing.T) { + t.Parallel() + testCases := []struct { + newSinkConfig *SinkConfig + oldSinkConfig *SinkConfig + newsinkURI string + expectedErr string + expectedProtocol *string + expectedTxnAtomicity *AtomicityLevel + 
}{ + // test no update + { + newSinkConfig: &SinkConfig{}, + oldSinkConfig: &SinkConfig{}, + newsinkURI: "kafka://", + expectedProtocol: nil, + expectedTxnAtomicity: nil, + }, + // test update config return err + { + newSinkConfig: &SinkConfig{ + TxnAtomicity: util.AddressOf(tableTxnAtomicity), + }, + oldSinkConfig: &SinkConfig{ + TxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + newsinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=none", + expectedErr: "incompatible configuration in sink uri", + expectedProtocol: nil, + expectedTxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + // test update compatible config + { + newSinkConfig: &SinkConfig{ + Protocol: util.AddressOf("canal"), + }, + oldSinkConfig: &SinkConfig{ + TxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + newsinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=none", + expectedProtocol: util.AddressOf("canal"), + expectedTxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + // test update sinkuri + { + newSinkConfig: &SinkConfig{ + TxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + oldSinkConfig: &SinkConfig{ + TxnAtomicity: util.AddressOf(noneTxnAtomicity), + }, + newsinkURI: "kafka://127.0.0.1:9092?transaction-atomicity=table", + expectedProtocol: nil, + expectedTxnAtomicity: util.AddressOf(tableTxnAtomicity), + }, + } + for _, tc := range testCases { + err := tc.newSinkConfig.CheckCompatibilityWithSinkURI(tc.oldSinkConfig, tc.newsinkURI) + if tc.expectedErr == "" { + require.NoError(t, err) + } else { + require.ErrorContains(t, err, tc.expectedErr) + } + require.Equal(t, tc.expectedProtocol, tc.newSinkConfig.Protocol) + require.Equal(t, tc.expectedTxnAtomicity, tc.newSinkConfig.TxnAtomicity) + } +} + +func TestValidateAndAdjustCSVConfig(t *testing.T) { + t.Parallel() + tests := []struct { + name string + config *CSVConfig + wantErr string + }{ + { + name: "valid quote", + config: &CSVConfig{ + Quote: "\"", + Delimiter: ",", + BinaryEncodingMethod: BinaryEncodingBase64, + }, + 
wantErr: "", + }, + { + name: "quote has multiple characters", + config: &CSVConfig{ + Quote: "***", + }, + wantErr: "csv config quote contains more than one character", + }, + { + name: "quote contains line break character", + config: &CSVConfig{ + Quote: "\n", + }, + wantErr: "csv config quote cannot be line break character", + }, + { + name: "valid delimiter1", + config: &CSVConfig{ + Quote: "\"", + Delimiter: ",", + BinaryEncodingMethod: BinaryEncodingHex, + }, + wantErr: "", + }, + { + name: "valid delimiter with 2 characters", + config: &CSVConfig{ + Quote: "\"", + Delimiter: "FE", + BinaryEncodingMethod: BinaryEncodingHex, + }, + wantErr: "", + }, + { + name: "valid delimiter with 3 characters", + config: &CSVConfig{ + Quote: "\"", + Delimiter: "|@|", + BinaryEncodingMethod: BinaryEncodingHex, + }, + wantErr: "", + }, + { + name: "delimiter is empty", + config: &CSVConfig{ + Quote: "'", + Delimiter: "", + }, + wantErr: "csv config delimiter cannot be empty", + }, + { + name: "delimiter contains line break character", + config: &CSVConfig{ + Quote: "'", + Delimiter: "\r", + }, + wantErr: "csv config delimiter contains line break characters", + }, + { + name: "delimiter contains more than three characters", + config: &CSVConfig{ + Quote: "'", + Delimiter: "FEFA", + }, + wantErr: "csv config delimiter contains more than three characters, note that escape " + + "sequences can only be used in double quotes in toml configuration items.", + }, + { + name: "delimiter and quote are same", + config: &CSVConfig{ + Quote: "'", + Delimiter: "'", + }, + wantErr: "csv config quote and delimiter has common characters which is not allowed", + }, + { + name: "delimiter and quote contain common characters", + config: &CSVConfig{ + Quote: "E", + Delimiter: "FE", + }, + wantErr: "csv config quote and delimiter has common characters which is not allowed", + }, + { + name: "invalid binary encoding method", + config: &CSVConfig{ + Quote: "\"", + Delimiter: ",", + 
BinaryEncodingMethod: "invalid", + }, + wantErr: "csv config binary-encoding-method can only be hex or base64", + }, + } + for _, c := range tests { + tc := c + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + s := &SinkConfig{ + CSVConfig: tc.config, + } + if tc.wantErr == "" { + require.Nil(t, s.CSVConfig.validateAndAdjust()) + } else { + require.Regexp(t, tc.wantErr, s.CSVConfig.validateAndAdjust()) + } + }) + } +} + +func TestValidateAndAdjustStorageConfig(t *testing.T) { + t.Parallel() + + sinkURI, err := url.Parse("s3://bucket?protocol=csv") + require.NoError(t, err) + s := GetDefaultReplicaConfig() + err = s.ValidateAndAdjust(sinkURI) + require.NoError(t, err) + require.Equal(t, DefaultFileIndexWidth, util.GetOrZero(s.Sink.FileIndexWidth)) + + err = s.ValidateAndAdjust(sinkURI) + require.NoError(t, err) + require.Equal(t, DefaultFileIndexWidth, util.GetOrZero(s.Sink.FileIndexWidth)) + + s.Sink.FileIndexWidth = util.AddressOf(16) + err = s.ValidateAndAdjust(sinkURI) + require.NoError(t, err) + require.Equal(t, 16, util.GetOrZero(s.Sink.FileIndexWidth)) +} + +func TestShouldSendBootstrapMsg(t *testing.T) { + t.Parallel() + sinkConfig := GetDefaultReplicaConfig().Sink + require.False(t, sinkConfig.ShouldSendBootstrapMsg()) + + protocol := "simple" + sinkConfig.Protocol = &protocol + require.True(t, sinkConfig.ShouldSendBootstrapMsg()) + + count := int32(0) + sinkConfig.SendBootstrapInMsgCount = &count + require.False(t, sinkConfig.ShouldSendBootstrapMsg()) +} + +func TestShouldSendAllBootstrapAtStart(t *testing.T) { + t.Parallel() + sinkConfig := GetDefaultReplicaConfig().Sink + protocol := "simple" + sinkConfig.Protocol = &protocol + require.False(t, sinkConfig.ShouldSendAllBootstrapAtStart()) + + should := true + sinkConfig.SendAllBootstrapAtStart = &should + require.True(t, sinkConfig.ShouldSendAllBootstrapAtStart()) +} diff --git a/pkg/errors/helper_test.go b/pkg/errors/helper_test.go new file mode 100644 index 0000000000..b282a956fb --- /dev/null +++ 
b/pkg/errors/helper_test.go @@ -0,0 +1,130 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "context" + "fmt" + "testing" + + "github.com/pingcap/errors" + "github.com/stretchr/testify/require" +) + +func TestWrapError(t *testing.T) { + t.Parallel() + var ( + err = errors.New("cause error") + testCases = []struct { + rfcError *errors.Error + err error + isNil bool + expected string + args []interface{} + }{ + {ErrDecodeFailed, nil, true, "", nil}, + { + ErrDecodeFailed, err, false, + "[CDC:ErrDecodeFailed]decode failed: args data: cause error", + []interface{}{"args data"}, + }, + } + ) + for _, tc := range testCases { + we := WrapError(tc.rfcError, tc.err, tc.args...) 
+ if tc.isNil { + require.Nil(t, we) + } else { + require.NotNil(t, we) + require.Equal(t, we.Error(), tc.expected) + } + } +} + +func TestRFCCode(t *testing.T) { + t.Parallel() + rfc, ok := RFCCode(ErrAPIInvalidParam) + require.Equal(t, true, ok) + require.Contains(t, rfc, "ErrAPIInvalidParam") + + err := fmt.Errorf("inner error: invalid request") + rfc, ok = RFCCode(err) + require.Equal(t, false, ok) + require.Equal(t, rfc, errors.RFCErrorCode("")) + + rfcErr := ErrAPIInvalidParam + Err := WrapError(rfcErr, err) + rfc, ok = RFCCode(Err) + require.Equal(t, true, ok) + require.Contains(t, rfc, "ErrAPIInvalidParam") + + anoErr := errors.Annotate(ErrEtcdTryAgain, "annotated Etcd Try again") + rfc, ok = RFCCode(anoErr) + require.Equal(t, true, ok) + require.Contains(t, rfc, "ErrEtcdTryAgain") +} + +func TestIsRetryableError(t *testing.T) { + t.Parallel() + tests := []struct { + name string + err error + want bool + }{ + {"nil error", nil, false}, + {"context Canceled err", context.Canceled, false}, + {"context DeadlineExceeded err", context.DeadlineExceeded, false}, + {"normal err", errors.New("test"), true}, + {"cdc reachMaxTry err", ErrReachMaxTry, true}, + } + for _, tt := range tests { + ret := IsRetryableError(tt.err) + require.Equal(t, ret, tt.want, "case:%s", tt.name) + } +} + +func TestChangefeedFastFailError(t *testing.T) { + t.Parallel() + err := ErrSnapshotLostByGC.GenWithStackByArgs(2333, 2345) + rfcCode, _ := RFCCode(err) + require.Equal(t, true, IsChangefeedGCFastFailErrorCode(rfcCode)) + + err = ErrStartTsBeforeGC.GenWithStackByArgs(2333, 2345) + rfcCode, _ = RFCCode(err) + require.Equal(t, true, IsChangefeedGCFastFailErrorCode(rfcCode)) + + err = ErrToTLSConfigFailed.GenWithStackByArgs() + rfcCode, _ = RFCCode(err) + require.Equal(t, false, IsChangefeedGCFastFailErrorCode(rfcCode)) +} + +func TestIsCliUnprintableError(t *testing.T) { + t.Parallel() + tests := []struct { + name string + err error + want bool + }{ + {"nil error", nil, false}, + {"context 
Canceled err", context.Canceled, false}, + {"context DeadlineExceeded err", context.DeadlineExceeded, false}, + {"normal err", errors.New("test"), false}, + {"cdc reachMaxTry err", ErrReachMaxTry, false}, + {"cli unprint err", ErrCliAborted, true}, + } + for _, tt := range tests { + ret := IsCliUnprintableError(tt.err) + require.Equal(t, ret, tt.want, "case:%s", tt.name) + } +} diff --git a/pkg/errors/main_test.go b/pkg/errors/main_test.go new file mode 100644 index 0000000000..5f60c43031 --- /dev/null +++ b/pkg/errors/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package errors + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/etcd/client_test.go b/pkg/etcd/client_test.go new file mode 100644 index 0000000000..199897c559 --- /dev/null +++ b/pkg/etcd/client_test.go @@ -0,0 +1,356 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcd + +import ( + "context" + "sync/atomic" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/pingcap/errors" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + clientv3 "go.etcd.io/etcd/client/v3" +) + +type mockClient struct { + clientv3.KV + getOK bool +} + +func (m *mockClient) Get(ctx context.Context, key string, opts ...clientv3.OpOption) (resp *clientv3.GetResponse, err error) { + if m.getOK { + m.getOK = true + return nil, errors.New("mock error") + } + return &clientv3.GetResponse{}, nil +} + +func (m *mockClient) Put(ctx context.Context, key, val string, opts ...clientv3.OpOption) (resp *clientv3.PutResponse, err error) { + return nil, errors.New("mock error") +} + +func (m *mockClient) Txn(ctx context.Context) clientv3.Txn { + return &mockTxn{ctx: ctx} +} + +type mockWatcher struct { + clientv3.Watcher + watchCh chan clientv3.WatchResponse + resetCount *int32 + requestCount *int32 + rev *int64 +} + +func (m mockWatcher) Watch(ctx context.Context, key string, opts ...clientv3.OpOption) clientv3.WatchChan { + atomic.AddInt32(m.resetCount, 1) + op := &clientv3.Op{} + for _, opt := range opts { + opt(op) + } + atomic.StoreInt64(m.rev, op.Rev()) + return m.watchCh +} + +func (m mockWatcher) RequestProgress(ctx context.Context) error { + atomic.AddInt32(m.requestCount, 1) + return nil +} + +func TestRetry(t *testing.T) { + // here we need to change maxTries, which is not thread safe + // so we don't use t.Parallel() for this test + + originValue := maxTries + // to speedup the test + maxTries = 2 + + cli := clientv3.NewCtxClient(context.TODO()) + cli.KV = &mockClient{} + retrycli := Wrap(cli, nil) + get, err := retrycli.Get(context.TODO(), "") + + require.NoError(t, err) + require.NotNil(t, get) + + _, err = retrycli.Put(context.TODO(), "", "") + require.NotNil(t, err) + require.Containsf(t, errors.Cause(err).Error(), "mock error", "err:%v", err.Error()) + + ctx, cancel := 
context.WithCancel(context.Background()) + defer cancel() + + // Test Txn case + // case 0: normal + rsp, err := retrycli.Txn(ctx, nil, nil, nil) + require.NoError(t, err) + require.False(t, rsp.Succeeded) + + // case 1: errors.ErrReachMaxTry + _, err = retrycli.Txn(ctx, txnEmptyCmps, nil, nil) + require.Regexp(t, ".*CDC:ErrReachMaxTry.*", err) + + // case 2: errors.ErrReachMaxTry + _, err = retrycli.Txn(ctx, nil, txnEmptyOpsThen, nil) + require.Regexp(t, ".*CDC:ErrReachMaxTry.*", err) + + // case 3: context.DeadlineExceeded + _, err = retrycli.Txn(ctx, txnEmptyCmps, txnEmptyOpsThen, nil) + require.Equal(t, context.DeadlineExceeded, err) + + // other case: mock error + _, err = retrycli.Txn(ctx, txnEmptyCmps, txnEmptyOpsThen, TxnEmptyOpsElse) + require.Containsf(t, errors.Cause(err).Error(), "mock error", "err:%v", err.Error()) + + maxTries = originValue +} + +func TestDelegateLease(t *testing.T) { + t.Parallel() + + ctx := context.Background() + url, server, err := SetupEmbedEtcd(t.TempDir()) + defer func() { + server.Close() + }() + require.Nil(t, err) + cli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{url.String()}, + DialTimeout: 3 * time.Second, + }) + require.Nil(t, err) + defer cli.Close() + + ttl := int64(10) + lease, err := cli.Grant(ctx, ttl) + require.Nil(t, err) + + ttlResp, err := cli.TimeToLive(ctx, lease.ID) + require.Nil(t, err) + require.Equal(t, ttlResp.GrantedTTL, ttl) + require.Less(t, ttlResp.TTL, ttl) + require.Greater(t, ttlResp.TTL, int64(0)) + + _, err = cli.Revoke(ctx, lease.ID) + require.Nil(t, err) + ttlResp, err = cli.TimeToLive(ctx, lease.ID) + require.Nil(t, err) + require.Equal(t, ttlResp.TTL, int64(-1)) +} + +// test no data lost when WatchCh blocked +func TestWatchChBlocked(t *testing.T) { + t.Parallel() + + cli := clientv3.NewCtxClient(context.TODO()) + resetCount := int32(0) + requestCount := int32(0) + rev := int64(0) + watchCh := make(chan clientv3.WatchResponse, 1) + watcher := mockWatcher{watchCh: watchCh, 
resetCount: &resetCount, requestCount: &requestCount, rev: &rev} + cli.Watcher = watcher + + sentRes := []clientv3.WatchResponse{ + {CompactRevision: 1}, + {CompactRevision: 2}, + {CompactRevision: 3}, + {CompactRevision: 4}, + {CompactRevision: 5}, + {CompactRevision: 6}, + } + + go func() { + for _, r := range sentRes { + watchCh <- r + } + }() + + mockClock := clock.NewMock() + watchCli := Wrap(cli, nil) + watchCli.clock = mockClock + + key := "testWatchChBlocked" + outCh := make(chan clientv3.WatchResponse, 6) + revision := int64(1) + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + + go func() { + watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision)) + }() + receivedRes := make([]clientv3.WatchResponse, 0) + // wait for WatchWithChan set up + r := <-outCh + receivedRes = append(receivedRes, r) + // move time forward + mockClock.Add(time.Second * 30) + + for r := range outCh { + receivedRes = append(receivedRes, r) + if len(receivedRes) == len(sentRes) { + cancel() + } + } + + require.Equal(t, sentRes, receivedRes) + // make sure watchCh has been reset since timeout + require.True(t, atomic.LoadInt32(watcher.resetCount) > 1) + // make sure RequestProgress has been call since timeout + require.True(t, atomic.LoadInt32(watcher.requestCount) > 1) + // make sure etcdRequestProgressDuration is less than etcdWatchChTimeoutDuration + require.Less(t, etcdRequestProgressDuration, etcdWatchChTimeoutDuration) +} + +// test no data lost when OutCh blocked +func TestOutChBlocked(t *testing.T) { + t.Parallel() + + cli := clientv3.NewCtxClient(context.TODO()) + resetCount := int32(0) + requestCount := int32(0) + rev := int64(0) + watchCh := make(chan clientv3.WatchResponse, 1) + watcher := mockWatcher{watchCh: watchCh, resetCount: &resetCount, requestCount: &requestCount, rev: &rev} + cli.Watcher = watcher + + mockClock := clock.NewMock() + watchCli := Wrap(cli, nil) + watchCli.clock = 
mockClock + + sentRes := []clientv3.WatchResponse{ + {CompactRevision: 1}, + {CompactRevision: 2}, + {CompactRevision: 3}, + } + + go func() { + for _, r := range sentRes { + watchCh <- r + } + }() + + key := "testOutChBlocked" + outCh := make(chan clientv3.WatchResponse, 1) + revision := int64(1) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + go func() { + watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision)) + }() + receivedRes := make([]clientv3.WatchResponse, 0) + // wait for WatchWithChan set up + r := <-outCh + receivedRes = append(receivedRes, r) + // move time forward + mockClock.Add(time.Second * 30) + + for r := range outCh { + receivedRes = append(receivedRes, r) + if len(receivedRes) == len(sentRes) { + cancel() + } + } + + require.Equal(t, sentRes, receivedRes) +} + +func TestRevisionNotFallBack(t *testing.T) { + t.Parallel() + + cli := clientv3.NewCtxClient(context.TODO()) + resetCount := int32(0) + requestCount := int32(0) + rev := int64(0) + watchCh := make(chan clientv3.WatchResponse, 1) + watcher := mockWatcher{watchCh: watchCh, resetCount: &resetCount, requestCount: &requestCount, rev: &rev} + cli.Watcher = watcher + mockClock := clock.NewMock() + watchCli := Wrap(cli, nil) + watchCli.clock = mockClock + + key := "testRevisionNotFallBack" + outCh := make(chan clientv3.WatchResponse, 1) + // watch from revision = 2 + revision := int64(2) + + sentRes := []clientv3.WatchResponse{ + {CompactRevision: 1}, + } + + go func() { + for _, r := range sentRes { + watchCh <- r + } + }() + + ctx, cancel := context.WithTimeout(context.Background(), time.Second*2) + defer cancel() + go func() { + watchCli.WatchWithChan(ctx, outCh, key, "", clientv3.WithPrefix(), clientv3.WithRev(revision)) + }() + // wait for WatchWithChan set up + <-outCh + // move time forward + mockClock.Add(time.Second * 30) + // make sure watchCh has been reset since timeout + require.True(t, 
atomic.LoadInt32(watcher.resetCount) > 1) + // make sure revision in WatchWithChan does not fall back + // even if no response has been received from WatchCh + // while WatchCh was reset + require.Equal(t, atomic.LoadInt64(watcher.rev), revision) +} + +type mockTxn struct { + ctx context.Context + mode int +} + +func (txn *mockTxn) If(cs ...clientv3.Cmp) clientv3.Txn { + if cs != nil { + txn.mode += 1 + } + return txn +} + +func (txn *mockTxn) Then(ops ...clientv3.Op) clientv3.Txn { + if ops != nil { + txn.mode += 1 << 1 + } + return txn +} + +func (txn *mockTxn) Else(ops ...clientv3.Op) clientv3.Txn { + if ops != nil { + txn.mode += 1 << 2 + } + return txn +} + +func (txn *mockTxn) Commit() (*clientv3.TxnResponse, error) { + switch txn.mode { + case 0: + return &clientv3.TxnResponse{}, nil + case 1: + return nil, rpctypes.ErrNoSpace + case 2: + return nil, rpctypes.ErrTimeoutDueToLeaderFail + case 3: + return nil, context.DeadlineExceeded + default: + return nil, errors.New("mock error") + } +} diff --git a/pkg/etcd/etcdkey_test.go b/pkg/etcd/etcdkey_test.go new file mode 100644 index 0000000000..fd81c8af67 --- /dev/null +++ b/pkg/etcd/etcdkey_test.go @@ -0,0 +1,167 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package etcd + +import ( + "fmt" + "testing" + + "github.com/pingcap/ticdc/pkg/common" + "github.com/stretchr/testify/require" +) + +func TestEtcdKey(t *testing.T) { + t.Parallel() + + testcases := []struct { + key string + expected *CDCKey + }{{ + key: fmt.Sprintf("%s/owner/223176cb44d20a13", DefaultClusterAndMetaPrefix), + expected: &CDCKey{ + Tp: CDCKeyTypeOwner, + OwnerLeaseID: "223176cb44d20a13", + ClusterID: DefaultCDCClusterID, + }, + }, { + key: fmt.Sprintf("%s/owner", DefaultClusterAndMetaPrefix), + expected: &CDCKey{ + Tp: CDCKeyTypeOwner, + OwnerLeaseID: "", + ClusterID: DefaultCDCClusterID, + }, + }, { + key: fmt.Sprintf("%s/capture/6bbc01c8-0605-4f86-a0f9-b3119109b225", + DefaultClusterAndMetaPrefix), + expected: &CDCKey{ + Tp: CDCKeyTypeCapture, + CaptureID: "6bbc01c8-0605-4f86-a0f9-b3119109b225", + ClusterID: DefaultCDCClusterID, + }, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/changefeed/info/test-_@#$%changefeed", + expected: &CDCKey{ + Tp: CDCKeyTypeChangefeedInfo, + ChangefeedID: common.ChangeFeedID{DisplayName: common.NewChangeFeedDisplayName("test-_@#$%changefeed", "default")}, + ClusterID: DefaultCDCClusterID, + Keyspace: "default", + }, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/changefeed/info/test/changefeed", + expected: &CDCKey{ + Tp: CDCKeyTypeChangefeedInfo, + ChangefeedID: common.ChangeFeedID{DisplayName: common.NewChangeFeedDisplayName("test/changefeed", "default")}, + ClusterID: DefaultCDCClusterID, + Keyspace: "default", + }, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/changefeed/status/test-changefeed", + expected: &CDCKey{ + Tp: CDCKeyTypeChangeFeedStatus, + ChangefeedID: common.ChangeFeedID{DisplayName: common.NewChangeFeedDisplayName("test-changefeed", "default")}, + ClusterID: DefaultCDCClusterID, + Keyspace: "default", + }, + }, { + key: "/tidb/cdc/default/name/task/position/6bbc01c8-0605-4f86-a0f9-b3119109b225/test-changefeed", + expected: &CDCKey{ + Tp: CDCKeyTypeTaskPosition, + ChangefeedID: 
common.ChangeFeedID{DisplayName: common.NewChangeFeedDisplayName("test-changefeed", "name")}, + CaptureID: "6bbc01c8-0605-4f86-a0f9-b3119109b225", + ClusterID: DefaultCDCClusterID, + Keyspace: "name", + }, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/task/position/6bbc01c8-0605-4f86-a0f9-b3119109b225/test/changefeed", + expected: &CDCKey{ + Tp: CDCKeyTypeTaskPosition, + ChangefeedID: common.ChangeFeedID{DisplayName: common.NewChangeFeedDisplayName("test/changefeed", "default")}, + CaptureID: "6bbc01c8-0605-4f86-a0f9-b3119109b225", + ClusterID: DefaultCDCClusterID, + Keyspace: "default", + }, + }, { + key: DefaultClusterAndKeyspacePrefix + "/upstream/12345", + expected: &CDCKey{ + Tp: CDCKeyTypeUpStream, + ClusterID: DefaultCDCClusterID, + Keyspace: "default", + UpstreamID: 12345, + }, + }, { + key: fmt.Sprintf("%s%s", DefaultClusterAndMetaPrefix, metaVersionKey), + expected: &CDCKey{ + Tp: CDCKeyTypeMetaVersion, + ClusterID: DefaultCDCClusterID, + }, + }} + for _, tc := range testcases { + k := new(CDCKey) + err := k.Parse(DefaultCDCClusterID, tc.key) + require.NoError(t, err) + require.Equal(t, k, tc.expected) + require.Equal(t, k.String(), tc.key) + } +} + +func TestEtcdKeyParseError(t *testing.T) { + t.Parallel() + + testCases := []struct { + key string + error bool + }{{ + key: DefaultClusterAndKeyspacePrefix + + "/task/position/6bbc01c8-0605-4f86-a0f9-b3119109b225/test/changefeed", + error: false, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/task/position/6bbc01c8-0605-4f86-a0f9-b3119109b225/", + error: false, + }, { + key: DefaultClusterAndKeyspacePrefix + + "/task/position/6bbc01c8-0605-4f86-a0f9-b3119109b225", + error: true, + }, { + key: "/tidb/cd", + error: true, + }, { + key: "/tidb/cdc/", + error: true, + }, { + key: "/tidb/cdc/default/__meta_data__/abcd", + error: true, + }, { + key: "/tidb/cdc/default/default/abcd", + error: true, + }} + for _, tc := range testCases { + k := new(CDCKey) + err := k.Parse(DefaultCDCClusterID, tc.key) + if 
tc.error { + require.NotNil(t, err) + } else { + require.Nil(t, err) + } + } + k := new(CDCKey) + k.Tp = CDCKeyTypeUpStream + 1 + require.Panics(t, func() { + _ = k.String() + }) +} diff --git a/pkg/etcd/main_test.go b/pkg/etcd/main_test.go new file mode 100644 index 0000000000..6b70bc2694 --- /dev/null +++ b/pkg/etcd/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package etcd + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/etcd/util_test.go b/pkg/etcd/util_test.go new file mode 100644 index 0000000000..248078ec19 --- /dev/null +++ b/pkg/etcd/util_test.go @@ -0,0 +1,32 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+package etcd + +import ( + "math" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + clientv3 "go.etcd.io/etcd/client/v3" +) + +func TestGetRevisionFromWatchOpts(t *testing.T) { + t.Parallel() + + for i := 0; i < 100; i++ { + rev := rand.Int63n(math.MaxInt64) + opt := clientv3.WithRev(rev) + require.Equal(t, getRevisionFromWatchOpts(opt), rev) + } +} diff --git a/pkg/eventservice/main_test.go b/pkg/eventservice/main_test.go index 6190357c89..f1b96a612f 100644 --- a/pkg/eventservice/main_test.go +++ b/pkg/eventservice/main_test.go @@ -17,16 +17,8 @@ import ( "testing" "github.com/pingcap/ticdc/pkg/leakutil" - "go.uber.org/goleak" ) func TestMain(m *testing.M) { - opts := []goleak.Option{ - goleak.IgnoreTopFunction("github.com/pingcap/ticdc/pkg/workerpool.(*worker).run"), - goleak.IgnoreTopFunction("sync.runtime_Semacquire"), - goleak.IgnoreAnyFunction("github.com/godbus/dbus.(*Conn).Auth"), - goleak.IgnoreCurrent(), - } - - leakutil.SetUpLeakTest(m, opts...) + leakutil.SetUpLeakTest(m) } diff --git a/pkg/filter/expr_filter_bench_test.go b/pkg/filter/expr_filter_bench_test.go new file mode 100644 index 0000000000..7583f62f05 --- /dev/null +++ b/pkg/filter/expr_filter_bench_test.go @@ -0,0 +1,126 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filter + +import ( + "testing" + + commonType "github.com/pingcap/ticdc/pkg/common" + "github.com/pingcap/ticdc/pkg/config" + "github.com/pingcap/ticdc/pkg/util" + timodel "github.com/pingcap/tidb/pkg/meta/model" + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/stretchr/testify/require" +) + +// cmd: go test -benchmem -run=^$ -bench ^BenchmarkSkipDML$ github.com/pingcap/ticdc/pkg/filter +// goos: macOS 12.3.1 +// goarch: arm64 +// cpu: Apple M1 Pro +// BenchmarkSkipDML/insert-1 +// BenchmarkSkipDML/insert-1-10 990166 1151 ns/op 768 B/op 24 allocs/op +// BenchmarkSkipDML/insert-2 +// BenchmarkSkipDML/insert-2-10 1000000 1187 ns/op 768 B/op 24 allocs/op +// BenchmarkSkipDML/update +// BenchmarkSkipDML/update-10 698208 1637 ns/op 1480 B/op 43 allocs/op +// BenchmarkSkipDML/delete +// BenchmarkSkipDML/delete-10 1000000 1112 ns/op 768 B/op 24 allocs/op +func BenchmarkSkipDML(b *testing.B) { + cfg := &config.FilterConfig{ + EventFilters: []*config.EventFilterRule{ + { + Matcher: []string{"test.student"}, + IgnoreInsertValueExpr: util.AddressOf("name = 'Will'"), + IgnoreDeleteValueExpr: util.AddressOf("age >= 32"), + IgnoreUpdateOldValueExpr: util.AddressOf("gender = 'female'"), + IgnoreUpdateNewValueExpr: util.AddressOf("age > 28"), + }, + }, + } + + f, err := newExprFilter("UTC", cfg) + require.NoError(b, err) + + // Build a table info matching the filter rules.
+ cols := []*timodel.ColumnInfo{ + newColumnInfo(1, "id", mysql.TypeLong, mysql.PriKeyFlag|mysql.NotNullFlag), + newColumnInfo(2, "name", mysql.TypeString, 0), + newColumnInfo(3, "age", mysql.TypeLong, 0), + newColumnInfo(4, "gender", mysql.TypeString, 0), + } + tableInfo := mustNewCommonTableInfo("test", "student", cols, nil) + + insertRow := datumsToChunkRow([]types.Datum{ + types.NewIntDatum(999), + types.NewStringDatum("Will"), + types.NewIntDatum(39), + types.NewStringDatum("male"), + }, tableInfo) + updatePreRow := datumsToChunkRow([]types.Datum{ + types.NewIntDatum(876), + types.NewStringDatum("Li"), + types.NewIntDatum(45), + types.NewStringDatum("female"), + }, tableInfo) + updateRow := datumsToChunkRow([]types.Datum{ + types.NewIntDatum(1), + types.NewStringDatum("Dongmen"), + types.NewIntDatum(20), + types.NewStringDatum("male"), + }, tableInfo) + + type benchCase struct { + name string + dmlType commonType.RowType + preRow chunk.Row + row chunk.Row + shouldSkip bool + } + + cases := []benchCase{ + { + name: "insert", + dmlType: commonType.RowTypeInsert, + preRow: chunk.Row{}, + row: insertRow, + shouldSkip: true, + }, + { + name: "update", + dmlType: commonType.RowTypeUpdate, + preRow: updatePreRow, + row: updateRow, + shouldSkip: true, + }, + { + name: "delete", + dmlType: commonType.RowTypeDelete, + preRow: updatePreRow, + row: chunk.Row{}, + shouldSkip: true, + }, + } + + for _, c := range cases { + b.Run(c.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + ignore, err := f.shouldSkipDML(c.dmlType, c.preRow, c.row, tableInfo) + require.NoError(b, err) + require.Equal(b, c.shouldSkip, ignore) + } + }) + } +} diff --git a/pkg/filter/main_test.go b/pkg/filter/main_test.go new file mode 100644 index 0000000000..63ca3092f2 --- /dev/null +++ b/pkg/filter/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2021 PingCAP, Inc. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package filter + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/filter/utils_test.go b/pkg/filter/utils_test.go new file mode 100644 index 0000000000..e168a3535a --- /dev/null +++ b/pkg/filter/utils_test.go @@ -0,0 +1,82 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package filter + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/config" + "github.com/pingcap/tidb/pkg/meta/metadef" + tifilter "github.com/pingcap/tidb/pkg/util/filter" + "github.com/stretchr/testify/require" +) + +func TestIsSchema(t *testing.T) { + t.Parallel() + cases := []struct { + schema string + result bool + }{ + {"", false}, + {"test", false}, + {"SYS", true}, + {"MYSQL", true}, + {metadef.InformationSchemaName.O, true}, + {tifilter.InspectionSchemaName, true}, + {metadef.PerformanceSchemaName.O, true}, + {metadef.MetricSchemaName.O, true}, + {metadef.InformationSchemaName.L, true}, + {metadef.PerformanceSchemaName.L, true}, + {metadef.MetricSchemaName.L, true}, + {TiCDCSystemSchema, true}, + } + for _, c := range cases { + require.Equal(t, c.result, IsSysSchema(c.schema), "case: %+v", c) + } +} + +func BenchmarkIsSysSchemaInputLower(b *testing.B) { + for i := 0; i < b.N; i++ { + IsSysSchema("mysql") + } +} + +func BenchmarkIsSysSchemaInputUpper(b *testing.B) { + for i := 0; i < b.N; i++ { + IsSysSchema("MYSQL") + } +} + +func TestVerifyTableRules(t *testing.T) { + t.Parallel() + cases := []struct { + cfg *config.FilterConfig + hasError bool + }{ + {&config.FilterConfig{}, false}, + {&config.FilterConfig{Rules: []string{""}}, false}, + {&config.FilterConfig{Rules: []string{"*.*"}}, false}, + {&config.FilterConfig{Rules: []string{"test.*ms"}}, false}, + {&config.FilterConfig{Rules: []string{"*.889"}}, false}, + {&config.FilterConfig{Rules: []string{"test-a.*", "*.*.*"}}, true}, + {&config.FilterConfig{Rules: []string{"*.*", "*.*.*", "*.*.*.*"}}, true}, + } + for _, c := range cases { + f, err := VerifyTableRules(c.cfg) + require.Equal(t, c.hasError, err != nil, "case: %s", c.cfg.Rules) + if !c.hasError { + require.True(t, f != nil) + } + } +} diff --git a/pkg/leakutil/leak_helper_test.go b/pkg/leakutil/leak_helper_test.go index 0fe85d86eb..535e24e3cc 100644 --- a/pkg/leakutil/leak_helper_test.go +++ b/pkg/leakutil/leak_helper_test.go @@ -20,7
+20,7 @@ import ( ) func TestSetUpLeakTest(t *testing.T) { - leakChan := make(chan interface{}) + leakChan := make(chan struct{}) go func() { <-leakChan diff --git a/pkg/orchestrator/batch_test.go b/pkg/orchestrator/batch_test.go new file mode 100644 index 0000000000..9a7267a6f9 --- /dev/null +++ b/pkg/orchestrator/batch_test.go @@ -0,0 +1,75 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package orchestrator + +import ( + "fmt" + "testing" + + "github.com/pingcap/ticdc/pkg/orchestrator/util" + "github.com/stretchr/testify/require" +) + +func TestGetBatchChangeState(t *testing.T) { + t.Parallel() + patchGroupSize := 1000 + patchGroup := make([][]DataPatch, patchGroupSize) + for i := 0; i < patchGroupSize; i++ { + i := i + patches := []DataPatch{&SingleDataPatch{ + Key: util.NewEtcdKey(fmt.Sprintf("/key%d", i)), + Func: func(old []byte) (newValue []byte, changed bool, err error) { + newValue = []byte(fmt.Sprintf("abc%d", i)) + return newValue, true, nil + }, + }} + patchGroup[i] = patches + } + rawState := make(map[util.EtcdKey][]byte) + changedState, n, size, err := getBatchChangedState(rawState, patchGroup) + require.Nil(t, err) + require.LessOrEqual(t, n, len(patchGroup)) + require.LessOrEqual(t, size, etcdTxnMaxSize) + require.LessOrEqual(t, len(changedState), etcdTxnMaxOps) + require.Equal(t, []byte(fmt.Sprintf("abc%d", 0)), changedState[util.NewEtcdKey("/key0")]) + + // test single patch exceed txn max size + largeSizePatches := []DataPatch{&SingleDataPatch{ + Key: 
util.NewEtcdKey("largePatch"), + Func: func(old []byte) (newValue []byte, changed bool, err error) { + newValue = make([]byte, etcdTxnMaxSize) + return newValue, true, nil + }, + }} + patchGroup = [][]DataPatch{largeSizePatches} + _, _, _, err = getBatchChangedState(rawState, patchGroup) + require.NotNil(t, err) + require.Contains(t, err.Error(), "a single changefeed exceed etcd txn max size") + + // test single patch exceed txn max ops + manyOpsPatches := make([]DataPatch, 0) + for i := 0; i <= etcdTxnMaxOps*2; i++ { + manyOpsPatches = append(manyOpsPatches, &SingleDataPatch{ + Key: util.NewEtcdKey(fmt.Sprintf("/key%d", i)), + Func: func(old []byte) (newValue []byte, changed bool, err error) { + newValue = []byte(fmt.Sprintf("abc%d", i)) + return newValue, true, nil + }, + }) + } + patchGroup = [][]DataPatch{manyOpsPatches} + _, _, _, err = getBatchChangedState(rawState, patchGroup) + require.NotNil(t, err) + require.Contains(t, err.Error(), "a single changefeed exceed etcd txn max ops") +} diff --git a/pkg/orchestrator/etcd_worker_test.go b/pkg/orchestrator/etcd_worker_test.go new file mode 100644 index 0000000000..ca8303d4d9 --- /dev/null +++ b/pkg/orchestrator/etcd_worker_test.go @@ -0,0 +1,784 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package orchestrator + +import ( + "context" + "encoding/json" + "regexp" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/log" + cerrors "github.com/pingcap/ticdc/pkg/errors" + "github.com/pingcap/ticdc/pkg/etcd" + "github.com/pingcap/ticdc/pkg/migrate" + "github.com/pingcap/ticdc/pkg/orchestrator/util" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/api/v3/v3rpc/rpctypes" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" +) + +const ( + testEtcdKeyPrefix = "/cdc_etcd_worker_test" + numGroups = 10 + numValuesPerGroup = 5 + totalTicksPerReactor = 1000 +) + +type simpleReactor struct { + state *simpleReactorState + tickCount int + id int +} + +func (s *simpleReactor) Tick(_ context.Context, state ReactorState) (nextState ReactorState, err error) { + if s.tickCount >= totalTicksPerReactor { + return s.state, cerrors.ErrReactorFinished + } + s.tickCount++ + + newState := state.(*simpleReactorState) + if newState == nil { + return s.state, nil + } + s.state = newState + + if s.id == 0 { + sum := s.state.sum + for _, delta := range s.state.deltas { + sum = sum - delta.old + sum = sum + delta.new + } + + // check for consistency + expectedSum := 0 + for i := range s.state.values { + for j := range s.state.values[i] { + expectedSum += s.state.values[i][j] + } + } + if sum != expectedSum { + log.Panic("state is inconsistent", + zap.Int("expectedSum", sum), zap.Int("actualSum", s.state.sum)) + } + + s.state.SetSum(sum) + } else { + i2 := s.id - 1 + for i := range s.state.values { + s.state.Inc(i, i2) + } + } + + s.state.deltas = s.state.deltas[:0] + + return s.state, nil +} + +type delta struct { + old int + new int + i1 int + i2 int +} + +type simpleReactorState struct { + values [][]int + sum int + deltas []*delta + patches []DataPatch +} + +var keyParseRegexp = 
regexp.MustCompile(regexp.QuoteMeta(testEtcdKeyPrefix) + `/(.+)`) + +func (s *simpleReactorState) Get(i1, i2 int) int { + return s.values[i1][i2] +} + +func (s *simpleReactorState) Inc(i1, i2 int) { + patch := &SingleDataPatch{ + Key: util.NewEtcdKey(testEtcdKeyPrefix + "/" + strconv.Itoa(i1)), + Func: func(old []byte) ([]byte, bool, error) { + var oldJSON []int + err := json.Unmarshal(old, &oldJSON) + if err != nil { + return nil, false, errors.Trace(err) + } + + oldJSON[i2]++ + newValue, err := json.Marshal(oldJSON) + if err != nil { + return nil, false, errors.Trace(err) + } + return newValue, true, nil + }, + } + + s.patches = append(s.patches, patch) +} + +func (s *simpleReactorState) SetSum(sum int) { + patch := &SingleDataPatch{ + Key: util.NewEtcdKey(testEtcdKeyPrefix + "/sum"), + Func: func(_ []byte) ([]byte, bool, error) { + return []byte(strconv.Itoa(sum)), true, nil + }, + } + + s.patches = append(s.patches, patch) +} + +func (s *simpleReactorState) UpdatePendingChange() { +} + +func (s *simpleReactorState) Update(key util.EtcdKey, value []byte, isInit bool) error { + subMatches := keyParseRegexp.FindSubmatch(key.Bytes()) + if len(subMatches) != 2 { + log.Panic("illegal Etcd key", zap.ByteString("key", key.Bytes())) + } + + if string(subMatches[1]) == "sum" { + newSum, err := strconv.Atoi(string(value)) + if err != nil { + log.Panic("illegal sum", zap.Error(err)) + } + s.sum = newSum + return nil + } + + index, err := strconv.Atoi(string(subMatches[1])) + if err != nil { + log.Panic("illegal index", zap.Error(err)) + } + + var newValues []int + err = json.Unmarshal(value, &newValues) + if err != nil { + log.Panic("illegal value", zap.Error(err)) + } + + for i2, v := range s.values[index] { + if v != newValues[i2] { + s.deltas = append(s.deltas, &delta{ + old: v, + new: newValues[i2], + i1: index, + i2: i2, + }) + } + } + + s.values[index] = newValues + return nil +} + +func (s *simpleReactorState) GetPatches() [][]DataPatch { + ret := s.patches + 
s.patches = nil + return [][]DataPatch{ret} +} + +func setUpTest(t *testing.T) (func() etcd.Client, func()) { + url, server, err := etcd.SetupEmbedEtcd(t.TempDir()) + require.Nil(t, err) + endpoints := []string{url.String()} + return func() etcd.Client { + rawCli, err := clientv3.NewFromURLs(endpoints) + require.Nil(t, err) + return etcd.Wrap(rawCli, map[string]prometheus.Counter{}) + }, func() { + server.Close() + } +} + +func TestEtcdSum(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli := newClient() + defer func() { + _ = cli.Unwrap().Close() + }() + _, err := cli.Put(ctx, testEtcdKeyPrefix+"/sum", "0") + require.Nil(t, err) + + initArray := make([]int, numValuesPerGroup) + jsonStr, err := json.Marshal(initArray) + require.Nil(t, err) + + for i := 0; i < numGroups; i++ { + _, err := cli.Put(ctx, testEtcdKeyPrefix+"/"+strconv.Itoa(i), string(jsonStr)) + require.Nil(t, err) + } + + errg, ctx := errgroup.WithContext(ctx) + for i := 0; i < numValuesPerGroup+1; i++ { + finalI := i + errg.Go(func() error { + values := make([][]int, numGroups) + for j := range values { + values[j] = make([]int, numValuesPerGroup) + } + + reactor := &simpleReactor{ + state: nil, + id: finalI, + } + + initState := &simpleReactorState{ + values: values, + sum: 0, + deltas: nil, + patches: nil, + } + + cli := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli.Unwrap(), "default") + require.Nil(t, err) + defer func() { + _ = cli.Unwrap().Close() + }() + + etcdWorker, err := NewEtcdWorker(cdcCli, testEtcdKeyPrefix, reactor, initState, + &migrate.NoOpMigrator{}) + if err != nil { + return errors.Trace(err) + } + + return errors.Trace(etcdWorker.Run(ctx, nil, 10*time.Millisecond, "owner")) + }) + } + + err = errg.Wait() + if err != nil && (errors.Cause(err) == context.DeadlineExceeded || + errors.Cause(err) == context.Canceled || + strings.Contains(err.Error(), 
"etcdserver: request timeout")) { + return + } + require.Nil(t, err) +} + +type intReactorState struct { + val int + isUpdated bool + lastVal int +} + +func (s *intReactorState) UpdatePendingChange() { +} + +func (s *intReactorState) Update(key util.EtcdKey, value []byte, isInit bool) error { + var err error + s.val, err = strconv.Atoi(string(value)) + if err != nil { + log.Panic("intReactorState", zap.Error(err)) + } + // As long as we can ensure that val is monotonically increasing, + // we can ensure that the linearizability of state changes + if s.lastVal > s.val { + log.Panic("linearizability check failed, lastVal must less than current val", zap.Int("lastVal", s.lastVal), zap.Int("val", s.val)) + } + s.lastVal = s.val + s.isUpdated = !isInit + return nil +} + +func (s *intReactorState) GetPatches() [][]DataPatch { + return [][]DataPatch{} +} + +type linearizabilityReactor struct { + state *intReactorState + tickCount int +} + +func (r *linearizabilityReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*intReactorState) + if r.state.isUpdated { + if r.state.val < r.tickCount { + log.Panic("linearizability check failed, val must larger than tickCount", zap.Int("expected", r.tickCount), zap.Int("actual", r.state.val)) + } + r.tickCount++ + } + if r.state.val == 1999 { + return r.state, cerrors.ErrReactorFinished + } + r.state.isUpdated = false + return r.state, nil +} + +func TestLinearizability(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli0 := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli0.Unwrap(), "default") + require.Nil(t, err) + cli := newClient() + for i := 0; i < 1000; i++ { + _, err := cli.Put(ctx, testEtcdKeyPrefix+"/lin", strconv.Itoa(i)) + require.Nil(t, err) + } + + reactor, err := NewEtcdWorker(cdcCli, testEtcdKeyPrefix+"/lin", &linearizabilityReactor{ + 
state: nil, + tickCount: 999, + }, &intReactorState{ + val: 0, + isUpdated: false, + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + errg := &errgroup.Group{} + errg.Go(func() error { + return reactor.Run(ctx, nil, 10*time.Millisecond, "owner") + }) + + time.Sleep(500 * time.Millisecond) + for i := 999; i < 2000; i++ { + _, err := cli.Put(ctx, testEtcdKeyPrefix+"/lin", strconv.Itoa(i)) + require.Nil(t, err) + } + + err = errg.Wait() + require.Nil(t, err) + + err = cli.Unwrap().Close() + require.Nil(t, err) + err = cli0.Unwrap().Close() + require.Nil(t, err) +} + +type commonReactorState struct { + state map[string]string + pendingPatches []DataPatch +} + +func (s *commonReactorState) UpdatePendingChange() { +} + +func (s *commonReactorState) Update(key util.EtcdKey, value []byte, isInit bool) error { + s.state[key.String()] = string(value) + return nil +} + +func (s *commonReactorState) AppendPatch(key util.EtcdKey, fun func(old []byte) (newValue []byte, changed bool, err error)) { + s.pendingPatches = append(s.pendingPatches, &SingleDataPatch{ + Key: key, + Func: fun, + }) +} + +func (s *commonReactorState) GetPatches() [][]DataPatch { + pendingPatches := s.pendingPatches + s.pendingPatches = nil + return [][]DataPatch{pendingPatches} +} + +type finishedReactor struct { + state *commonReactorState + tickNum int + prefix string +} + +func (r *finishedReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*commonReactorState) + if r.tickNum < 2 { + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("abc")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("123")...), true, nil + }) + r.tickNum++ + return r.state, nil + } + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) 
(newValue []byte, changed bool, err error) { + return append(old, []byte("fin")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + return r.state, cerrors.ErrReactorFinished +} + +func TestFinished(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli.Unwrap(), "default") + require.Nil(t, err) + + prefix := testEtcdKeyPrefix + "/finished" + reactor, err := NewEtcdWorker(cdcCli, prefix, &finishedReactor{ + prefix: prefix, + }, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + err = reactor.Run(ctx, nil, 10*time.Millisecond, "owner") + require.Nil(t, err) + resp, err := cli.Get(ctx, prefix+"/key1") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/finished/key1") + require.Equal(t, string(resp.Kvs[0].Value), "abcabcfin") + resp, err = cli.Get(ctx, prefix+"/key2") + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) + err = cli.Unwrap().Close() + require.Nil(t, err) +} + +type coverReactor struct { + state *commonReactorState + tickNum int + prefix string +} + +func (r *coverReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*commonReactorState) + if r.tickNum < 2 { + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("abc")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("123")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err 
error) { + return append(old, []byte("cba")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("321")...), true, nil + }) + r.tickNum++ + return r.state, nil + } + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("fin")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("fin")...), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return append(old, []byte("fin")...), true, nil + }) + return r.state, cerrors.ErrReactorFinished +} + +func TestCover(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli.Unwrap(), "default") + require.Nil(t, err) + + prefix := testEtcdKeyPrefix + "/cover" + reactor, err := NewEtcdWorker(cdcCli, prefix, &coverReactor{ + prefix: prefix, + }, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + err = reactor.Run(ctx, nil, 10*time.Millisecond, "owner") + require.Nil(t, err) + resp, err := cli.Get(ctx, prefix+"/key1") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/cover/key1") + require.Equal(t, string(resp.Kvs[0].Value), "abccbaabccbafinfin") + resp, err = cli.Get(ctx, prefix+"/key2") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/cover/key2") + require.Equal(t, 
string(resp.Kvs[0].Value), "fin") + err = cli.Unwrap().Close() + require.Nil(t, err) +} + +type emptyTxnReactor struct { + state *commonReactorState + tickNum int + prefix string + cli etcd.Client +} + +func (r *emptyTxnReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*commonReactorState) + if r.tickNum == 0 { + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte("abc"), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte("123"), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.tickNum++ + return r.state, nil + } + if r.tickNum == 1 { + // Simulating other client writes + _, err := r.cli.Put(ctx, "/key3", "123") + if err != nil { + return nil, errors.Trace(err) + } + + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte("123"), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.tickNum++ + return r.state, nil + } + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte("123"), true, nil + }) + return r.state, cerrors.ErrReactorFinished +} + +func TestEmptyTxn(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli.Unwrap(), 
"default") + require.Nil(t, err) + + prefix := testEtcdKeyPrefix + "/empty_txn" + reactor, err := NewEtcdWorker(cdcCli, prefix, &emptyTxnReactor{ + prefix: prefix, + cli: cli, + }, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + err = reactor.Run(ctx, nil, 10*time.Millisecond, "owner") + require.Nil(t, err) + resp, err := cli.Get(ctx, prefix+"/key1") + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) + resp, err = cli.Get(ctx, prefix+"/key2") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/empty_txn/key2") + require.Equal(t, string(resp.Kvs[0].Value), "123") + err = cli.Unwrap().Close() + require.Nil(t, err) +} + +type emptyOrNilReactor struct { + state *commonReactorState + tickNum int + prefix string +} + +func (r *emptyOrNilReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*commonReactorState) + if r.tickNum == 0 { + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte(""), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.tickNum++ + return r.state, nil + } + if r.tickNum == 1 { + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte(""), true, nil + }) + r.tickNum++ + return r.state, nil + } + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key1"), func(old []byte) (newValue []byte, changed bool, err error) { + return []byte(""), true, nil + }) + r.state.AppendPatch(util.NewEtcdKey(r.prefix+"/key2"), func(old []byte) (newValue []byte, changed bool, err error) { + return nil, true, 
nil + }) + return r.state, cerrors.ErrReactorFinished +} + +func TestEmptyOrNil(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli := newClient() + cdcCli, err := etcd.NewCDCEtcdClient(ctx, cli.Unwrap(), "default") + require.Nil(t, err) + + prefix := testEtcdKeyPrefix + "/emptyOrNil" + reactor, err := NewEtcdWorker(cdcCli, prefix, &emptyOrNilReactor{ + prefix: prefix, + }, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + err = reactor.Run(ctx, nil, 10*time.Millisecond, "owner") + require.Nil(t, err) + resp, err := cli.Get(ctx, prefix+"/key1") + require.Nil(t, err) + require.Equal(t, string(resp.Kvs[0].Key), "/cdc_etcd_worker_test/emptyOrNil/key1") + require.Equal(t, string(resp.Kvs[0].Value), "") + resp, err = cli.Get(ctx, prefix+"/key2") + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) + err = cli.Unwrap().Close() + require.Nil(t, err) +} + +type modifyOneReactor struct { + state *commonReactorState + key []byte + value []byte + finished bool + + waitOnCh chan struct{} +} + +func (r *modifyOneReactor) Tick(ctx context.Context, state ReactorState) (nextState ReactorState, err error) { + r.state = state.(*commonReactorState) + if !r.finished { + r.finished = true + } else { + return r.state, cerrors.ErrReactorFinished.GenWithStackByArgs() + } + if r.waitOnCh != nil { + select { + case <-ctx.Done(): + return nil, errors.Trace(ctx.Err()) + case <-r.waitOnCh: + } + select { + case <-ctx.Done(): + return nil, errors.Trace(ctx.Err()) + case <-r.waitOnCh: + } + } + r.state.AppendPatch(util.NewEtcdKeyFromBytes(r.key), func(old []byte) (newValue []byte, changed bool, err error) { + if len(old) > 0 { + return r.value, true, nil + } + return nil, false, nil + }) + return r.state, nil +} + +// TestModifyAfterDelete tests snapshot isolation when there is one modifying transaction delayed in the 
middle while a deleting transaction +// commits. The first transaction should be aborted and retried, and isolation should not be violated. +func TestModifyAfterDelete(t *testing.T) { + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*5) + defer cancel() + + newClient, closer := setUpTest(t) + defer closer() + + cli1 := newClient() + cdcCli1, err := etcd.NewCDCEtcdClient(ctx, cli1.Unwrap(), "default") + require.Nil(t, err) + + cli2 := newClient() + cdcCli2, err := etcd.NewCDCEtcdClient(ctx, cli2.Unwrap(), "default") + require.Nil(t, err) + + _, err = cli1.Put(ctx, "/test/key1", "original value") + require.Nil(t, err) + + modifyReactor := &modifyOneReactor{ + key: []byte("/test/key1"), + value: []byte("modified value"), + waitOnCh: make(chan struct{}), + } + worker1, err := NewEtcdWorker(cdcCli1, "/test", modifyReactor, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + err := worker1.Run(ctx, nil, time.Millisecond*100, "owner") + require.Nil(t, err) + }() + + modifyReactor.waitOnCh <- struct{}{} + + deleteReactor := &modifyOneReactor{ + key: []byte("/test/key1"), + value: nil, // deletion + } + worker2, err := NewEtcdWorker(cdcCli2, "/test", deleteReactor, &commonReactorState{ + state: make(map[string]string), + }, &migrate.NoOpMigrator{}) + require.Nil(t, err) + + err = worker2.Run(ctx, nil, time.Millisecond*100, "owner") + require.Nil(t, err) + + modifyReactor.waitOnCh <- struct{}{} + wg.Wait() + + resp, err := cli1.Get(ctx, "/test/key1") + require.Nil(t, err) + require.Len(t, resp.Kvs, 0) + require.Equal(t, worker1.deleteCounter, int64(1)) + + _ = cli1.Unwrap().Close() + _ = cli2.Unwrap().Close() +} + +func TestRetryableError(t *testing.T) { + require.True(t, isRetryableError(cerrors.ErrEtcdTryAgain)) + require.True(t, isRetryableError(cerrors.ErrReachMaxTry.Wrap(rpctypes.ErrTimeoutDueToLeaderFail))) + 
require.True(t, isRetryableError(errors.Trace(context.DeadlineExceeded))) + require.False(t, isRetryableError(context.Canceled)) +} diff --git a/pkg/orchestrator/main_test.go b/pkg/orchestrator/main_test.go new file mode 100644 index 0000000000..c77e260c81 --- /dev/null +++ b/pkg/orchestrator/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package orchestrator + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/sink/cloudstorage/main_test.go b/pkg/sink/cloudstorage/main_test.go new file mode 100644 index 0000000000..ed5b1282ca --- /dev/null +++ b/pkg/sink/cloudstorage/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package cloudstorage + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/sink/kafka/claimcheck/claim_check_test.go b/pkg/sink/kafka/claimcheck/claim_check_test.go new file mode 100644 index 0000000000..a641ddd97b --- /dev/null +++ b/pkg/sink/kafka/claimcheck/claim_check_test.go @@ -0,0 +1,44 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package claimcheck + +import ( + "context" + "testing" + + commonType "github.com/pingcap/ticdc/pkg/common" + "github.com/pingcap/ticdc/pkg/config" + "github.com/stretchr/testify/require" +) + +func TestClaimCheck(t *testing.T) { + t.Parallel() + + ctx := context.Background() + + changefeedID := commonType.NewChangeFeedIDWithName("test", "") + largeHandleConfig := config.NewDefaultLargeMessageHandleConfig() + + claimCheck, err := New(ctx, largeHandleConfig, changefeedID) + require.NoError(t, err) + require.Nil(t, claimCheck) + + largeHandleConfig.LargeMessageHandleOption = config.LargeMessageHandleOptionClaimCheck + largeHandleConfig.ClaimCheckStorageURI = "file:///tmp/abc/" + claimCheck, err = New(ctx, largeHandleConfig, changefeedID) + require.NoError(t, err) + + fileName := claimCheck.FileNameWithPrefix("file.json") + require.Equal(t, "file:///tmp/abc/file.json", fileName) +} diff --git a/pkg/sink/mysql/main_test.go b/pkg/sink/mysql/main_test.go new file mode 100644 index 0000000000..e7f5f8c3fd --- /dev/null +++ b/pkg/sink/mysql/main_test.go @@ -0,0 +1,24 
@@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package mysql + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/txnutil/gc/gc_manager_nextgen_test.go b/pkg/txnutil/gc/gc_manager_nextgen_test.go index 83525fcfdd..c8143d0526 100644 --- a/pkg/txnutil/gc/gc_manager_nextgen_test.go +++ b/pkg/txnutil/gc/gc_manager_nextgen_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - "github.com/pingcap/errors" "github.com/pingcap/ticdc/pkg/common" appcontext "github.com/pingcap/ticdc/pkg/common/context" cerrors "github.com/pingcap/ticdc/pkg/errors" @@ -31,30 +30,6 @@ import ( pdgc "github.com/tikv/pd/client/clients/gc" ) -type mockGCStatesClient struct { - txnSafePoint uint64 -} - -func (c *mockGCStatesClient) SetGCBarrier( - ctx context.Context, barrierID string, barrierTS uint64, ttl time.Duration, -) (*pdgc.GCBarrierInfo, error) { - if barrierTS < c.txnSafePoint { - // Mark this error as non-retryable for SetGCBarrier's internal retry loop. 
- return nil, errors.Annotate(context.Canceled, "ErrGCBarrierTSBehindTxnSafePoint") - } - return pdgc.NewGCBarrierInfo(barrierID, barrierTS, ttl, time.Now()), nil -} - -func (c *mockGCStatesClient) DeleteGCBarrier(ctx context.Context, barrierID string) (*pdgc.GCBarrierInfo, error) { - return nil, nil -} - -func (c *mockGCStatesClient) GetGCState(ctx context.Context) (pdgc.GCState, error) { - return pdgc.GCState{ - TxnSafePoint: c.txnSafePoint, - }, nil -} - func TestTryUpdateKeyspaceGCBarrierDoesNotReturnSnapshotLost(t *testing.T) { appcontext.SetService(appcontext.DefaultPDClock, pdutil.NewClock4Test()) @@ -63,7 +38,15 @@ func TestTryUpdateKeyspaceGCBarrierDoesNotReturnSnapshotLost(t *testing.T) { checkpointTs := common.Ts(100) txnSafePoint := uint64(200) - gcStatesClient := &mockGCStatesClient{txnSafePoint: txnSafePoint} + pdCliForGCStates := &mockPdClientForServiceGCSafePoint{ + serviceSafePoint: make(map[string]uint64), + gcBarriers: make(map[string]uint64), + txnSafePoint: txnSafePoint, + } + gcStatesClient := &mockGCStatesClient{ + keyspaceID: keyspaceID, + parent: pdCliForGCStates, + } pdClient := &MockPDClient{ GetGCStatesClientFunc: func(id uint32) pdgc.GCStatesClient { require.Equal(t, keyspaceID, id) diff --git a/pkg/txnutil/gc/gc_service_test.go b/pkg/txnutil/gc/gc_service_test.go new file mode 100644 index 0000000000..dc0ad6dda6 --- /dev/null +++ b/pkg/txnutil/gc/gc_service_test.go @@ -0,0 +1,238 @@ +// Copyright 2020 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gc + +import ( + "context" + "math" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/ticdc/pkg/common" + "github.com/pingcap/ticdc/pkg/config/kerneltype" + cerror "github.com/pingcap/ticdc/pkg/errors" + "github.com/stretchr/testify/require" + pd "github.com/tikv/pd/client" + pdgc "github.com/tikv/pd/client/clients/gc" +) + +func TestCheckSafetyOfStartTs(t *testing.T) { + t.Parallel() + + pdCli := &mockPdClientForServiceGCSafePoint{ + serviceSafePoint: make(map[string]uint64), + gcBarriers: make(map[string]uint64), + txnSafePoint: 60, + } + + ctx := context.Background() + + TTL := int64(1) + + if kerneltype.IsClassic() { + // assume no pd leader switch + pdCli.UpdateServiceGCSafePoint(ctx, "service1", 10, 60) //nolint:errcheck + err := EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed1", "default"), TTL, 50) + require.Equal(t, + "[CDC:ErrStartTsBeforeGC]fail to create or maintain changefeed "+ + "because start-ts 50 is earlier than or equal to GC safepoint at 60", err.Error()) + pdCli.UpdateServiceGCSafePoint(ctx, "service2", 10, 80) //nolint:errcheck + pdCli.UpdateServiceGCSafePoint(ctx, "service3", 10, 70) //nolint:errcheck + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.Nil(t, err) + require.Equal(t, pdCli.serviceSafePoint, map[string]uint64{ + "service1": 60, + "service2": 80, + "service3": 70, + "ticdc-creating-default_changefeed2": 65, + }) + err = UndoEnsureChangefeedStartTsSafety(ctx, pdCli, + 0, + "ticdc-creating-", + common.NewChangeFeedIDWithName("changefeed2", "default")) + require.Nil(t, err) + require.Equal(t, pdCli.serviceSafePoint, map[string]uint64{ + "service1": 60, + "service2": 80, + "service3": 70, + "ticdc-creating-default_changefeed2": math.MaxUint64, + }) + + pdCli.enableLeaderSwitch = true + + pdCli.retryThreshold = 1 + pdCli.retryCount = 
0 + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.Nil(t, err) + + pdCli.retryThreshold = gcServiceMaxRetries + 1 + pdCli.retryCount = 0 + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.NotNil(t, err) + require.Equal(t, err.Error(), + "[CDC:ErrReachMaxTry]reach maximum try: 9, error: not pd leader: not pd leader") + + pdCli.retryThreshold = 3 + pdCli.retryCount = 0 + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed1", "default"), TTL, 50) + require.Equal(t, err.Error(), + "[CDC:ErrStartTsBeforeGC]fail to create or maintain changefeed "+ + "because start-ts 50 is earlier than or equal to GC safepoint at 60") + return + } + + err := EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed1", "default"), TTL, 50) + require.True(t, cerror.ErrStartTsBeforeGC.Equal(errors.Cause(err))) + + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.NoError(t, err) + require.Equal(t, uint64(65), pdCli.gcBarriers["ticdc-creating-default_changefeed2"]) + + err = UndoEnsureChangefeedStartTsSafety(ctx, pdCli, + 0, + "ticdc-creating-", + common.NewChangeFeedIDWithName("changefeed2", "default")) + require.NoError(t, err) + _, ok := pdCli.gcBarriers["ticdc-creating-default_changefeed2"] + require.False(t, ok) + + pdCli.enableLeaderSwitch = true + + pdCli.retryThreshold = 1 + pdCli.retryCount = 0 + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.NoError(t, err) + + pdCli.retryThreshold = gcServiceMaxRetries + 1 + pdCli.retryCount = 0 + err = 
EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed2", "default"), TTL, 65) + require.True(t, cerror.ErrStartTsBeforeGC.Equal(errors.Cause(err))) + + pdCli.retryThreshold = 3 + pdCli.retryCount = 0 + err = EnsureChangefeedStartTsSafety(ctx, pdCli, + "ticdc-creating-", + 0, + common.NewChangeFeedIDWithName("changefeed1", "default"), TTL, 50) + require.True(t, cerror.ErrStartTsBeforeGC.Equal(errors.Cause(err))) +} + +type mockPdClientForServiceGCSafePoint struct { + pd.Client + serviceSafePoint map[string]uint64 + gcBarriers map[string]uint64 + txnSafePoint uint64 + enableLeaderSwitch bool + retryCount int + retryThreshold int +} + +func (m *mockPdClientForServiceGCSafePoint) UpdateServiceGCSafePoint(ctx context.Context, serviceID string, ttl int64, safePoint uint64) (uint64, error) { + defer func() { m.retryCount++ }() + minSafePoint := uint64(math.MaxUint64) + if m.enableLeaderSwitch && m.retryCount < m.retryThreshold { + // simulate pd leader switch error + return minSafePoint, errors.New("not pd leader") + } + + for _, safePoint := range m.serviceSafePoint { + if minSafePoint > safePoint { + minSafePoint = safePoint + } + } + if safePoint < minSafePoint && len(m.serviceSafePoint) != 0 { + return minSafePoint, nil + } + m.serviceSafePoint[serviceID] = safePoint + return minSafePoint, nil +} + +func (m *mockPdClientForServiceGCSafePoint) GetGCStatesClient(keyspaceID uint32) pdgc.GCStatesClient { + return &mockGCStatesClient{ + keyspaceID: keyspaceID, + parent: m, + } +} + +type mockGCStatesClient struct { + keyspaceID uint32 + parent *mockPdClientForServiceGCSafePoint +} + +func (m *mockGCStatesClient) SetGCBarrier(ctx context.Context, barrierID string, barrierTS uint64, ttl time.Duration) (*pdgc.GCBarrierInfo, error) { + defer func() { m.parent.retryCount++ }() + if m.parent.enableLeaderSwitch && m.parent.retryCount < m.parent.retryThreshold { + // simulate pd leader switch error + return nil, 
errors.New("not pd leader") + } + if barrierTS < m.parent.txnSafePoint { + return nil, errors.New("ErrGCBarrierTSBehindTxnSafePoint") + } + m.parent.gcBarriers[barrierID] = barrierTS + return pdgc.NewGCBarrierInfo(barrierID, barrierTS, ttl, time.Now()), nil +} + +func (m *mockGCStatesClient) DeleteGCBarrier(ctx context.Context, barrierID string) (*pdgc.GCBarrierInfo, error) { + if m.parent.enableLeaderSwitch && m.parent.retryCount < m.parent.retryThreshold { + // simulate pd leader switch error + return nil, errors.New("not pd leader") + } + barrierTS, ok := m.parent.gcBarriers[barrierID] + if !ok { + return nil, nil + } + delete(m.parent.gcBarriers, barrierID) + return pdgc.NewGCBarrierInfo(barrierID, barrierTS, 0, time.Now()), nil +} + +func (m *mockGCStatesClient) GetGCState(ctx context.Context) (pdgc.GCState, error) { + gcBarriers := make([]*pdgc.GCBarrierInfo, 0, len(m.parent.gcBarriers)) + for id, ts := range m.parent.gcBarriers { + gcBarriers = append(gcBarriers, pdgc.NewGCBarrierInfo(id, ts, 0, time.Now())) + } + + return pdgc.GCState{ + KeyspaceID: m.keyspaceID, + TxnSafePoint: m.parent.txnSafePoint, + GCBarriers: gcBarriers, + }, nil +} diff --git a/pkg/txnutil/gc/main_test.go b/pkg/txnutil/gc/main_test.go new file mode 100644 index 0000000000..01d459f2f6 --- /dev/null +++ b/pkg/txnutil/gc/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package gc + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/upstream/main_test.go b/pkg/upstream/main_test.go new file mode 100644 index 0000000000..064696e77c --- /dev/null +++ b/pkg/upstream/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package upstream + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/upstream/manager_test.go b/pkg/upstream/manager_test.go new file mode 100644 index 0000000000..19349913c7 --- /dev/null +++ b/pkg/upstream/manager_test.go @@ -0,0 +1,118 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package upstream + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/pingcap/errors" + "github.com/pingcap/ticdc/pkg/security" + "github.com/stretchr/testify/require" + uatomic "go.uber.org/atomic" +) + +func TestManagerAddUpstreamSameIDResetsIdleTime(t *testing.T) { + t.Parallel() + + m := NewManager(context.Background(), NodeTopologyCfg{GCServiceID: "id"}) + m.initUpstreamFunc = func(context.Context, *Upstream, *NodeTopologyCfg) error { + return nil + } + + up := m.AddUpstream(&UpstreamInfo{ID: 3}) + require.NotNil(t, up) + + up.mu.Lock() + up.idleTime = time.Now() + up.mu.Unlock() + require.False(t, up.idleTime.IsZero()) + + up2 := m.AddUpstream(&UpstreamInfo{ID: 3}) + require.Same(t, up, up2) + require.True(t, up2.idleTime.IsZero()) +} + +func TestManagerAddDefaultUpstream(t *testing.T) { + t.Parallel() + + m := NewManager(context.Background(), NodeTopologyCfg{GCServiceID: "id"}) + + m.initUpstreamFunc = func(context.Context, *Upstream, *NodeTopologyCfg) error { + return errors.New("test") + } + _, err := m.AddDefaultUpstream([]string{}, &security.Credential{}, nil, nil) + require.Error(t, err) + _, err = m.GetDefaultUpstream() + require.Error(t, err) + + m.initUpstreamFunc = func(_ context.Context, up *Upstream, _ *NodeTopologyCfg) error { + up.ID = uint64(2) + up.cancel = func() {} + atomic.StoreInt32(&up.status, normal) + return nil + } + + _, err = m.AddDefaultUpstream([]string{}, &security.Credential{}, nil, nil) + require.NoError(t, err) + + up, err := m.GetDefaultUpstream() + require.NoError(t, err) + require.NotNil(t, up) + + up2, ok := m.Get(uint64(2)) + require.True(t, ok) + require.Same(t, up, up2) +} + +func TestManagerCloseRemovesUpstreams(t *testing.T) { + t.Parallel() + + m := NewManager(context.Background(), NodeTopologyCfg{GCServiceID: "id"}) + + canceled := uatomic.NewBool(false) + up := &Upstream{ + cancel: func() { canceled.Store(true) }, + wg: new(sync.WaitGroup), + } + atomic.StoreInt32(&up.status, 
normal) + m.ups.Store(uint64(1), up) + + m.Close() + require.True(t, canceled.Load()) + _, ok := m.ups.Load(uint64(1)) + require.False(t, ok) +} + +func TestManagerVisit(t *testing.T) { + t.Parallel() + + m := NewManager(context.Background(), NodeTopologyCfg{GCServiceID: "id"}) + + up1 := &Upstream{cancel: func() {}, wg: new(sync.WaitGroup)} + up2 := &Upstream{cancel: func() {}, wg: new(sync.WaitGroup)} + m.ups.Store(uint64(1), up1) + m.ups.Store(uint64(2), up2) + + visited := uatomic.NewInt64(0) + require.NoError(t, m.Visit(func(up *Upstream) error { + require.NotNil(t, up) + visited.Inc() + return nil + })) + require.Equal(t, int64(2), visited.Load()) +} diff --git a/pkg/upstream/upstream_test.go b/pkg/upstream/upstream_test.go new file mode 100644 index 0000000000..7bc157ba19 --- /dev/null +++ b/pkg/upstream/upstream_test.go @@ -0,0 +1,138 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package upstream + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/benbjohnson/clock" + "github.com/pingcap/errors" + "github.com/pingcap/ticdc/pkg/etcd" + "github.com/pingcap/ticdc/pkg/node" + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/require" + "go.etcd.io/etcd/client/pkg/v3/logutil" + clientv3 "go.etcd.io/etcd/client/v3" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func TestUpstreamShouldClose(t *testing.T) { + t.Parallel() + + up := &Upstream{} + up.isDefaultUpstream = false + mockClock := clock.NewMock() + require.False(t, up.shouldClose()) + up.clock = mockClock + up.idleTime = mockClock.Now().Add(-2 * maxIdleDuration) + require.True(t, up.shouldClose()) + up.isDefaultUpstream = true + require.False(t, up.shouldClose()) +} + +func TestUpstreamError(t *testing.T) { + t.Parallel() + + up := &Upstream{} + err := errors.New("test") + up.err.Store(err) + require.Equal(t, err, up.Error()) + up.err.Store(nil) + require.Nil(t, up.Error()) +} + +func TestUpstreamIsNormal(t *testing.T) { + t.Parallel() + + up := &Upstream{} + up.status = uninit + require.False(t, up.IsNormal()) + up.status = normal + require.True(t, up.IsNormal()) + up.err.Store(errors.New("test")) + require.False(t, up.IsNormal()) +} + +func TestTrySetIdleTime(t *testing.T) { + t.Parallel() + + up := newUpstream(nil, nil) + require.Equal(t, uninit, up.status) + up.clock = clock.New() + up.trySetIdleTime() + require.False(t, up.idleTime.IsZero()) + idleTime := up.idleTime + up.trySetIdleTime() + require.Equal(t, idleTime, up.idleTime) + up.resetIdleTime() + require.True(t, up.idleTime.IsZero()) + up.resetIdleTime() + require.True(t, up.idleTime.IsZero()) +} + +func TestRegisterTopo(t *testing.T) { + t.Parallel() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + clientURL, etcdServer, err := etcd.SetupEmbedEtcd(t.TempDir()) + require.NoError(t, err) + + defer etcdServer.Close() + logConfig := 
logutil.DefaultZapLoggerConfig + logConfig.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel) + + rawEtcdCli, err := clientv3.New(clientv3.Config{ + Endpoints: []string{clientURL.String()}, + Context: ctx, + LogConfig: &logConfig, + DialTimeout: 3 * time.Second, + }) + require.NoError(t, err) + defer rawEtcdCli.Close() + etcdCli := etcd.Wrap(rawEtcdCli, make(map[string]prometheus.Counter)) + up := &Upstream{ + cancel: func() {}, + etcdCli: etcdCli, + wg: &sync.WaitGroup{}, + } + + info := node.NewInfo("localhost:8300", "test") + info.Version = "test.1.0" + err = up.registerTopologyInfo(ctx, &NodeTopologyCfg{ + Info: info, + GCServiceID: "clusterID", + SessionTTL: 2, + }) + require.NoError(t, err) + + resp, err := etcdCli.Get(ctx, "/topology/ticdc/clusterID/localhost:8300") + require.NoError(t, err) + + infoData, err := info.Marshal() + require.NoError(t, err) + require.Equal(t, infoData, resp.Kvs[0].Value) + + up.etcdCli = nil + up.Close() + require.Eventually(t, func() bool { + resp, err := etcdCli.Get(ctx, "/topology/ticdc/clusterID/localhost:8300") + require.NoError(t, err) + return len(resp.Kvs) == 0 + }, time.Second*5, time.Millisecond*100) +} diff --git a/pkg/util/json_writer_test.go b/pkg/util/json_writer_test.go new file mode 100644 index 0000000000..f8003e2415 --- /dev/null +++ b/pkg/util/json_writer_test.go @@ -0,0 +1,484 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/suite" + "github.com/thanhpk/randstr" +) + +type JSONWriterTestSuite struct { + suite.Suite + useInternalBuffer bool +} + +func (suite *JSONWriterTestSuite) writeJSON(fn func(*JSONWriter)) string { + if suite.useInternalBuffer { + w := BorrowJSONWriter(nil) + fn(w) + ret := string(w.Buffer()) + ReturnJSONWriter(w) + return ret + } + + out := &bytes.Buffer{} + w := BorrowJSONWriter(out) + fn(w) + ReturnJSONWriter(w) + return out.String() +} + +func (suite *JSONWriterTestSuite) TestObject() { + var s string + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() {}) + }) + suite.Require().Equal(`{}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteAnyField("foo", 1) + }) + }) + suite.Require().Equal(`{"foo":1}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteAnyField("foo", 1) + w.WriteAnyField("bar", 2) + }) + }) + suite.Require().Equal(`{"foo":1,"bar":2}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() {}) + }) + }) + suite.Require().Equal(`{"foo":{}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() {}) + w.WriteObjectField("bar", func() {}) + }) + }) + suite.Require().Equal(`{"foo":{},"bar":{}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteObjectField("foo1", func() {}) + }) + w.WriteObjectField("bar", func() { + w.WriteObjectField("foo2", func() {}) + }) + }) + }) + suite.Require().Equal(`{"foo":{"foo1":{}},"bar":{"foo2":{}}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteObjectField("foo1", func() {}) + w.WriteObjectField("bar1", func() {}) + }) + w.WriteObjectField("bar", func() { + w.WriteObjectField("foo2", func() {}) + }) + 
}) + }) + suite.Require().Equal(`{"foo":{"foo1":{},"bar1":{}},"bar":{"foo2":{}}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteObjectField("foo1", func() {}) + w.WriteObjectField("bar1", func() {}) + }) + w.WriteObjectField("bar", func() { + w.WriteObjectField("foo2", func() {}) + w.WriteObjectField("bar2", func() {}) + }) + }) + }) + suite.Require().Equal(`{"foo":{"foo1":{},"bar1":{}},"bar":{"foo2":{},"bar2":{}}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteObjectField("foo1", func() { + w.WriteNullField("abc") + }) + w.WriteObjectField("bar1", func() {}) + }) + w.WriteObjectField("bar", func() { + w.WriteObjectField("foo2", func() {}) + w.WriteObjectField("bar2", func() {}) + }) + }) + }) + suite.Require().Equal(`{"foo":{"foo1":{"abc":null},"bar1":{}},"bar":{"foo2":{},"bar2":{}}}`, s) +} + +func (suite *JSONWriterTestSuite) TestArray() { + var s string + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() {}) + }) + suite.Require().Equal(`[]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteUint64Element(1) + }) + }) + suite.Require().Equal(`[1]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteUint64Element(1) + w.WriteUint64Element(2) + }) + }) + suite.Require().Equal(`[1,2]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() {}) + }) + }) + suite.Require().Equal(`[[]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() {}) + w.WriteArrayElement(func() {}) + }) + }) + suite.Require().Equal(`[[],[]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + }) + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + }) + }) + }) + 
suite.Require().Equal(`[[[]],[[]]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + w.WriteArrayElement(func() {}) + }) + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + }) + }) + }) + suite.Require().Equal(`[[[],[]],[[]]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + w.WriteArrayElement(func() {}) + }) + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + w.WriteArrayElement(func() {}) + }) + }) + }) + suite.Require().Equal(`[[[],[]],[[],[]]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteArrayElement(func() { + w.WriteArrayElement(func() { + w.WriteNullElement() + }) + w.WriteArrayElement(func() {}) + }) + w.WriteArrayElement(func() { + w.WriteArrayElement(func() {}) + w.WriteArrayElement(func() {}) + }) + }) + }) + suite.Require().Equal(`[[[null],[]],[[],[]]]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteObjectElement(func() {}) + }) + }) + suite.Require().Equal(`[{}]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteObjectElement(func() { + w.WriteUint64Field("foo", 1) + }) + }) + }) + suite.Require().Equal(`[{"foo":1}]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteObjectElement(func() { + w.WriteUint64Field("foo", 1) + w.WriteUint64Field("bar", 2) + }) + }) + }) + suite.Require().Equal(`[{"foo":1,"bar":2}]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteArray(func() { + w.WriteObjectElement(func() { + w.WriteUint64Field("foo", 1) + w.WriteUint64Field("bar", 2) + }) + w.WriteObjectElement(func() { + w.WriteUint64Field("The world is just a stage", 1) + w.WriteUint64Field("It's better to laugh than to cry", 2) + }) + }) + }) + suite.Require().Equal(`[{"foo":1,"bar":2},{"The world is just a 
stage":1,"It's better to laugh than to cry":2}]`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteArrayField("values", func() { + w.WriteUint64Element(1) + w.WriteUint64Element(2) + }) + }) + }) + suite.Require().Equal(`{"values":[1,2]}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteArrayField("values", func() { + w.WriteUint64Element(1) + w.WriteUint64Element(2) + }) + w.WriteUint64Field("foo", 1) + }) + }) + suite.Require().Equal(`{"values":[1,2],"foo":1}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteUint64Field("foo", 1) + w.WriteArrayField("values", func() { + w.WriteUint64Element(1) + w.WriteUint64Element(2) + }) + }) + }) + suite.Require().Equal(`{"foo":1,"values":[1,2]}`, s) +} + +func (suite *JSONWriterTestSuite) TestBase64() { + var s string + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteBase64String([]byte("foo")) + }) + suite.Require().Equal(`"Zm9v"`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteBase64StringField("foo", []byte("bar")) + }) + }) + suite.Require().Equal(`{"foo":"YmFy"}`, s) +} + +func (suite *JSONWriterTestSuite) TestField() { + var s string + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteStringField("foo", "bar") + }) + }) + suite.Require().Equal(`{"foo":"bar"}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteUint64Field("foo", 1) + }) + }) + suite.Require().Equal(`{"foo":1}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteFloat64Field("foo", 1.1) + }) + }) + suite.Require().Equal(`{"foo":1.1}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteNullField("foo") + }) + }) + suite.Require().Equal(`{"foo":null}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteAnyField("foo", nil) + }) + }) + 
suite.Require().Equal(`{"foo":null}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteAnyField("foo", 1) + }) + }) + suite.Require().Equal(`{"foo":1}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteBoolField("foo", true) + }) + }) + suite.Require().Equal(`{"foo":true}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteBoolField("foo", false) + }) + }) + suite.Require().Equal(`{"foo":false}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() {}) + }) + }) + suite.Require().Equal(`{"foo":{}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteUint64Field("bar", 1) + }) + }) + }) + suite.Require().Equal(`{"foo":{"bar":1}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteObjectField("foo", func() { + w.WriteUint64Field("bar", 1) + w.WriteStringField("abc", "def") + }) + }) + }) + suite.Require().Equal(`{"foo":{"bar":1,"abc":"def"}}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteUint64Field("bar", 1) + w.WriteStringField("abc", "def") + w.WriteNullField("xyz") + }) + }) + suite.Require().Equal(`{"bar":1,"abc":"def","xyz":null}`, s) + + s = suite.writeJSON(func(w *JSONWriter) { + w.WriteObject(func() { + w.WriteUint64Field("bar", 1) + w.WriteStringField("abc", "def") + w.WriteNullField("xyz") + w.WriteObjectField("foo", func() {}) + }) + }) + suite.Require().Equal(`{"bar":1,"abc":"def","xyz":null,"foo":{}}`, s) +} + +func TestExternalBuffer(t *testing.T) { + suite.Run(t, &JSONWriterTestSuite{useInternalBuffer: false}) +} + +func TestInternalBuffer(t *testing.T) { + suite.Run(t, &JSONWriterTestSuite{useInternalBuffer: true}) +} + +func BenchmarkExternalBufferWriteBase64(b *testing.B) { + out := &bytes.Buffer{} + str := randstr.Bytes(1024) + for i := 0; i < b.N; i++ { + 
out.Reset() + w := BorrowJSONWriter(out) + w.WriteObject(func() { + w.WriteBase64StringField("foo", str) + }) + _ = out.Bytes() + ReturnJSONWriter(w) + } +} + +func BenchmarkInternalBufferWriteBase64(b *testing.B) { + str := randstr.Bytes(1024) + for i := 0; i < b.N; i++ { + w := BorrowJSONWriter(nil) + w.WriteObject(func() { + w.WriteBase64StringField("foo", str) + }) + _ = w.Buffer() + ReturnJSONWriter(w) + } +} + +func BenchmarkExternalBufferWriteString(b *testing.B) { + out := &bytes.Buffer{} + str := randstr.String(1024) + for i := 0; i < b.N; i++ { + out.Reset() + w := BorrowJSONWriter(out) + w.WriteObject(func() { + w.WriteStringField("foo", str) + }) + _ = out.Bytes() + ReturnJSONWriter(w) + } +} + +func BenchmarkInternalBufferWriteString(b *testing.B) { + str := randstr.String(1024) + for i := 0; i < b.N; i++ { + w := BorrowJSONWriter(nil) + w.WriteObject(func() { + w.WriteStringField("foo", str) + }) + _ = w.Buffer() + ReturnJSONWriter(w) + } +} diff --git a/pkg/util/main_test.go b/pkg/util/main_test.go new file mode 100644 index 0000000000..e896af08ea --- /dev/null +++ b/pkg/util/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +} diff --git a/pkg/util/memory_test.go b/pkg/util/memory_test.go new file mode 100644 index 0000000000..5104037564 --- /dev/null +++ b/pkg/util/memory_test.go @@ -0,0 +1,27 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestGetMemoryLimit(t *testing.T) { + t.Parallel() + limit, err := GetMemoryLimit() + require.NoError(t, err) + require.Less(t, limit, memoryMax) +} diff --git a/pkg/util/uri_test.go b/pkg/util/uri_test.go new file mode 100644 index 0000000000..788fb38c59 --- /dev/null +++ b/pkg/util/uri_test.go @@ -0,0 +1,136 @@ +// Copyright 2022 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package util + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsValidIPv6AddressFormatInURI(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + host string + want bool + }{ + {"valid ipv6 address", "[::1]", true}, + {"valid ipv6 address1 with port", "[::1]:8080", true}, + {"valid ipv6 address2 with port", "[1080:0:0:0:8:800:200C:417A]:8080", true}, + {"valid ipv6 address3 with port", "[::FFFF:129.144.52.38]:8080", true}, + {"invalid ipv6 address", "::1", false}, + {"invalid ipv6 address with port", "::1:8000", false}, + } + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + t.Parallel() + require.Equal(t, test.want, IsValidIPv6AddressFormatInURI(test.host)) + }) + } +} + +func TestIsIPv6Address(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + host string + want bool + }{ + {"valid ipv6 address1", "::1", true}, + {"valid ipv6 address2", "1080:0:0:0:8:800:200C:417A", true}, + {"ipv4 address", "127.0.0.1", false}, + {"empty address", "", false}, + {"not ip address", "emmmmmmmm", false}, + } + + for _, tt := range tests { + test := tt + t.Run(test.name, func(t *testing.T) { + t.Parallel() + require.Equal(t, test.want, IsIPv6Address(test.host)) + }) + } +} + +func TestMaskSinkURI(t *testing.T) { + tests := []struct { + uri string + masked string + }{ + { + "mysql://root:123456@127.0.0.1:3306/?time-zone=Asia/Shanghai", + "mysql://root:xxxxx@127.0.0.1:3306/?time-zone=Asia/Shanghai", + }, + { + "kafka://127.0.0.1:9093/cdc?sasl-mechanism=SCRAM-SHA-256&sasl-user=ticdc&sasl-password=verysecure", + "kafka://127.0.0.1:9093/cdc?sasl-mechanism=SCRAM-SHA-256&sasl-password=xxxxx&sasl-user=ticdc", + }, + } + + for _, tt := range tests { + maskedURI, err := MaskSinkURI(tt.uri) + require.NoError(t, err) + require.Equal(t, tt.masked, maskedURI) + } +} + +func TestMaskSensitiveDataInURI(t *testing.T) { + tests := []struct { + uri string + masked string + }{ + { + 
"mysql://root:123456@127.0.0.1:3306/?time-zone=c", + "mysql://root:xxxxx@127.0.0.1:3306/?time-zone=c", + }, + { + "mysql://root:123456@127.0.0.1:3306/?access_key=c", + "mysql://root:xxxxx@127.0.0.1:3306/?access_key=xxxxx", + }, + { + "mysql://root:123456@127.0.0.1:3306/?secret_access_key=c", + "mysql://root:xxxxx@127.0.0.1:3306/?secret_access_key=xxxxx", + }, + { + "mysql://root:123456@127.0.0.1:3306/?client_secret=c", + "mysql://root:xxxxx@127.0.0.1:3306/?client_secret=xxxxx", + }, + { + "", + "", + }, + { + "abc", + "abc", + }, + } + for _, q := range sensitiveQueryParameterNames { + tests = append(tests, struct { + uri string + masked string + }{ + "kafka://127.0.0.1:9093/cdc?" + q + "=verysecure", + "kafka://127.0.0.1:9093/cdc?" + q + "=xxxxx", + }) + } + + for _, tt := range tests { + maskedURI := MaskSensitiveDataInURI(tt.uri) + require.Equal(t, tt.masked, maskedURI) + } +} diff --git a/pkg/version/main_test.go b/pkg/version/main_test.go new file mode 100644 index 0000000000..943c56a67b --- /dev/null +++ b/pkg/version/main_test.go @@ -0,0 +1,24 @@ +// Copyright 2021 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package version + +import ( + "testing" + + "github.com/pingcap/ticdc/pkg/leakutil" +) + +func TestMain(m *testing.M) { + leakutil.SetUpLeakTest(m) +}