diff --git a/cmd/backup.go b/cmd/backup.go index b856dae38..3aed2147f 100644 --- a/cmd/backup.go +++ b/cmd/backup.go @@ -7,6 +7,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" @@ -21,11 +22,11 @@ func runBackupCommand(command *cobra.Command, cmdName string) error { } func runBackupRawCommand(command *cobra.Command, cmdName string) error { - cfg := task.BackupRawConfig{Config: task.Config{LogProgress: HasLogFile()}} + cfg := task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}} if err := cfg.ParseFromFlags(command.Flags()); err != nil { return err } - return task.RunBackupRaw(GetDefaultContext(), tidbGlue, cmdName, &cfg) + return task.RunBackupRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) } // NewBackupCommand return a full backup subcommand. diff --git a/cmd/restore.go b/cmd/restore.go index 0b2792a25..6353719f2 100644 --- a/cmd/restore.go +++ b/cmd/restore.go @@ -6,6 +6,7 @@ import ( "github.com/pingcap/tidb/session" "github.com/spf13/cobra" + "github.com/pingcap/br/pkg/gluetikv" "github.com/pingcap/br/pkg/summary" "github.com/pingcap/br/pkg/task" "github.com/pingcap/br/pkg/utils" @@ -19,6 +20,16 @@ func runRestoreCommand(command *cobra.Command, cmdName string) error { return task.RunRestore(GetDefaultContext(), tidbGlue, cmdName, &cfg) } +func runRestoreRawCommand(command *cobra.Command, cmdName string) error { + cfg := task.RestoreRawConfig{ + RawKvConfig: task.RawKvConfig{Config: task.Config{LogProgress: HasLogFile()}}, + } + if err := cfg.ParseFromFlags(command.Flags()); err != nil { + return err + } + return task.RunRestoreRaw(GetDefaultContext(), gluetikv.Glue{}, cmdName, &cfg) +} + // NewRestoreCommand returns a restore subcommand func NewRestoreCommand() *cobra.Command { command := &cobra.Command{ @@ -43,6 +54,7 @@ func NewRestoreCommand() *cobra.Command { 
newFullRestoreCommand(), newDbRestoreCommand(), newTableRestoreCommand(), + newRawRestoreCommand(), ) task.DefineRestoreFlags(command.PersistentFlags()) @@ -83,3 +95,16 @@ func newTableRestoreCommand() *cobra.Command { task.DefineTableFlags(command) return command } + +func newRawRestoreCommand() *cobra.Command { + command := &cobra.Command{ + Use: "raw", + Short: "(experimental) restore a raw kv range to TiKV cluster", + RunE: func(cmd *cobra.Command, _ []string) error { + return runRestoreRawCommand(cmd, "Raw restore") + }, + } + + task.DefineRawRestoreFlags(command) + return command +} diff --git a/pkg/gluetidb/glue.go b/pkg/gluetidb/glue.go index 333053b97..80756d2c2 100644 --- a/pkg/gluetidb/glue.go +++ b/pkg/gluetidb/glue.go @@ -8,19 +8,20 @@ import ( "github.com/pingcap/parser/model" pd "github.com/pingcap/pd/v4/client" - "github.com/pingcap/tidb/config" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/executor" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/meta/autoid" "github.com/pingcap/tidb/session" - "github.com/pingcap/tidb/store/tikv" "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/gluetikv" ) // Glue is an implementation of glue.Glue using a new TiDB session. 
-type Glue struct{} +type Glue struct { + tikvGlue gluetikv.Glue +} type tidbSession struct { se session.Session @@ -41,15 +42,8 @@ func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { } // Open implements glue.Glue -func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { - if option.CAPath != "" { - conf := config.GetGlobalConfig() - conf.Security.ClusterSSLCA = option.CAPath - conf.Security.ClusterSSLCert = option.CertPath - conf.Security.ClusterSSLKey = option.KeyPath - config.StoreGlobalConfig(conf) - } - return tikv.Driver{}.Open(path) +func (g Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + return g.tikvGlue.Open(path, option) } // OwnsStorage implements glue.Glue diff --git a/pkg/gluetikv/glue.go b/pkg/gluetikv/glue.go new file mode 100644 index 000000000..e63b35b95 --- /dev/null +++ b/pkg/gluetikv/glue.go @@ -0,0 +1,43 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package gluetikv + +import ( + pd "github.com/pingcap/pd/v4/client" + "github.com/pingcap/tidb/config" + "github.com/pingcap/tidb/domain" + "github.com/pingcap/tidb/kv" + "github.com/pingcap/tidb/store/tikv" + + "github.com/pingcap/br/pkg/glue" +) + +// Glue is an implementation of glue.Glue that accesses only TiKV without TiDB. 
+type Glue struct{} + +// GetDomain implements glue.Glue +func (Glue) GetDomain(store kv.Storage) (*domain.Domain, error) { + return nil, nil +} + +// CreateSession implements glue.Glue +func (Glue) CreateSession(store kv.Storage) (glue.Session, error) { + return nil, nil +} + +// Open implements glue.Glue +func (Glue) Open(path string, option pd.SecurityOption) (kv.Storage, error) { + if option.CAPath != "" { + conf := config.GetGlobalConfig() + conf.Security.ClusterSSLCA = option.CAPath + conf.Security.ClusterSSLCert = option.CertPath + conf.Security.ClusterSSLKey = option.KeyPath + config.StoreGlobalConfig(conf) + } + return tikv.Driver{}.Open(path) +} + +// OwnsStorage implements glue.Glue +func (Glue) OwnsStorage() bool { + return true +} diff --git a/pkg/restore/client.go b/pkg/restore/client.go index ba409ec32..97467d913 100644 --- a/pkg/restore/client.go +++ b/pkg/restore/client.go @@ -3,6 +3,7 @@ package restore import ( + "bytes" "context" "crypto/tls" "encoding/hex" @@ -108,33 +109,97 @@ func (rc *Client) IsOnline() bool { // Close a client func (rc *Client) Close() { - rc.db.Close() + // rc.db can be nil in raw kv mode. 
+ if rc.db != nil { + rc.db.Close() + } rc.cancel() log.Info("Restore client closed") } // InitBackupMeta loads schemas from BackupMeta to initialize RestoreClient func (rc *Client) InitBackupMeta(backupMeta *backup.BackupMeta, backend *backup.StorageBackend) error { - databases, err := utils.LoadBackupTables(backupMeta) - if err != nil { - return errors.Trace(err) - } - var ddlJobs []*model.Job - err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) - if err != nil { - return errors.Trace(err) + if !backupMeta.IsRawKv { + databases, err := utils.LoadBackupTables(backupMeta) + if err != nil { + return errors.Trace(err) + } + rc.databases = databases + + var ddlJobs []*model.Job + err = json.Unmarshal(backupMeta.GetDdls(), &ddlJobs) + if err != nil { + return errors.Trace(err) + } + rc.ddlJobs = ddlJobs } - rc.databases = databases - rc.ddlJobs = ddlJobs rc.backupMeta = backupMeta log.Info("load backupmeta", zap.Int("databases", len(rc.databases)), zap.Int("jobs", len(rc.ddlJobs))) metaClient := NewSplitClient(rc.pdClient, rc.tlsConf) importClient := NewImportClient(metaClient, rc.tlsConf) - rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, rc.rateLimit) + rc.fileImporter = NewFileImporter(rc.ctx, metaClient, importClient, backend, backupMeta.IsRawKv, rc.rateLimit) return nil } +// IsRawKvMode checks whether the backup data is in raw kv format, in which case transactional recover is forbidden. +func (rc *Client) IsRawKvMode() bool { + return rc.backupMeta.IsRawKv +} + +// GetFilesInRawRange gets all files that are in the given range or intersects with the given range. +func (rc *Client) GetFilesInRawRange(startKey []byte, endKey []byte, cf string) ([]*backup.File, error) { + if !rc.IsRawKvMode() { + return nil, errors.New("the backup data is not in raw kv mode") + } + + for _, rawRange := range rc.backupMeta.RawRanges { + // First check whether the given range is backup-ed. If not, we cannot perform the restore. 
+ if rawRange.Cf != cf { + continue + } + + if (len(rawRange.EndKey) > 0 && bytes.Compare(startKey, rawRange.EndKey) >= 0) || + (len(endKey) > 0 && bytes.Compare(rawRange.StartKey, endKey) >= 0) { + // The restoring range is totally out of the current range. Skip it. + continue + } + + if bytes.Compare(startKey, rawRange.StartKey) < 0 || + utils.CompareEndKey(endKey, rawRange.EndKey) > 0 { + // Only partial of the restoring range is in the current backup-ed range. So the given range can't be fully + // restored. + return nil, errors.New("the given range to restore is not fully covered by the range that was backed up") + } + + // We have found the range that contains the given range. Find all necessary files. + files := make([]*backup.File, 0) + + for _, file := range rc.backupMeta.Files { + if file.Cf != cf { + continue + } + + if len(file.EndKey) > 0 && bytes.Compare(file.EndKey, startKey) < 0 { + // The file is before the range to be restored. + continue + } + if len(endKey) > 0 && bytes.Compare(endKey, file.StartKey) <= 0 { + // The file is after the range to be restored. + // The specified endKey is exclusive, so when it equals to a file's startKey, the file is still skipped. + continue + } + + files = append(files, file) + } + + // There should be at most one backed up range that covers the restoring range. + return files, nil + } + + return nil, errors.New("no backup data in the range") +} + // SetConcurrency sets the concurrency of dbs tables files func (rc *Client) SetConcurrency(c uint) { rc.workerPool = utils.NewWorkerPool(c, "file") @@ -334,6 +399,63 @@ func (rc *Client) RestoreFiles( return nil } +// RestoreRaw tries to restore raw keys in the specified range. 
+func (rc *Client) RestoreRaw(startKey []byte, endKey []byte, files []*backup.File, updateCh chan<- struct{}) error { + start := time.Now() + defer func() { + elapsed := time.Since(start) + log.Info("Restore Raw", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Duration("take", elapsed)) + }() + errCh := make(chan error, len(files)) + wg := new(sync.WaitGroup) + defer close(errCh) + + err := rc.fileImporter.SetRawRange(startKey, endKey) + if err != nil { + + return errors.Trace(err) + } + + emptyRules := &RewriteRules{} + for _, file := range files { + wg.Add(1) + fileReplica := file + rc.workerPool.Apply( + func() { + defer wg.Done() + select { + case <-rc.ctx.Done(): + errCh <- nil + case errCh <- rc.fileImporter.Import(fileReplica, emptyRules): + updateCh <- struct{}{} + } + }) + } + for range files { + err := <-errCh + if err != nil { + rc.cancel() + wg.Wait() + log.Error( + "restore raw range failed", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Error(err), + ) + return err + } + } + log.Info( + "finish to restore raw range", + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("endKey", hex.EncodeToString(endKey)), + ) + return nil +} + //SwitchToImportMode switch tikv cluster to import mode func (rc *Client) SwitchToImportMode(ctx context.Context) error { return rc.switchTiKVMode(ctx, import_sstpb.SwitchMode_Import) diff --git a/pkg/restore/db.go b/pkg/restore/db.go index 7251b9f24..d4a4a0a41 100644 --- a/pkg/restore/db.go +++ b/pkg/restore/db.go @@ -29,6 +29,10 @@ func NewDB(g glue.Glue, store kv.Storage) (*DB, error) { if err != nil { return nil, errors.Trace(err) } + // The session may be nil in raw kv mode + if se == nil { + return nil, nil + } // Set SQL mode to None for avoiding SQL compatibility problem err = se.Execute(context.Background(), "set @@sql_mode=''") if err != nil { diff --git 
a/pkg/restore/import.go b/pkg/restore/import.go index 9b96509ea..fec07a870 100644 --- a/pkg/restore/import.go +++ b/pkg/restore/import.go @@ -3,6 +3,7 @@ package restore import ( + "bytes" "context" "crypto/tls" "strings" @@ -134,6 +135,10 @@ type FileImporter struct { backend *backup.StorageBackend rateLimit uint64 + isRawKvMode bool + rawStartKey []byte + rawEndKey []byte + ctx context.Context cancel context.CancelFunc } @@ -144,6 +149,7 @@ func NewFileImporter( metaClient SplitClient, importClient ImporterClient, backend *backup.StorageBackend, + isRawKvMode bool, rateLimit uint64, ) FileImporter { ctx, cancel := context.WithCancel(ctx) @@ -153,16 +159,34 @@ func NewFileImporter( ctx: ctx, cancel: cancel, importClient: importClient, + isRawKvMode: isRawKvMode, rateLimit: rateLimit, } } +// SetRawRange sets the range to be restored in raw kv mode. +func (importer *FileImporter) SetRawRange(startKey, endKey []byte) error { + if !importer.isRawKvMode { + return errors.New("file importer is not in raw kv mode") + } + importer.rawStartKey = startKey + importer.rawEndKey = endKey + return nil +} + // Import tries to import a file. // All rules must contain encoded keys. 
func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRules) error { log.Debug("import file", zap.Stringer("file", file)) // Rewrite the start key and end key of file to scan regions - startKey, endKey, err := rewriteFileKeys(file, rewriteRules) + var startKey, endKey []byte + var err error + if importer.isRawKvMode { + startKey = file.StartKey + endKey = file.EndKey + } else { + startKey, endKey, err = rewriteFileKeys(file, rewriteRules) + } if err != nil { return err } @@ -187,7 +211,11 @@ func (importer *FileImporter) Import(file *backup.File, rewriteRules *RewriteRul var downloadMeta *import_sstpb.SSTMeta errDownload := utils.WithRetry(importer.ctx, func() error { var e error - downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + if importer.isRawKvMode { + downloadMeta, e = importer.downloadRawKVSST(info, file) + } else { + downloadMeta, e = importer.downloadSST(info, file, rewriteRules) + } return e }, newDownloadSSTBackoffer()) if errDownload != nil { @@ -303,6 +331,7 @@ func (importer *FileImporter) downloadSST( NewKeyPrefix: encodeKeyPrefix(regionRule.GetNewKeyPrefix()), } sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + req := &import_sstpb.DownloadRequest{ Sst: sstMeta, StorageBackend: importer.backend, @@ -328,6 +357,55 @@ func (importer *FileImporter) downloadSST( return &sstMeta, nil } +func (importer *FileImporter) downloadRawKVSST( + regionInfo *RegionInfo, + file *backup.File, +) (*import_sstpb.SSTMeta, error) { + id, err := uuid.New().MarshalBinary() + if err != nil { + return nil, errors.Trace(err) + } + // Empty rule + var rule import_sstpb.RewriteRule + sstMeta := getSSTMetaFromFile(id, file, regionInfo.Region, &rule) + + // Cut the SST file's range to fit in the restoring range. + if bytes.Compare(importer.rawStartKey, sstMeta.Range.GetStart()) > 0 { + sstMeta.Range.Start = importer.rawStartKey + } + // TODO: importer.RawEndKey is exclusive but sstMeta.Range.End is inclusive. 
How to exclude importer.RawEndKey? + if len(importer.rawEndKey) > 0 && bytes.Compare(importer.rawEndKey, sstMeta.Range.GetEnd()) < 0 { + sstMeta.Range.End = importer.rawEndKey + } + if bytes.Compare(sstMeta.Range.GetStart(), sstMeta.Range.GetEnd()) > 0 { + return nil, errors.Trace(errRangeIsEmpty) + } + + req := &import_sstpb.DownloadRequest{ + Sst: sstMeta, + StorageBackend: importer.backend, + Name: file.GetName(), + RewriteRule: rule, + } + log.Debug("download SST", + zap.Stringer("sstMeta", &sstMeta), + zap.Stringer("region", regionInfo.Region), + ) + var resp *import_sstpb.DownloadResponse + for _, peer := range regionInfo.Region.GetPeers() { + resp, err = importer.importClient.DownloadSST(importer.ctx, peer.GetStoreId(), req) + if err != nil { + return nil, extractDownloadSSTError(err) + } + if resp.GetIsEmpty() { + return nil, errors.Trace(errRangeIsEmpty) + } + } + sstMeta.Range.Start = resp.Range.GetStart() + sstMeta.Range.End = resp.Range.GetEnd() + return &sstMeta, nil +} + func (importer *FileImporter) ingestSST( sstMeta *import_sstpb.SSTMeta, regionInfo *RegionInfo, diff --git a/pkg/task/backup_raw.go b/pkg/task/backup_raw.go index a51e80e95..55299bbb1 100644 --- a/pkg/task/backup_raw.go +++ b/pkg/task/backup_raw.go @@ -27,8 +27,8 @@ const ( flagEndKey = "end" ) -// BackupRawConfig is the configuration specific for backup tasks. -type BackupRawConfig struct { +// RawKvConfig is the common config for rawkv backup and restore. +type RawKvConfig struct { Config StartKey []byte `json:"start-key" toml:"start-key"` @@ -45,7 +45,7 @@ func DefineRawBackupFlags(command *cobra.Command) { } // ParseFromFlags parses the backup-related flags from the flag set. 
-func (cfg *BackupRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { +func (cfg *RawKvConfig) ParseFromFlags(flags *pflag.FlagSet) error { format, err := flags.GetString(flagKeyFormat) if err != nil { return err @@ -82,7 +82,7 @@ func (cfg *BackupRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { } // RunBackupRaw starts a backup task inside the current goroutine. -func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *BackupRawConfig) error { +func RunBackupRaw(c context.Context, g glue.Glue, cmdName string, cfg *RawKvConfig) error { ctx, cancel := context.WithCancel(c) defer cancel() diff --git a/pkg/task/restore.go b/pkg/task/restore.go index 4dac5f869..f2143764c 100644 --- a/pkg/task/restore.go +++ b/pkg/task/restore.go @@ -107,6 +107,10 @@ func RunRestore(c context.Context, g glue.Glue, cmdName string, cfg *RestoreConf return err } + if client.IsRawKvMode() { + return errors.New("cannot do transactional restore from raw kv data") + } + files, tables, err := filterRestoreFiles(client, cfg) if err != nil { return err diff --git a/pkg/task/restore_raw.go b/pkg/task/restore_raw.go new file mode 100644 index 000000000..8511003a1 --- /dev/null +++ b/pkg/task/restore_raw.go @@ -0,0 +1,131 @@ +// Copyright 2020 PingCAP, Inc. Licensed under Apache-2.0. + +package task + +import ( + "context" + + "github.com/pingcap/errors" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/pingcap/br/pkg/conn" + "github.com/pingcap/br/pkg/glue" + "github.com/pingcap/br/pkg/restore" + "github.com/pingcap/br/pkg/summary" + "github.com/pingcap/br/pkg/utils" +) + +// RestoreRawConfig is the configuration specific for raw kv restore tasks. +type RestoreRawConfig struct { + RawKvConfig + + Online bool `json:"online" toml:"online"` +} + +// DefineRawRestoreFlags defines common flags for the backup command. 
+func DefineRawRestoreFlags(command *cobra.Command) { + command.Flags().StringP(flagKeyFormat, "", "hex", "start/end key format, support raw|escaped|hex") + command.Flags().StringP(flagTiKVColumnFamily, "", "default", "restore specify cf, correspond to tikv cf") + command.Flags().StringP(flagStartKey, "", "", "restore raw kv start key, key is inclusive") + command.Flags().StringP(flagEndKey, "", "", "restore raw kv end key, key is exclusive") + + command.Flags().Bool(flagOnline, false, "Whether online when restore") + // TODO remove hidden flag if it's stable + _ = command.Flags().MarkHidden(flagOnline) +} + +// ParseFromFlags parses the backup-related flags from the flag set. +func (cfg *RestoreRawConfig) ParseFromFlags(flags *pflag.FlagSet) error { + var err error + cfg.Online, err = flags.GetBool(flagOnline) + if err != nil { + return errors.Trace(err) + } + return cfg.RawKvConfig.ParseFromFlags(flags) +} + +// RunRestoreRaw starts a raw kv restore task inside the current goroutine. +func RunRestoreRaw(c context.Context, g glue.Glue, cmdName string, cfg *RestoreRawConfig) error { + ctx, cancel := context.WithCancel(c) + defer cancel() + + mgr, err := newMgr(ctx, g, cfg.PD, cfg.TLS, conn.ErrorOnTiFlash) + if err != nil { + return err + } + defer mgr.Close() + + client, err := restore.NewRestoreClient(ctx, g, mgr.GetPDClient(), mgr.GetTiKV(), mgr.GetTLSConfig()) + if err != nil { + return err + } + defer client.Close() + client.SetRateLimit(cfg.RateLimit) + client.SetConcurrency(uint(cfg.Concurrency)) + if cfg.Online { + client.EnableOnline() + } + + defer summary.Summary(cmdName) + + u, _, backupMeta, err := ReadBackupMeta(ctx, &cfg.Config) + if err != nil { + return err + } + if err = client.InitBackupMeta(backupMeta, u); err != nil { + return err + } + + if !client.IsRawKvMode() { + return errors.New("cannot do raw restore from transactional data") + } + + files, err := client.GetFilesInRawRange(cfg.StartKey, cfg.EndKey, cfg.CF) + if err != nil { + return 
errors.Trace(err) + } + + if len(files) == 0 { + return errors.New("all files are filtered out from the backup archive, nothing to restore") + } + summary.CollectInt("restore files", len(files)) + + ranges, err := restore.ValidateFileRanges(files, nil) + if err != nil { + return errors.Trace(err) + } + + // Redirect to log if there is no log file to avoid unreadable output. + // TODO: How to show progress? + updateCh := utils.StartProgress( + ctx, + "Raw Restore", + // Split/Scatter + Download/Ingest + int64(len(ranges)+len(files)), + !cfg.LogProgress) + + err = restore.SplitRanges(ctx, client, ranges, nil, updateCh) + if err != nil { + return errors.Trace(err) + } + + removedSchedulers, err := restorePreWork(ctx, client, mgr) + if err != nil { + return errors.Trace(err) + } + + err = client.RestoreRaw(cfg.StartKey, cfg.EndKey, files, updateCh) + if err != nil { + return errors.Trace(err) + } + + err = restorePostWork(ctx, client, mgr, removedSchedulers) + if err != nil { + return errors.Trace(err) + } + // Restore has finished. + close(updateCh) + + return nil +} diff --git a/pkg/utils/key.go b/pkg/utils/key.go index ecaa5fce2..8caeb2833 100644 --- a/pkg/utils/key.go +++ b/pkg/utils/key.go @@ -70,3 +70,21 @@ func unescapedKey(text string) ([]byte, error) { } return buf, nil } + +// CompareEndKey compared two keys that BOTH represent the EXCLUSIVE ending of some range. An empty end key is the very +// end, so an empty key is greater than any other keys. +// Please note that this function is not applicable if any one argument is not an EXCLUSIVE ending of a range. 
+func CompareEndKey(a, b []byte) int { + if len(a) == 0 { + if len(b) == 0 { + return 0 + } + return 1 + } + + if len(b) == 0 { + return -1 + } + + return bytes.Compare(a, b) +} diff --git a/pkg/utils/key_test.go b/pkg/utils/key_test.go index e314fbeb5..3e20bae24 100644 --- a/pkg/utils/key_test.go +++ b/pkg/utils/key_test.go @@ -32,3 +32,23 @@ func (r *testKeySuite) TestParseKey(c *C) { c.Assert(err, ErrorMatches, "*unknown format*") } + +func (r *testKeySuite) TestCompareEndKey(c *C) { + res := CompareEndKey([]byte("1"), []byte("2")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte("1"), []byte("1")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte("2"), []byte("1")) + c.Assert(res, Greater, 0) + + res = CompareEndKey([]byte("1"), []byte("")) + c.Assert(res, Less, 0) + + res = CompareEndKey([]byte(""), []byte("")) + c.Assert(res, Equals, 0) + + res = CompareEndKey([]byte(""), []byte("1")) + c.Assert(res, Greater, 0) +} diff --git a/tests/br_rawkv/run.sh b/tests/br_rawkv/run.sh index a3f62311f..f57e76827 100644 --- a/tests/br_rawkv/run.sh +++ b/tests/br_rawkv/run.sh @@ -17,13 +17,22 @@ set -eu BACKUP_DIR="raw_backup" +checksum() { + bin/rawkv --pd $PD_ADDR --mode checksum --start-key $1 --end-key $2 | grep result | awk '{print $3}' +} + +fail_and_exit() { + echo "TEST: [$TEST_NAME] failed!" + exit 1 +} + +checksum_empty=$(checksum 31 3130303030303030) + # generate raw kv randomly in range[start-key, end-key) in 10s bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10 -# output checksum -bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out - -checksum_ori=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}') +checksum_ori=$(checksum 31 3130303030303030) +checksum_partial=$(checksum 311111 311122) # backup rawkv echo "backup start..." 
@@ -32,21 +41,45 @@ run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 -- # delete data in range[start-key, end-key) bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030 -# TODO: Finish check after restore ready +# Ensure the data is deleted +checksum_new=$(checksum 31 3130303030303030) + +if [ "$checksum_new" != "$checksum_empty" ];then + echo "failed to delete data in range" + fail_and_exit +fi + # restore rawkv -# echo "restore start..." -# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4 +echo "restore start..." +run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4 -# output checksum after restore -# bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out +checksum_new=$(checksum 31 3130303030303030) -checksum_new=$(cat /$TEST_DIR/checksum.out | grep result | awk '{print $3}') +if [ "$checksum_new" != "$checksum_ori" ];then + echo "checksum failed after restore" + fail_and_exit +fi -if [ "$checksum_ori" == "$checksum_new" ];then - echo "TEST: [$TEST_NAME] successed!" -else - echo "TEST: [$TEST_NAME] failed!" - exit 1 +# delete data in range[start-key, end-key) +bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030 + +# Ensure the data is deleted +checksum_new=$(checksum 31 3130303030303030) + +if [ "$checksum_new" != "$checksum_empty" ];then + echo "failed to delete data in range" + fail_and_exit fi +# FIXME restore rawkv partially after change endkey to inclusive +# echo "restore start..." 
+# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 311111 --end 311122 --format hex --concurrency 4 +# +# checksum_new=$(checksum 31 3130303030303030) +# +# if [ "$checksum_new" != "$checksum_partial" ];then +# echo "checksum failed after restore" +# fail_and_exit +# fi echo "TEST: [$TEST_NAME] succeeded!"