diff --git a/cmd/backup.go b/cmd/backup.go
index 844003a6d..b856dae38 100644
--- a/cmd/backup.go
+++ b/cmd/backup.go
@@ -102,9 +102,10 @@ func newTableBackupCommand() *cobra.Command {
 
 // newRawBackupCommand return a raw kv range backup subcommand.
 func newRawBackupCommand() *cobra.Command {
+	// TODO: remove experimental tag if it's stable
 	command := &cobra.Command{
 		Use:   "raw",
-		Short: "backup a raw kv range from TiKV cluster",
+		Short: "(experimental) backup a raw kv range from TiKV cluster",
 		RunE: func(command *cobra.Command, _ []string) error {
 			return runBackupRawCommand(command, "Raw backup")
 		},
diff --git a/pkg/backup/client.go b/pkg/backup/client.go
index aa16d072f..07a8fb5f1 100644
--- a/pkg/backup/client.go
+++ b/pkg/backup/client.go
@@ -323,8 +323,8 @@ func (bc *Client) BackupRanges(
 		close(errCh)
 	}()
 
-	// Check GC safepoint every 30s.
-	t := time.NewTicker(time.Second * 30)
+	// Check GC safepoint every 5s.
+	t := time.NewTicker(time.Second * 5)
 	defer t.Stop()
 
 	finished := false
@@ -334,6 +334,13 @@ func (bc *Client) BackupRanges(
 			log.Error("check GC safepoint failed", zap.Error(err))
 			return err
 		}
+		if req.StartVersion > 0 {
+			err = CheckGCSafepoint(ctx, bc.mgr.GetPDClient(), req.StartVersion)
+			if err != nil {
+				log.Error("Check gc safepoint for last backup ts failed", zap.Error(err))
+				return err
+			}
+		}
 		if finished {
 			// Return error (if there is any) before finishing backup.
 			return err
diff --git a/pkg/storage/gcs.go b/pkg/storage/gcs.go
index 7e105929f..4af3ea059 100644
--- a/pkg/storage/gcs.go
+++ b/pkg/storage/gcs.go
@@ -48,28 +48,11 @@ func (options *GCSBackendOptions) apply(gcs *backup.GCS) error {
 }
 
 func defineGCSFlags(flags *pflag.FlagSet) {
-	flags.String(gcsEndpointOption, "", "Set the GCS endpoint URL")
-
-	flags.String(gcsStorageClassOption, "", `Specify the GCS storage class for objects.
-If it is not set, objects uploaded are
-followed by the default storage class of the bucket.
-See https://cloud.google.com/storage/docs/storage-classes
-for valid values.`)
-
-	flags.String(gcsPredefinedACL, "", `Specify the GCS predefined acl for objects.
-If it is not set, objects uploaded are
-followed by the acl of bucket scope.
-See https://cloud.google.com/storage/docs/access-control/lists#predefined-acl
-for valid values.`)
-
-	flags.String(gcsCredentialsFile, "", `Set the GCS credentials file path.
-You can get one from
-https://console.cloud.google.com/apis/credentials.`)
-
-	_ = flags.MarkHidden(gcsEndpointOption)
-	_ = flags.MarkHidden(gcsStorageClassOption)
-	_ = flags.MarkHidden(gcsPredefinedACL)
-	_ = flags.MarkHidden(gcsCredentialsFile)
+	// TODO: remove experimental tag if it's stable
+	flags.String(gcsEndpointOption, "", "(experimental) Set the GCS endpoint URL")
+	flags.String(gcsStorageClassOption, "", "(experimental) Specify the GCS storage class for objects")
+	flags.String(gcsPredefinedACL, "", "(experimental) Specify the GCS predefined acl for objects")
+	flags.String(gcsCredentialsFile, "", "(experimental) Set the GCS credentials file path")
 }
 
 func (options *GCSBackendOptions) parseFromFlags(flags *pflag.FlagSet) error {
diff --git a/pkg/storage/s3.go b/pkg/storage/s3.go
index 4758cad6d..bf24b9a2b 100644
--- a/pkg/storage/s3.go
+++ b/pkg/storage/s3.go
@@ -104,19 +104,14 @@ func (options *S3BackendOptions) apply(s3 *backup.S3) error {
 }
 
 func defineS3Flags(flags *pflag.FlagSet) {
-	flags.String(s3EndpointOption, "", "Set the S3 endpoint URL, please specify the http or https scheme explicitly")
-	flags.String(s3RegionOption, "", "Set the S3 region, e.g. us-east-1")
-	flags.String(s3StorageClassOption, "", "Set the S3 storage class, e.g. STANDARD")
-	flags.String(s3SSEOption, "", "Set the S3 server-side encryption algorithm, e.g. AES256")
-	flags.String(s3ACLOption, "", "Set the S3 canned ACLs, e.g. authenticated-read")
-	flags.String(s3ProviderOption, "", "Set the S3 provider, e.g. aws, alibaba, ceph")
-
-	_ = flags.MarkHidden(s3EndpointOption)
-	_ = flags.MarkHidden(s3RegionOption)
-	_ = flags.MarkHidden(s3StorageClassOption)
-	_ = flags.MarkHidden(s3SSEOption)
-	_ = flags.MarkHidden(s3ACLOption)
-	_ = flags.MarkHidden(s3ProviderOption)
+	// TODO: remove experimental tag if it's stable
+	flags.String(s3EndpointOption, "",
+		"(experimental) Set the S3 endpoint URL, please specify the http or https scheme explicitly")
+	flags.String(s3RegionOption, "", "(experimental) Set the S3 region, e.g. us-east-1")
+	flags.String(s3StorageClassOption, "", "(experimental) Set the S3 storage class, e.g. STANDARD")
+	flags.String(s3SSEOption, "", "(experimental) Set the S3 server-side encryption algorithm, e.g. AES256")
+	flags.String(s3ACLOption, "", "(experimental) Set the S3 canned ACLs, e.g. authenticated-read")
+	flags.String(s3ProviderOption, "", "(experimental) Set the S3 provider, e.g. aws, alibaba, ceph")
 }
 
 func (options *S3BackendOptions) parseFromFlags(flags *pflag.FlagSet) error {
diff --git a/pkg/task/backup.go b/pkg/task/backup.go
index b4ece838d..bee2102f5 100644
--- a/pkg/task/backup.go
+++ b/pkg/task/backup.go
@@ -42,8 +42,8 @@ func DefineBackupFlags(flags *pflag.FlagSet) {
 		flagBackupTimeago, 0,
 		"The history version of the backup task, e.g. 1m, 1h. Do not exceed GCSafePoint")
 
-	flags.Uint64(flagLastBackupTS, 0, "the last time backup ts")
-	_ = flags.MarkHidden(flagLastBackupTS)
+	// TODO: remove experimental tag if it's stable
+	flags.Uint64(flagLastBackupTS, 0, "(experimental) the last time backup ts")
 }
 
 // ParseFromFlags parses the backup-related flags from the flag set.
@@ -111,6 +111,10 @@ func RunBackup(c context.Context, g glue.Glue, cmdName string, cfg *BackupConfig
 
 	ddlJobs := make([]*model.Job, 0)
 	if cfg.LastBackupTS > 0 {
+		if backupTS < cfg.LastBackupTS {
+			log.Error("LastBackupTS is larger than current TS")
+			return errors.New("LastBackupTS is larger than current TS")
+		}
 		err = backup.CheckGCSafepoint(ctx, mgr.GetPDClient(), cfg.LastBackupTS)
 		if err != nil {
 			log.Error("Check gc safepoint for last backup ts failed", zap.Error(err))
diff --git a/pkg/task/restore.go b/pkg/task/restore.go
index f8333d7ff..c759fe8d5 100644
--- a/pkg/task/restore.go
+++ b/pkg/task/restore.go
@@ -48,9 +48,8 @@ type RestoreConfig struct {
 
 // DefineRestoreFlags defines common flags for the restore command.
 func DefineRestoreFlags(flags *pflag.FlagSet) {
-	flags.Bool("online", false, "Whether online when restore")
-	// TODO remove hidden flag if it's stable
-	_ = flags.MarkHidden("online")
+	// TODO remove experimental tag if it's stable
+	flags.Bool("online", false, "(experimental) Whether online when restore")
 }
 
 // ParseFromFlags parses the restore-related flags from the flag set.
diff --git a/tests/br_z_gc_safepoint/run.sh b/tests/br_z_gc_safepoint/run.sh
index 916ca1fa8..a76e97501 100755
--- a/tests/br_z_gc_safepoint/run.sh
+++ b/tests/br_z_gc_safepoint/run.sh
@@ -23,6 +23,8 @@ set -eu
 DB="$TEST_NAME"
 TABLE="usertable"
 
+MAX_UINT64=9223372036854775807
+
 run_sql "CREATE DATABASE $DB;"
 
 go-ycsb load mysql -P tests/$TEST_NAME/workload -p mysql.host=$TIDB_IP -p mysql.port=$TIDB_PORT -p mysql.user=root -p mysql.db=$DB
@@ -39,7 +41,25 @@ echo "backup start (expect fail)..."
 run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
 
 if [ "$backup_gc_fail" -ne "1" ];then
-    echo "TEST: [$TEST_NAME] failed!"
+    echo "TEST: [$TEST_NAME] test check backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts 1 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check last backup ts failed!"
+    exit 1
+fi
+
+backup_gc_fail=0
+echo "incremental backup with max_uint64 start (expect fail)..."
+run_br --pd $PD_ADDR backup table -s "local://$TEST_DIR/$DB" --db $DB -t $TABLE --lastbackupts $MAX_UINT64 --ratelimit 1 --ratelimit-unit 1 || backup_gc_fail=1
+
+if [ "$backup_gc_fail" -ne "1" ];then
+    echo "TEST: [$TEST_NAME] test check max backup ts failed!"
     exit 1
 fi
 
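Taken together, the Go changes above gate an incremental backup on two conditions: --lastbackupts must not be newer than the timestamp chosen for the current backup, and it must still sit above the GC safepoint (otherwise the delta since the last backup may already have been garbage-collected). The sketch below restates that validation in isolation; it is not part of the patch, and the helper name, the standalone errors/fmt usage, and the exact safepoint comparison are illustrative assumptions (the real code calls backup.CheckGCSafepoint against PD rather than taking the safepoint as an argument).

package main

import (
	"errors"
	"fmt"
	"math"
)

// validateLastBackupTS is a hypothetical helper mirroring the two checks the
// patch introduces for incremental backups.
func validateLastBackupTS(lastBackupTS, backupTS, gcSafepoint uint64) error {
	if lastBackupTS > backupTS {
		// Mirrors the new guard in RunBackup: the starting version of an
		// incremental backup cannot lie in the future.
		return errors.New("LastBackupTS is larger than current TS")
	}
	if lastBackupTS <= gcSafepoint {
		// Assumed behaviour of the safepoint check: data at or before the
		// safepoint may already be garbage-collected, so the delta since
		// lastBackupTS cannot be read consistently.
		return fmt.Errorf("LastBackupTS %d is not larger than GC safepoint %d", lastBackupTS, gcSafepoint)
	}
	return nil
}

func main() {
	// The two failing cases exercised by the updated test script: a stale
	// lastbackupts (1) and an impossibly large one (max uint64). The
	// timestamp values here are arbitrary placeholders.
	fmt.Println(validateLastBackupTS(1, 4_000_000, 3_000_000))
	fmt.Println(validateLastBackupTS(math.MaxUint64, 4_000_000, 3_000_000))
}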