From cf398f455dce64bc9d8545a6b700fc3b714d37fc Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 10:23:32 +0800 Subject: [PATCH 1/6] README, docker: add quick start Signed-off-by: Neil Shen --- .dockerignore | 1 + .gitignore | 2 + README.md | 38 +++ docker-compose.yaml | 194 ++++++++++++++++ docker/Dockerfile | 17 ++ docker/config/pd.toml | 81 +++++++ docker/config/tidb.toml | 90 ++++++++ docker/config/tikv.toml | 501 ++++++++++++++++++++++++++++++++++++++++ 8 files changed, 924 insertions(+) create mode 120000 .dockerignore create mode 100644 docker-compose.yaml create mode 100644 docker/Dockerfile create mode 100644 docker/config/pd.toml create mode 100644 docker/config/tidb.toml create mode 100644 docker/config/tikv.toml diff --git a/.dockerignore b/.dockerignore new file mode 120000 index 000000000..3e4e48b0b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.gitignore \ No newline at end of file diff --git a/.gitignore b/.gitignore index e104ab6e8..e61a56bde 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ backupmeta *.ngo *.coverprofile coverage.txt +docker/data/ +docker/logs/ diff --git a/README.md b/README.md index 55444fdec..0c5e9c485 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,44 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. +## Get started in 2 minute! + +```sh +# Start TiDB cluster +docker-compose -f docker-compose.yaml rm -s -v && \ +docker-compose -f docker-compose.yaml build && \ +docker-compose -f docker-compose.yaml up --remove-orphans + +# Attch to control container runs BR +docker exec -it br_control_1 bash + +# Load testing data to TiDB +cd /go/src/github.com/pingcap/go-ycsb && \ +make && \ +bin/go-ycsb load mysql -p workload=core \ + -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ + -p recordcount=100000 -p threadcount=100 + +# How many rows do we get? +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" + +# Build BR and backup! +cd /go/src/github.com/pingcap/br && \ +make release && \ +bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_backup.log" + +# Let's drop database. +mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" + +# Restore! +bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ + --log-file "/logs/br_restore.log" + +# How many rows do we get again? +mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" +``` + ## Contributing Contributions are welcomed and greatly appreciated. See [CONTRIBUTING](./CONTRIBUTING.md) diff --git a/docker-compose.yaml b/docker-compose.yaml new file mode 100644 index 000000000..4d84c67fa --- /dev/null +++ b/docker-compose.yaml @@ -0,0 +1,194 @@ +--- +# Source: tidb-docker-compose/templates/docker-compose.yml +version: '2.1' + +services: + control: + image: control:latest + build: + context: . 
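      # The control service is the workbench for the quick start: BR and
      # go-ycsb are run from inside it. Its build context is the repository
      # root, and the new .dockerignore (a symlink to .gitignore) keeps
      # ignored build artifacts out of that context. Note that this container
      # and every TiKV store below mount the same ./docker/data host
      # directory at /data, so the "local:///data/backup/full" storage used
      # in the README is one shared directory that all stores and the
      # control container can read during backup and restore.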
+ dockerfile: ./docker/Dockerfile + volumes: + - ./docker/data:/data + - ./docker/logs:/logs + command: -c "/usr/bin/tail -f /dev/null" + depends_on: + - "tidb" + restart: on-failure + + pd0: + image: pingcap/pd:latest + ports: + - "2379" + volumes: + - ./docker/config/pd.toml:/pd.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --name=pd0 + - --client-urls=http://0.0.0.0:2379 + - --peer-urls=http://0.0.0.0:2380 + - --advertise-client-urls=http://pd0:2379 + - --advertise-peer-urls=http://pd0:2380 + - --initial-cluster=pd0=http://pd0:2380 + - --data-dir=/data/pd0 + - --config=/pd.toml + - --log-file=/logs/pd0.log + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv0: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv0:20160 + - --data-dir=/data/tikv0 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv0.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv1: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv1:20160 + - --data-dir=/data/tikv1 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv1.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv2: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv2:20160 + - --data-dir=/data/tikv2 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv2.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv3: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv3:20160 + - --data-dir=/data/tikv3 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv3.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tikv4: + image: pingcap/tikv:latest + volumes: + - ./docker/config/tikv.toml:/tikv.toml:ro + - ./docker/data:/data + - ./docker/logs:/logs + command: + - --addr=0.0.0.0:20160 + - --advertise-addr=tikv4:20160 + - --data-dir=/data/tikv4 + - --pd=pd0:2379 + - --config=/tikv.toml + - --log-file=/logs/tikv4.log + depends_on: + - "pd0" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + restart: on-failure + + tidb: + image: pingcap/tidb:latest + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./docker/config/tidb.toml:/tidb.toml:ro + - ./docker/logs:/logs + command: + - --store=tikv + - --path=pd0:2379 + - --config=/tidb.toml + - --log-file=/logs/tidb.log + - --advertise-address=tidb + depends_on: + - "tikv0" + - "tikv1" + - "tikv2" + - "tikv3" + - "tikv4" + # sysctls: + # net.core.somaxconn: 32768 + # ulimits: + # nofile: + # soft: 1000000 + # hard: 1000000 + 
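      # (The somaxconn / nofile settings commented out above mirror the usual
      # production tuning for TiDB components; they stay disabled here because
      # the local Docker host may not permit custom sysctls or raised ulimits.)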
restart: on-failure + + tidb-vision: + image: pingcap/tidb-vision:latest + environment: + PD_ENDPOINT: pd0:2379 + ports: + - "8010:8010" + restart: on-failure diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 000000000..16f2e2955 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,17 @@ +FROM golang:1.13.8-buster + +RUN apt-get update && apt-get install -y --no-install-recommends \ + git \ + curl \ + vim \ + less \ + default-mysql-client \ + && rm -rf /var/lib/apt/lists/* + +WORKDIR /go/src/github.com/pingcap/br +COPY . . + +# For loading data to TiDB +RUN git clone https://github.com/pingcap/go-ycsb.git /go/src/github.com/pingcap/go-ycsb + +ENTRYPOINT ["/bin/bash"] diff --git a/docker/config/pd.toml b/docker/config/pd.toml new file mode 100644 index 000000000..3c50e7e55 --- /dev/null +++ b/docker/config/pd.toml @@ -0,0 +1,81 @@ +# PD Configuration. + +name = "pd" +data-dir = "default.pd" + +client-urls = "http://127.0.0.1:2379" +# if not set, use ${client-urls} +advertise-client-urls = "" + +peer-urls = "http://127.0.0.1:2380" +# if not set, use ${peer-urls} +advertise-peer-urls = "" + +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" + +lease = 3 +tso-save-interval = "3s" + +[security] +# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty +cacert-path = "" +# Path of file that contains X509 certificate in PEM format. +cert-path = "" +# Path of file that contains X509 key in PEM format. +key-path = "" + +[log] +level = "info" + +# log format, one of json, text, console +#format = "text" + +# disable automatic timestamps in output +#disable-timestamp = false + +# file logging +[log.file] +#filename = "" +# max log file size in MB +#max-size = 300 +# max log file keep days +#max-days = 28 +# maximum number of old log files to retain +#max-backups = 7 +# rotate log by day +#log-rotate = true + +[schedule] +max-merge-region-size = 0 +max-merge-region-key = 0 +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +merge-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +# customized schedulers, the format is as below +# if empty, it will use balance-leader, balance-region, hot-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +# The number of replicas for each region. +max-replicas = 3 +# The label keys specified the location of a store. +# The placement priorities is implied by the order of label keys. +# For example, ["zone", "rack"] means that we should place replicas to +# different zones first, then to different racks if we don't have enough zones. +location-labels = [] + +[label-property] +# Do not assign region leaders to stores that have these tags. +# [[label-property.reject-leader]] +# key = "zone" +# value = "cn1 diff --git a/docker/config/tidb.toml b/docker/config/tidb.toml new file mode 100644 index 000000000..e9e0f9614 --- /dev/null +++ b/docker/config/tidb.toml @@ -0,0 +1,90 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# TiDB server port. +port = 4000 + +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "360s" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. 
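# For this quick start only a handful of tables are created, so leaving the
# option on is harmless, and each table (including test.usertable loaded by
# go-ycsb) gets its own region for BR to back up.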
+split-table = true + +# The limit of concurrent executed sessions. +token-limit = 1000 + +[log] +# Log level: debug, info, warn, error, fatal. +level = "info" + +# Log format, one of json, text, console. +format = "text" + +# Disable automatic timestamp in output +disable-timestamp = false + +# Stores slow query log into separated files. +slow-query-file = "" + +# Queries with execution time greater than this value will be logged. (Milliseconds) +slow-threshold = 300 + +# Queries with internal result greater than this value will be logged. +expensive-threshold = 10000 + +# Maximum query length recorded in log. +query-log-max-len = 2048 + +# File logging. +[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +# Max log file keep days. No clean up by default. +max-days = 0 + +# Maximum number of old log files to retain. No clean up by default. +max-backups = 0 + +[security] +# Path of file that contains list of trusted SSL CAs for connection with mysql client. +ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with mysql client. +ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with mysql client. +ssl-key = "" + +# Path of file that contains list of trusted SSL CAs for connection with cluster components. +cluster-ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +cluster-ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with cluster components. +cluster-ssl-key = "" + +[tikv-client] +# Max gRPC connections that will be established with each tikv-server. +grpc-connection-count = 16 + +# After a duration of this time in seconds if the client doesn't see any activity it pings +# the server to see if the transport is still alive. +grpc-keepalive-time = 10 + +# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds +# and if no activity is seen even after that the connection is closed. +grpc-keepalive-timeout = 3 + +# max time for commit command, must be twice bigger than raft election timeout. +commit-timeout = "41s" diff --git a/docker/config/tikv.toml b/docker/config/tikv.toml new file mode 100644 index 000000000..60e3cb0ef --- /dev/null +++ b/docker/config/tikv.toml @@ -0,0 +1,501 @@ +# TiKV config template +# Human-readable big numbers: +# File size(based on byte): KB, MB, GB, TB, PB +# e.g.: 1_048_576 = "1MB" +# Time(based on ms): ms, s, m, h +# e.g.: 78_000 = "1.3m" + +# log level: trace, debug, info, warn, error, off. +log-level = "info" +# file to store log, write to stderr if it's empty. +# log-file = "" + +[readpool.storage] +# size of thread pool for high-priority operations +# high-concurrency = 4 +# size of thread pool for normal-priority operations +# normal-concurrency = 4 +# size of thread pool for low-priority operations +# low-concurrency = 4 +# max running high-priority operations, reject if exceed +# max-tasks-high = 8000 +# max running normal-priority operations, reject if exceed +# max-tasks-normal = 8000 +# max running low-priority operations, reject if exceed +# max-tasks-low = 8000 +# size of stack size for each thread pool +# stack-size = "10MB" + +[readpool.coprocessor] +# Notice: if CPU_NUM > 8, default thread pool size for coprocessors +# will be set to CPU_NUM * 0.8. 
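# (For example, a hypothetical 10-core machine would default to
# 10 * 0.8 = 8 threads per coprocessor priority pool; with 8 cores or fewer
# the commented defaults below apply.)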
+ +# high-concurrency = 8 +# normal-concurrency = 8 +# low-concurrency = 8 +# max-tasks-high = 16000 +# max-tasks-normal = 16000 +# max-tasks-low = 16000 +# stack-size = "10MB" + +[server] +# set listening address. +# addr = "127.0.0.1:20160" +# set advertise listening address for client communication, if not set, use addr instead. +# advertise-addr = "" +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# compression type for grpc channel, available values are no, deflate and gzip. +# grpc-compression-type = "no" +# size of thread pool for grpc server. +# grpc-concurrency = 4 +# The number of max concurrent streams/requests on a client connection. +# grpc-concurrent-stream = 1024 +# The number of connections with each tikv server to send raft messages. +# grpc-raft-conn-num = 10 +# Amount to read ahead on individual grpc streams. +# grpc-stream-initial-window-size = "2MB" + +# How many snapshots can be sent concurrently. +# concurrent-send-snap-limit = 32 +# How many snapshots can be recv concurrently. +# concurrent-recv-snap-limit = 32 + +# max count of tasks being handled, new tasks will be rejected. +# end-point-max-tasks = 2000 + +# max recursion level allowed when decoding dag expression +# end-point-recursion-limit = 1000 + +# max time to handle coprocessor request before timeout +# end-point-request-max-handle-duration = "60s" + +# the max bytes that snapshot can be written to disk in one second, +# should be set based on your disk performance +# snap-max-write-bytes-per-sec = "100MB" + +# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }. +# labels = {} + +[storage] +# set the path to rocksdb directory. +# data-dir = "/tmp/tikv/store" + +# notify capacity of scheduler's channel +# scheduler-notify-capacity = 10240 + +# maximum number of messages can be processed in one tick +# scheduler-messages-per-tick = 1024 + +# the number of slots in scheduler latches, concurrency control for write. +# scheduler-concurrency = 2048000 + +# scheduler's worker pool size, should increase it in heavy write cases, +# also should less than total cpu cores. +# scheduler-worker-pool-size = 4 + +# When the pending write bytes exceeds this threshold, +# the "scheduler too busy" error is displayed. +# scheduler-pending-write-threshold = "100MB" + +[pd] +# pd endpoints +# endpoints = [] + +[metric] +# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing. +# interval = "15s" +# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing. +address = "pushgateway:9091" +# the Prometheus client push job name. Note: A node id will automatically append, e.g., "tikv_1". +# job = "tikv" + +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +# sync-log = true + +# set the path to raftdb directory, default value is data-dir/raft +# raftdb-path = "" + +# set store capacity, if no set, use disk capacity. +# capacity = 0 + +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 + +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# Region heartbeat tick interval for reporting to pd. +# pd-heartbeat-tick-interval = "60s" +# Store heartbeat tick interval for reporting to pd. 
+# pd-store-heartbeat-tick-interval = "10s" + +# When region size changes exceeds region-split-check-diff, we should check +# whether the region should be split or not. +# region-split-check-diff = "6MB" + +# Interval to check region whether need to be split or not. +# split-region-check-tick-interval = "10s" + +# When raft entry exceed the max size, reject to propose the entry. +# raft-entry-max-size = "8MB" + +# Interval to gc unnecessary raft log. +# raft-log-gc-tick-interval = "10s" +# A threshold to gc stale raft log, must >= 1. +# raft-log-gc-threshold = 50 +# When entry count exceed this value, gc will be forced trigger. +# raft-log-gc-count-limit = 72000 +# When the approximate size of raft log entries exceed this value, gc will be forced trigger. +# It's recommanded to set it to 3/4 of region-split-size. +# raft-log-gc-size-limit = "72MB" + +# When a peer hasn't been active for max-peer-down-duration, +# we will consider this peer to be down and report it to pd. +# max-peer-down-duration = "5m" + +# Interval to check whether start manual compaction for a region, +# region-compact-check-interval = "5m" +# Number of regions for each time to check. +# region-compact-check-step = 100 +# The minimum number of delete tombstones to trigger manual compaction. +# region-compact-min-tombstones = 10000 +# Interval to check whether should start a manual compaction for lock column family, +# if written bytes reach lock-cf-compact-threshold for lock column family, will fire +# a manual compaction for lock column family. +# lock-cf-compact-interval = "10m" +# lock-cf-compact-bytes-threshold = "256MB" + +# Interval (s) to check region whether the data are consistent. +# consistency-check-interval = 0 + +# Use delete range to drop a large number of continuous keys. +# use-delete-range = false + +# delay time before deleting a stale peer +# clean-stale-peer-delay = "10m" + +# Interval to cleanup import sst files. +# cleanup-import-sst-interval = "10m" + +[coprocessor] +# When it is true, it will try to split a region with table prefix if +# that region crosses tables. It is recommended to turn off this option +# if there will be a large number of tables created. +# split-region-on-table = true +# When the region's size exceeds region-max-size, we will split the region +# into two which the left region's size will be region-split-size or a little +# bit smaller. +# region-max-size = "144MB" +# region-split-size = "96MB" + +# Make region split more aggressive. +region-max-keys = 100 +region-split-keys = 80 + +[rocksdb] +# Maximum number of concurrent background jobs (compactions and flushes) +# max-background-jobs = 8 + +# This value represents the maximum number of threads that will concurrently perform a +# compaction job by breaking it into multiple, smaller ones that are run simultaneously. +# Default: 1 (i.e. no subcompactions) +# max-sub-compactions = 1 + +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db. +max-open-files = 1024 + +# Max size of rocksdb's MANIFEST file. 
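# (The MANIFEST is the log in which RocksDB records its file-level metadata;
# once it grows past this limit a fresh MANIFEST file is written out.)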
+# For detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST +# max-manifest-file-size = "20MB" + +# If true, the database will be created if it is missing. +# create-if-missing = true + +# rocksdb wal recovery mode +# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; +# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; +# 2 : PointInTimeRecovery, Recover to point-in-time consistency; +# 3 : SkipAnyCorruptedRecords, Recovery after a disaster; +# wal-recovery-mode = 2 + +# rocksdb write-ahead logs dir path +# This specifies the absolute dir path for write-ahead logs (WAL). +# If it is empty, the log files will be in the same dir as data. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-dir to a directory on a persistent storage. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-dir = "/tmp/tikv/store" + +# The following two fields affect how archived write-ahead logs will be deleted. +# 1. If both set to 0, logs will be deleted asap and will not get into the archive. +# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0, +# WAL files will be checked every 10 min and if total size is greater +# then wal-size-limit, they will be deleted starting with the +# earliest until size_limit is met. All empty files will be deleted. +# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then +# WAL files will be checked every wal-ttl-seconds / 2 and those that +# are older than wal-ttl-seconds will be deleted. +# 4. If both are not 0, WAL files will be checked every 10 min and both +# checks will be performed with ttl being first. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-ttl-seconds to a value greater than 0 (like 86400) and backup your db on a regular basis. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-ttl-seconds = 0 +# wal-size-limit = 0 + +# rocksdb max total wal size +# max-total-wal-size = "4GB" + +# Rocksdb Statistics provides cumulative stats over time. +# Turn statistics on will introduce about 5%-10% overhead for RocksDB, +# but it is worthy to know the internal status of RocksDB. +# enable-statistics = true + +# Dump statistics periodically in information logs. +# Same as rocksdb's default value (10 min). +# stats-dump-period = "10m" + +# Due to Rocksdb FAQ: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ, +# If you want to use rocksdb on multi disks or spinning disks, you should set value at +# least 2MB; +# compaction-readahead-size = 0 + +# This is the maximum buffer size that is used by WritableFileWrite +# writable-file-max-buffer-size = "1MB" + +# Use O_DIRECT for both reads and writes in background flush and compactions +# use-direct-io-for-flush-and-compaction = false + +# Limit the disk IO of compaction and flush. Compaction and flush can cause +# terrible spikes if they exceed a certain threshold. Consider setting this to +# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy +# write workload, limiting compaction and flush speed can cause write stalls too. +# rate-bytes-per-sec = 0 + +# Enable or disable the pipelined write +# enable-pipelined-write = true + +# Allows OS to incrementally sync files to disk while they are being +# written, asynchronously, in the background. 
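# (A value of 0 disables this incremental syncing, which is also RocksDB's
# default behaviour.)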
+# bytes-per-sync = "0MB" + +# Allows OS to incrementally sync WAL to disk while it is being written. +# wal-bytes-per-sync = "0KB" + +# Specify the maximal size of the Rocksdb info log file. If the log file +# is larger than `max_log_file_size`, a new info log file will be created. +# If max_log_file_size == 0, all logs will be written to one log file. +# Default: 1GB +# info-log-max-size = "1GB" + +# Time for the Rocksdb info log file to roll (in seconds). +# If specified with non-zero value, log file will be rolled +# if it has been active longer than `log_file_time_to_roll`. +# Default: 0 (disabled) +# info-log-roll-time = "0" + +# Maximal Rocksdb info log files to be kept. +# Default: 10 +# info-log-keep-log-file-num = 10 + +# This specifies the Rocksdb info LOG dir. +# If it is empty, the log files will be in the same dir as data. +# If it is non empty, the log files will be in the specified dir, +# and the db data dir's absolute path will be used as the log file +# name's prefix. +# Default: empty +# info-log-dir = "" + +# Column Family default used to store actual data of the database. +[rocksdb.defaultcf] +# compression method (if any) is used to compress a block. +# no: kNoCompression +# snappy: kSnappyCompression +# zlib: kZlibCompression +# bzip2: kBZip2Compression +# lz4: kLZ4Compression +# lz4hc: kLZ4HCCompression +# zstd: kZSTD + +# per level compression +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] + +# Approximate size of user data packed per block. Note that the +# block size specified here corresponds to uncompressed data. +# block-size = "64KB" + +# If you're doing point lookups you definitely want to turn bloom filters on, We use +# bloom filters to avoid unnecessary disk reads. Default bits_per_key is 10, which +# yields ~1% false positive rate. Larger bits_per_key values will reduce false positive +# rate, but increase memory usage and space amplification. +# bloom-filter-bits-per-key = 10 + +# false means one sst file one bloom filter, true means evry block has a corresponding bloom filter +# block-based-bloom-filter = false + +# level0-file-num-compaction-trigger = 4 + +# Soft limit on number of level-0 files. We start slowing down writes at this point. +# level0-slowdown-writes-trigger = 20 + +# Maximum number of level-0 files. We stop writes at this point. +# level0-stop-writes-trigger = 36 + +# Amount of data to build up in memory (backed by an unsorted log +# on disk) before converting to a sorted on-disk file. +# write-buffer-size = "128MB" + +# The maximum number of write buffers that are built up in memory. +# max-write-buffer-number = 5 + +# The minimum number of write buffers that will be merged together +# before writing to storage. +# min-write-buffer-number-to-merge = 1 + +# Control maximum total data size for base level (level 1). +# max-bytes-for-level-base = "512MB" + +# Target file size for compaction. +# target-file-size-base = "8MB" + +# Max bytes for compaction.max_compaction_bytes +# max-compaction-bytes = "2GB" + +# There are four different algorithms to pick files to compact. +# 0 : ByCompensatedSize +# 1 : OldestLargestSeqFirst +# 2 : OldestSmallestSeqFirst +# 3 : MinOverlappingRatio +# compaction-pri = 3 + +# block-cache used to cache uncompressed blocks, big block-cache can speed up read. +# in normal cases should tune to 30%-50% system's total memory. +# block-cache-size = "1GB" + +# Indicating if we'd put index/filter blocks to the block cache. 
+# If not specified, each "table reader" object will pre-load index/filter block +# during table initialization. +# cache-index-and-filter-blocks = true + +# Pin level0 filter and index blocks in cache. +# pin-l0-filter-and-index-blocks = true + +# Enable read amplication statistics. +# value => memory usage (percentage of loaded blocks memory) +# 1 => 12.50 % +# 2 => 06.25 % +# 4 => 03.12 % +# 8 => 01.56 % +# 16 => 00.78 % +# read-amp-bytes-per-bit = 0 + +# Pick target size of each level dynamically. +# dynamic-level-bytes = true + +# Options for Column Family write +# Column Family write used to store commit informations in MVCC model +[rocksdb.writecf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# in normal cases should tune to 10%-30% system's total memory. +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 3 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[rocksdb.lockcf] +# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] +# block-size = "16KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "128MB" +# target-file-size-base = "8MB" +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 1 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[raftdb] +# max-sub-compactions = 1 +max-open-files = 1024 +# max-manifest-file-size = "20MB" +# create-if-missing = true + +# enable-statistics = true +# stats-dump-period = "10m" + +# compaction-readahead-size = 0 +# writable-file-max-buffer-size = "1MB" +# use-direct-io-for-flush-and-compaction = false +# enable-pipelined-write = true +# allow-concurrent-memtable-write = false +# bytes-per-sync = "0MB" +# wal-bytes-per-sync = "0KB" + +# info-log-max-size = "1GB" +# info-log-roll-time = "0" +# info-log-keep-log-file-num = 10 +# info-log-dir = "" + +[raftdb.defaultcf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# should tune to 256MB~2GB. +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[security] +# set the path for certificates. Empty string means disabling secure connectoins. +# ca-path = "" +# cert-path = "" +# key-path = "" + +[import] +# the directory to store importing kv data. +# import-dir = "/tmp/tikv/import" +# number of threads to handle RPC requests. +# num-threads = 8 +# stream channel window size, stream will be blocked on channel full. 
+# stream-channel-window = 128 From 2265a96b719969b440cb86bcec28f09b9b019ebb Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 13:54:53 +0800 Subject: [PATCH 2/6] cmd: disable some TiDB log Signed-off-by: Neil Shen --- cmd/cmd.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/cmd/cmd.go b/cmd/cmd.go index 83355e5dd..02b1a3f0b 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -86,16 +86,19 @@ func Init(cmd *cobra.Command) (err error) { err = e return } + tidbLogCfg := logutil.LogConfig{} if len(slowLogFilename) != 0 { - slowCfg := logutil.LogConfig{SlowQueryFile: slowLogFilename} - e = logutil.InitLogger(&slowCfg) - if e != nil { - err = e - return - } + tidbLogCfg.SlowQueryFile = slowLogFilename } else { // Hack! Discard slow log by setting log level to PanicLevel logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel) + // Disable annoying TiDB Log. + tidbLogCfg.Level = "fatal" + } + e = logutil.InitLogger(&tidbLogCfg) + if e != nil { + err = e + return } // Initialize the pprof server. From 5a691f339ec5a13c70b485d3ee5fb6aaee19f804 Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 14:13:00 +0800 Subject: [PATCH 3/6] 2 minutes Signed-off-by: Neil Shen --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0c5e9c485..34736fe92 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. -## Get started in 2 minute! +## Get started in 2 minutes! ```sh # Start TiDB cluster From 803f6955110bc3115b83a05da119dbd3534abadb Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 17:45:31 +0800 Subject: [PATCH 4/6] address comments Signed-off-by: Neil Shen --- README.md | 8 +- docker/config/pd.toml | 73 +----- docker/config/tidb.toml | 81 ------- docker/config/tikv.toml | 481 +--------------------------------------- 4 files changed, 10 insertions(+), 633 deletions(-) diff --git a/README.md b/README.md index 34736fe92..7606573d4 100644 --- a/README.md +++ b/README.md @@ -36,7 +36,7 @@ Notice BR supports building with Go version `Go >= 1.13` When BR is built successfully, you can find binary in the `bin` directory. -## Get started in 2 minutes! +## Quick start ```sh # Start TiDB cluster @@ -44,7 +44,7 @@ docker-compose -f docker-compose.yaml rm -s -v && \ docker-compose -f docker-compose.yaml build && \ docker-compose -f docker-compose.yaml up --remove-orphans -# Attch to control container runs BR +# Attach to control container to run BR docker exec -it br_control_1 bash # Load testing data to TiDB @@ -54,7 +54,7 @@ bin/go-ycsb load mysql -p workload=core \ -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ -p recordcount=100000 -p threadcount=100 -# How many rows do we get? +# How many rows do we get? 100000 rows. mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" # Build BR and backup! @@ -70,7 +70,7 @@ mysql -uroot -htidb -P4000 -E -e "DROP DATABASE test; SHOW DATABASES;" bin/br restore full --pd pd0:2379 --storage "local:///data/backup/full" \ --log-file "/logs/br_restore.log" -# How many rows do we get again? +# How many rows do we get again? Expected to be 100000 rows. mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" ``` diff --git a/docker/config/pd.toml b/docker/config/pd.toml index 3c50e7e55..e6fb173d1 100644 --- a/docker/config/pd.toml +++ b/docker/config/pd.toml @@ -1,81 +1,18 @@ # PD Configuration. 
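# The trimmed configuration below keeps only the values that differ from PD's
# defaults. Region merge is switched off entirely (the three merge-related
# settings are set to 0), presumably so that the many small regions produced
# by the aggressive region-split-keys value in docker/config/tikv.toml stay
# split while BR is being exercised.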
- -name = "pd" -data-dir = "default.pd" - -client-urls = "http://127.0.0.1:2379" -# if not set, use ${client-urls} -advertise-client-urls = "" - -peer-urls = "http://127.0.0.1:2380" -# if not set, use ${peer-urls} -advertise-peer-urls = "" - -initial-cluster = "pd=http://127.0.0.1:2380" -initial-cluster-state = "new" - -lease = 3 -tso-save-interval = "3s" - -[security] -# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty -cacert-path = "" -# Path of file that contains X509 certificate in PEM format. -cert-path = "" -# Path of file that contains X509 key in PEM format. -key-path = "" - -[log] -level = "info" - -# log format, one of json, text, console -#format = "text" - -# disable automatic timestamps in output -#disable-timestamp = false - -# file logging -[log.file] -#filename = "" -# max log file size in MB -#max-size = 300 -# max log file keep days -#max-days = 28 -# maximum number of old log files to retain -#max-backups = 7 -# rotate log by day -#log-rotate = true - [schedule] +# Disbale Region Merge max-merge-region-size = 0 max-merge-region-key = 0 -split-merge-interval = "1h" -max-snapshot-count = 3 -max-pending-peer-count = 16 +merge-schedule-limit = 0 + +max-snapshot-count = 10 +max-pending-peer-count = 32 max-store-down-time = "30m" leader-schedule-limit = 4 region-schedule-limit = 4 replica-schedule-limit = 8 -merge-schedule-limit = 8 tolerant-size-ratio = 5.0 -# customized schedulers, the format is as below -# if empty, it will use balance-leader, balance-region, hot-region as default -# [[schedule.schedulers]] -# type = "evict-leader" -# args = ["1"] - [replication] # The number of replicas for each region. max-replicas = 3 -# The label keys specified the location of a store. -# The placement priorities is implied by the order of label keys. -# For example, ["zone", "rack"] means that we should place replicas to -# different zones first, then to different racks if we don't have enough zones. -location-labels = [] - -[label-property] -# Do not assign region leaders to stores that have these tags. -# [[label-property.reject-leader]] -# key = "zone" -# value = "cn1 diff --git a/docker/config/tidb.toml b/docker/config/tidb.toml index e9e0f9614..3ef20cc07 100644 --- a/docker/config/tidb.toml +++ b/docker/config/tidb.toml @@ -1,11 +1,3 @@ -# TiDB Configuration. - -# TiDB server host. -host = "0.0.0.0" - -# TiDB server port. -port = 4000 - # Run ddl worker on this tidb-server. run-ddl = true @@ -15,76 +7,3 @@ lease = "360s" # When create table, split a separated region for it. It is recommended to # turn off this option if there will be a large number of tables created. split-table = true - -# The limit of concurrent executed sessions. -token-limit = 1000 - -[log] -# Log level: debug, info, warn, error, fatal. -level = "info" - -# Log format, one of json, text, console. -format = "text" - -# Disable automatic timestamp in output -disable-timestamp = false - -# Stores slow query log into separated files. -slow-query-file = "" - -# Queries with execution time greater than this value will be logged. (Milliseconds) -slow-threshold = 300 - -# Queries with internal result greater than this value will be logged. -expensive-threshold = 10000 - -# Maximum query length recorded in log. -query-log-max-len = 2048 - -# File logging. -[log.file] -# Log file name. -filename = "" - -# Max log file size in MB (upper limit to 4096MB). -max-size = 300 - -# Max log file keep days. No clean up by default. 
-max-days = 0 - -# Maximum number of old log files to retain. No clean up by default. -max-backups = 0 - -[security] -# Path of file that contains list of trusted SSL CAs for connection with mysql client. -ssl-ca = "" - -# Path of file that contains X509 certificate in PEM format for connection with mysql client. -ssl-cert = "" - -# Path of file that contains X509 key in PEM format for connection with mysql client. -ssl-key = "" - -# Path of file that contains list of trusted SSL CAs for connection with cluster components. -cluster-ssl-ca = "" - -# Path of file that contains X509 certificate in PEM format for connection with cluster components. -cluster-ssl-cert = "" - -# Path of file that contains X509 key in PEM format for connection with cluster components. -cluster-ssl-key = "" - -[tikv-client] -# Max gRPC connections that will be established with each tikv-server. -grpc-connection-count = 16 - -# After a duration of this time in seconds if the client doesn't see any activity it pings -# the server to see if the transport is still alive. -grpc-keepalive-time = 10 - -# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds -# and if no activity is seen even after that the connection is closed. -grpc-keepalive-timeout = 3 - -# max time for commit command, must be twice bigger than raft election timeout. -commit-timeout = "41s" diff --git a/docker/config/tikv.toml b/docker/config/tikv.toml index 60e3cb0ef..6528e447f 100644 --- a/docker/config/tikv.toml +++ b/docker/config/tikv.toml @@ -1,211 +1,13 @@ -# TiKV config template -# Human-readable big numbers: -# File size(based on byte): KB, MB, GB, TB, PB -# e.g.: 1_048_576 = "1MB" -# Time(based on ms): ms, s, m, h -# e.g.: 78_000 = "1.3m" - -# log level: trace, debug, info, warn, error, off. -log-level = "info" -# file to store log, write to stderr if it's empty. -# log-file = "" - -[readpool.storage] -# size of thread pool for high-priority operations -# high-concurrency = 4 -# size of thread pool for normal-priority operations -# normal-concurrency = 4 -# size of thread pool for low-priority operations -# low-concurrency = 4 -# max running high-priority operations, reject if exceed -# max-tasks-high = 8000 -# max running normal-priority operations, reject if exceed -# max-tasks-normal = 8000 -# max running low-priority operations, reject if exceed -# max-tasks-low = 8000 -# size of stack size for each thread pool -# stack-size = "10MB" - -[readpool.coprocessor] -# Notice: if CPU_NUM > 8, default thread pool size for coprocessors -# will be set to CPU_NUM * 0.8. - -# high-concurrency = 8 -# normal-concurrency = 8 -# low-concurrency = 8 -# max-tasks-high = 16000 -# max-tasks-normal = 16000 -# max-tasks-low = 16000 -# stack-size = "10MB" - -[server] -# set listening address. -# addr = "127.0.0.1:20160" -# set advertise listening address for client communication, if not set, use addr instead. -# advertise-addr = "" -# notify capacity, 40960 is suitable for about 7000 regions. -# notify-capacity = 40960 -# maximum number of messages can be processed in one tick. -# messages-per-tick = 4096 - -# compression type for grpc channel, available values are no, deflate and gzip. -# grpc-compression-type = "no" -# size of thread pool for grpc server. -# grpc-concurrency = 4 -# The number of max concurrent streams/requests on a client connection. -# grpc-concurrent-stream = 1024 -# The number of connections with each tikv server to send raft messages. 
-# grpc-raft-conn-num = 10 -# Amount to read ahead on individual grpc streams. -# grpc-stream-initial-window-size = "2MB" - -# How many snapshots can be sent concurrently. -# concurrent-send-snap-limit = 32 -# How many snapshots can be recv concurrently. -# concurrent-recv-snap-limit = 32 - -# max count of tasks being handled, new tasks will be rejected. -# end-point-max-tasks = 2000 - -# max recursion level allowed when decoding dag expression -# end-point-recursion-limit = 1000 - -# max time to handle coprocessor request before timeout -# end-point-request-max-handle-duration = "60s" - -# the max bytes that snapshot can be written to disk in one second, -# should be set based on your disk performance -# snap-max-write-bytes-per-sec = "100MB" - -# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }. -# labels = {} - -[storage] -# set the path to rocksdb directory. -# data-dir = "/tmp/tikv/store" - -# notify capacity of scheduler's channel -# scheduler-notify-capacity = 10240 - -# maximum number of messages can be processed in one tick -# scheduler-messages-per-tick = 1024 - -# the number of slots in scheduler latches, concurrency control for write. -# scheduler-concurrency = 2048000 - -# scheduler's worker pool size, should increase it in heavy write cases, -# also should less than total cpu cores. -# scheduler-worker-pool-size = 4 - -# When the pending write bytes exceeds this threshold, -# the "scheduler too busy" error is displayed. -# scheduler-pending-write-threshold = "100MB" - -[pd] -# pd endpoints -# endpoints = [] - -[metric] -# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing. -# interval = "15s" -# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing. -address = "pushgateway:9091" -# the Prometheus client push job name. Note: A node id will automatically append, e.g., "tikv_1". -# job = "tikv" - [raftstore] # true (default value) for high reliability, this can prevent data loss when power failure. -# sync-log = true - -# set the path to raftdb directory, default value is data-dir/raft -# raftdb-path = "" - -# set store capacity, if no set, use disk capacity. -# capacity = 0 - -# notify capacity, 40960 is suitable for about 7000 regions. -# notify-capacity = 40960 - -# maximum number of messages can be processed in one tick. -# messages-per-tick = 4096 - -# Region heartbeat tick interval for reporting to pd. -# pd-heartbeat-tick-interval = "60s" -# Store heartbeat tick interval for reporting to pd. -# pd-store-heartbeat-tick-interval = "10s" - -# When region size changes exceeds region-split-check-diff, we should check -# whether the region should be split or not. -# region-split-check-diff = "6MB" - -# Interval to check region whether need to be split or not. -# split-region-check-tick-interval = "10s" - -# When raft entry exceed the max size, reject to propose the entry. -# raft-entry-max-size = "8MB" - -# Interval to gc unnecessary raft log. -# raft-log-gc-tick-interval = "10s" -# A threshold to gc stale raft log, must >= 1. -# raft-log-gc-threshold = 50 -# When entry count exceed this value, gc will be forced trigger. -# raft-log-gc-count-limit = 72000 -# When the approximate size of raft log entries exceed this value, gc will be forced trigger. -# It's recommanded to set it to 3/4 of region-split-size. -# raft-log-gc-size-limit = "72MB" - -# When a peer hasn't been active for max-peer-down-duration, -# we will consider this peer to be down and report it to pd. 
-# max-peer-down-duration = "5m" - -# Interval to check whether start manual compaction for a region, -# region-compact-check-interval = "5m" -# Number of regions for each time to check. -# region-compact-check-step = 100 -# The minimum number of delete tombstones to trigger manual compaction. -# region-compact-min-tombstones = 10000 -# Interval to check whether should start a manual compaction for lock column family, -# if written bytes reach lock-cf-compact-threshold for lock column family, will fire -# a manual compaction for lock column family. -# lock-cf-compact-interval = "10m" -# lock-cf-compact-bytes-threshold = "256MB" - -# Interval (s) to check region whether the data are consistent. -# consistency-check-interval = 0 - -# Use delete range to drop a large number of continuous keys. -# use-delete-range = false - -# delay time before deleting a stale peer -# clean-stale-peer-delay = "10m" - -# Interval to cleanup import sst files. -# cleanup-import-sst-interval = "10m" +sync-log = true [coprocessor] -# When it is true, it will try to split a region with table prefix if -# that region crosses tables. It is recommended to turn off this option -# if there will be a large number of tables created. -# split-region-on-table = true -# When the region's size exceeds region-max-size, we will split the region -# into two which the left region's size will be region-split-size or a little -# bit smaller. -# region-max-size = "144MB" -# region-split-size = "96MB" - # Make region split more aggressive. region-max-keys = 100 region-split-keys = 80 [rocksdb] -# Maximum number of concurrent background jobs (compactions and flushes) -# max-background-jobs = 8 - -# This value represents the maximum number of threads that will concurrently perform a -# compaction job by breaking it into multiple, smaller ones that are run simultaneously. -# Default: 1 (i.e. no subcompactions) -# max-sub-compactions = 1 - # Number of open files that can be used by the DB. You may need to # increase this if your database has a large working set. Value -1 means # files opened are always kept open. You can estimate number of files based @@ -216,286 +18,5 @@ region-split-keys = 80 # take several minutes to open the db. max-open-files = 1024 -# Max size of rocksdb's MANIFEST file. -# For detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST -# max-manifest-file-size = "20MB" - -# If true, the database will be created if it is missing. -# create-if-missing = true - -# rocksdb wal recovery mode -# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; -# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; -# 2 : PointInTimeRecovery, Recover to point-in-time consistency; -# 3 : SkipAnyCorruptedRecords, Recovery after a disaster; -# wal-recovery-mode = 2 - -# rocksdb write-ahead logs dir path -# This specifies the absolute dir path for write-ahead logs (WAL). -# If it is empty, the log files will be in the same dir as data. -# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set -# wal-dir to a directory on a persistent storage. -# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database -# wal-dir = "/tmp/tikv/store" - -# The following two fields affect how archived write-ahead logs will be deleted. -# 1. If both set to 0, logs will be deleted asap and will not get into the archive. -# 2. 
If wal-ttl-seconds is 0 and wal-size-limit is not 0, -# WAL files will be checked every 10 min and if total size is greater -# then wal-size-limit, they will be deleted starting with the -# earliest until size_limit is met. All empty files will be deleted. -# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then -# WAL files will be checked every wal-ttl-seconds / 2 and those that -# are older than wal-ttl-seconds will be deleted. -# 4. If both are not 0, WAL files will be checked every 10 min and both -# checks will be performed with ttl being first. -# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set -# wal-ttl-seconds to a value greater than 0 (like 86400) and backup your db on a regular basis. -# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database -# wal-ttl-seconds = 0 -# wal-size-limit = 0 - -# rocksdb max total wal size -# max-total-wal-size = "4GB" - -# Rocksdb Statistics provides cumulative stats over time. -# Turn statistics on will introduce about 5%-10% overhead for RocksDB, -# but it is worthy to know the internal status of RocksDB. -# enable-statistics = true - -# Dump statistics periodically in information logs. -# Same as rocksdb's default value (10 min). -# stats-dump-period = "10m" - -# Due to Rocksdb FAQ: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ, -# If you want to use rocksdb on multi disks or spinning disks, you should set value at -# least 2MB; -# compaction-readahead-size = 0 - -# This is the maximum buffer size that is used by WritableFileWrite -# writable-file-max-buffer-size = "1MB" - -# Use O_DIRECT for both reads and writes in background flush and compactions -# use-direct-io-for-flush-and-compaction = false - -# Limit the disk IO of compaction and flush. Compaction and flush can cause -# terrible spikes if they exceed a certain threshold. Consider setting this to -# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy -# write workload, limiting compaction and flush speed can cause write stalls too. -# rate-bytes-per-sec = 0 - -# Enable or disable the pipelined write -# enable-pipelined-write = true - -# Allows OS to incrementally sync files to disk while they are being -# written, asynchronously, in the background. -# bytes-per-sync = "0MB" - -# Allows OS to incrementally sync WAL to disk while it is being written. -# wal-bytes-per-sync = "0KB" - -# Specify the maximal size of the Rocksdb info log file. If the log file -# is larger than `max_log_file_size`, a new info log file will be created. -# If max_log_file_size == 0, all logs will be written to one log file. -# Default: 1GB -# info-log-max-size = "1GB" - -# Time for the Rocksdb info log file to roll (in seconds). -# If specified with non-zero value, log file will be rolled -# if it has been active longer than `log_file_time_to_roll`. -# Default: 0 (disabled) -# info-log-roll-time = "0" - -# Maximal Rocksdb info log files to be kept. -# Default: 10 -# info-log-keep-log-file-num = 10 - -# This specifies the Rocksdb info LOG dir. -# If it is empty, the log files will be in the same dir as data. -# If it is non empty, the log files will be in the specified dir, -# and the db data dir's absolute path will be used as the log file -# name's prefix. -# Default: empty -# info-log-dir = "" - -# Column Family default used to store actual data of the database. -[rocksdb.defaultcf] -# compression method (if any) is used to compress a block. 
-# no: kNoCompression -# snappy: kSnappyCompression -# zlib: kZlibCompression -# bzip2: kBZip2Compression -# lz4: kLZ4Compression -# lz4hc: kLZ4HCCompression -# zstd: kZSTD - -# per level compression -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] - -# Approximate size of user data packed per block. Note that the -# block size specified here corresponds to uncompressed data. -# block-size = "64KB" - -# If you're doing point lookups you definitely want to turn bloom filters on, We use -# bloom filters to avoid unnecessary disk reads. Default bits_per_key is 10, which -# yields ~1% false positive rate. Larger bits_per_key values will reduce false positive -# rate, but increase memory usage and space amplification. -# bloom-filter-bits-per-key = 10 - -# false means one sst file one bloom filter, true means evry block has a corresponding bloom filter -# block-based-bloom-filter = false - -# level0-file-num-compaction-trigger = 4 - -# Soft limit on number of level-0 files. We start slowing down writes at this point. -# level0-slowdown-writes-trigger = 20 - -# Maximum number of level-0 files. We stop writes at this point. -# level0-stop-writes-trigger = 36 - -# Amount of data to build up in memory (backed by an unsorted log -# on disk) before converting to a sorted on-disk file. -# write-buffer-size = "128MB" - -# The maximum number of write buffers that are built up in memory. -# max-write-buffer-number = 5 - -# The minimum number of write buffers that will be merged together -# before writing to storage. -# min-write-buffer-number-to-merge = 1 - -# Control maximum total data size for base level (level 1). -# max-bytes-for-level-base = "512MB" - -# Target file size for compaction. -# target-file-size-base = "8MB" - -# Max bytes for compaction.max_compaction_bytes -# max-compaction-bytes = "2GB" - -# There are four different algorithms to pick files to compact. -# 0 : ByCompensatedSize -# 1 : OldestLargestSeqFirst -# 2 : OldestSmallestSeqFirst -# 3 : MinOverlappingRatio -# compaction-pri = 3 - -# block-cache used to cache uncompressed blocks, big block-cache can speed up read. -# in normal cases should tune to 30%-50% system's total memory. -# block-cache-size = "1GB" - -# Indicating if we'd put index/filter blocks to the block cache. -# If not specified, each "table reader" object will pre-load index/filter block -# during table initialization. -# cache-index-and-filter-blocks = true - -# Pin level0 filter and index blocks in cache. -# pin-l0-filter-and-index-blocks = true - -# Enable read amplication statistics. -# value => memory usage (percentage of loaded blocks memory) -# 1 => 12.50 % -# 2 => 06.25 % -# 4 => 03.12 % -# 8 => 01.56 % -# 16 => 00.78 % -# read-amp-bytes-per-bit = 0 - -# Pick target size of each level dynamically. -# dynamic-level-bytes = true - -# Options for Column Family write -# Column Family write used to store commit informations in MVCC model -[rocksdb.writecf] -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] -# block-size = "64KB" -# write-buffer-size = "128MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 -# max-bytes-for-level-base = "512MB" -# target-file-size-base = "8MB" - -# in normal cases should tune to 10%-30% system's total memory. 
-# block-cache-size = "256MB" -# level0-file-num-compaction-trigger = 4 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 3 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true - -[rocksdb.lockcf] -# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] -# block-size = "16KB" -# write-buffer-size = "128MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 -# max-bytes-for-level-base = "128MB" -# target-file-size-base = "8MB" -# block-cache-size = "256MB" -# level0-file-num-compaction-trigger = 1 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 0 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true - [raftdb] -# max-sub-compactions = 1 max-open-files = 1024 -# max-manifest-file-size = "20MB" -# create-if-missing = true - -# enable-statistics = true -# stats-dump-period = "10m" - -# compaction-readahead-size = 0 -# writable-file-max-buffer-size = "1MB" -# use-direct-io-for-flush-and-compaction = false -# enable-pipelined-write = true -# allow-concurrent-memtable-write = false -# bytes-per-sync = "0MB" -# wal-bytes-per-sync = "0KB" - -# info-log-max-size = "1GB" -# info-log-roll-time = "0" -# info-log-keep-log-file-num = 10 -# info-log-dir = "" - -[raftdb.defaultcf] -# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] -# block-size = "64KB" -# write-buffer-size = "128MB" -# max-write-buffer-number = 5 -# min-write-buffer-number-to-merge = 1 -# max-bytes-for-level-base = "512MB" -# target-file-size-base = "8MB" - -# should tune to 256MB~2GB. -# block-cache-size = "256MB" -# level0-file-num-compaction-trigger = 4 -# level0-slowdown-writes-trigger = 20 -# level0-stop-writes-trigger = 36 -# cache-index-and-filter-blocks = true -# pin-l0-filter-and-index-blocks = true -# compaction-pri = 0 -# read-amp-bytes-per-bit = 0 -# dynamic-level-bytes = true - -[security] -# set the path for certificates. Empty string means disabling secure connectoins. -# ca-path = "" -# cert-path = "" -# key-path = "" - -[import] -# the directory to store importing kv data. -# import-dir = "/tmp/tikv/import" -# number of threads to handle RPC requests. -# num-threads = 8 -# stream channel window size, stream will be blocked on channel full. -# stream-channel-window = 128 From 74122c573e3e56921a5db43634d7c59385d118be Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 17:53:05 +0800 Subject: [PATCH 5/6] docker: build go-ycsb automatically Signed-off-by: Neil Shen --- README.md | 5 +---- docker/Dockerfile | 11 +++++++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 7606573d4..6207d98eb 100644 --- a/README.md +++ b/README.md @@ -48,9 +48,7 @@ docker-compose -f docker-compose.yaml up --remove-orphans docker exec -it br_control_1 bash # Load testing data to TiDB -cd /go/src/github.com/pingcap/go-ycsb && \ -make && \ -bin/go-ycsb load mysql -p workload=core \ +go-ycsb load mysql -p workload=core \ -p mysql.host=tidb -p mysql.port=4000 -p mysql.user=root \ -p recordcount=100000 -p threadcount=100 @@ -58,7 +56,6 @@ bin/go-ycsb load mysql -p workload=core \ mysql -uroot -htidb -P4000 -E -e "SELECT COUNT(*) FROM test.usertable" # Build BR and backup! 
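# (With go-ycsb now compiled in the image's builder stage and copied to
# /go/bin, the load step above shrinks to a single go-ycsb invocation, and
# because the Dockerfile's WORKDIR is already the BR source tree, the explicit
# `cd` before `make release` below can be dropped as well. After the backup
# finishes, the result can be inspected on the shared volume, e.g. with
# `ls -R /data/backup/full`.)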
-cd /go/src/github.com/pingcap/br && \ make release && \ bin/br backup full --pd pd0:2379 --storage "local:///data/backup/full" \ --log-file "/logs/br_backup.log" diff --git a/docker/Dockerfile b/docker/Dockerfile index 16f2e2955..c93d22ab4 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,3 +1,11 @@ +FROM golang:1.13.8-buster as builder + +# For loading data to TiDB +WORKDIR /go/src/github.com/pingcap/ +RUN git clone https://github.com/pingcap/go-ycsb.git && \ + cd go-ycsb && \ + make + FROM golang:1.13.8-buster RUN apt-get update && apt-get install -y --no-install-recommends \ @@ -11,7 +19,6 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ WORKDIR /go/src/github.com/pingcap/br COPY . . -# For loading data to TiDB -RUN git clone https://github.com/pingcap/go-ycsb.git /go/src/github.com/pingcap/go-ycsb +COPY --from=builder /go/src/github.com/pingcap/go-ycsb/bin/go-ycsb /go/bin/go-ycsb ENTRYPOINT ["/bin/bash"] From 4deb6a990dd5ee62c2f534a50e3b1011558c7abc Mon Sep 17 00:00:00 2001 From: Neil Shen Date: Fri, 6 Mar 2020 18:06:56 +0800 Subject: [PATCH 6/6] cmd: add TODO about TiDB logs Signed-off-by: Neil Shen --- cmd/cmd.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/cmd.go b/cmd/cmd.go index 02b1a3f0b..3fa287ca5 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -93,6 +93,7 @@ func Init(cmd *cobra.Command) (err error) { // Hack! Discard slow log by setting log level to PanicLevel logutil.SlowQueryLogger.SetLevel(logrus.PanicLevel) // Disable annoying TiDB Log. + // TODO: some error logs outputs randomly, we need to fix them in TiDB. tidbLogCfg.Level = "fatal" } e = logutil.InitLogger(&tidbLogCfg)
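// Taken together, the two cmd/cmd.go patches make BR configure the embedded
// TiDB logging exactly once: when a slow-query log file is supplied it is
// routed to that file; otherwise the slow-query logger is silenced at
// PanicLevel and the remaining embedded TiDB output is raised to the "fatal"
// level. The TODO added here records that some TiDB error logs still appear
// despite this and need a fix on the TiDB side.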