mydumper Usage Notes
References:
http://t.zoukankan.com/liang545621-p-7497461.html
https://www.jianshu.com/p/27e6ac408969
http://www.rzrgm.cn/xiaoyaojinzhazhadehangcheng/p/15098594.html
https://developer.51cto.com/article/685874.html
Quick Reference for Common mydumper Commands
# 1. Back up several specific tables
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98,sbtest99,sbtest100 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
# 2. Back up all databases, including triggers, events, stored procedures and functions, and the system schemas
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 3. Back up all databases, including triggers, events, stored procedures and functions, excluding the system schemas (note: this regex only filters out mysql and test; see example 8 for a fuller exclusion)
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 4. Back up a specific database
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
# 5. Back up table structures only
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-data --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 6. Back up table data only
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-schemas --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# 7. Back up a single table with compression
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --compress --trx-consistency-only
# 8. Exclude one or more databases from the backup
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql\.|sys\.|information_schema.|performance_schema.))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
Test Environment
10.105.114.34  mydumper       64 cores, 256 GB RAM, 3.5 TB nvme0n1
10.105.114.35  mysql 5.7.29   64 cores, 256 GB RAM, 3.5 TB nvme0n1
# Create the backup account
create user 'bak'@'10.%' identified by 'cjLXUFE0uQja6DR';
grant all privileges on *.* to 'bak'@'10.%';
# Create the test database
create database sbtest;
# Tables used in the backup tests
sbtest1~sbtest100
# 50 million rows per table, each table about 12 GB
# Target instance IP: 10.105.114.35, port: 7306
# Backup target directories: [root@spa-34-114-105 data]# mkdir -p /data/backup/{data,log}
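The sbtest1~sbtest100 naming suggests the test data was generated with sysbench; the exact command is not recorded above, so the following is only an illustrative sketch (sysbench 1.0+ syntax, options would need adjusting to the environment):
# Hypothetical sysbench call creating 100 tables of 50 million rows each in sbtest
sysbench oltp_read_write \
  --mysql-host=10.105.114.35 --mysql-port=7306 \
  --mysql-user=bak --mysql-password=cjLXUFE0uQja6DR \
  --mysql-db=sbtest \
  --tables=100 --table-size=50000000 \
  --threads=32 prepare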
Test Results Overview
# 3 tables, 28 GB in total, 3 threads: about 8 minutes.
Start time: 2022-07-29 15:02:58
End time:   2022-07-29 15:10:43
# 1.2 TB business database, 100 tables with 50 million rows each, 100 threads: roughly 21 minutes.
Start time: 2022-07-29 15:34:44
End time:   2022-07-29 15:55:41
1. Back up several specific tables
# 1. Back up several specific tables
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98,sbtest99,sbtest100 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 3 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
2. Back up all databases, including triggers, events, stored procedures and functions, and the system schemas
# 2. Back up all databases, including triggers, events, stored procedures and functions, and the system schemas
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
3. Back up all databases, including triggers, events, stored procedures and functions, excluding the system schemas
# 3. Back up all databases, including triggers, events, stored procedures and functions, excluding the system schemas (this regex only filters out mysql and test; see example 8 for a fuller exclusion)
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
4. Back up a specific database
# 4. Back up a specific database
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
5. Back up table structures only
# 5. Back up table structures only
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-data --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
6. Back up table data only
# 6. Back up table data only
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --no-schemas --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
7. Back up a single table with compression
# 7. Back up a single table with compression
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database sbtest --tables-list sbtest98 --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --compress --trx-consistency-only
8. Exclude one or more databases from the backup
# 8. Exclude one or more databases from the backup
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql\.|sys\.|information_schema.|performance_schema.))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --trx-consistency-only
Overview of the mydumper Workflow
The main steps mydumper performs are:
1. The main thread issues FLUSH TABLES WITH READ LOCK, taking a global read lock to block DML writes and guarantee a consistent view of the data.
2. It reads the current binary log file name and write position and records them in the metadata file, for use in point-in-time recovery.
3. N dump threads (configurable via --threads, default 4) each run START TRANSACTION WITH CONSISTENT SNAPSHOT; to open a consistent-read transaction.
4. dump non-InnoDB tables: tables using non-transactional engines are exported first.
5. Once the non-transactional tables are done, the main thread runs UNLOCK TABLES to release the global read lock.
6. dump InnoDB tables: InnoDB tables are exported within the consistent transactions.
7. The transactions are ended.
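A minimal sketch of the key statements behind the default flow above, roughly as they would appear in the server's general query log (exact statements vary by mydumper version):
-- Main thread: take the global read lock and record the binlog coordinates
FLUSH TABLES WITH READ LOCK;
SHOW MASTER STATUS;
-- Each dump thread: open a consistent-read transaction
SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
START TRANSACTION WITH CONSISTENT SNAPSHOT;
-- ... non-InnoDB tables are dumped while the global lock is still held ...
-- Main thread: release the global lock once non-transactional tables are done
UNLOCK TABLES;
-- ... dump threads export InnoDB tables inside their snapshots, then commit ...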
Using --less-locking shortens the time locks are held; with it, mydumper's execution flow is roughly:
1. Main thread: FLUSH TABLES WITH READ LOCK (global lock)
2. Dump threads: START TRANSACTION WITH CONSISTENT SNAPSHOT;
3. LL dump threads: LOCK TABLES on the non-InnoDB tables (per-thread table locks)
4. Main thread: UNLOCK TABLES
5. LL dump threads: dump the non-InnoDB tables
6. LL dump threads: UNLOCK TABLES on the non-InnoDB tables
7. Dump threads: dump the InnoDB tables
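In both flows, the binary log coordinates captured while the global lock is held are written to a metadata file in the output directory (daemon mode uses numbered subdirectories such as /data/backup/data/0/). An illustrative example, with made-up binlog values:
# /data/backup/data/0/metadata
Started dump at: 2022-07-29 15:34:44
SHOW MASTER STATUS:
        Log: mysql-bin.000123
        Pos: 4096
        GTID:
Finished dump at: 2022-07-29 15:55:41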
mydumper Installation
Official site: https://launchpad.net/mydumper
GitHub: https://github.com/maxbube/mydumper
According to the official documentation, mydumper's main features are:
Multi-threaded data export, which is faster.
Consistent backups.
Optional compression of the output files to save space.
Multi-threaded restore (via myloader).
Daemon mode for scheduled snapshots and continuous binary log collection.
Splitting backup files into chunks of a specified size.
Data kept separate from the table-creation statements.
[root@spa-34-114-105 ~]# yum install -y glib2-devel zlib-devel pcre-devel openssl-devel
[root@spa-34-114-105 ~]# ll mydumper-0.12.5-2.el7.x86_64.rpm
-rw-r--r-- 1 root root 2736370 Jul 18 11:39 mydumper-0.12.5-2.el7.x86_64.rpm
[root@spa-34-114-105 ~]# rpm -ivh mydumper-0.12.5-2.el7.x86_64.rpm
Preparing... ################################# [100%]
Updating / installing...
1:mydumper-0.12.5-2 ################################# [100%]
[root@spa-34-114-105 ~]#
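Before running backups it is worth confirming both binaries landed on the PATH; each accepts the -V/--version flag listed in the help below (invocations only, output omitted):
# Confirm both tools were installed by the rpm
[root@spa-34-114-105 ~]# mydumper --version
[root@spa-34-114-105 ~]# myloader --version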
View the mydumper help
[root@spa-34-114-105 ~]# mydumper --help
Usage:
mydumper [OPTION…] multi-threaded MySQL dumping
Help Options:
-?, --help Show help options
Application Options:
-B, --database Database to dump
-o, --outputdir Directory to output files to
-d, --no-data Do not dump table data
-D, --daemon Enable daemon mode
-L, --logfile Log file name to use, by default stdout is used
--disk-limits Set the limit to pause and resume if it determines there is not enough disk space. Accepts values like: '<resume>:<pause>' in MB. For instance: 100:500 will pause when there is only 100MB free and will resume if 500MB are available
-t, --threads Number of threads to use, default 4
-C, --compress-protocol Use compression on the MySQL connection
-V, --version Show the program version and exit
-v, --verbose Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2
--defaults-file Use a specific defaults file
--ssl Connect using SSL
--ssl-mode Desired security state of the connection to the server: DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY
--key The path name to the key file
--cert The path name to the certificate file
--ca The path name to the certificate authority file
--capath The path name to a directory that contains trusted SSL CA certificates in PEM format
--cipher A list of permissible ciphers to use for SSL encryption
--tls-version Which protocols the server permits for encrypted connections
--stream It will stream over STDOUT once the files has been written
--no-delete It will not delete the files after stream has been completed
-O, --omit-from-file File containing a list of database.table entries to skip, one per line (skips before applying regex option)
-T, --tables-list Comma delimited table list to dump (does not exclude regex option)
-h, --host The host to connect to
-u, --user Username with the necessary privileges
-p, --password User password
-a, --ask-password Prompt For User password
-P, --port TCP/IP port to connect to
-S, --socket UNIX domain socket file to use for connection
-x, --regex Regular expression for 'db.table' matching
-G, --triggers Dump triggers. By default, it do not dump triggers
--split-partitions Dump partitions into separate files. This options overrides the --rows option for partitioned tables.
--max-rows Limit the number of rows per block after the table is estimated, default 1000000
--no-check-generated-fields Queries related to generated fields are not going to be executed. It will lead to restoration issues if you have generated columns
--order-by-primary Sort the data by Primary Key or Unique key if no primary key exists
-E, --events Dump events. By default, it do not dump events
-R, --routines Dump stored procedures and functions. By default, it do not dump stored procedures nor functions
-W, --no-views Do not dump VIEWs
-M, --checksum-all Dump checksums for all elements
--data-checksums Dump table checksums with the data
--schema-checksums Dump schema table and view creation checksums
--routine-checksums Dump triggers, functions and routines checksums
--tz-utc SET TIME_ZONE='+00:00' at top of dump to allow dumping of TIMESTAMP data when a server has data in different time zones or data is being moved between servers with different time zones, defaults to on use --skip-tz-utc to disable.
--complete-insert Use complete INSERT statements that include column names
--skip-tz-utc
-z, --tidb-snapshot Snapshot to use for TiDB
-N, --insert-ignore Dump rows with INSERT IGNORE
--replace Dump rows with REPLACE
--exit-if-broken-table-found Exits if a broken table has been found
--success-on-1146 Not increment error count and Warning instead of Critical in case of table doesn't exist
--use-savepoints Use savepoints to reduce metadata locking issues, needs SUPER privilege
-s, --statement-size Attempted size of INSERT statement in bytes, default 1000000
-F, --chunk-filesize Split tables into chunks of this output file size. This value is in MB
-e, --build-empty-files Build dump files even if no data available from table
--where Dump only selected records.
-i, --ignore-engines Comma delimited list of storage engines to ignore
--load-data
--csv Automatically enables --load-data and set variables to export in CSV format.
--fields-terminated-by
--fields-enclosed-by
--fields-escaped-by Single character that is going to be used to escape characters in the LOAD DATA statement, default: '\'
--lines-starting-by Adds the string at the beginning of each row. When --load-data is used it is added to the LOAD DATA statement. It also affects INSERT INTO statements when it is used.
--lines-terminated-by Adds the string at the end of each row. When --load-data is used it is added to the LOAD DATA statement. It also affects INSERT INTO statements when it is used.
--statement-terminated-by This might never be used, unless you know what are you doing
-r, --rows Try to split tables into chunks of this many rows. This option turns off --chunk-filesize
-c, --compress Compress output files
--exec Command to execute using the file as parameter
--long-query-retries Retry checking for long queries, default 0 (do not retry)
--long-query-retry-interval Time to wait before retrying the long query check in seconds, default 60
-l, --long-query-guard Set long query timer in seconds, default 60
--tidb-snapshot Snapshot to use for TiDB
-U, --updated-since Use Update_time to dump only tables updated in the last U days
-k, --no-locks Do not execute the temporary shared read lock. WARNING: This will cause inconsistent backups
-Y, --all-tablespaces Dump all the tablespaces.
--no-backup-locks Do not use Percona backup locks
--lock-all-tables Use LOCK TABLE for all, instead of FTWRL
--less-locking Minimize locking time on InnoDB tables.
--trx-consistency-only Transactional consistency only
-m, --no-schemas Do not dump table schemas with the data and triggers
-K, --kill-long-queries Kill long running queries (instead of aborting)
--set-names Sets the names, use it at your own risk, default binary
--pmm-path which default value will be /usr/local/percona/pmm2/collectors/textfile-collector/high-resolution
--pmm-resolution which default will be high
-I, --snapshot-interval Interval between each dump snapshot (in minutes), requires --daemon, default 60
-X, --snapshot-count number of snapshots, default 2
[root@spa-34-114-105 ~]#
myloader Parameter Reference
-d, --directory Directory of the dump to import
-q, --queries-per-transaction Number of queries per transaction, default 1000
-o, --overwrite-tables Drop tables if they already exist
--append-if-not-exist Appends IF NOT EXISTS to the create table statements. This will be removed when https://bugs.mysql.com/bug.php?id=103791 has been implemented
-B, --database An alternative database to restore into
-s, --source-db Database to restore
-e, --enable-binlog Enable binary logging of the restore data
--innodb-optimize-keys Creates the table without the indexes and it adds them at the end
--set-names Sets the names, use it at your own risk, default binary
-L, --logfile Log file name to use, by default stdout is used
--purge-mode This specify the truncate mode which can be: NONE, DROP, TRUNCATE and DELETE
--disable-redo-log Disables the REDO_LOG and enables it after, doesn't check initial status
-r, --rows Split the INSERT statement into this many rows.
--max-threads-per-table Maximum number of threads per table to use, default 4
--skip-triggers Do not import triggers. By default, it imports triggers
--skip-post Do not import events, stored procedures and functions. By default, it imports events, stored procedures and functions
--no-data Do not dump or import table data
--serialized-table-creation Table recreation will be executed in serie, one thread at a time
--resume Expect to find resume file in backup dir and will only process those files
--pmm-path which default value will be /usr/local/percona/pmm2/collectors/textfile-collector/high-resolution
--pmm-resolution which default will be high
-t, --threads Number of threads to use, default 4
-C, --compress-protocol Use compression on the MySQL connection
-V, --version Show the program version and exit
-v, --verbose Verbosity of output, 0 = silent, 1 = errors, 2 = warnings, 3 = info, default 2
--defaults-file Use a specific defaults file
--ssl Connect using SSL
--ssl-mode Desired security state of the connection to the server: DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY
--key The path name to the key file
--cert The path name to the certificate file
--ca The path name to the certificate authority file
--capath The path name to a directory that contains trusted SSL CA certificates in PEM format
--cipher A list of permissible ciphers to use for SSL encryption
--tls-version Which protocols the server permits for encrypted connections
--stream It will stream over STDOUT once the files has been written
--no-delete It will not delete the files after stream has been completed
-O, --omit-from-file File containing a list of database.table entries to skip, one per line (skips before applying regex option)
-T, --tables-list Comma delimited table list to dump (does not exclude regex option)
-h, --host The host to connect to
-u, --user Username with the necessary privileges
-p, --password User password
-a, --ask-password Prompt For User password
-P, --port TCP/IP port to connect to
-S, --socket UNIX domain socket file to use for connection
-x, --regex Regular expression for 'db.table' matching
--skip-definer Removes DEFINER from the CREATE statement. By default, statements are not modified
1. Single-table restore
# myloader cannot restore a single table directly; instead, locate that table's SQL file in the dump and execute it with the mysql client (or source it from the command line). If the table structure does not exist in the target, create it first.
mysql --host 10.105.114.35 --user=bak --password=cjLXUFE0uQja6DR --port=7306 db2 < /data/backup/data/0/sbtest.sbtest98.00000.sql &
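Locating the right file is easier knowing mydumper's naming convention: a per-database schema-create file, a per-table schema file, and numbered data chunks. For the sbtest dump above, the directory looks roughly like this (file list illustrative; chunk count depends on --rows/--max-rows):
# /data/backup/data/0/
#   metadata                      binlog position and dump timestamps
#   sbtest-schema-create.sql      CREATE DATABASE statement
#   sbtest.sbtest98-schema.sql    CREATE TABLE for sbtest98
#   sbtest.sbtest98.00000.sql     INSERT data chunk for sbtest98
#   sbtest.sbtest99-schema.sql
#   sbtest.sbtest99.00000.sql
# Create the table structure first if it is missing in the target database:
mysql --host 10.105.114.35 --user=bak --password=cjLXUFE0uQja6DR --port=7306 db2 < /data/backup/data/0/sbtest.sbtest98-schema.sql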
2. Single-database restore
# Back up only the business schemas, excluding the system schemas
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test|sys|performance_schema|information_schema))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# Restore the sbtest database from the full backup into a different database, db2
# Drop the target first if it already exists: drop database db2;
myloader --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --database db2 --overwrite-tables --source-db sbtest --directory /data/backup/data/0/ --queries-per-transaction 1000 --enable-binlog --threads 8 --compress-protocol --verbose 3 --logfile /data/backup/log/db_recover.log --purge-mode TRUNCATE --max-threads-per-table 4 --append-if-not-exist &
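Once the background restore finishes, a quick row-count comparison between the source schema and the restored copy is a simple sanity check (a sketch; the --checksum-all/--data-checksums options shown in the help allow stricter verification):
# Compare row counts between the original table and the restored copy
mysql --host 10.105.114.35 --user=bak --password=cjLXUFE0uQja6DR --port=7306 -e "select count(*) from sbtest.sbtest98; select count(*) from db2.sbtest98;"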
3. Restore all business databases
# Back up only the business schemas, excluding the system schemas
mydumper --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --regex '^(?!(mysql|test|sys|performance_schema|information_schema))' --outputdir /data/backup/data --daemon --logfile /data/backup/log/db.log --disk-limits "20480:40960" --threads 8 --compress-protocol --verbose 3 --triggers --max-rows 1000000 --events --routines --tz-utc --complete-insert --long-query-retry-interval 60 --less-locking --kill-long-queries --build-empty-files --trx-consistency-only
# Restore all of the business databases
myloader --host 10.105.114.35 --user bak --password cjLXUFE0uQja6DR --port 7306 --overwrite-tables --directory /data/backup/data/0/ --queries-per-transaction 1000 --enable-binlog --threads 8 --compress-protocol --verbose 3 --logfile /data/backup/log/db_recover.log --purge-mode TRUNCATE --max-threads-per-table 4 --append-if-not-exist &
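Both myloader invocations above run in the background; progress and errors can be followed through the log file given with --logfile, for example:
# Follow the restore log while the job runs
tail -f /data/backup/log/db_recover.log
# Or, once the job has finished, scan it for problems
grep -iE "error|critical" /data/backup/log/db_recover.log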
