MySQL: a personally tested backup strategy for a slave instance (a real online backup case)

Xiaosheng blog: http://xsboke.blog.51cto.com

If in doubt, please click here and the author will reply promptly (or comment directly on this article).

                        ------- Thank you for reading. If you have any questions, you are welcome to discuss them.

Have you ever worried about backing up an online database? Here is a complete backup scheme for a slave database that you can test for yourself.

Description:

  1. Back up the slave database; one full backup per week
  2. Full backup at 6:00 a.m. every Monday; at the other scheduled times, back up the relay logs
  3. Enable the rsync service on the slave so the backups can be pulled remotely
  4. Use the rsync command on the local backup server to synchronize the database backups
  5. This backup can be used to add a new slave to the master or to restore the master (a rough sketch follows this list)
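
For point 5, here is a rough sketch of how the files produced below could be used; the file names, database name and credentials are placeholders for illustration, not output of this setup:

# Restore the weekly full dump into a target instance (create the database first if needed)
unzip dbname_2019-12-02-06-00-01.sql.zip
mysql -uroot -p dbname < dbname_2019-12-02-06-00-01.sql

# The dump is taken with --dump-slave=2, so it contains a commented CHANGE MASTER TO
# statement holding the master's binlog coordinates, which a new slave can start from:
grep "CHANGE MASTER" dbname_2019-12-02-06-00-01.sql

# Relay logs use the binary log format, so changes captured after the dump can be
# replayed with mysqlbinlog if point-in-time recovery is needed:
unzip Relay_log_2019-12-03-12-00-01.zip
mysqlbinlog relay-bin.000012 | mysql -uroot -p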

I. Slave server configuration

1. Backup script, written in Python

root@DBSlave:~# cat /scripts/mysql_slave_backup.py

#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os
import datetime,time

# The zip and unzip packages must be installed on the Linux system

# Backup policy example
'''
    1. One full backup per week; all other runs back up the relay logs
    2. Every Monday at 06:00: full database backup
    3. Tuesday to Sunday, every day at 06:00, 12:00 and 18:00: back up the relay logs
'''

# Backup directory layout
# A new backup directory is created each week
# "%W": week of the year, with Monday as the first day of the week (00-53)
Date_Time = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")   # full timestamp of this run
Week_Date = datetime.datetime.now().strftime("%Y-%W")     # year / current week of the year
Dir = "/data/backup"
Backup_Dir = Dir+ '/' + Week_Date

# Create the backup directory; a new directory is generated each week because a full backup is made every week
if os.path.isdir(Backup_Dir) is False:
    os.makedirs(Backup_Dir)

# Set database connection information
# mysqldump options:
#    --skip-tz-utc: keep TIMESTAMP values in the same time zone as before the export
#    --master-data=2: write the "CHANGE MASTER TO" statement into the dump as a comment; with =1 it is not commented out
#    --dump-slave=2: back up from a slave and record the master's "CHANGE MASTER TO" coordinates as a comment (with =1 it is not commented out); used when adding a new slave for the master
#    --quick: retrieve rows from the server one row at a time to speed up dumping large tables
#    --routines: export stored procedures and functions
#    --triggers: export triggers
#    --set-gtid-purged=OFF: do not write GTID information into the dump, so that importing it into a new instance does not conflict with that instance's GTIDs
#    --single-transaction: issue a START TRANSACTION before dumping data to get a consistent snapshot; only effective for transactional engines such as InnoDB

# Set database backup information
DB = '-uroot -p123456'  # Specify login account and password
ARG = '--dump-slave=2 --skip-tz-utc --routines --triggers --quick --default-character-set=utf8 --single-transaction'    # Specify backup parameters
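# Note: --set-gtid-purged=OFF is described above but is not included in ARG; it may need to be
# added if GTIDs are enabled and the dump will be loaded into a different instance.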
DB_NAME = "dbname"  # Database name
Back_DBNAME = DB_NAME + '_' + Date_Time + '.sql'    # Database backup name
Logs_File = Backup_Dir + '/logs'    # Specifies the file to output the log when backing up
Mysql_Bin = "/usr/bin/mysql"    # Specify the path of the [mysql] Command
MysqlDump_Bin = "/usr/bin/mysqldump"    # Specify the path of the [mysqldump] Command
Relay_Log_Dir = "/data/logs/relay_log"  #Specify relay log
Relay_Log_Info = "/data/logs/relay_log/relay-bin.info"  # Used to get the relay log currently in use

# Delete old backups
def Del_Old():
    '''Delete backups older than 36 days'''
    OLD_Files = os.popen("find %s -type f -mtime +36" % (Dir)).readlines()
    if len(OLD_Files) > 0:
        for OLD_File in OLD_Files:
            FileName = OLD_File.split("\n")[0]
            os.system("rm -f %s" % (FileName))

    # remove empty directories
    All_Dir = os.popen("find %s -type d"%(Dir + '/*')).readlines()
    for Path_Dir in All_Dir:
        Path_Dir = Path_Dir.split("\n")[0]
        Terms = os.popen("ls %s | wc -l"%(Path_Dir)).read()
        if int(Terms) == 0:
            os.system("rm -rf %s"%(Path_Dir))

# Back up the relay log files that have already been applied
def ZIP_And_Del_Existed():
    '''
    Compress the relay logs that have already been applied and then delete them.
    To avoid deleting relay logs before replication has finished with them, only relay
    logs older than the one currently in use are compressed and deleted.
    '''

    # Get all relay logs
    Relog_List = os.popen("ls %s | grep \"^relay-bin.*\" | grep -v \"relay-bin.in*\"" % (Relay_Log_Dir)).readlines()

    # Get the relay log file currently in use
    CurRelay = os.popen("cat %s | head -n 1" % (Relay_Log_Info)).readline().split("\n")[0]
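    # Note: this assumes the first line of relay-bin.info holds the name of the relay log
    # currently in use; on some MySQL versions the relay log info file starts with a line
    # count, in which case the file name is on the following line.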
    CurRelay_MTime = os.path.getmtime(CurRelay)  # Gets the last modification time of the currently in use file

    # Loop over all relay log files and compare each one's modification time with that of the relay log currently in use, to find the logs that can be backed up
    Need_ZIP_FName = []  # Define file names to be compressed and deleted
    for FileName in Relog_List:
        '''
        Add files whose modification time is earlier than that of the relay log
        currently in use to the Need_ZIP_FName list, for backup and deletion.
        '''
        FName =  FileName.split("\n")[0]
        FName_MTime = os.path.getmtime("%s/%s"%(Relay_Log_Dir,FName))
        if FName_MTime < CurRelay_MTime:
            Need_ZIP_FName.append("%s/%s"%(Relay_Log_Dir,FName))

    os.system("zip -j %s/Relay_log_%s.zip %s" % (Backup_Dir, Date_Time," ".join(Need_ZIP_FName)))

    # Delete the relay logs that have just been compressed
    for Relay_Log in Need_ZIP_FName:
        os.system("rm -f %s"%(Relay_Log))

# Start the backup. If today is Monday and this week's full backup does not exist yet, perform a full backup; otherwise back up the relay logs.
IF_Week = datetime.datetime.now().strftime('%w')
if int(IF_Week) == 1:
    # Check whether this week's full backup already exists
    Test = os.popen('ls %s | grep -E \"^%s.*([0-9]{2}-[0-9]{2}-[0-9]{2}).sql.zip\" | wc -l'%(Backup_Dir,DB_NAME)).readline()
    if int(Test) == 0:
        # No full backup for this week yet, so perform the full backup now
        with open(Logs_File,'a+') as file:
            file.writelines("####----------        Demarcation line        ----------####\n")
            file.writelines("###Start > > > full datetime:% s \ n "% (datetime. Datetime. Now(). Strftime ("% Y -% m -% d -% H -% m -% s "))
            file.writelines("###     Today is week% s \ n "% (if & week))
            file.writelines("###     stop slave\n")
            os.system("%s %s -e \"stop slave\""%(Mysql_Bin,DB))
            file.writelines("###     status slave\n")
            Show_Slave = os.popen("%s %s -e \"show slave status\G\""%(Mysql_Bin,DB)).readlines()
            file.writelines(Show_Slave)
            file.writelines("###     backup\n")
            os.system("%s %s %s %s > %s/%s"%(MysqlDump_Bin,DB,ARG,DB_NAME,Backup_Dir,Back_DBNAME))
            file.writelines("###    backup done && start slave | datetime : %s\n"%(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
            os.system("%s %s -e \"start slave;\""%(Mysql_Bin,DB))
            time.sleep(5)
            file.writelines("###    slave status\n")
            Show_Slave = os.popen("%s %s -e \"show slave status\G\"" % (Mysql_Bin, DB)).readlines()
            file.writelines(Show_Slave)
            file.writelines("###Done > > > all ready \ n ")
            os.system("zip -j %s/%s.zip %s/%s"%(Backup_Dir,Back_DBNAME,Backup_Dir,Back_DBNAME))
            os.system("rm %s/%s"%(Backup_Dir,Back_DBNAME))
            file.writelines("\n\n\n\n\n")
        Del_Old()
    else:
        with open(Logs_File,'a+') as file:
            file.writelines("####----------        Demarcation line        ----------####\n")
            file.writelines("###Start > > > incremental backup datetime:% s \ n "% (datetime. Datetime. Now(). Strftime ("% Y -% m -% d -% H -% m -% s "))
            file.writelines("###     Today is week% s \ n "% (if & week))
            file.writelines("###     stop slave\n")
            os.system("%s %s -e \"stop slave\""%(Mysql_Bin,DB))
            file.writelines("###     status slave\n")
            Show_Slave = os.popen("%s %s -e \"show slave status\G\""%(Mysql_Bin,DB)).readlines()
            file.writelines(Show_Slave)
            file.writelines("###     backup\n")
            ZIP_And_Del_Existed()
            file.writelines("###    backup done && start slave | datetime : %s\n"%(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
            os.system("%s %s -e \"start slave;\""%(Mysql_Bin,DB))
            time.sleep(5)
            file.writelines("###    slave status\n")
            Show_Slave = os.popen("%s %s -e \"show slave status\G\"" % (Mysql_Bin, DB)).readlines()
            file.writelines(Show_Slave)
            file.writelines("###Done > > > incremental backup complete \ n ")
            file.writelines("\n\n\n\n\n")
        Del_Old()
else:
    with open(Logs_File, 'a+') as file:
        file.writelines("####----------        Demarcation line        ----------####\n")
        file.writelines("###Start > > > incremental backup datetime:% s \ n "% (datetime. Datetime. Now(). Strftime ("% Y -% m -% d -% H -% m -% s "))
        file.writelines("###     Today is week% s \ n "% (if & week))
        file.writelines("###     stop slave\n")
        os.system("%s %s -e \"stop slave\"" % (Mysql_Bin, DB))
        file.writelines("###     status slave\n")
        Show_Slave = os.popen("%s %s -e \"show slave status\G\"" % (Mysql_Bin, DB)).readlines()
        file.writelines(Show_Slave)
        file.writelines("###     backup\n")
        ZIP_And_Del_Existed()
        file.writelines("###    backup done && start slave | datetime : %s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
        os.system("%s %s -e \"start slave;\"" % (Mysql_Bin, DB))
        time.sleep(5)
        file.writelines("###    slave status\n")
        Show_Slave = os.popen("%s %s -e \"show slave status\G\"" % (Mysql_Bin, DB)).readlines()
        file.writelines(Show_Slave)
        file.writelines("###Done > > > incremental backup complete \ n ")
        file.writelines("\n\n\n\n\n")
    Del_Old()
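
The script can be run once by hand before scheduling it, to confirm that the dump and the log file are produced as expected (the paths follow the variables defined in the script above):

root@DBSlave:~# python /scripts/mysql_slave_backup.py
root@DBSlave:~# ls -lh /data/backup/$(date +%Y-%W)/
root@DBSlave:~# tail -n 30 /data/backup/$(date +%Y-%W)/logs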

2. Scheduled tasks (cron)

root@DBSlave:~# cat /etc/cron.d/general

#mysql backup
0 6     * * *   root    python /scripts/mysql_slave_backup.py
0 12    * * *   root    python /scripts/mysql_slave_backup.py
0 18    * * *   root    python /scripts/mysql_slave_backup.py

3. rsync daemon configuration

root@DBSlave:~# cat /etc/rsyncd.conf

uid = 0
gid = 0
use chroot = yes
address = "Current host public network address"
port = 8638
log file = /var/log/rsync.log
pid file = /var/run/rsync.pid
hosts allow = "IP address of the backup server (only this IP may connect)"

[databases]
path = /data/backup/
comment = databases
read only = yes
dont compress = *.gz *.bz2 *.zip
# Only the remoteuser account is allowed to connect
auth users = remoteuser
secrets file = /etc/rsyncd_users.db
root@DBSlave:~# cat /etc/rsyncd_users.db
# Format: username:password
remoteuser:password
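
The configuration above only defines the daemon; it still has to be started (for example with rsync --daemon, or via xinetd/systemd, which is not covered here). Once it is running, connectivity can be checked from the backup server by listing the module; the address below is a placeholder, and rsync will prompt for the remoteuser password unless --password-file is given:

root@localhost:~# rsync --list-only --port=8638 remoteuser@<slave rsync address>::databases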

II. Local backup host configuration

1. Create rsync password file

root@localhost:~# cat /etc/server.pass
password
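
Note that, unlike the daemon's secrets file on the slave, the client-side file passed to --password-file contains only the password itself, and rsync will refuse to use it if it is readable by other users:

root@localhost:~# chmod 600 /etc/server.pass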

2. Synchronization script

root@localhost:~# cat /scripts/backup.sh

#!/bin/bash
SSH=$(which ssh)
Logs_Dir="/Backup/logs.txt"
Rsync=$(which rsync)
Project="databases"
Dest="/Backup/"

IF_DEL_FILE=$(find ${Dest} -type f -mtime +36 -name "*" | wc -l)
DEL_FILE=$(find ${Dest} -type f -mtime +36 -name "*")

# Delete old backup
RMOLD(){
    if [ "${IF_DEL_FILE}" -gt "0" ]
    then
        for filename in ${DEL_FILE}
        do
            rm -f ${filename}
        done
        rmdir ${Dest}*  # remove empty directories
    fi
}

# Execute synchronization command
Backup(){
    echo "###    ---------- datetime : `date +%F-%H-%M-%S` ----------    ###" >> ${Logs_Dir}
    echo "#    start rsync" >> ${Logs_Dir}
    ${Rsync} -azH --password-file=/etc/server.pass --bwlimit=300 --port=8638 remoteuser@<rsync listening IP of the DB slave>::${Project} ${Dest} &>> ${Logs_Dir}
    echo "###    end   rsync    ---------- datetime : `date +%F-%H-%M-%S` ----------    ###" >> ${Logs_Dir}
    echo -e "\n\n\n\n\n" >> ${Logs_Dir}
    RMOLD
}

# If a synchronization is already running, do not start another one
IFProcess(){
    ps -ef | grep "${Rsync} -azH --password-file=/etc/server.pass --bwlimit=300 --port=8638 remoteuser@<rsync listening IP of the DB slave>::${Project}" | grep -v "grep" &> /dev/null
    if [[ "$?" == 0 ]]
    then
        exit 0
    else
        Backup
    fi
}

IFProcess

3. Scheduled tasks (cron)

root@localhost:~# cat /etc/cron.d/general
01 23 * * * root /bin/bash /scripts/backup.sh
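
As with the backup script, the synchronization can be run once by hand to confirm credentials and connectivity before relying on cron:

root@localhost:~# bash /scripts/backup.sh
root@localhost:~# tail -n 20 /Backup/logs.txt
root@localhost:~# ls -lh /Backup/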

Tags: MySQL rsync Database Python mysqldump
