forked from pinterest/mysql_utils
-
Notifications
You must be signed in to change notification settings - Fork 0
/
mysql_backup_xtrabackup.py
executable file
·111 lines (92 loc) · 3.72 KB
/
mysql_backup_xtrabackup.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
#!/usr/bin/env python
import argparse
import socket
import os
import time
from lib import environment_specific
from lib import host_utils
from lib import mysql_lib
from lib import backup
import purge_mysql_backups
# Module-level logger, configured with the project's shared logging defaults.
log = environment_specific.setup_logging_defaults(__name__)
def main():
    """Parse command-line options and launch an xtrabackup run.

    Accepts an optional -p/--port flag (default '3306') and backs up
    the MySQL instance listening on that port on the local host.
    """
    arg_parser = argparse.ArgumentParser(description='xtrabackup wrapper')
    arg_parser.add_argument('-p',
                            '--port',
                            help='Port to backup on localhost (default: 3306)',
                            default='3306')
    cli_args = arg_parser.parse_args()

    # Build a host:port address for this machine and hand it to the worker.
    local_instance = host_utils.HostAddr(
        '{host}:{port}'.format(host=socket.getfqdn(), port=cli_args.port))
    xtrabackup_backup_instance(local_instance)
def xtrabackup_backup_instance(instance):
    """ Run a file based backup on a supplied local instance

    Best-effort progress is logged to the central mysqlops reporting DB;
    failures to reach that DB are warned about but never abort the backup.

    Args:
    instance - A hostaddr object for the local MySQL instance to back up
    """
    starttime_sql = time.strftime('%Y-%m-%d %H:%M:%S')
    log.info('Logging initial status to mysqlops')
    row_id = None
    lock_handle = None
    reporting_conn = None
    try:
        reporting_conn = mysql_lib.get_mysqlops_connections()
        cursor = reporting_conn.cursor()
        sql = ("INSERT INTO mysqlops.mysql_backups "
               "SET "
               "hostname = %(hostname)s, "
               "port = %(port)s, "
               "started = %(started)s, "
               "backup_type = 'xbstream' ")
        metadata = {'hostname': instance.hostname,
                    'port': instance.port,
                    'started': starttime_sql}
        cursor.execute(sql, metadata)
        # lastrowid identifies the log row so we can update it when done.
        row_id = cursor.lastrowid
        reporting_conn.commit()
    except Exception as e:
        log.warning("Unable to write log entry to "
                    "mysqlopsdb001: {e}".format(e=e))
        log.warning("However, we will attempt to continue with the backup.")

    # Take a lock to prevent multiple backups from running concurrently
    try:
        log.info('Taking backup lock')
        lock_handle = host_utils.take_flock_lock(backup.BACKUP_LOCK_FILE)

        log.info('Cleaning up old backups')
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)

        # Actually run the backup
        log.info('Running backup')
        backup_file = backup.xtrabackup_instance(instance)
        finished = time.strftime('%Y-%m-%d %H:%M:%S')

        # Upload file to s3
        log.info('Uploading file to s3')
        backup.s3_upload(backup_file)

        # Update database with additional info now that backup is done.
        if row_id is None:
            log.info("The backup is complete, but we were not able to "
                     "write to the central log DB.")
        else:
            log.info("Updating database log entry with final backup info")
            try:
                sql = ("UPDATE mysqlops.mysql_backups "
                       "SET "
                       "filename = %(filename)s, "
                       "finished = %(finished)s, "
                       "size = %(size)s "
                       "WHERE id = %(id)s")
                metadata = {'filename': backup_file,
                            'finished': finished,
                            'size': os.stat(backup_file).st_size,
                            'id': row_id}
                cursor.execute(sql, metadata)
                reporting_conn.commit()
            except Exception as e:
                log.warning("Unable to update mysqlopsdb with "
                            "backup status: {e}".format(e=e))

        # Running purge again mostly for the chmod
        purge_mysql_backups.purge_mysql_backups(instance, skip_lock=True)
    finally:
        if lock_handle:
            log.info('Releasing lock')
            host_utils.release_flock_lock(lock_handle)
        # Close the reporting connection on every path; the original only
        # closed it on the happy path, leaking it when logging failed.
        if reporting_conn is not None:
            try:
                reporting_conn.close()
            except Exception:
                pass
# Script entry point: only run when executed directly, not when imported.
if __name__ == "__main__":
    main()