# non-sharded replica sets if not len(rs_dbs_map[rs]): continue expected_dbs = rs_dbs_map[rs] instance = zk.get_mysql_instance_from_replica_set(rs) activity = mysql_lib.get_dbs_activity(instance) actual_dbs = mysql_lib.get_dbs(instance) unexpected_dbs = actual_dbs.difference(expected_dbs) missing = expected_dbs.difference(actual_dbs) if missing: missing_dbs[instance] = expected_dbs.difference(actual_dbs) for db in unexpected_dbs: if activity[db]['ROWS_CHANGED'] != 0: if instance not in orphaned_but_used: orphaned_but_used[instance] = set() orphaned_but_used[instance].add(db) else: if instance not in orphaned: orphaned[instance] = set() orphaned[instance].add(db) return orphaned, orphaned_but_used, missing_dbs if __name__ == "__main__": environment_specific.initialize_logger() main()
def check_replication_for_backup(self):
    """Block until replication has caught up enough to run the daily backup.

    Polls the replication heartbeat every 10 seconds until the heartbeat's
    date matches self.timestamp's date.

    Raises:
        Exception: if the heartbeat date is *ahead* of the expected day,
            which should never happen for a daily backup run.
    """
    while True:
        heartbeat = mysql_lib.get_heartbeat(self.instance)
        if heartbeat.date() < self.timestamp.date():
            # Replica is still behind the backup day; wait and re-check.
            # NOTE: fixed typo "Replicaiton" -> "Replication" in log message.
            log.warning('Replication is too lagged ({cur}) to run daily backup, '
                        'sleeping'.format(cur=heartbeat))
            time.sleep(10)
        elif heartbeat.date() > self.timestamp.date():
            raise Exception('Replication is later than expected day')
        else:
            log.info('Replication is ok ({cur}) to run daily backup'
                     ''.format(cur=heartbeat))
            return

def setup_and_get_tmp_path(self):
    """Figure out where to temporarily store csv backups, and set it up.

    Side effects: creates <root_volume>/csv_export/<port> if missing,
    chowns the freshly-created directory to mysql:mysql, and records the
    path in self.dump_base_path.
    """
    tmp_dir_root = os.path.join(host_utils.find_root_volume(),
                                'csv_export',
                                str(self.instance.port))
    if not os.path.exists(tmp_dir_root):
        os.makedirs(tmp_dir_root)
        # Only chown when we created the directory ourselves.
        host_utils.change_owner(tmp_dir_root, 'mysql', 'mysql')
    self.dump_base_path = tmp_dir_root

if __name__ == "__main__":
    environment_specific.initialize_logger()
    main()