def vacuum_dbs(self, vacuum_list):
    '''
    Target:
        - vacuum a group of PostgreSQL databases.
    Parameters:
        - vacuum_list: names of the databases which are going to be vacuumed.
    '''
    if vacuum_list:
        self.logger.highlight('info', Messenger.BEGINNING_VACUUMER, 'white')

    for db in vacuum_list:
        dbname = db['datname']
        self.logger.highlight('info',
                              Messenger.PROCESSING_DB.format(dbname=dbname),
                              'cyan')

        if db['datallowconn']:
            # Time the vacuum of this database
            started = DateTools.get_current_datetime()
            success = self.vacuum_db(dbname)
            finished = DateTools.get_current_datetime()
            elapsed = DateTools.get_diff_datetimes(started, finished)
        else:
            # Connections to this database are forbidden: it cannot be
            # vacuumed, let the user know
            self.logger.highlight(
                'warning',
                Messenger.FORBIDDEN_DB_CONNECTION.format(dbname=dbname),
                'yellow', effect='bold')
            success = False

        if success:
            self.logger.highlight(
                'info',
                Messenger.DB_VACUUMER_DONE.format(dbname=dbname,
                                                  diff=elapsed),
                'green')
        else:
            self.logger.highlight(
                'warning', Messenger.DB_VACUUMER_FAIL.format(dbname=dbname),
                'yellow', effect='bold')

    self.logger.highlight('info', Messenger.VACUUMER_DONE, 'green',
                          effect='bold')
def alter_dbs_owner(self, alt_list):
    '''
    Target:
        - change the owner of a group of databases and their tables.
    Parameters:
        - alt_list: names of the databases which are going to be altered.
    '''
    self.logger.highlight('info', Msg.PROCESSING_ALTERER, 'white')

    if not alt_list:
        # Nothing matched the selection: warn and finish
        self.logger.highlight('warning', Msg.ALTERER_HAS_NOTHING_TO_DO,
                              'yellow', effect='bold')
    else:
        for database in alt_list:
            dbname = database['datname']
            self.logger.highlight('info',
                                  Msg.PROCESSING_DB.format(dbname=dbname),
                                  'cyan')

            # Time the owner change of this database
            t0 = DateTools.get_current_datetime()
            changed = self.alter_db_owner(database)
            t1 = DateTools.get_current_datetime()
            duration = DateTools.get_diff_datetimes(t0, t1)

            if changed:
                self.logger.highlight(
                    'info',
                    Msg.DB_ALTERER_DONE.format(dbname=dbname,
                                               diff=duration),
                    'green')
            else:
                self.logger.highlight(
                    'warning', Msg.DB_ALTERER_FAIL.format(dbname=dbname),
                    'yellow', effect='bold')

    self.logger.highlight('info', Msg.ALTERER_DONE, 'green', effect='bold')
def backup_cl(self):
    '''
    Target:
        - vacuum if necessary and make a backup of a cluster.
    '''
    self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

    # The destination directory is named after the group
    bkps_dir = self.bkp_path + self.group + Default.CL_BKPS_DIR
    Dir.create_dir(bkps_dir, self.logger)
    self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

    # Optionally vacuum every database before backing up the cluster
    if self.vacuum:
        vacuumer = Vacuumer(connecter=self.connecter, logger=self.logger)
        databases = vacuumer.connecter.get_pg_dbs_data(
            vacuumer.ex_templates, vacuumer.db_owner)
        vacuumer.vacuum_dbs(databases)

    self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

    # Time the backup of the whole cluster
    t_start = DateTools.get_current_datetime()
    done = self.backup_all(bkps_dir)
    t_end = DateTools.get_current_datetime()
    duration = DateTools.get_diff_datetimes(t_start, t_end)

    if done:
        self.logger.highlight('info',
                              Msg.CL_BACKER_DONE.format(diff=duration),
                              'green', effect='bold')
    else:
        self.logger.highlight('warning', Msg.CL_BACKER_FAIL, 'yellow',
                              effect='bold')

    self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
def backup_cl(self):
    '''
    Target:
        - vacuum if necessary and make a backup of a cluster.
    '''
    self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

    # Backups of the whole cluster live in a per-group subdirectory
    destination = self.bkp_path + self.group + Default.CL_BKPS_DIR
    Dir.create_dir(destination, self.logger)
    self.logger.info(Msg.DESTINY_DIR.format(path=destination))

    if self.vacuum:
        # Vacuum the cluster's databases before the backup process
        cleaner = Vacuumer(connecter=self.connecter, logger=self.logger)
        cleaner.vacuum_dbs(cleaner.connecter.get_pg_dbs_data(
            cleaner.ex_templates, cleaner.db_owner))

    self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

    begin = DateTools.get_current_datetime()
    # Make the backup of the cluster
    ok = self.backup_all(destination)
    end = DateTools.get_current_datetime()
    # Get and show the process' duration
    elapsed = DateTools.get_diff_datetimes(begin, end)

    if ok:
        self.logger.highlight('info',
                              Msg.CL_BACKER_DONE.format(diff=elapsed),
                              'green', effect='bold')
    else:
        self.logger.highlight('warning', Msg.CL_BACKER_FAIL, 'yellow',
                              effect='bold')

    self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
def drop_pg_db(self, dbname, pg_superuser):
    '''
    Target:
        - remove a database in PostgreSQL.
    Parameters:
        - dbname: the PostgreSQL database's name which is going to be
          removed.
        - pg_superuser: a flag which indicates whether the current user is
          PostgreSQL superuser or not.
    '''
    # Becomes True only once every precondition (existence, no active
    # connections, ownership) has been checked
    delete = False

    try:
        # Check that the target database actually exists
        self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, ))
        result = self.connecter.cursor.fetchone()

        if result:
            pg_pid = self.connecter.get_pid_str()

            # Look for backends connected to the target database
            # (excluding this session's own pid)
            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=dbname)
            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            # If there are not any connections to the target database...
            if not result:
                # Users who are not superusers will only be able to drop
                # the databases they own
                if not pg_superuser:
                    self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER,
                                                  (dbname, ))
                    db = self.connecter.cursor.fetchone()
                    if db['owner'] != self.connecter.user:
                        msg = Msg.DROP_DB_NOT_ALLOWED.format(
                            user=self.connecter.user, dbname=dbname)
                        self.logger.highlight('warning', msg, 'yellow')
                    else:
                        delete = True
                else:
                    delete = True

                if delete:
                    # Get the database's "datallowconn" value
                    datallowconn = self.connecter.get_datallowconn(dbname)

                    # If datallowconn is allowed, change it temporarily
                    if datallowconn:
                        # Disallow connections to the database during the
                        # process
                        result = self.connecter.disallow_db_conn(dbname)
                        if not result:
                            msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                    fmt_query_drop_db = Queries.DROP_PG_DB.format(
                        dbname=dbname)

                    start_time = DateTools.get_current_datetime()
                    # Drop the database
                    self.connecter.cursor.execute(fmt_query_drop_db)
                    end_time = DateTools.get_current_datetime()
                    # Get and show the process' duration
                    diff = DateTools.get_diff_datetimes(start_time,
                                                        end_time)
                    msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')

                    # If datallowconn was allowed, leave it as it was
                    # NOTE(review): the database was just dropped, so this
                    # restore only matters if the DROP silently failed --
                    # confirm whether it is still needed
                    if datallowconn:
                        # Allow connections to the database at the end of
                        # the process
                        result = self.connecter.allow_db_conn(dbname)
                        if not result:
                            msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')
            else:
                # Active connections prevent the drop
                msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')
        else:
            # The database does not exist: nothing to drop
            msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname)
            self.logger.highlight('warning', msg, 'yellow')

    except Exception as e:
        # Any failure is logged and reported; execution continues
        self.logger.debug('Error en la función "drop_pg_db": '
                          '{}.'.format(str(e)))
        self.logger.highlight('warning',
                              Msg.DROP_DB_FAIL.format(dbname=dbname),
                              'yellow')
def trim_cluster(self, ht_bkps_list):
    '''
    Target:
        - remove (if necessary) some cluster's backups, taking into
          account some parameters in the following order: minimum number
          of backups to keep > obsolete backups.
    Parameters:
        - ht_bkps_list: list of backups of a cluster to analyse and trim.
    '''
    if self.exp_days == -1:  # No expiration date
        x_days_ago = None
    else:
        # Unix timestamp of the expiration threshold
        x_days_ago = time.time() - (60 * 60 * 24 * self.exp_days)

    # Store the total number of backups of the cluster
    num_bkps = len(ht_bkps_list)
    # Clone the list to avoid conflict errors when removing
    ht_bkps_lt = ht_bkps_list[:]

    # Becomes True as soon as at least one backup file is deleted
    unlinked = False

    self.logger.highlight('info', Messenger.BEGINNING_CL_TRIMMER, 'white')

    start_time = DateTools.get_current_datetime()

    for f in ht_bkps_list:
        # Break if number of backups do not exceed the minimum
        if num_bkps <= self.min_n_bkps:
            break

        file_info = os.stat(f)

        # Obsolete backup
        # NOTE(review): st_ctime is the inode change time on Linux, not
        # the creation time -- confirm this is the intended age criterion
        if x_days_ago and file_info.st_ctime < x_days_ago:
            self.logger.info(Messenger.DELETING_OBSOLETE_BACKUP % f)
            os.unlink(f)  # Remove backup's file
            unlinked = True
            # Update the number of backups of the database
            num_bkps -= 1
            ht_bkps_lt.remove(f)  # Update the list of cluster's backups

    end_time = DateTools.get_current_datetime()

    # Get total size of the backups in Bytes
    tsize = Dir.get_files_tsize(ht_bkps_lt)
    # Get total size of the backups in the selected unit of measure
    tsize_unit = ceil(tsize / self.equivalence)

    ## UNCOMMENT NEXT SECTION TO PROCEDURE WITH THE BACKUP'S DELETION IF
    ## THEIR TOTAL SIZE EXCEEDS THE SPECIFIED MAXIMUM SIZE
    ## (kept verbatim, including its Spanish log message, in case it is
    ## re-enabled later)
    #ht_bkps_list = ht_bkps_lt[:]
    #for f in ht_bkps_list:
        ## If there are less backups than the minimum required...
        #if num_bkps <= self.min_n_bkps:
            #break
        #if tsize <= self.max_size_bytes:
            #break
        #else:
            #file_info = os.stat(f)
            #self.logger.info('Tamaño de copias de seguridad en disco '
                             #'mayor que {} {}: eliminando el archivo '
                             #'{}...' % (self.max_size['size'],
                                        #self.max_size['unit'], f))
            #os.unlink(f)  # Remove backup's file
            #unlinked = True
            ## Update the number of backups of the cluster
            #num_bkps -= 1
            ## ht_bkps_lt.remove(f)  # Update the list of cluster's backups
            #tsize -= file_info.st_size  # Update total size after deletion

    if not unlinked:
        # Nothing was deleted in this run
        message = Messenger.NO_CL_BACKUP_DELETED
        self.logger.highlight('warning', message, 'yellow')

    # Total size exceeds the maximum: only a warning, since deletion by
    # size is disabled (see the commented section above)
    if tsize > self.max_size_bytes:
        message = Messenger.CL_BKPS_SIZE_EXCEEDED.format(
            tsize_unit=tsize_unit, size=self.max_size['size'],
            unit=self.max_size['unit'])
        self.logger.highlight('warning', message, 'yellow', effect='bold')

    # Get and show the process' duration
    diff = DateTools.get_diff_datetimes(start_time, end_time)

    self.logger.highlight('info', Messenger.CL_TRIMMER_DONE.format(
        diff=diff), 'green')
def replicate_pg_db(self):
    '''
    Target:
        - clone a specified database in PostgreSQL: a copy of
          "self.original_dbname" is created under the name
          "self.new_dbname". Stops the program on failure.
    '''
    try:
        # Abort if any other backend is connected to the original
        # database: cloning via template requires it to be idle
        pg_pid = self.connecter.get_pid_str()
        formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
            pg_pid=pg_pid, target_db=self.original_dbname)
        self.connecter.cursor.execute(formatted_sql)
        result = self.connecter.cursor.fetchone()
        if result:
            msg = Msg.ACTIVE_CONNS_ERROR.format(
                dbname=self.original_dbname)
            self.logger.stop_exe(msg)

        formatted_query_clone_pg_db = Queries.CLONE_PG_DB.format(
            dbname=self.new_dbname,
            original_dbname=self.original_dbname,
            user=self.connecter.user)

        msg = Msg.BEGINNING_REPLICATOR.format(
            original_dbname=self.original_dbname)
        self.logger.highlight('info', msg, 'white')

        # Get the database's "datallowconn" value
        datallowconn = self.connecter.get_datallowconn(
            self.original_dbname)

        # If datallowconn is allowed, change it temporarily: disallow
        # connections to the original database during the process
        if datallowconn:
            result = self.connecter.disallow_db_conn(self.original_dbname)
            if not result:
                msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                    dbname=self.original_dbname)
                self.logger.highlight('warning', msg, 'yellow')

        # self.connecter.cursor.execute('commit')

        start_time = DateTools.get_current_datetime()
        # Replicate the database
        self.connecter.cursor.execute(formatted_query_clone_pg_db)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        # If datallowconn was allowed, leave it as it was
        if datallowconn:
            # Allow connections to the database at the end of the process
            result = self.connecter.allow_db_conn(self.original_dbname)
            if not result:
                msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                    dbname=self.original_dbname)
                self.logger.highlight('warning', msg, 'yellow')

        msg = Msg.REPLICATE_DB_DONE.format(
            new_dbname=self.new_dbname,
            original_dbname=self.original_dbname, diff=diff)
        self.logger.highlight('info', msg, 'green')
        self.logger.highlight('info', Msg.REPLICATOR_DONE, 'green',
                              effect='bold')

    except Exception as e:
        # Fixed: the debug message used to say "clone_pg_db", which is
        # not this function's name and misled log readers
        self.logger.debug('Error en la función "replicate_pg_db": '
                          '{}.'.format(str(e)))
        self.logger.stop_exe(Msg.REPLICATE_DB_FAIL)
def restore_db_backup(self):
    '''
    Target:
        - restore a database's backup in PostgreSQL.
    '''
    # Disabled alternative: restore into a clone of a restoring template
    # instead of an already existing database
    #replicator = Replicator(self.connecter, self.new_dbname,
                            #Default.RESTORING_TEMPLATE, self.logger)
    #result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
    #if result:
        #replicator.replicate_pg_db()
        #self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
    #else:
        #self.logger.stop_exe(Messenger.ALLOW_DB_CONN_FAIL.format(
            #dbname=Default.RESTORING_TEMPLATE))

    # Regular expression which must match the backup's name
    regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
    regex = re.compile(regex)

    if re.match(regex, self.db_backup):
        # Store the parts of the backup's name (name, date, ext)
        parts = regex.search(self.db_backup).groups()
        # Store only the extension to know the type of file
        ext = parts[2]
    else:
        # stop_exe is expected to end the program here; otherwise "ext"
        # would be unbound below
        self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

    message = Messenger.BEGINNING_DB_RESTORER.format(
        db_backup=self.db_backup, new_dbname=self.new_dbname)
    self.logger.highlight('info', message, 'white')
    self.logger.info(Messenger.WAIT_PLEASE)

    # Build the shell pipeline which decompresses the backup (if needed)
    # and feeds it to pg_restore.
    # NOTE(review): the command is interpolated into a string and run with
    # shell=True -- file names with shell metacharacters would break it;
    # confirm the inputs are trusted
    if ext == 'gz':
        command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    elif ext == 'bz2':
        command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    elif ext == 'zip':
        command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    else:
        # Plain dump: pg_restore reads the file directly
        command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
            self.connecter.user, self.connecter.server,
            self.connecter.port, self.new_dbname, self.db_backup)

    try:
        start_time = DateTools.get_current_datetime()
        # Make the restauration of the database
        result = subprocess.call(command, shell=True)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        # A non-zero exit status means the restore failed
        if result != 0:
            raise Exception()

        message = Messenger.RESTORE_DB_DONE.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname,
            diff=diff)
        self.logger.highlight('info', message, 'green')
        self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                              effect='bold')
    except Exception as e:
        self.logger.debug('Error en la función "restore_db_backup": '
                          '{}.'.format(str(e)))
        message = Messenger.RESTORE_DB_FAIL.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.stop_exe(message)
def restore_cluster_backup(self):
    '''
    Target:
        - restore a cluster's backup in PostgreSQL. The cluster must have
          been created before this process. Stops the program on failure.
    '''
    # Regular expression which must match the backup's name
    regex = r'.*ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
    regex = re.compile(regex)

    if re.match(regex, self.cluster_backup):
        # Store the parts of the backup's name (servername, date, ext)
        parts = regex.search(self.cluster_backup).groups()
        # Store only the extension to know the type of file
        ext = parts[2]
    else:
        # FIX: the original evaluated "Messenger.NO_BACKUP_FORMAT" as a
        # bare no-op expression, so execution fell through with "ext"
        # unbound and crashed below with UnboundLocalError. Stop with the
        # proper message instead, as restore_db_backup does.
        self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

    message = Messenger.BEGINNING_CL_RESTORER.format(
        cluster_backup=self.cluster_backup)
    self.logger.highlight('info', message, 'white')
    self.logger.info(Messenger.WAIT_PLEASE)

    # TODO: make dissappear every line about the operation shown in console

    # Build the shell pipeline which decompresses the backup (if needed)
    # and feeds it to psql
    if ext == 'gz':
        command = 'gunzip -c {} -k | psql postgres -U {} -h {} ' \
                  '-p {}'.format(self.cluster_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port)
    elif ext == 'bz2':
        command = 'bunzip2 -c {} -k | psql postgres -U {} -h {} ' \
                  '-p {}'.format(self.cluster_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port)
    elif ext == 'zip':
        command = 'unzip -p {} | psql postgres -U {} -h {} -p {}'.format(
            self.cluster_backup, self.connecter.user,
            self.connecter.server, self.connecter.port)
    else:
        # Plain dump: feed the file to psql via stdin
        command = 'psql postgres -U {} -h {} -p {} < {}'.format(
            self.connecter.user, self.connecter.server,
            self.connecter.port, self.cluster_backup)

    try:
        start_time = DateTools.get_current_datetime()
        # Make the restauration of the cluster
        result = subprocess.call(command, shell=True)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        # A non-zero exit status means the restore failed
        if result != 0:
            raise Exception()

        message = Messenger.RESTORE_CL_DONE.format(
            cluster_backup=self.cluster_backup, diff=diff)
        self.logger.highlight('info', message, 'green')
        self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                              effect='bold')
    except Exception as e:
        self.logger.debug('Error en la función "restore_cluster_backup": '
                          '{}.'.format(str(e)))
        message = Messenger.RESTORE_CL_FAIL.format(
            cluster_backup=self.cluster_backup)
        self.logger.stop_exe(message)
def drop_pg_db(self, dbname, pg_superuser):
    '''
    Target:
        - remove a database in PostgreSQL.
    Parameters:
        - dbname: the PostgreSQL database's name which is going to be
          removed.
        - pg_superuser: a flag which indicates whether the current user is
          PostgreSQL superuser or not.
    '''
    # Becomes True only once every precondition (existence, no active
    # connections, ownership) has been checked
    delete = False

    try:
        # Check that the target database actually exists
        self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, ))
        result = self.connecter.cursor.fetchone()

        if result:
            pg_pid = self.connecter.get_pid_str()

            # Look for backends connected to the target database
            # (excluding this session's own pid)
            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=dbname)
            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            # If there are not any connections to the target database...
            if not result:
                # Users who are not superusers will only be able to drop
                # the databases they own
                if not pg_superuser:
                    self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER,
                                                  (dbname, ))
                    db = self.connecter.cursor.fetchone()
                    if db['owner'] != self.connecter.user:
                        msg = Msg.DROP_DB_NOT_ALLOWED.format(
                            user=self.connecter.user, dbname=dbname)
                        self.logger.highlight('warning', msg, 'yellow')
                    else:
                        delete = True
                else:
                    delete = True

                if delete:
                    # Get the database's "datallowconn" value
                    datallowconn = self.connecter.get_datallowconn(dbname)

                    # If datallowconn is allowed, change it temporarily
                    if datallowconn:
                        # Disallow connections to the database during the
                        # process
                        result = self.connecter.disallow_db_conn(dbname)
                        if not result:
                            msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                    fmt_query_drop_db = Queries.DROP_PG_DB.format(
                        dbname=dbname)

                    start_time = DateTools.get_current_datetime()
                    # Drop the database
                    self.connecter.cursor.execute(fmt_query_drop_db)
                    end_time = DateTools.get_current_datetime()
                    # Get and show the process' duration
                    diff = DateTools.get_diff_datetimes(start_time,
                                                        end_time)
                    msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')

                    # If datallowconn was allowed, leave it as it was
                    # NOTE(review): the database was just dropped, so this
                    # restore only matters if the DROP silently failed --
                    # confirm whether it is still needed
                    if datallowconn:
                        # Allow connections to the database at the end of
                        # the process
                        result = self.connecter.allow_db_conn(dbname)
                        if not result:
                            msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')
            else:
                # Active connections prevent the drop
                msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')
        else:
            # The database does not exist: nothing to drop
            msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname)
            self.logger.highlight('warning', msg, 'yellow')

    except Exception as e:
        # Any failure is logged and reported; execution continues
        self.logger.debug('Error en la función "drop_pg_db": '
                          '{}.'.format(str(e)))
        self.logger.highlight('warning',
                              Msg.DROP_DB_FAIL.format(dbname=dbname),
                              'yellow')
def restore_db_backup(self):
    '''
    Target:
        - restore a database's backup in PostgreSQL.
    '''
    # First create the target database as a clone of the restoring
    # template, so the dump can be restored into a fresh database
    replicator = Replicator(self.connecter, self.new_dbname,
                            Default.RESTORING_TEMPLATE, self.logger)
    result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
    if result:
        replicator.replicate_pg_db()
        self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
    else:
        self.logger.stop_exe(
            Messenger.ALLOW_DB_CONN_FAIL.format(
                dbname=Default.RESTORING_TEMPLATE))

    # Regular expression which must match the backup's name
    regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
    regex = re.compile(regex)

    if re.match(regex, self.db_backup):
        # Store the parts of the backup's name (name, date, ext)
        parts = regex.search(self.db_backup).groups()
        # Store only the extension to know the type of file
        ext = parts[2]
    else:
        # stop_exe is expected to end the program here; otherwise "ext"
        # would be unbound below
        self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

    message = Messenger.BEGINNING_DB_RESTORER.format(
        db_backup=self.db_backup, new_dbname=self.new_dbname)
    self.logger.highlight('info', message, 'white')
    self.logger.info(Messenger.WAIT_PLEASE)

    # Build the shell pipeline which decompresses the backup (if needed)
    # and feeds it to pg_restore.
    # NOTE(review): the command is interpolated into a string and run with
    # shell=True -- file names with shell metacharacters would break it;
    # confirm the inputs are trusted
    if ext == 'gz':
        command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    elif ext == 'bz2':
        command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    elif ext == 'zip':
        command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                  '-d {}'.format(self.db_backup, self.connecter.user,
                                 self.connecter.server,
                                 self.connecter.port, self.new_dbname)
    else:
        # Plain dump: pg_restore reads the file directly
        command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
            self.connecter.user, self.connecter.server,
            self.connecter.port, self.new_dbname, self.db_backup)

    try:
        start_time = DateTools.get_current_datetime()
        # Make the restauration of the database
        result = subprocess.call(command, shell=True)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        # A non-zero exit status means the restore failed
        if result != 0:
            raise Exception()

        message = Messenger.RESTORE_DB_DONE.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname,
            diff=diff)
        self.logger.highlight('info', message, 'green')
        self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                              effect='bold')
    except Exception as e:
        self.logger.debug('Error en la función "restore_db_backup": '
                          '{}.'.format(str(e)))
        message = Messenger.RESTORE_DB_FAIL.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.stop_exe(message)
def backup_dbs(self, dbs_all):
    '''
    Target:
        - make a backup of some specified databases.
    Parameters:
        - dbs_all: names of the databases which are going to be backuped.
    '''
    self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

    # Create a new directory with the name of the group
    bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
    Dir.create_dir(bkps_dir, self.logger)
    self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

    self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

    if dbs_all:
        for db in dbs_all:
            dbname = db['datname']
            msg = Msg.PROCESSING_DB.format(dbname=dbname)
            self.logger.highlight('info', msg, 'cyan')

            # Let the user know whether the database connection is allowed
            if not db['datallowconn']:
                msg = Msg.FORBIDDEN_DB_CONNECTION.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow',
                                      effect='bold')
                success = False
            else:
                # Vaccum the database before the backup process if
                # necessary
                if self.vacuum:
                    self.logger.info(
                        Msg.PRE_VACUUMING_DB.format(dbname=dbname))
                    vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                        self.in_regex, self.in_priority,
                                        self.ex_dbs, self.ex_regex,
                                        self.ex_templates, self.db_owner,
                                        self.logger)
                    # Vacuum the database
                    success = vacuumer.vacuum_db(dbname)
                    if success:
                        msg = Msg.PRE_VACUUMING_DB_DONE.format(
                            dbname=dbname)
                        self.logger.info(msg)
                    else:
                        msg = Msg.PRE_VACUUMING_DB_FAIL.format(
                            dbname=dbname)
                        self.logger.highlight('warning', msg, 'yellow')

                self.logger.info(
                    Msg.BEGINNING_DB_BACKER.format(dbname=dbname))

                start_time = DateTools.get_current_datetime()
                # Make the backup of the database
                success = self.backup_db(dbname, bkps_dir)
                end_time = DateTools.get_current_datetime()
                # Get and show the process' duration
                diff = DateTools.get_diff_datetimes(start_time, end_time)

            if success:
                msg = Msg.DB_BACKER_DONE.format(dbname=dbname, diff=diff)
                self.logger.highlight('info', msg, 'green')
            else:
                msg = Msg.DB_BACKER_FAIL.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow',
                                      effect='bold')
    else:
        # No database matched the selection
        self.logger.highlight('warning', Msg.BACKER_HAS_NOTHING_TO_DO,
                              'yellow', effect='bold')

    self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
def backup_dbs(self, dbs_all):
    '''
    Target:
        - make a backup of some specified databases.
    Parameters:
        - dbs_all: names of the databases which are going to be backuped.
    '''
    self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

    # The destination directory is named after the group
    bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
    Dir.create_dir(bkps_dir, self.logger)
    self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

    self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

    if not dbs_all:
        # No database matched the selection: warn and finish
        self.logger.highlight('warning', Msg.BACKER_HAS_NOTHING_TO_DO,
                              'yellow', effect='bold')
    else:
        for database in dbs_all:
            dbname = database['datname']
            self.logger.highlight('info',
                                  Msg.PROCESSING_DB.format(dbname=dbname),
                                  'cyan')

            if not database['datallowconn']:
                # Connections to this database are forbidden: it cannot
                # be backed up, let the user know
                self.logger.highlight(
                    'warning',
                    Msg.FORBIDDEN_DB_CONNECTION.format(dbname=dbname),
                    'yellow', effect='bold')
                completed = False
            else:
                # Optionally vacuum the database right before its backup
                if self.vacuum:
                    self.logger.info(
                        Msg.PRE_VACUUMING_DB.format(dbname=dbname))
                    vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                        self.in_regex, self.in_priority,
                                        self.ex_dbs, self.ex_regex,
                                        self.ex_templates, self.db_owner,
                                        self.logger)
                    if vacuumer.vacuum_db(dbname):
                        self.logger.info(
                            Msg.PRE_VACUUMING_DB_DONE.format(
                                dbname=dbname))
                    else:
                        self.logger.highlight(
                            'warning',
                            Msg.PRE_VACUUMING_DB_FAIL.format(
                                dbname=dbname),
                            'yellow')

                self.logger.info(
                    Msg.BEGINNING_DB_BACKER.format(dbname=dbname))

                # Time the backup of this database
                t0 = DateTools.get_current_datetime()
                completed = self.backup_db(dbname, bkps_dir)
                t1 = DateTools.get_current_datetime()
                duration = DateTools.get_diff_datetimes(t0, t1)

            if completed:
                self.logger.highlight(
                    'info',
                    Msg.DB_BACKER_DONE.format(dbname=dbname,
                                              diff=duration),
                    'green')
            else:
                self.logger.highlight(
                    'warning', Msg.DB_BACKER_FAIL.format(dbname=dbname),
                    'yellow', effect='bold')

    self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')