示例#1
0
    def backup_all(self, bkps_dir):
        '''
        Target:
            - make a backup of a cluster.
        Parameters:
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        success = True
        # Get date and time of the zone
        init_ts = DateTools.get_date()
        # Get current year
        year = str(DateTools.get_year(init_ts))
        # Get current month
        month = str(DateTools.get_month(init_ts))
        # Create new directories with the year and the month of the backup
        bkp_dir = bkps_dir + year + '/' + month + '/'
        Dir.create_dir(bkp_dir, self.logger)

        # Set backup's name
        file_name = self.prefix + 'ht_' + self.connecter.server + \
            str(self.connecter.port) + '_cluster_' + init_ts + '.' + \
            self.bkp_type

        # Pipe segment for each backup type; an unknown type falls back to a
        # plain (uncompressed) dump. This replaces four near-identical
        # command-building branches.
        compressors = {
            'gz': ' | gzip',
            'bz2': ' | bzip2',
            'zip': ' | zip',
        }
        # NOTE(review): the command is built by string interpolation and run
        # with shell=True — injection-prone if any connection field were
        # untrusted; confirm these values always come from trusted config
        command = 'pg_dumpall -U {} -h {} -p {}{} > {}'.format(
            self.connecter.user, self.connecter.server,
            self.connecter.port, compressors.get(self.bkp_type, ''),
            bkp_dir + file_name)

        try:
            # Execute the command in console; non-zero exit status means the
            # dump (or its compressor) failed
            result = subprocess.call(command, shell=True)
            if result != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_all": {}.'.format(
                str(e)))
            success = False

        return success
示例#2
0
    def backup_all(self, bkps_dir):
        '''
        Target:
            - make a backup of a cluster.
        Parameters:
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        # Timestamp of the backup, reused for the directory layout and for
        # the backup file's name
        init_ts = DateTools.get_date()
        year = str(DateTools.get_year(init_ts))
        month = str(DateTools.get_month(init_ts))

        # Backups are laid out under <bkps_dir>/<year>/<month>/
        bkp_dir = '{}{}/{}/'.format(bkps_dir, year, month)
        Dir.create_dir(bkp_dir, self.logger)

        # Name of the resulting backup file
        file_name = '{}ht_{}{}_cluster_{}.{}'.format(
            self.prefix, self.connecter.server, str(self.connecter.port),
            init_ts, self.bkp_type)

        # Common dump invocation and destination path
        dump = 'pg_dumpall -U {} -h {} -p {}'.format(
            self.connecter.user, self.connecter.server, self.connecter.port)
        target = bkp_dir + file_name

        # Pick the command depending on the backup type
        if self.bkp_type == 'gz':  # Zip with gzip
            command = dump + ' | gzip > ' + target
        elif self.bkp_type == 'bz2':  # Zip with bzip2
            command = dump + ' | bzip2 > ' + target
        elif self.bkp_type == 'zip':  # Zip with zip
            command = dump + ' | zip > ' + target
        else:  # Do not zip
            command = dump + ' > ' + target

        success = True
        try:
            # Run the dump through the shell; a non-zero exit status means
            # the dump (or its compressor) failed
            if subprocess.call(command, shell=True) != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_all": {}.'.format(
                str(e)))
            success = False

        return success
示例#3
0
    def vacuum_dbs(self, vacuum_list):
        '''
        Target:
            - vacuum a group of PostgreSQL databases.
        Parameters:
            - vacuum_list: names of the databases which are going to be
              vacuumed.
        '''
        if vacuum_list:
            self.logger.highlight('info', Messenger.BEGINNING_VACUUMER,
                                  'white')

        for db in vacuum_list:

            dbname = db['datname']

            self.logger.highlight(
                'info', Messenger.PROCESSING_DB.format(dbname=dbname), 'cyan')

            if db['datallowconn']:
                # Time the vacuum of this database to report its duration
                start_time = DateTools.get_current_datetime()
                success = self.vacuum_db(dbname)
                end_time = DateTools.get_current_datetime()
                diff = DateTools.get_diff_datetimes(start_time, end_time)
            else:
                # Connections to this database are forbidden, so it cannot
                # be vacuumed
                self.logger.highlight(
                    'warning',
                    Messenger.FORBIDDEN_DB_CONNECTION.format(dbname=dbname),
                    'yellow', effect='bold')
                success = False

            if success:
                self.logger.highlight(
                    'info',
                    Messenger.DB_VACUUMER_DONE.format(dbname=dbname,
                                                      diff=diff),
                    'green')
            else:
                self.logger.highlight(
                    'warning',
                    Messenger.DB_VACUUMER_FAIL.format(dbname=dbname),
                    'yellow', effect='bold')

        self.logger.highlight('info', Messenger.VACUUMER_DONE, 'green',
                              effect='bold')
示例#4
0
    def alter_dbs_owner(self, alt_list):
        '''
        Target:
            - change the owner of a group of databases and their tables.
        Parameters:
            - alt_list: names of the databases which are going to be altered.
        '''
        self.logger.highlight('info', Msg.PROCESSING_ALTERER, 'white')

        if not alt_list:
            # Nothing to alter: warn and still show the final message
            self.logger.highlight('warning', Msg.ALTERER_HAS_NOTHING_TO_DO,
                                  'yellow', effect='bold')
            self.logger.highlight('info', Msg.ALTERER_DONE, 'green',
                                  effect='bold')
            return

        for db in alt_list:

            dbname = db['datname']
            self.logger.highlight('info',
                                  Msg.PROCESSING_DB.format(dbname=dbname),
                                  'cyan')

            # Change the owner of the database, timing the operation to
            # report its duration afterwards
            start_time = DateTools.get_current_datetime()
            success = self.alter_db_owner(db)
            end_time = DateTools.get_current_datetime()
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if success:
                self.logger.highlight(
                    'info',
                    Msg.DB_ALTERER_DONE.format(dbname=dbname, diff=diff),
                    'green')
            else:
                self.logger.highlight(
                    'warning', Msg.DB_ALTERER_FAIL.format(dbname=dbname),
                    'yellow', effect='bold')

        self.logger.highlight('info', Msg.ALTERER_DONE, 'green', effect='bold')
示例#5
0
    def backup_cl(self):
        '''
        Target:
            - vacuum if necessary and make a backup of a cluster.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Cluster backups live in a dedicated directory under the group's
        # path; create it if it does not exist yet
        bkps_dir = self.bkp_path + self.group + Default.CL_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        if self.vacuum:
            # Vacuum every database of the cluster before backing it up
            vacuumer = Vacuumer(connecter=self.connecter, logger=self.logger)
            dbs_all = vacuumer.connecter.get_pg_dbs_data(
                vacuumer.ex_templates, vacuumer.db_owner)
            vacuumer.vacuum_dbs(dbs_all)

        self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

        # Back up the whole cluster, timing the process
        start_time = DateTools.get_current_datetime()
        success = self.backup_all(bkps_dir)
        end_time = DateTools.get_current_datetime()
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        if not success:
            self.logger.highlight('warning', Msg.CL_BACKER_FAIL, 'yellow',
                                  effect='bold')
        else:
            self.logger.highlight('info',
                                  Msg.CL_BACKER_DONE.format(diff=diff),
                                  'green', effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
示例#6
0
    def backup_cl(self):
        '''
        Target:
            - vacuum if necessary and make a backup of a cluster.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Ensure the destination directory for the cluster's backups exists
        bkps_dir = self.bkp_path + self.group + Default.CL_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)
        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        if self.vacuum:
            # Optionally vacuum the cluster's databases beforehand
            vacuumer = Vacuumer(connecter=self.connecter, logger=self.logger)
            data = vacuumer.connecter.get_pg_dbs_data(vacuumer.ex_templates,
                                                      vacuumer.db_owner)
            vacuumer.vacuum_dbs(data)

        self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

        started = DateTools.get_current_datetime()
        ok = self.backup_all(bkps_dir)  # make the backup of the cluster
        finished = DateTools.get_current_datetime()
        # Duration of the backup process, shown on success
        elapsed = DateTools.get_diff_datetimes(started, finished)

        if ok:
            self.logger.highlight('info',
                                  Msg.CL_BACKER_DONE.format(diff=elapsed),
                                  'green', effect='bold')
        else:
            self.logger.highlight('warning', Msg.CL_BACKER_FAIL, 'yellow',
                                  effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green',
                              effect='bold')
示例#7
0
    def alter_dbs_owner(self, alt_list):
        '''
        Target:
            - change the owner of a group of databases and their tables.
        Parameters:
            - alt_list: names of the databases which are going to be altered.
        '''
        self.logger.highlight('info', Msg.PROCESSING_ALTERER, 'white')

        if not alt_list:
            # Empty list: there is nothing to do
            self.logger.highlight('warning', Msg.ALTERER_HAS_NOTHING_TO_DO,
                                  'yellow', effect='bold')
        else:
            for db in alt_list:

                dbname = db['datname']
                self.logger.highlight(
                    'info', Msg.PROCESSING_DB.format(dbname=dbname), 'cyan')

                # Change the owner of this database, timing the operation
                t0 = DateTools.get_current_datetime()
                changed = self.alter_db_owner(db)
                t1 = DateTools.get_current_datetime()
                elapsed = DateTools.get_diff_datetimes(t0, t1)

                if changed:
                    self.logger.highlight(
                        'info',
                        Msg.DB_ALTERER_DONE.format(dbname=dbname,
                                                   diff=elapsed),
                        'green')
                else:
                    self.logger.highlight(
                        'warning', Msg.DB_ALTERER_FAIL.format(dbname=dbname),
                        'yellow', effect='bold')

        self.logger.highlight('info', Msg.ALTERER_DONE, 'green', effect='bold')
    def replicate_pg_db(self):
        '''
        Target:
            - clone a specified database in PostgreSQL.
        '''
        try:
            # Abort if any backend is connected to the database which is
            # going to be cloned: the clone query would fail otherwise
            pg_pid = self.connecter.get_pid_str()
            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=self.original_dbname)
            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:
                msg = Msg.ACTIVE_CONNS_ERROR.format(
                    dbname=self.original_dbname)
                self.logger.stop_exe(msg)

            formatted_query_clone_pg_db = Queries.CLONE_PG_DB.format(
                dbname=self.new_dbname, original_dbname=self.original_dbname,
                user=self.connecter.user)

            msg = Msg.BEGINNING_REPLICATOR.format(
                original_dbname=self.original_dbname)
            self.logger.highlight('info', msg, 'white')

            # Get the database's "datallowconn" value
            datallowconn = self.connecter.get_datallowconn(
                self.original_dbname)

            # If datallowconn is allowed, change it temporarily
            if datallowconn:
                # Disallow connections to the database during the
                # process
                result = self.connecter.disallow_db_conn(self.original_dbname)
                if not result:
                    msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                        dbname=self.original_dbname)
                    self.logger.highlight('warning', msg, 'yellow')

            try:
                start_time = DateTools.get_current_datetime()
                # Replicate the database
                self.connecter.cursor.execute(formatted_query_clone_pg_db)
                end_time = DateTools.get_current_datetime()
            finally:
                # If datallowconn was allowed, leave it as it was — also on
                # failure of the clone query, so the original database is
                # never left with connections disallowed
                if datallowconn:
                    result = self.connecter.allow_db_conn(
                        self.original_dbname)
                    if not result:
                        msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                            dbname=self.original_dbname)
                        self.logger.highlight('warning', msg, 'yellow')

            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            msg = Msg.REPLICATE_DB_DONE.format(
                new_dbname=self.new_dbname,
                original_dbname=self.original_dbname, diff=diff)
            self.logger.highlight('info', msg, 'green')
            self.logger.highlight('info', Msg.REPLICATOR_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            # Report the error under this function's real name (it was
            # wrongly logged as "clone_pg_db")
            self.logger.debug('Error en la función "replicate_pg_db": '
                              '{}.'.format(str(e)))
            self.logger.stop_exe(Msg.REPLICATE_DB_FAIL)
示例#9
0
    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        # Regular expression which must match the backup's name
        regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.db_backup):
            # Store the parts of the backup's name (name, date, ext)
            parts = regex.search(self.db_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            # NOTE(review): stop_exe is expected to abort execution here,
            # otherwise "ext" would be undefined below — confirm
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_DB_RESTORER.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # Build the restore command: compressed backups are streamed into
        # pg_restore through the matching decompressor
        if ext == 'gz':
            command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'zip':
            command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        else:
            command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname, self.db_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Make the restauration of the database
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            # Non-zero exit status: pg_restore (or the decompressor) failed
            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_DB_DONE.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname,
                diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_DB_FAIL.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname)
            self.logger.stop_exe(message)
示例#10
0
    def restore_cluster_backup(self):
        '''
        Target:
            - restore a cluster's backup in PostgreSQL. The cluster must have
              been created before this process.
        '''
        # Regular expression which must match the backup's name
        regex = r'.*ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.cluster_backup):
            # Store the parts of the backup's name (servername, date, ext)
            parts = regex.search(self.cluster_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            # Abort on an unrecognized backup name. The original line was a
            # bare no-op expression, so execution continued and "ext" raised
            # a NameError below; now it stops like restore_db_backup does.
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_CL_RESTORER.format(
            cluster_backup=self.cluster_backup)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # TODO: make dissappear every line about the operation shown in console
        if ext == 'gz':
            command = 'gunzip -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'zip':
            command = 'unzip -p {} | psql postgres -U {} -h {} -p {}'.format(
                self.cluster_backup, self.connecter.user,
                self.connecter.server, self.connecter.port)
        else:
            command = 'psql postgres -U {} -h {} -p {} < {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.cluster_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Make the restauration of the cluster
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            # Non-zero exit status: psql (or the decompressor) failed
            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_CL_DONE.format(
                cluster_backup=self.cluster_backup, diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_cluster_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_CL_FAIL.format(
                cluster_backup=self.cluster_backup)
            self.logger.stop_exe(message)
示例#11
0
    def drop_pg_db(self, dbname, pg_superuser):
        '''
        Target:
            - remove a database in PostgreSQL.
        Parameters:
            - dbname: the PostgreSQL database's name which is going to be
              removed.
            - pg_superuser: a flag which indicates whether the current user is
              PostgreSQL superuser or not.
        '''
        # Flag set once the permission checks allow the drop to proceed
        delete = False

        try:
            # First make sure the target database actually exists
            self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, ))
            result = self.connecter.cursor.fetchone()

            if result:

                # Look for backends (other than this one) connected to the
                # target database; an active connection prevents the drop
                pg_pid = self.connecter.get_pid_str()
                formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                    pg_pid=pg_pid, target_db=dbname)

                self.connecter.cursor.execute(formatted_sql)
                result = self.connecter.cursor.fetchone()

                # If there are not any connections to the target database...
                if not result:

                    # Users who are not superusers will only be able to drop
                    # the databases they own
                    if not pg_superuser:

                        self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER,
                                                      (dbname, ))
                        db = self.connecter.cursor.fetchone()

                        if db['owner'] != self.connecter.user:

                            # Current user does not own the database: refuse
                            msg = Msg.DROP_DB_NOT_ALLOWED.format(
                                user=self.connecter.user, dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                        else:
                            delete = True

                    else:
                        delete = True

                    if delete:

                        # Get the database's "datallowconn" value
                        datallowconn = self.connecter.get_datallowconn(dbname)

                        # If datallowconn is allowed, change it temporarily
                        if datallowconn:
                            # Disallow connections to the database during the
                            # process
                            result = self.connecter.disallow_db_conn(dbname)
                            if not result:
                                msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                                    dbname=dbname)
                                self.logger.highlight('warning', msg, 'yellow')

                        # DROP DATABASE cannot take bind parameters, so the
                        # query is built with the database's name interpolated
                        fmt_query_drop_db = Queries.DROP_PG_DB.format(
                            dbname=dbname)

                        start_time = DateTools.get_current_datetime()
                        # Drop the database
                        self.connecter.cursor.execute(fmt_query_drop_db)
                        end_time = DateTools.get_current_datetime()
                        # Get and show the process' duration
                        diff = DateTools.get_diff_datetimes(start_time,
                                                            end_time)
                        msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff)
                        self.logger.highlight('info', msg, 'green')

                        # If datallowconn was allowed, leave it as it was
                        if datallowconn:
                            # Allow connections to the database at the end of
                            # the process
                            result = self.connecter.allow_db_conn(dbname)
                            if not result:
                                msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                                    dbname=dbname)
                                self.logger.highlight('warning', msg, 'yellow')

                else:
                    # Someone is connected to the database: cannot drop it
                    msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

            else:
                # Nothing to drop: the database does not exist
                msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')

        except Exception as e:
            # Any failure during the drop is logged and reported as a
            # warning; execution continues
            self.logger.debug('Error en la función "drop_pg_db": '
                              '{}.'.format(str(e)))
            self.logger.highlight('warning', Msg.DROP_DB_FAIL.format(
                dbname=dbname), 'yellow')
示例#12
0
    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        # Recreate the target database as a clone of the restoring template
        replicator = Replicator(self.connecter, self.new_dbname,
                                Default.RESTORING_TEMPLATE, self.logger)
        result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
        if not result:
            self.logger.stop_exe(
                Messenger.ALLOW_DB_CONN_FAIL.format(
                    dbname=Default.RESTORING_TEMPLATE))
        else:
            replicator.replicate_pg_db()
            self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)

        # The backup's name must match this pattern to be restorable
        pattern = re.compile(
            r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$')

        if re.match(pattern, self.db_backup):
            # Keep only the extension to know the type of file
            ext = pattern.search(self.db_backup).groups()[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        self.logger.highlight(
            'info',
            Messenger.BEGINNING_DB_RESTORER.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname),
            'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # Decompressor stage of the pipeline, if the backup is compressed
        if ext == 'gz':
            uncompress = 'gunzip -c {} -k'.format(self.db_backup)
        elif ext == 'bz2':
            uncompress = 'bunzip2 -c {} -k'.format(self.db_backup)
        elif ext == 'zip':
            uncompress = 'unzip -p {}'.format(self.db_backup)
        else:
            uncompress = None

        if uncompress:
            # Stream the decompressed backup into pg_restore
            command = '{} | pg_restore -U {} -h {} -p {} -d {}'.format(
                uncompress, self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname)
        else:
            # Plain dump: pg_restore reads the file directly
            command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname, self.db_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Make the restauration of the database
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            self.logger.highlight(
                'info',
                Messenger.RESTORE_DB_DONE.format(db_backup=self.db_backup,
                                                 new_dbname=self.new_dbname,
                                                 diff=diff),
                'green')
            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            self.logger.stop_exe(
                Messenger.RESTORE_DB_FAIL.format(db_backup=self.db_backup,
                                                 new_dbname=self.new_dbname))
示例#13
0
    def drop_pg_db(self, dbname, pg_superuser):
        '''
        Target:
            - remove a database in PostgreSQL.
        Parameters:
            - dbname: the PostgreSQL database's name which is going to be
              removed.
            - pg_superuser: a flag which indicates whether the current user is
              PostgreSQL superuser or not.
        '''
        # Whether the current user is entitled to drop this database
        delete = False

        try:
            # First make sure the target database actually exists
            self.connecter.cursor.execute(Queries.PG_DB_EXISTS, (dbname, ))
            result = self.connecter.cursor.fetchone()

            if result:

                # Look for backends (other than this session) connected to
                # the target database: DROP DATABASE fails with active
                # connections
                pg_pid = self.connecter.get_pid_str()
                # NOTE(review): dbname is interpolated into SQL via
                # str.format — confirm it is validated/escaped upstream
                formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                    pg_pid=pg_pid, target_db=dbname)

                self.connecter.cursor.execute(formatted_sql)
                result = self.connecter.cursor.fetchone()

                # If there are not any connections to the target database...
                if not result:

                    # Users who are not superusers will only be able to drop
                    # the databases they own
                    if not pg_superuser:

                        self.connecter.cursor.execute(Queries.GET_PG_DB_OWNER,
                                                      (dbname, ))
                        db = self.connecter.cursor.fetchone()

                        if db['owner'] != self.connecter.user:

                            # Not the owner: warn and leave "delete" False
                            msg = Msg.DROP_DB_NOT_ALLOWED.format(
                                user=self.connecter.user, dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                        else:
                            delete = True

                    else:
                        delete = True

                    if delete:

                        # Get the database's "datallowconn" value
                        datallowconn = self.connecter.get_datallowconn(dbname)

                        # If datallowconn is allowed, change it temporarily
                        if datallowconn:
                            # Disallow connections to the database during the
                            # process
                            result = self.connecter.disallow_db_conn(dbname)
                            if not result:
                                msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                                    dbname=dbname)
                                self.logger.highlight('warning', msg, 'yellow')

                        fmt_query_drop_db = Queries.DROP_PG_DB.format(
                            dbname=dbname)

                        start_time = DateTools.get_current_datetime()
                        # Drop the database
                        self.connecter.cursor.execute(fmt_query_drop_db)
                        end_time = DateTools.get_current_datetime()
                        # Get and show the process' duration
                        diff = DateTools.get_diff_datetimes(
                            start_time, end_time)
                        msg = Msg.DROP_DB_DONE.format(dbname=dbname, diff=diff)
                        self.logger.highlight('info', msg, 'green')

                        # If datallowconn was allowed, leave it as it was
                        if datallowconn:
                            # Allow connections to the database at the end of
                            # the process
                            result = self.connecter.allow_db_conn(dbname)
                            if not result:
                                msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                                    dbname=dbname)
                                self.logger.highlight('warning', msg, 'yellow')

                else:
                    # Active connections found: refuse to drop the database
                    msg = Msg.ACTIVE_CONNS_ERROR.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

            else:
                # The database does not exist: nothing to drop
                msg = Msg.DB_DOES_NOT_EXIST.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')

        except Exception as e:
            # Log the low-level error, then warn the user the drop failed
            self.logger.debug('Error en la función "drop_pg_db": '
                              '{}.'.format(str(e)))
            self.logger.highlight('warning',
                                  Msg.DROP_DB_FAIL.format(dbname=dbname),
                                  'yellow')
示例#14
0
    def backup_dbs(self, dbs_all):
        '''
        Target:
            - make a backup of some specified databases.
        Parameters:
            - dbs_all: names of the databases which are going to be backuped.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Create a new directory with the name of the group
        bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

        if dbs_all:
            for db in dbs_all:

                dbname = db['datname']
                msg = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', msg, 'cyan')

                # Let the user know whether the database connection is allowed
                if not db['datallowconn']:
                    msg = Msg.FORBIDDEN_DB_CONNECTION.format(dbname=dbname)
                    self.logger.highlight('warning',
                                          msg,
                                          'yellow',
                                          effect='bold')
                    # "diff" is not set in this branch; the failure message
                    # below does not use it
                    success = False

                else:
                    # Vaccum the database before the backup process if
                    # necessary
                    if self.vacuum:
                        self.logger.info(
                            Msg.PRE_VACUUMING_DB.format(dbname=dbname))
                        vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                            self.in_regex, self.in_priority,
                                            self.ex_dbs, self.ex_regex,
                                            self.ex_templates, self.db_owner,
                                            self.logger)

                        # Vacuum the database
                        success = vacuumer.vacuum_db(dbname)
                        if success:
                            msg = Msg.PRE_VACUUMING_DB_DONE.format(
                                dbname=dbname)
                            self.logger.info(msg)
                        else:
                            # Vacuum failure is only warned about; the backup
                            # itself still proceeds below
                            msg = Msg.PRE_VACUUMING_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                    self.logger.info(
                        Msg.BEGINNING_DB_BACKER.format(dbname=dbname))

                    start_time = DateTools.get_current_datetime()
                    # Make the backup of the database
                    success = self.backup_db(dbname, bkps_dir)
                    end_time = DateTools.get_current_datetime()
                    # Get and show the process' duration
                    diff = DateTools.get_diff_datetimes(start_time, end_time)

                # Report the per-database outcome
                if success:
                    msg = Msg.DB_BACKER_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')
                else:
                    msg = Msg.DB_BACKER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning',
                                          msg,
                                          'yellow',
                                          effect='bold')
        else:
            # No databases matched the selection criteria
            self.logger.highlight('warning',
                                  Msg.BACKER_HAS_NOTHING_TO_DO,
                                  'yellow',
                                  effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
示例#15
0
    def replicate_pg_db(self):
        '''
        Target:
            - clone a specified database in PostgreSQL.
        '''
        try:
            # Check for backends connected to the database to be cloned:
            # creating a database from a template fails with active sessions
            pg_pid = self.connecter.get_pid_str()
            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=self.original_dbname)
            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:
                # stop_exe is expected to abort execution here — otherwise
                # the clone below would proceed despite active connections
                msg = Msg.ACTIVE_CONNS_ERROR.format(
                    dbname=self.original_dbname)
                self.logger.stop_exe(msg)

            formatted_query_clone_pg_db = Queries.CLONE_PG_DB.format(
                dbname=self.new_dbname,
                original_dbname=self.original_dbname,
                user=self.connecter.user)

            msg = Msg.BEGINNING_REPLICATOR.format(
                original_dbname=self.original_dbname)
            self.logger.highlight('info', msg, 'white')

            # Get the database's "datallowconn" value
            datallowconn = self.connecter.get_datallowconn(
                self.original_dbname)

            # If datallowconn is allowed, change it temporarily
            if datallowconn:
                # Disallow connections to the database during the
                # process
                result = self.connecter.disallow_db_conn(self.original_dbname)
                if not result:
                    msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(
                        dbname=self.original_dbname)
                    self.logger.highlight('warning', msg, 'yellow')

            # self.connecter.cursor.execute('commit')
            start_time = DateTools.get_current_datetime()
            # Replicate the database
            self.connecter.cursor.execute(formatted_query_clone_pg_db)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            # If datallowconn was allowed, leave it as it was
            if datallowconn:
                # Allow connections to the database at the end of
                # the process
                result = self.connecter.allow_db_conn(self.original_dbname)
                if not result:
                    msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(
                        dbname=self.original_dbname)
                    self.logger.highlight('warning', msg, 'yellow')

            msg = Msg.REPLICATE_DB_DONE.format(
                new_dbname=self.new_dbname,
                original_dbname=self.original_dbname,
                diff=diff)
            self.logger.highlight('info', msg, 'green')
            self.logger.highlight('info',
                                  Msg.REPLICATOR_DONE,
                                  'green',
                                  effect='bold')

        except Exception as e:
            # NOTE(review): the debug message names "clone_pg_db" but this
            # function is "replicate_pg_db" — confirm which name is intended
            self.logger.debug('Error en la función "clone_pg_db": '
                              '{}.'.format(str(e)))
            self.logger.stop_exe(Msg.REPLICATE_DB_FAIL)
示例#16
0
    def trim_cluster(self, ht_bkps_list):
        '''
        Target:
            - remove (if necessary) some cluster's backups, taking into
              account some parameters in the following order: minimum number of
              backups to keep > obsolete backups.
        Parameters:
            - ht_bkps_list: list of backups of a cluster to analyse and trim.
        '''
        if self.exp_days == -1:  # No expiration date
            x_days_ago = None
        else:
            # POSIX timestamp of the expiration threshold: files with a
            # change time older than this are candidates for deletion
            x_days_ago = time.time() - (60 * 60 * 24 * self.exp_days)

        # Store the total number of backups of the cluster
        num_bkps = len(ht_bkps_list)
        # Clone the list to avoid conflict errors when removing
        ht_bkps_lt = ht_bkps_list[:]

        # Whether at least one backup file was deleted in this run
        unlinked = False

        self.logger.highlight('info', Messenger.BEGINNING_CL_TRIMMER, 'white')

        start_time = DateTools.get_current_datetime()

        for f in ht_bkps_list:

            # Break if number of backups do not exceed the minimum
            if num_bkps <= self.min_n_bkps:
                break

            file_info = os.stat(f)

            # Obsolete backup
            # NOTE(review): st_ctime is inode-change time on Unix, not
            # creation time — confirm this matches the intended "backup age"
            if x_days_ago and file_info.st_ctime < x_days_ago:

                self.logger.info(Messenger.DELETING_OBSOLETE_BACKUP % f)
                os.unlink(f)  # Remove backup's file
                unlinked = True
                # Update the number of backups of the database
                num_bkps -= 1
                ht_bkps_lt.remove(f)  # Update the list of cluster's backups

        end_time = DateTools.get_current_datetime()

        # Get total size of the backups in Bytes (surviving files only)
        tsize = Dir.get_files_tsize(ht_bkps_lt)
        # Get total size of the backups in the selected unit of measure
        tsize_unit = ceil(tsize / self.equivalence)

        ## UNCOMMENT NEXT SECTION TO PROCEDURE WITH THE BACKUP'S DELETION IF
        ## THEIR TOTAL SIZE EXCEEDS THE SPECIFIED MAXIMUM SIZE

        #ht_bkps_list = ht_bkps_lt[:]

        #for f in ht_bkps_list:
            ## If there are less backups than the minimum required...
            #if num_bkps <= self.min_n_bkps:
                #break
            #if tsize <= self.max_size_bytes:
                #break
            #else:
                #file_info = os.stat(f)
                #self.logger.info('Tamaño de copias de seguridad en disco '
                                 #'mayor que {} {}: eliminando el archivo '
                                 #'{}...' % (self.max_size['size'],
                                            #self.max_size['unit'], f))
                #os.unlink(f)  # Remove backup's file
                #unlinked = True
                ## Update the number of backups of the cluster
                #num_bkps -= 1
                ## ht_bkps_lt.remove(f)  # Update the list of cluster's backups
                #tsize -= file_info.st_size  # Update total size after deletion

        if not unlinked:

            # Nothing was old enough (or the minimum kept us from deleting)
            message = Messenger.NO_CL_BACKUP_DELETED
            self.logger.highlight('warning', message, 'yellow')

        if tsize > self.max_size_bytes:  # Total size exceeds the maximum

            # Only warn: the size-based deletion above is disabled by default
            message = Messenger.CL_BKPS_SIZE_EXCEEDED.format(
                tsize_unit=tsize_unit, size=self.max_size['size'],
                unit=self.max_size['unit'])
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)
        self.logger.highlight('info', Messenger.CL_TRIMMER_DONE.format(
            diff=diff), 'green')
示例#17
0
    def __init__(self, log_dir=None, level=None, mute=False, police=0):
        '''
        Target:
            - create a logger to store the activity of the program.
        Parameters:
            - log_dir: directory where log files are stored. Defaults to a
              "log/" directory next to the program's main directory.
            - level: minimum verbosity level for the log file; must be one of
              Default.LOG_LEVELS, otherwise Default.LOG_LEVEL is used.
            - mute: if True (boolean or boolean-like string), no log files
              are written — console output only.
            - police: flag stored on the instance for later use.
        '''
        if log_dir:
            self.log_dir = log_dir
        else:
            # Get script's directory
            script_dir = os.path.dirname(os.path.realpath(__file__))
            # Get program's main directory
            script_pardir = os.path.abspath(os.path.join(script_dir,
                                                         os.pardir))
            # Get directory to store log files
            self.log_dir = os.path.join(script_pardir, 'log/')

        if level in Default.LOG_LEVELS:
            self.level = level
        else:
            self.level = Default.LOG_LEVEL

        # Accept a real boolean or a boolean-like string for "mute"
        if isinstance(mute, bool):
            self.mute = mute
        elif Checker.str_is_bool(mute):
            self.mute = Casting.str_to_bool(mute)
        else:
            self.mute = Default.MUTE

        # Get current date and time of the zone
        init_ts = DateTools.get_date()
        # Get file's name
        script_filename = os.path.basename(sys.argv[0])
        # Get main program's name
        script_name = os.path.splitext(script_filename)[0]
        # Set format for the log files' names
        log_name = script_name + '_' + init_ts + '.log'
        # Set absolute path for log files
        self.log_file = os.path.join(self.log_dir, log_name)
        # Create logger with the main program's name
        self.logger = logging.getLogger(script_name)
        # Set DEBUG as maximum verbosity level
        self.logger.setLevel(logging.DEBUG)
        # Create a handler for console
        ch = logging.StreamHandler()
        # Set the verbosity of console handler to "INFO"
        ch.setLevel(logging.INFO)
        # Create a format for console and file logs
        formatter = logging.Formatter('%(asctime)s - PID %(process)d - '
                                      '%(levelname)-4s - %(message)s',
                                      datefmt='%Y.%m.%d_%H:%M:%S_%Z')
        if self.mute is False:  # If log files are required...
            # Create directory for log files if it does not exist yet
            if not os.path.exists(self.log_dir):
                os.makedirs(self.log_dir)
            # Set size for each log file
            max_bytes = 4 * 1024 * 1024  # 4MiB
            # Create a file handler
            fh = CustomRotatingFileHandler(self.log_file, 'a', max_bytes, 10)
            # Map the selected level name to its logging constant instead of
            # the original chain of five sequential "if" statements
            levels = {
                'debug': logging.DEBUG,
                'info': logging.INFO,
                'warning': logging.WARNING,
                'error': logging.ERROR,
                'critical': logging.CRITICAL,
            }
            # Set the verbosity of the FILE handler to the selected level
            # (the original comment wrongly said "console handler")
            if self.level in levels:
                fh.setLevel(levels[self.level])

            fh.setFormatter(formatter)  # Set format for file handler

        ch.setFormatter(formatter)  # Set format for console handler
        self.logger.addHandler(ch)  # Add console handler

        if self.mute is False:  # If log files are required...
            self.logger.addHandler(fh)  # Add file handler

        self.police = police
示例#18
0
    def restore_cluster_backup(self):
        '''
        Target:
            - restore a cluster's backup in PostgreSQL. The cluster must have
              been created before this process.
        '''
        # Regular expression which must match the backup's name
        regex = r'.*ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        # Match once and reuse the result (the original matched twice, with
        # re.match and then regex.search)
        match = regex.match(self.cluster_backup)
        if match:
            # Store the parts of the backup's name (servername, date, ext)
            parts = match.groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            # BUG FIX: the original evaluated Messenger.NO_BACKUP_FORMAT as a
            # bare expression (a no-op) and fell through with "ext" undefined,
            # which raised a NameError below. Halt execution instead, as the
            # sibling restore methods do on fatal errors.
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_CL_RESTORER.format(
            cluster_backup=self.cluster_backup)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # Build the console command depending on the backup's compression
        # TODO: make dissappear every line about the operation shown in console
        if ext == 'gz':
            command = 'gunzip -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'zip':
            command = 'unzip -p {} | psql postgres -U {} -h {} -p {}'.format(
                self.cluster_backup, self.connecter.user,
                self.connecter.server, self.connecter.port)
        else:
            command = 'psql postgres -U {} -h {} -p {} < {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.cluster_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Make the restauration of the cluster
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            # A non-zero exit status from the console command means failure
            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_CL_DONE.format(
                cluster_backup=self.cluster_backup, diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info',
                                  Messenger.RESTORER_DONE,
                                  'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_cluster_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_CL_FAIL.format(
                cluster_backup=self.cluster_backup)
            self.logger.stop_exe(message)
示例#19
0
    def send_mail(self, detected_level):
        '''
        Target:
            - send an email to the specified email addresses.
        Parameters:
            - detected_level: highest severity detected during the operation;
              selects which result template (OP_RESULTS) is sent.
        '''
        message = Messenger.BEGINNING_MAILER
        self.logger.highlight('info', message, 'white')

        # Get current date
        date = DateTools.get_date(fmt='%d-%m-%Y')
        time = DateTools.get_date(fmt='%H:%M:%S')
        zone = DateTools.get_date(fmt='%Z')

        # Get server name and IP addresses data
        server = IpAddress.get_hostname(self.logger)

        internal_ips = ''
        netifaces = IpAddress.get_netifaces_ips(self.logger)
        if netifaces:
            last_index = len(netifaces) - 1
            # BUG FIX: the loop now lives inside the guard — the original
            # iterated even when "netifaces" was None, raising a TypeError
            for index, netiface in enumerate(netifaces):
                internal_ips += '{} > {}'.format(netiface['netiface'],
                                                 netiface['ip'])
                if index != last_index:
                    internal_ips += ', '

        # Email full info template, for: John Doe <*****@*****.**>
        ADDR_TMPLT = '{} <{}>'

        # Sender and recipients email addresses (needed for sending the email)
        # ("info" instead of "dict" to avoid shadowing the builtin)
        from_email_str = self.from_info['email']
        to_emails_list = [info['email'] for info in self.to_infos]
        cc_emails_list = [info['email'] for info in self.cc_infos]
        bcc_emails_list = [info['email'] for info in self.bcc_infos]
        all_emails_list = to_emails_list + cc_emails_list + bcc_emails_list

        # Sender and recipients full info (used in email message header)
        from_info_str = ADDR_TMPLT.format(self.from_info['name'],
                                          self.from_info['email'])
        to_infos_str = ', '.join(ADDR_TMPLT.format(
            info['name'], info['email']) for info in self.to_infos)
        cc_infos_str = ', '.join(ADDR_TMPLT.format(
            info['name'], info['email']) for info in self.cc_infos)

        # Specifying an alternative mail in case the receiver does not have a
        # mail server with HTML

        html = self.OP_RESULTS[detected_level].format(
            op_type=self.OP_TYPES[self.op_type], server_tag=self.server_tag,
            date=date, time=time, zone=zone, server=server,
            internal_ips=internal_ips, external_ip=self.external_ip,
            group=self.group, bkp_path=self.bkp_path,
            log_file=str(self.logger.log_file))

        text = self.OP_RESULTS_NO_HTML[detected_level].format(
            op_type=self.OP_TYPES[self.op_type], server_tag=self.server_tag,
            date=date, time=time, zone=zone, server=server,
            internal_ips=internal_ips, external_ip=self.external_ip,
            group=self.group, bkp_path=self.bkp_path,
            log_file=str(self.logger.log_file))

        # Specifying other email data (used in email message header)
        mail = MIMEMultipart('alternative')
        mail['From'] = from_info_str
        mail['To'] = to_infos_str
        mail['Cc'] = cc_infos_str
        mail['Subject'] = '[INFO] {op_type} results'.format(
            op_type=self.OP_TYPES[self.op_type].upper())

        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text, 'plain')
        part2 = MIMEText(html, 'html')

        # Attach parts into message container. According to RFC 2046, the last
        # part of a multipart message, in this case the HTML message, is best
        # and preferred.
        mail.attach(part1)
        mail.attach(part2)

        msg_full = mail.as_string().encode()

        if all_emails_list:

            for email in all_emails_list:
                self.logger.info(Messenger.MAIL_DESTINY.format(email=email))

            # Sending the mail
            try:
                server = smtplib.SMTP('smtp.gmail.com:587')
                server.starttls()
                server.login(self.from_info['email'], self.from_info['pwd'])
                server.sendmail(from_email_str, all_emails_list, msg_full)
                server.quit()

            except smtplib.SMTPException as e:
                message = Messenger.SEND_MAIL_FAIL
                self.logger.highlight('info', message, 'yellow')
                self.logger.debug('Error en la función "send_mail": '
                                  '{}'.format(str(e)))

        else:
            # No recipients configured: nothing to send
            message = Messenger.MAILER_HAS_NOTHING_TO_DO
            self.logger.highlight('info', message, 'yellow')

        message = Messenger.SEND_MAIL_DONE
        self.logger.highlight('info', message, 'green')
示例#20
0
    def backup_dbs(self, dbs_all):
        '''
        Target:
            - make a backup of some specified databases.
        Parameters:
            - dbs_all: names of the databases which are going to be backuped.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Build and ensure the destination directory for this group's backups
        bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

        if not dbs_all:
            # Nothing matched the selection criteria
            self.logger.highlight('warning', Msg.BACKER_HAS_NOTHING_TO_DO,
                                  'yellow', effect='bold')
        else:
            for database in dbs_all:

                dbname = database['datname']
                message = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', message, 'cyan')

                if database['datallowconn']:
                    # Optionally vacuum the database before backing it up
                    if self.vacuum:
                        self.logger.info(Msg.PRE_VACUUMING_DB.format(
                            dbname=dbname))
                        vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                            self.in_regex, self.in_priority,
                                            self.ex_dbs, self.ex_regex,
                                            self.ex_templates, self.db_owner,
                                            self.logger)

                        # Run the vacuum and report its outcome; the backup
                        # proceeds regardless
                        ok = vacuumer.vacuum_db(dbname)
                        if ok:
                            message = Msg.PRE_VACUUMING_DB_DONE.format(
                                dbname=dbname)
                            self.logger.info(message)
                        else:
                            message = Msg.PRE_VACUUMING_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', message,
                                                  'yellow')

                    self.logger.info(Msg.BEGINNING_DB_BACKER.format(
                        dbname=dbname))

                    # Back up the database, timing the operation
                    started = DateTools.get_current_datetime()
                    ok = self.backup_db(dbname, bkps_dir)
                    finished = DateTools.get_current_datetime()
                    duration = DateTools.get_diff_datetimes(started, finished)
                else:
                    # Connections to this database are forbidden, so it
                    # cannot be backed up
                    message = Msg.FORBIDDEN_DB_CONNECTION.format(
                        dbname=dbname)
                    self.logger.highlight('warning', message, 'yellow',
                                          effect='bold')
                    ok = False

                # Report the per-database outcome
                if ok:
                    message = Msg.DB_BACKER_DONE.format(dbname=dbname,
                                                        diff=duration)
                    self.logger.highlight('info', message, 'green')
                else:
                    message = Msg.DB_BACKER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', message, 'yellow',
                                          effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')