Example #1
class Scraper:
	def __init__(self, output_path):
		self.output_path = output_path
		self.logger = Logger(out_path=self.output_path)
		self.all_urls = set()
		self.result = []

	def collect_urls(self):
		try:
			specialities = get_speciality_data()
			state_code_data = get_state_code_data()
			for speciality_key, code in specialities.items():
				for item in state_code_data:
					furl = con_scraper.SPECIALITY_URL.format(code, item[0], item[1])
					self.all_urls.add(furl)
			print(len(self.all_urls))
		except Exception as ex:
			traceback.print_exc()
			raise

	def collect_data(self):
		res = open("all_result_2.csv","a")
		req_obj = get_new_tr_obj()
		cookies = parseCookieFile(cookiefile=os.path.join(os.getcwd(), "static_input", "cookies.txt" ))
		try:
			for url in self.all_urls:
				# req_obj = get_new_tr_obj()
				try:
					req = req_obj.get(url, timeout=30)
					if req.status_code == 200:
						try:
							soup = BeautifulSoup(req.content, "html.parser")
							trs = soup.find_all("tr")
							for row in range(1,len(trs)):
								try:
									each_data = trs[row].find_all("td")
									specialty_name = each_data[1].text.strip() if len(each_data) > 1 and each_data[1] else None
									location = each_data[2].text.strip().replace(",", " ") if len(each_data) > 2 and each_data[2] else None
									doc_name = each_data[0].text
									doc_url = each_data[0].find("a")
									doc_url = doc_url["href"] if doc_url else ""
									actual_doc_url = con_scraper.BASE_URL + doc_url
								except Exception as ex:
									traceback.print_exc()
								else:
									if actual_doc_url != "https://doctorfinder.ama-assn.org/doctorfinder/":
										self.logger.info( "{}, {}".format(specialty_name.strip(), doc_name.strip().replace("\n","")))
										fdata = "|".join([ str(specialty_name).strip().replace("\n",""), str(location).strip().replace("\n",""), str(doc_name).strip().replace("\n",""), actual_doc_url.strip() ])+"\n"
										# self.result.append(fdata)
										print( "{}, {}".format(specialty_name.strip(), doc_name.strip().replace("\n","")))

										res.write(fdata)
						except Exception as ex:
							traceback.print_exc()
							# raise ex
				except Exception:
					pass  # skip URLs that fail to download or parse
			res.close()
		except Exception as ex:
			traceback.print_exc()
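
The hand-rolled '|'.join and bare file handle above can be tightened with the csv module, which also quotes fields that themselves contain the delimiter; a minimal sketch (field values are illustrative):

import csv

row = ['Cardiology', 'Chicago IL', 'Dr. Example', 'https://example.org/doc/1']
with open('all_result_2.csv', 'a', newline='') as res:
    writer = csv.writer(res, delimiter='|')
    writer.writerow(row)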
Example #2
    def on_line_received(self, payload):
        if payload and 'type' in payload:
            if payload['type'] == 'user_input':
                Logger.trace(
                    "fetch input_queue from main, from network process to screen: (%s)",
                    payload, 'client_protocol')
                user_input = UserInputDataUnit(payload['content']).get_object()
                user_id = payload['user']['id']
                self.game.apply_user_input_to_player(user_id, user_input)

            elif payload['type'] == 'authentication':
                Logger.info("Authentication (%s)", payload, 'client_protocol')
                self.localPlayerId = payload['user']['id']
                self.game.add_player(self.localPlayerId)

            elif payload['type'] == 'game_state_initial':
                Logger.info("Game state initial (%s)", payload,
                            'client_protocol')
                for user_pdu in GameDataUnit(
                        payload['content']).get_players_pdu():
                    if not user_pdu.get_id() == self.localPlayerId:
                        player = self.game.add_player(user_pdu.get_id())
                        player.set_position(user_pdu.get_position())

            elif payload['type'] == 'game_state':
                Logger.info("Game state (%s)", payload, 'client_protocol')
                for user_pdu in GameDataUnit(
                        payload['content']).get_players_pdu():
                    player = self.game.get_player(user_pdu.get_id())
                    player.set_position(user_pdu.get_position())

            elif payload['type'] == 'new_connection':
                Logger.info("User new connection (%s)", payload,
                            'client_protocol')
                user_id = payload['user']['id']
                self.game.add_player(user_id)

            elif payload['type'] == 'lost_connection':
                Logger.info("User lost connection (%s)", payload,
                            'client_protocol')
                user_id = payload['id']
                self.game.remove_player(user_id)

            else:
                Logger.error('Unknown payload type: (%s)', payload['type'],
                             'client_protocol')

        else:
            Logger.error('Payload not defined or "type" key not defined: %s',
                         payload, 'client_protocol')
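
For reference, hypothetical payloads for a few of the branches above; the structure is inferred purely from the handler code, not from a protocol specification:

# Hypothetical payloads matching the dispatch above (inferred, not specified).
auth_payload = {
    'type': 'authentication',
    'user': {'id': 42},
}
input_payload = {
    'type': 'user_input',
    'user': {'id': 42},
    'content': '...',  # serialized UserInputDataUnit; format not shown here
}
lost_payload = {
    'type': 'lost_connection',
    'id': 42,  # note: top-level 'id' here, unlike the other branches
}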
Example #3
    def get_filtered_dbnames(dbs_all, in_dbs=[], ex_dbs=[], in_regex='',
                             ex_regex='', in_priority=False, logger=None):
        '''
        Target:
            - filter a list of databases' names taking into account inclusion
              and exclusion parameters and their priority.
        Parameters:
            - dbs_all: list to filter.
            - in_dbs: list with the databases' names to include.
            - ex_dbs: list with the databases' names to exclude.
            - in_regex: regular expression which indicates the databases' names
              to include.
            - ex_regex: regular expression which indicates the databases' names
              to exclude.
            - in_priority: a flag which determines whether the inclusion
              parameters take precedence over the exclusion ones.
            - logger: a logger to show and log some messages.
        Return:
            - a filtered list (subset of "dbs_all").
        '''
        if not logger:
            logger = Logger()

        bkp_list = []

        if in_priority:  # If inclusion is over exclusion
            # Apply exclusion first and then inclusion
            bkp_list = DbSelector.dbname_filter_exclude(dbs_all, ex_dbs,
                                                        ex_regex, logger)
            bkp_list = DbSelector.dbname_filter_include(bkp_list, in_dbs,
                                                        in_regex, logger)
        else:
            # Apply inclusion first and then exclusion
            bkp_list = DbSelector.dbname_filter_include(dbs_all, in_dbs,
                                                        in_regex, logger)
            bkp_list = DbSelector.dbname_filter_exclude(bkp_list, ex_dbs,
                                                        ex_regex, logger)

        logger.highlight('info', Messenger.SEARCHING_SELECTED_DBS, 'white')

        if not bkp_list:
            logger.highlight('warning', Messenger.EMPTY_DBNAME_LIST, 'yellow',
                             effect='bold')
        else:
            for dbname in bkp_list:
                logger.info(Messenger.SELECTED_DB.format(dbname=dbname))
        return bkp_list
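
The in_priority flag only changes which filter runs last. A self-contained sketch, assuming dbname_filter_include keeps names listed in in_dbs or matching in_regex (everything when both are empty) and dbname_filter_exclude drops names listed in ex_dbs or matching ex_regex:

import re

def filter_include(names, in_dbs, in_regex):
    if not in_dbs and not in_regex:
        return list(names)  # no inclusion criteria given: keep everything
    return [n for n in names
            if n in in_dbs or (in_regex and re.match(in_regex, n))]

def filter_exclude(names, ex_dbs, ex_regex):
    return [n for n in names
            if n not in ex_dbs and not (ex_regex and re.match(ex_regex, n))]

dbs = ['sales', 'sales_old', 'hr']
# Default order: include first, then exclude drops anything ending in '_old'.
print(filter_exclude(filter_include(dbs, [], ''), [], r'.*_old$'))
# -> ['sales', 'hr']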
Example #4
def main():
    l = Logger(__name__)
    osqonnector = Bottle()

    for app in INSTALLED_APPS:
        l.debug("loading {}".format(app.__name__))
        osqonnector.merge(app.app)

    l.debug("[{}]: ready to serv ({}:{})".format(getpid(), config.HOST,
                                                 config.PORT))
    try:
        bjoern.run(osqonnector,
                   config.HOST,
                   config.PORT,
                   reuse_port=config.REUSE_PORT)
    except KeyboardInterrupt:
        l.info("bye.")
Example #5
    def __init__(self):
        self.received_signal = False
        signal(SIGINT, self._signal_handler)
        signal(SIGTERM, self._signal_handler)

    def _signal_handler(self, signum, frame):
        print(f"handling signal {signum}, exiting gracefully")
        self.received_signal = True


if __name__ == "__main__":
    userid = sys.argv[1]
    password = sys.argv[2]
    request_cookies = get_logged_in_cookies(userid, password)
    logger.info(
        message="Consuming messages from sqs queue: {}".format(queue_url),
        bucket=REAUTER_SCRAPER.recommendation_estimates,
        stage='sqs_consumer')
    signal_handler = SignalHandler()
    while not signal_handler.received_signal:
        res = sqs.consume_messages(queue_url)
        if 'Messages' in res:
            for message in res['Messages']:
                value = message['Body']
                try:
                    fetch_and_save(request_cookies, ast.literal_eval(value))
                except Exception as e:
                    error = {}
                    error['ExceptionMessage'] = str(e)
                    error['trace'] = traceback.format_exc().splitlines()
                    logger.error(
                        message="Exception occurred for data {}".format(value),
                        error=error,  # the exception details built above
                        stage='sqs_consumer')
Example #6
class TrimmerCluster:

    bkp_path = ''  # The path where the backups are stored
    prefix = ''  # The prefix of the backups' names
    min_n_bkps = None  # Minimum number of a database's backups to keep
    exp_days = None  # Number of days which make a backup obsolete
    max_size = None  # Maximum size of a group of database's backups
    # Maximum size in Bytes of a group of database's backups
    max_size_bytes = None
    # Related to max_size, equivalence to turn the specified unit of measure in
    # the max_size variable into Bytes
    equivalence = 10 ** 6
    logger = None  # Logger to show and log some messages

    def __init__(self, bkp_path='', prefix='', min_n_bkps=1, exp_days=365,
                 max_size=5000, logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if bkp_path and os.path.isdir(bkp_path):
            self.bkp_path = bkp_path
        else:
            self.logger.stop_exe(Messenger.DIR_DOES_NOT_EXIST)

        if prefix is None:
            self.prefix = Default.PREFIX
        else:
            self.prefix = prefix

        if min_n_bkps is None:
            self.min_n_bkps = Default.MIN_N_BKPS
        elif isinstance(min_n_bkps, int):
            self.min_n_bkps = min_n_bkps
        elif Checker.str_is_int(min_n_bkps):
            self.min_n_bkps = Casting.str_to_int(min_n_bkps)
        else:
            self.logger.stop_exe(Messenger.INVALID_MIN_BKPS)

        if exp_days is None:
            self.exp_days = Default.EXP_DAYS
        elif isinstance(exp_days, int) and exp_days >= -1:
            self.exp_days = exp_days
        elif Checker.str_is_valid_exp_days(exp_days):
            self.exp_days = Casting.str_to_int(exp_days)
        else:
            self.logger.stop_exe(Messenger.INVALID_OBS_DAYS)

        if max_size is None:
            self.max_size = Default.MAX_SIZE
        elif Checker.str_is_valid_max_size(max_size):
            self.max_size = max_size
        else:
            self.logger.stop_exe(Messenger.INVALID_MAX_TSIZE)

        # Split a string with size and unit of measure into a dictionary
        self.max_size = Casting.str_to_max_size(self.max_size)
        # Get the equivalence in Bytes of the specified unit of measure
        self.equivalence = Casting.get_equivalence(self.max_size['unit'])
        # Get the specified size in Bytes
        self.max_size_bytes = self.max_size['size'] * self.equivalence

        message = Messenger.CL_TRIMMER_VARS.format(
            bkp_path=self.bkp_path, prefix=self.prefix,
            min_n_bkps=self.min_n_bkps, exp_days=self.exp_days,
            max_size=self.max_size)
        self.logger.debug(Messenger.CL_TRIMMER_VARS_INTRO)
        self.logger.debug(message)

    def trim_cluster(self, ht_bkps_list):
        '''
        Target:
            - remove (if necessary) some cluster's backups, taking into
              account some parameters in the following order: minimum number of
              backups to keep > obsolete backups.
        Parameters:
            - ht_bkps_list: list of backups of a cluster to analyse and trim.
        '''
        if self.exp_days == -1:  # No expiration date
            x_days_ago = None
        else:
            x_days_ago = time.time() - (60 * 60 * 24 * self.exp_days)

        # Store the total number of backups of the cluster
        num_bkps = len(ht_bkps_list)
        # Clone the list to avoid conflict errors when removing
        ht_bkps_lt = ht_bkps_list[:]

        unlinked = False

        self.logger.highlight('info', Messenger.BEGINNING_CL_TRIMMER, 'white')

        start_time = DateTools.get_current_datetime()

        for f in ht_bkps_list:

            # Break if the number of backups does not exceed the minimum
            if num_bkps <= self.min_n_bkps:
                break

            file_info = os.stat(f)

            # Obsolete backup
            if x_days_ago and file_info.st_ctime < x_days_ago:

                self.logger.info(Messenger.DELETING_OBSOLETE_BACKUP % f)
                os.unlink(f)  # Remove backup's file
                unlinked = True
                # Update the number of backups of the database
                num_bkps -= 1
                ht_bkps_lt.remove(f)  # Update the list of cluster's backups

        end_time = DateTools.get_current_datetime()

        # Get total size of the backups in Bytes
        tsize = Dir.get_files_tsize(ht_bkps_lt)
        # Get total size of the backups in the selected unit of measure
        tsize_unit = ceil(tsize / self.equivalence)

        ## UNCOMMENT THE NEXT SECTION TO PROCEED WITH THE BACKUPS' DELETION IF
        ## THEIR TOTAL SIZE EXCEEDS THE SPECIFIED MAXIMUM SIZE

        #ht_bkps_list = ht_bkps_lt[:]

        #for f in ht_bkps_list:
            ## If there are fewer backups than the minimum required...
            #if num_bkps <= self.min_n_bkps:
                #break
            #if tsize <= self.max_size_bytes:
                #break
            #else:
                #file_info = os.stat(f)
                #self.logger.info('Backup size on disk greater than '
                                 #'{} {}: deleting the file '
                                 #'{}...'.format(self.max_size['size'],
                                                #self.max_size['unit'], f))
                #os.unlink(f)  # Remove backup's file
                #unlinked = True
                ## Update the number of backups of the cluster
                #num_bkps -= 1
                ## ht_bkps_lt.remove(f)  # Update the list of cluster's backups
                #tsize -= file_info.st_size  # Update total size after deletion

        if not unlinked:

            message = Messenger.NO_CL_BACKUP_DELETED
            self.logger.highlight('warning', message, 'yellow')

        if tsize > self.max_size_bytes:  # Total size exceeds the maximum

            message = Messenger.CL_BKPS_SIZE_EXCEEDED.format(
                tsize_unit=tsize_unit, size=self.max_size['size'],
                unit=self.max_size['unit'])
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)
        self.logger.highlight('info', Messenger.CL_TRIMMER_DONE.format(
            diff=diff), 'green')

    def trim_clusters(self, bkps_list):
        '''
        Target:
            - remove (if necessary) some backups of a cluster, taking into
              account some parameters in the following order: minimum number of
              backups to keep > obsolete backups.
        Parameters:
            - bkps_list: list of backups found in the specified directory.
        '''
        # If no prefix is specified, trim all the backups (not only the ones
        # without a prefix)
        if self.prefix:
            regex = r'(' + self.prefix + ')ht_(.+_cluster)_' \
                    r'(\d{8}_\d{6}_.+)\.(?:dump|bz2|gz|zip)$'
        else:
            regex = r'(.+)?ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.' \
                    '(?:dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        ht_bkps_list = []

        for file in bkps_list:

            # Extract file's name from the absolute path
            filename = os.path.basename(file)

            # If file matches regex (it means that file is a backup)
            if re.match(regex, filename):

                # Append backup to the group of cluster's backups
                ht_bkps_list.append(file)


        if ht_bkps_list:

            # Remove (if necessary) some backups of the cluster
            self.trim_cluster(ht_bkps_list)
            # Remove directories which could be empty after the trim
            Dir.remove_empty_dirs(self.bkp_path)

        else:
            self.logger.highlight('warning', Messenger.NO_BACKUP_IN_DIR,
                                  'yellow', effect='bold')

        self.logger.highlight('info', Messenger.TRIMMER_DONE, 'green',
                              effect='bold')
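
A quick self-contained check of the no-prefix cluster-backup pattern used above, run against some made-up file names:

import re

regex = re.compile(
    r'(.+)?ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(?:dump|bz2|gz|zip)$')

samples = [
    'daily_ht_main_cluster_20240101_120000_full.gz',  # matches, prefix 'daily_'
    'ht_main_cluster_20240101_120000_full.dump',      # matches, no prefix
    'db_sales_20240101_120000_full.gz',               # database backup: no match
]
for name in samples:
    m = regex.match(name)
    print(name, '->', m.groups() if m else None)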
Example #7
class Trimmer:

    bkp_path = ''  # The path where the backups are stored
    prefix = ''  # The prefix of the backups' names
    in_dbs = []  # List of databases to be included in the process
    in_regex = ''  # Regular expression which must match the included databases
    # Flag which determines whether inclusion conditions take precedence over
    # the exclusion ones
    in_priority = False
    ex_dbs = []  # List of databases to be excluded in the process
    ex_regex = ''  # Regular expression which must match the excluded databases
    min_n_bkps = None  # Minimum number of a database's backups to keep
    exp_days = None  # Number of days which make a backup obsolete
    max_size = None  # Maximum size of a group of database's backups
    # Maximum size in Bytes of a group of database's backups
    max_size_bytes = None
    # Related to max_size, equivalence to turn the specified unit of measure in
    # the max_size variable into Bytes
    equivalence = 10 ** 6
    # Flag which determines whether to show alerts about PostgreSQL
    pg_warnings = True
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self, bkp_path='', prefix='', in_dbs=[], in_regex='',
                 in_priority=False, ex_dbs=[], ex_regex='', min_n_bkps=1,
                 exp_days=365, max_size='10000MB', pg_warnings=True,
                 connecter=None, logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if bkp_path and os.path.isdir(bkp_path):
            self.bkp_path = bkp_path
        else:
            self.logger.stop_exe(Messenger.DIR_DOES_NOT_EXIST)

        if prefix is None:
            self.prefix = Default.PREFIX
        else:
            self.prefix = prefix

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if Checker.check_regex(in_regex):
            self.in_regex = in_regex
        else:
            self.logger.stop_exe(Messenger.INVALID_IN_REGEX)

        if isinstance(in_priority, bool):
            self.in_priority = in_priority
        elif Checker.str_is_bool(in_priority):
            self.in_priority = Casting.str_to_bool(in_priority)
        else:
            self.logger.stop_exe(Messenger.INVALID_IN_PRIORITY)

        if isinstance(ex_dbs, list):
            self.ex_dbs = ex_dbs
        else:
            self.ex_dbs = Casting.str_to_list(ex_dbs)

        if Checker.check_regex(ex_regex):
            self.ex_regex = ex_regex
        else:
            self.logger.stop_exe(Messenger.INVALID_EX_REGEX)

        if min_n_bkps is None:
            self.min_n_bkps = Default.MIN_N_BKPS
        elif isinstance(min_n_bkps, int):
            self.min_n_bkps = min_n_bkps
        elif Checker.str_is_int(min_n_bkps):
            self.min_n_bkps = Casting.str_to_int(min_n_bkps)
        else:
            self.logger.stop_exe(Messenger.INVALID_MIN_BKPS)

        if exp_days is None:
            self.exp_days = Default.EXP_DAYS
        elif isinstance(exp_days, int) and exp_days >= -1:
            self.exp_days = exp_days
        elif Checker.str_is_valid_exp_days(exp_days):
            self.exp_days = Casting.str_to_int(exp_days)
        else:
            self.logger.stop_exe(Messenger.INVALID_OBS_DAYS)

        if max_size is None:
            self.max_size = Default.MAX_SIZE
        elif Checker.str_is_valid_max_size(max_size):
            self.max_size = max_size
        else:
            self.logger.stop_exe(Messenger.INVALID_MAX_TSIZE)

        # Split a string with size and unit of measure into a dictionary
        self.max_size = Casting.str_to_max_size(self.max_size)
        # Get the equivalence in Bytes of the specified unit of measure
        self.equivalence = Casting.get_equivalence(self.max_size['unit'])
        # Get the specified size in Bytes
        self.max_size_bytes = self.max_size['size'] * self.equivalence

        if isinstance(pg_warnings, bool):
            self.pg_warnings = pg_warnings
        elif Checker.str_is_bool(pg_warnings):
            self.pg_warnings = Casting.str_to_bool(pg_warnings)
        else:
            self.logger.stop_exe(Messenger.INVALID_PG_WARNINGS)

        if self.pg_warnings:
            if connecter:
                self.connecter = connecter
            else:
                self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        message = Messenger.DB_TRIMMER_VARS.format(
            bkp_path=self.bkp_path, prefix=self.prefix, in_dbs=self.in_dbs,
            in_regex=self.in_regex, in_priority=self.in_priority,
            ex_dbs=self.ex_dbs, ex_regex=self.ex_regex,
            min_n_bkps=self.min_n_bkps, exp_days=self.exp_days,
            max_size=self.max_size, pg_warnings=self.pg_warnings)
        self.logger.debug(Messenger.DB_TRIMMER_VARS_INTRO)
        self.logger.debug(message)

    def trim_db(self, dbname, db_bkps_list):
        '''
        Target:
            - remove (if necessary) some database's backups, taking into
              account some parameters in the following order: minimum number of
              backups to keep > obsolete backups.
        Parameters:
            - dbname: name of the database whose backups are going to be
              trimmed.
            - db_bkps_list: list of backups of a database to analyse and trim.
        '''
        if self.exp_days == -1:  # No expiration date
            x_days_ago = None
        else:
            x_days_ago = time.time() - (60 * 60 * 24 * self.exp_days)

        # Store the total number of backups of the database
        num_bkps = len(db_bkps_list)
        # Clone the list to avoid conflict errors when removing
        db_bkps_lt = db_bkps_list[:]

        unlinked = False

        message = Messenger.BEGINNING_DB_TRIMMER.format(dbname=dbname)
        self.logger.highlight('info', message, 'white')

        start_time = DateTools.get_current_datetime()

        for f in db_bkps_list:

            # Break if the number of backups does not exceed the minimum
            if num_bkps <= self.min_n_bkps:
                break

            file_info = os.stat(f)

            # Obsolete backup
            if x_days_ago and file_info.st_ctime < x_days_ago:

                self.logger.info(Messenger.DELETING_OBSOLETE_BACKUP % f)
                os.unlink(f)  # Remove backup's file
                unlinked = True
                # Update the number of backups of the database
                num_bkps -= 1
                db_bkps_lt.remove(f)  # Update the list of database's backups

        end_time = DateTools.get_current_datetime()

        # Get total size of the backups in Bytes
        tsize = Dir.get_files_tsize(db_bkps_lt)
        # Get total size of the backups in the selected unit of measure
        tsize_unit = ceil(tsize / self.equivalence)

        ## UNCOMMENT THE NEXT SECTION TO PROCEED WITH THE BACKUPS' DELETION IF
        ## THEIR TOTAL SIZE EXCEEDS THE SPECIFIED MAXIMUM SIZE

        #db_bkps_list = db_bkps_lt[:]

        #for f in db_bkps_list:
            ## If there are fewer backups than the minimum required...
            #if num_bkps <= self.min_n_bkps:
                #break
            #if tsize <= self.max_size_bytes:
                #break
            #else:
                #file_info = os.stat(f)
                #self.logger.info('Backup size on disk greater than '
                                 #'{} {}: deleting the file '
                                 #'{}...'.format(self.max_size['size'],
                                                #self.max_size['unit'], f))
                #os.unlink(f)  # Remove backup's file
                #unlinked = True
                ## Update the number of backups of the database
                #num_bkps -= 1
                ## Update the list of database's backups
                ## db_bkps_lt.remove(f)
                #tsize -= file_info.st_size  # Update total size after deletion

        if not unlinked:

            message = Messenger.NO_DB_BACKUP_DELETED.format(dbname=dbname)
            self.logger.highlight('warning', message, 'yellow')

        if tsize > self.max_size_bytes:  # Total size exceeds the maximum

            message = Messenger.DB_BKPS_SIZE_EXCEEDED.format(
                dbname=dbname, tsize_unit=tsize_unit,
                size=self.max_size['size'], unit=self.max_size['unit'])
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)
        self.logger.highlight('info', Messenger.DB_TRIMMER_DONE.format(
            dbname=dbname, diff=diff), 'green')

    def trim_dbs(self, bkps_list, dbs_to_clean):
        '''
        Target:
            - remove (if necessary) some backups of a group of databases,
              taking into account some parameters in the following order:
              minimum number of backups to keep > obsolete backups.
        Parameters:
            - bkps_list: list of backups found in the specified directory.
            - dbs_to_clean: names of the databases whose backups are going to
              be trimmed.
        '''
        # If no prefix is specified, trim all the backups (not only the ones
        # without a prefix)
        if self.prefix:
            regex = r'(' + self.prefix + r')db_(.+)_(\d{8}_\d{6}_.+)\.' \
                    '(?:dump|bz2|gz|zip)$'
        else:
            regex = r'(.+)?db_(.+)_(\d{8}_\d{6}_.+)\.(?:dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        for dbname in dbs_to_clean:

            db_bkps_list = []

            for file in bkps_list:

                # Extract file's name from the absolute path
                filename = os.path.basename(file)

                # If file matches regex (it means that file is a backup)
                if re.match(regex, filename):

                    # Extract parts of the name ([prefix], dbname, date)
                    parts = regex.search(filename).groups()
                    # Store the database's name whose this backup belongs to
                    fdbname = parts[1]

                    # If that backup belongs to a database which has to be
                    # trimmed
                    if dbname == fdbname:
                        # Append backup to the group of database's backups
                        db_bkps_list.append(file)

            # Remove (if necessary) some backups of the specified database
            self.trim_db(dbname, db_bkps_list)

        # Remove directories which could be empty after the trim
        Dir.remove_empty_dirs(self.bkp_path)

        self.logger.highlight('info', Messenger.TRIMMER_DONE, 'green',
                              effect='bold')
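
The obsolescence test in trim_cluster and trim_db reduces to comparing a file's change time against a cutoff derived from exp_days; a standalone sketch of just that check (the file names are illustrative):

import os
import time

exp_days = 365
# exp_days == -1 means backups never expire; otherwise compute the cutoff.
x_days_ago = None if exp_days == -1 else time.time() - 60 * 60 * 24 * exp_days

for f in ['backup_a.dump', 'backup_b.dump']:  # illustrative names
    if not os.path.isfile(f):
        continue
    if x_days_ago and os.stat(f).st_ctime < x_days_ago:
        print('{} is obsolete'.format(f))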
Example #8
class Restorer:

    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages
    db_backup = ''  # Absolute path of the backup file (of a database)
    new_dbname = ''  # New name for the database restored in PostgreSQL

    def __init__(self, connecter=None, db_backup='', new_dbname='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if db_backup and os.path.isfile(db_backup):
            self.db_backup = db_backup
        else:
            self.logger.stop_exe(Messenger.NO_BKP_TO_RESTORE)

        if new_dbname:
            self.new_dbname = new_dbname
        else:
            self.logger.stop_exe(Messenger.NO_DBNAME_TO_RESTORE)

        message = Messenger.DB_RESTORER_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, db_backup=self.db_backup,
            new_dbname=self.new_dbname)
        self.logger.debug(Messenger.DB_RESTORER_VARS_INTRO)
        self.logger.debug(message)

    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        #replicator = Replicator(self.connecter, self.new_dbname,
                                #Default.RESTORING_TEMPLATE, self.logger)
        #result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
        #if result:
            #replicator.replicate_pg_db()
            #self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
        #else:
            #self.logger.stop_exe(Messenger.ALLOW_DB_CONN_FAIL.format(
                #dbname=Default.RESTORING_TEMPLATE))

        # Regular expression which must match the backup's name
        regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.db_backup):
            # Store the parts of the backup's name (name, date, ext)
            parts = regex.search(self.db_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_DB_RESTORER.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        if ext == 'gz':
            command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'zip':
            command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        else:
            command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname, self.db_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Perform the restoration of the database
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_DB_DONE.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname,
                diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_DB_FAIL.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname)
            self.logger.stop_exe(message)
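
For a concrete picture of what gets executed, the command built above for a gzipped backup renders to a shell pipeline like this (paths and connection values are placeholders):

db_backup = '/backups/db_sales_20240101_120000_full.gz'
user, server, port, new_dbname = 'postgres', 'localhost', 5432, 'sales_restored'

command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} -d {}'.format(
    db_backup, user, server, port, new_dbname)
print(command)
# -> gunzip -c /backups/db_sales_20240101_120000_full.gz -k | pg_restore -U postgres -h localhost -p 5432 -d sales_restored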
Example #9
class Scheduler:

    time = ''  # Time when the command is going to be executed in Cron
    command = ''  # Command which is going to be executed in Cron.
    logger = None  # Logger to show and log some messages

    def __init__(self, time='', command='', logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        self.time = time.strip()
        self.command = command.strip()

    def show_lines(self):
        '''
        Target:
            - show the lines of the program's CRON file.
        '''
        self.logger.highlight('info', Messenger.SHOWING_CRONTAB_FILE, 'white')
        print()

        cron = CronTab(user=True)

        if cron:
            for line in cron.lines:
                print(str(line))
        else:
            print('\033[1;40;93m' + Messenger.NO_CRONTAB_FILE + '\033[0m')

    def add_line(self):
        '''
        Target:
            - add a line to the program's CRON file.
        '''
        cron = CronTab(user=True)

        job = cron.new(command=self.command)

        if self.time in ['@yearly', '@annually']:
            job.setall('0 0 1 1 *')
        elif self.time == '@monthly':
            job.setall('0 0 1 * *')
        elif self.time == '@weekly':
            job.setall('0 0 * * 0')
        elif self.time in ['@daily', '@midnight']:
            job.setall('0 0 * * *')
        elif self.time == '@hourly':
            job.setall('0 * * * *')
        elif self.time == '@reboot':
            job.every_reboot()
        else:
            job.setall(self.time)

        self.logger.highlight('info', Messenger.SCHEDULER_ADDING, 'white')

        if not cron:
            self.logger.info(Messenger.CREATING_CRONTAB)

        try:
            cron.write()
            self.logger.highlight('info', Messenger.SCHEDULER_ADD_DONE,
                                  'green')
            #print(cron.render())

        except Exception as e:
            self.logger.debug('Error en la función "add_line": {}.'.format(
                str(e)))
            self.logger.stop_exe(Messenger.SCHEDULER_ADD_FAIL)

    def remove_line(self):
        '''
        Target:
            - remove a line from the program's CRON file.
        '''
        self.logger.highlight('info', Messenger.SCHEDULER_REMOVING, 'white')

        cron = CronTab(user=True)

        if not cron:
            self.logger.stop_exe(Messenger.NO_CRONTAB_FILE)

        deletion = False

        line = self.time + ' ' + self.command

        for job in cron:

            if str(job).strip() == line:

                try:
                    cron.remove(job)
                    message = Messenger.SCHEDULER_REMOVE_DONE.format(job=job)
                    self.logger.highlight('info', message, 'green')
                    deletion = True

                except Exception as e:
                    self.logger.debug('Error en la función "remove_line": '
                                      '{}.'.format(str(e)))
                    message = Messenger.SCHEDULER_REMOVE_FAIL.format(job=job)
                    self.logger.highlight('warning', message, 'yellow')

        if not deletion:
            self.logger.stop_exe(Messenger.NO_CRONTAB_JOB_TO_DEL)

        cron.write()
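
The same python-crontab calls in isolation, assuming the python-crontab package is installed; note that running this really edits the current user's crontab (the command is illustrative):

from crontab import CronTab

cron = CronTab(user=True)
job = cron.new(command='/usr/local/bin/backup.sh')
job.setall('0 0 * * *')  # the schedule Scheduler maps '@daily' to
cron.write()

# remove_line() matches on the rendered "time command" line:
for job in cron:
    if str(job).strip() == '0 0 * * * /usr/local/bin/backup.sh':
        cron.remove(job)
cron.write()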
Example #10
File: bobPool.py Project: adn6868/Bob
        print("++++ {} ====".format(job))
        print('startTime {}'.format(newBob.startTime))
        print('endTime {}'.format(newBob.endTime))
        if newBob.open():
            newBob.run()
        else:
            self.jobDict[job] = jobDefinition
        # TODO: untangle this deadlock handling

    def executeBob(self):
        if not self.jobDict:
            self.status = 'Completed'
            return
        self.pool = ThreadPool(len(self.jobDict))
        stdout = self.pool.map(self._executeBob, self.jobDict.keys())


if __name__ == "__main__":
    serverStatus = 'Running'
    jobPool = BobPool()
    jobPool.genJobDict()
    jobPool.getJobQueue()
    serverStatus = jobPool.status
    log = Logger()
    log.info(jobPool.jobQueue)
    while serverStatus == 'Running':
        jobPool.executeBob()
        serverStatus = jobPool.status
    print('Job Pool {} at {}'.format(serverStatus, dt.datetime.now()))
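
executeBob fans the jobs out with multiprocessing.pool.ThreadPool; stripped of the Bob-specific pieces, the underlying pattern is:

from multiprocessing.pool import ThreadPool

def run_job(name):
    return '{} done'.format(name)

jobs = {'job_a': {}, 'job_b': {}}  # stand-in for self.jobDict
pool = ThreadPool(len(jobs))
print(pool.map(run_job, jobs.keys()))  # blocks until every job returns
pool.close()
pool.join()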
Example #11
class Terminator:

    target_all = None  # Flag which determines whether to terminate every connection
    target_user = None  # Terminate any connection of a specific user
    target_dbs = []  # Terminate any connection to a list of databases
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self,
                 connecter,
                 target_all=False,
                 target_user='',
                 target_dbs=[],
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if target_all is None:
            self.target_all = target_all
        elif isinstance(target_all, bool):
            self.target_all = target_all
        elif Checker.str_is_bool(target_all):
            self.target_all = Casting.str_to_bool(target_all)
        else:
            self.logger.stop_exe(Messenger.INVALID_TARGET_ALL)

        self.target_user = target_user

        if target_dbs is None:
            self.target_dbs = []
        elif isinstance(target_dbs, list):
            self.target_dbs = target_dbs
        else:
            self.target_dbs = Casting.str_to_list(target_dbs)

        message = Messenger.TERMINATOR_VARS.format(
            server=self.connecter.server,
            user=self.connecter.user,
            port=self.connecter.port,
            target_all=self.target_all,
            target_user=target_user,
            target_dbs=self.target_dbs)
        self.logger.debug(Messenger.TERMINATOR_VARS_INTRO)
        self.logger.debug(message)

    def terminate_backend_user(self):
        '''
        Target:
            - terminate every connection of a specific user to PostgreSQL (as
              long as the target user is the one who is running the program).
        '''
        message = Messenger.BEGINNING_TERMINATE_USER_CONN.format(
            target_user=self.target_user)
        self.logger.highlight('info', message, 'white')

        try:
            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            sql = Queries.GET_CURRENT_PG_USER
            self.connecter.cursor.execute(sql)
            current_pg_user = self.connecter.cursor.fetchone()[0]

            if self.target_user == current_pg_user:
                message = Messenger.TARGET_USER_IS_CURRENT_USER.format(
                    target_user=self.target_user)
                self.logger.highlight('warning', message, 'yellow')

            else:
                formatted_sql = Queries.BACKEND_PG_USER_EXISTS.format(
                    pg_pid=pg_pid, target_user=self.target_user)
                self.connecter.cursor.execute(formatted_sql)
                result = self.connecter.cursor.fetchone()

                if result:
                    formatted_sql = Queries.TERMINATE_BACKEND_PG_USER.format(
                        pg_pid=pg_pid, target_user=self.target_user)
                    self.connecter.cursor.execute(formatted_sql)
                else:
                    message = Messenger.NO_USER_CONNS.format(
                        target_user=self.target_user)
                    self.logger.info(message)

            message = Messenger.TERMINATE_USER_CONN_DONE.format(
                target_user=self.target_user)
            self.logger.highlight('info', message, 'green')

        except Exception as e:
            self.logger.debug('Error in function "terminate_backend_user": '
                              '{}.'.format(str(e)))
            message = Messenger.TERMINATE_USER_CONN_FAIL.format(
                target_user=self.target_user)
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')

    def terminate_backend_db(self, target_db):
        '''
        Target:
            - terminate every connection to a PostgreSQL database (except the
              current one, if it is connected to the target database).
        '''
        try:
            # The variable "target_db" sometimes could be a string or a list
            # of list, so it is necessary to check it first
            if not isinstance(target_db, str):
                target_db = target_db['datname']

            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=target_db)

            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:

                formatted_sql = Queries.TERMINATE_BACKEND_PG_DB.format(
                    pg_pid=pg_pid, target_db=target_db)

                self.connecter.cursor.execute(formatted_sql)

                message = Messenger.TERMINATE_DB_CONN_DONE.format(
                    target_dbname=target_db)
                self.logger.info(message)

            else:
                message = Messenger.NO_DB_CONNS.format(target_db=target_db)
                self.logger.info(message)

        except Exception as e:
            self.logger.debug('Error en la función "terminate_backend_db": '
                              '{}.'.format(str(e)))
            message = Messenger.TERMINATE_DB_CONN_FAIL.format(
                target_dbname=target_db)
            self.logger.highlight('warning', message, 'yellow')

    def terminate_backend_dbs(self, ter_list):
        '''
        Target:
            - terminate every connection to some PostgreSQL databases (except
              the current one, if it is connected to one of the target
              databases).
        Parameters:
            - ter_list: the list of databases whose connections are going to be
              terminated.
        '''
        message = Messenger.BEGINNING_TERMINATE_DBS_CONN
        self.logger.highlight('info', message, 'white')

        if ter_list:
            for target_db in ter_list:
                self.terminate_backend_db(target_db)
        else:
            self.logger.highlight('warning',
                                  Messenger.TERMINATOR_HAS_NOTHING_TO_DO,
                                  'yellow')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')

    def terminate_backend_all(self):
        '''
        Target:
            - remove every connection to PostgreSQL (except the current one).
        '''
        try:
            message = Messenger.BEGINNING_TERMINATE_ALL_CONN
            self.logger.highlight('info', message, 'white')

            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            formatted_sql = Queries.BACKEND_PG_ALL_EXISTS.format(pg_pid=pg_pid)

            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:
                formatted_sql = Queries.TERMINATE_BACKEND_PG_ALL.format(
                    pg_pid=pg_pid)
                self.connecter.cursor.execute(formatted_sql)
            else:
                self.logger.info(Messenger.NO_CONNS)

            self.logger.highlight('info', Messenger.TERMINATE_ALL_CONN_DONE,
                                  'green')

        except Exception as e:
            self.logger.debug('Error en la función "terminate_backend_all": '
                              '{}.'.format(str(e)))
            message = Messenger.TERMINATE_ALL_CONN_FAIL
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')
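
The Queries constants are not shown in this snippet; on stock PostgreSQL the per-database termination presumably reduces to something like the following (an assumption, not necessarily the project's actual SQL):

# Assumed shape of Queries.TERMINATE_BACKEND_PG_DB:
TERMINATE_BACKEND_PG_DB = (
    "SELECT pg_terminate_backend({pg_pid}) "
    "FROM pg_catalog.pg_stat_activity "
    "WHERE datname = '{target_db}' AND {pg_pid} <> pg_backend_pid();"
)
# get_pid_str() presumably returns 'pid' (PostgreSQL >= 9.2) or 'procpid'
# (older releases), which is why the column name is templated.
print(TERMINATE_BACKEND_PG_DB.format(pg_pid='pid', target_db='sales'))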
Example #12
class RestorerCluster:

    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages
    cluster_backup = ''  # Absolute path of the backup file (of a cluster)

    def __init__(self, connecter=None, cluster_backup='', logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if cluster_backup and os.path.isfile(cluster_backup):
            self.cluster_backup = cluster_backup
        else:
            self.logger.stop_exe(Messenger.NO_BKP_TO_RESTORE)

        message = Messenger.CL_RESTORER_VARS.format(
            server=self.connecter.server,
            user=self.connecter.user,
            port=self.connecter.port,
            cluster_backup=self.cluster_backup)
        self.logger.debug(Messenger.CL_RESTORER_VARS_INTRO)
        self.logger.debug(message)

    def restore_cluster_backup(self):
        '''
        Target:
            - restore a cluster's backup in PostgreSQL. The cluster must have
              been created before this process.
        '''
        # Regular expression which must match the backup's name
        regex = r'.*ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.cluster_backup):
            # Store the parts of the backup's name (servername, date, ext)
            parts = regex.search(self.cluster_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_CL_RESTORER.format(
            cluster_backup=self.cluster_backup)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # TODO: suppress the operation's output shown in the console
        if ext == 'gz':
            command = 'gunzip -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'zip':
            command = 'unzip -p {} | psql postgres -U {} -h {} -p {}'.format(
                self.cluster_backup, self.connecter.user,
                self.connecter.server, self.connecter.port)
        else:
            command = 'psql postgres -U {} -h {} -p {} < {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.cluster_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Perform the restoration of the cluster
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_CL_DONE.format(
                cluster_backup=self.cluster_backup, diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info',
                                  Messenger.RESTORER_DONE,
                                  'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_cluster_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_CL_FAIL.format(
                cluster_backup=self.cluster_backup)
            self.logger.stop_exe(message)
Example #13
class Restorer:

    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages
    db_backup = ''  # Absolute path of the backup file (of a database)
    new_dbname = ''  # New name for the database restored in PostgreSQL

    def __init__(self,
                 connecter=None,
                 db_backup='',
                 new_dbname='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if db_backup and os.path.isfile(db_backup):
            self.db_backup = db_backup
        else:
            self.logger.stop_exe(Messenger.NO_BKP_TO_RESTORE)

        if new_dbname:
            self.new_dbname = new_dbname
        else:
            self.logger.stop_exe(Messenger.NO_DBNAME_TO_RESTORE)

        message = Messenger.DB_RESTORER_VARS.format(
            server=self.connecter.server,
            user=self.connecter.user,
            port=self.connecter.port,
            db_backup=self.db_backup,
            new_dbname=self.new_dbname)
        self.logger.debug(Messenger.DB_RESTORER_VARS_INTRO)
        self.logger.debug(message)

    def restore_db_backup(self):
        '''
        Target:
            - restore a database's backup in PostgreSQL.
        '''
        replicator = Replicator(self.connecter, self.new_dbname,
                                Default.RESTORING_TEMPLATE, self.logger)
        result = self.connecter.allow_db_conn(Default.RESTORING_TEMPLATE)
        if result:
            replicator.replicate_pg_db()
            self.connecter.disallow_db_conn(Default.RESTORING_TEMPLATE)
        else:
            self.logger.stop_exe(
                Messenger.ALLOW_DB_CONN_FAIL.format(
                    dbname=Default.RESTORING_TEMPLATE))

        # Regular expression which must match the backup's name
        regex = r'.*db_(.+)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.db_backup):
            # Store the parts of the backup's name (name, date, ext)
            parts = regex.search(self.db_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_DB_RESTORER.format(
            db_backup=self.db_backup, new_dbname=self.new_dbname)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        if ext == 'gz':
            command = 'gunzip -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        elif ext == 'zip':
            command = 'unzip -p {} | pg_restore -U {} -h {} -p {} ' \
                      '-d {}'.format(self.db_backup, self.connecter.user,
                                     self.connecter.server,
                                     self.connecter.port, self.new_dbname)
        else:
            command = 'pg_restore -U {} -h {} -p {} -d {} {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.new_dbname, self.db_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Perform the restoration of the database
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_DB_DONE.format(
                db_backup=self.db_backup,
                new_dbname=self.new_dbname,
                diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info',
                                  Messenger.RESTORER_DONE,
                                  'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_db_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_DB_FAIL.format(
                db_backup=self.db_backup, new_dbname=self.new_dbname)
            self.logger.stop_exe(message)
Example #14
class Alterer:

    in_dbs = []  # List of databases to be included in the process
    old_role = ''  # Current owner of the database's tables
    new_role = ''  # New owner for the database and its tables
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self,
                 connecter=None,
                 in_dbs=[],
                 old_role='',
                 new_role='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if old_role:
            self.old_role = old_role
        else:
            self.logger.stop_exe(Msg.NO_OLD_ROLE)

        if not new_role:
            self.logger.stop_exe(Msg.NO_NEW_ROLE)
        # First check whether the user exists in PostgreSQL or not
        self.connecter.cursor.execute(Queries.PG_USER_EXISTS, (new_role, ))
        # Do not alter database if the user does not exist
        result = self.connecter.cursor.fetchone()
        if result:
            self.new_role = new_role
        else:
            msg = Msg.USER_DOES_NOT_EXIST.format(user=new_role)
            self.logger.stop_exe(msg)

        msg = Msg.ALTERER_VARS.format(server=self.connecter.server,
                                      user=self.connecter.user,
                                      port=self.connecter.port,
                                      in_dbs=self.in_dbs,
                                      old_role=self.old_role,
                                      new_role=self.new_role)
        self.logger.debug(Msg.ALTERER_VARS_INTRO)
        self.logger.debug(msg)

    def alter_db_owner(self, db):
        '''
        Target:
            - change the owner of a database and its tables.
        Parameters:
            - db: database which is going to be altered.
        Return:
            - a boolean which indicates the success of the process.
        '''
        msg = Msg.ALTERER_FEEDBACK.format(old_role=self.old_role,
                                          new_role=self.new_role)
        self.logger.info(msg)

        success = True
        dbname = db['datname']

        if db['owner'] != 'postgres':  # Never alter a database owned by postgres

            if db['datallowconn'] == 1:  # Check if the db allows connections

                try:
                    # Change the owner of the database
                    self.connecter.cursor.execute(
                        Queries.CHANGE_PG_DB_OWNER.format(
                            dbname=dbname, new_role=self.new_role))

                except Exception as e:
                    success = False
                    self.logger.debug('Error en la función "alter_db_owner": '
                                      '{}'.format(str(e)))
                    msg = Msg.CHANGE_PG_DB_OWNER_FAIL
                    self.logger.highlight('warning', msg, 'yellow')

                # Start another connection to the target database to be able to
                # apply the next query
                own_connecter = Connecter(server=self.connecter.server,
                                          user=self.connecter.user,
                                          port=self.connecter.port,
                                          database=dbname,
                                          logger=self.logger)

                # Disallow connections to the database during the process
                result = self.connecter.disallow_db_conn(dbname)
                if not result:
                    msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

                try:
                    # Change the owner of the database's tables
                    own_connecter.cursor.execute(
                        Queries.REASSIGN_PG_DB_TBLS_OWNER.format(
                            old_role=self.old_role, new_role=self.new_role))

                except Exception as e:
                    success = False
                    self.logger.debug('Error en la función "alter_db_owner": '
                                      '{}'.format(str(e)))
                    msg = Msg.REASSIGN_PG_DB_TBLS_OWNER_FAIL
                    self.logger.highlight('warning', msg, 'yellow')

                # Allow connections to the database at the end of the process
                result = self.connecter.allow_db_conn(dbname)
                if not result:
                    msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

                # Close cursor and connection to the target database
                own_connecter.pg_disconnect()

            else:
                success = False
                msg = Msg.DB_DOES_NOT_ALLOW_CONN.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')

        else:
            success = False
            msg = Msg.DB_OWNED_BY_POSTGRES_NOT_ALLOWED
            self.logger.highlight('warning', msg, 'yellow')

        return success

    def alter_dbs_owner(self, alt_list):
        '''
        Target:
            - change the owner of a group of databases and their tables.
        Parameters:
            - alt_list: names of the databases which are going to be altered.
        '''
        self.logger.highlight('info', Msg.PROCESSING_ALTERER, 'white')

        if alt_list:

            for db in alt_list:

                dbname = db['datname']

                msg = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', msg, 'cyan')

                start_time = DateTools.get_current_datetime()
                # Change the owner of the database
                success = self.alter_db_owner(db)
                end_time = DateTools.get_current_datetime()
                # Get and show the process' duration
                diff = DateTools.get_diff_datetimes(start_time, end_time)

                if success:
                    msg = Msg.DB_ALTERER_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')
                else:
                    msg = Msg.DB_ALTERER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning',
                                          msg,
                                          'yellow',
                                          effect='bold')
        else:
            self.logger.highlight('warning',
                                  Msg.ALTERER_HAS_NOTHING_TO_DO,
                                  'yellow',
                                  effect='bold')

        self.logger.highlight('info', Msg.ALTERER_DONE, 'green', effect='bold')
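A minimal usage sketch for the Alterer above; the connection values are hypothetical, Connecter and Logger are the helper classes used throughout this listing, and each entry of alt_list mirrors the catalog keys ('datname', 'owner', 'datallowconn') that alter_db_owner reads:

# Minimal usage sketch for Alterer (hypothetical connection values).
logger = Logger()
connecter = Connecter(server='localhost', user='postgres', port=5432,
                      logger=logger)

alterer = Alterer(connecter=connecter,
                  in_dbs=['app_db'],
                  old_role='old_owner',
                  new_role='new_owner',
                  logger=logger)

# One dictionary per database, with the keys alter_db_owner expects
alt_list = [{'datname': 'app_db', 'owner': 'old_owner', 'datallowconn': 1}]
alterer.alter_dbs_owner(alt_list)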
Example #15
class BackerCluster:

    bkp_path = ''  # The path where the backups are stored
    group = ''  # The name of the subdirectory where the backups are stored
    bkp_type = ''  # The type of the backups' files
    prefix = ''  # The prefix of the backups' names
    # Flag which determines whether the databases must be vacuumed before the
    # backup process
    vacuum = True
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self,
                 connecter=None,
                 bkp_path='',
                 group='',
                 bkp_type='dump',
                 prefix='',
                 vacuum=True,
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        # If backup directory is not specified, create a default one to store
        # the backups
        if bkp_path:
            self.bkp_path = bkp_path
        else:
            self.bkp_path = Default.BKP_PATH
            Dir.create_dir(self.bkp_path, self.logger)

        if group:
            self.group = group
        else:
            self.group = Default.GROUP

        if bkp_type is None:
            self.bkp_type = Default.BKP_TYPE
        elif Checker.check_compress_type(bkp_type):
            self.bkp_type = bkp_type
        else:
            self.logger.stop_exe(Msg.INVALID_BKP_TYPE)

        self.prefix = prefix

        if isinstance(vacuum, bool):
            self.vacuum = vacuum
        elif Checker.str_is_bool(vacuum):
            self.vacuum = Casting.str_to_bool(vacuum)
        else:
            self.logger.stop_exe(Msg.INVALID_VACUUM)

        msg = Msg.CL_BACKER_VARS.format(server=self.connecter.server,
                                        user=self.connecter.user,
                                        port=self.connecter.port,
                                        bkp_path=self.bkp_path,
                                        group=self.group,
                                        bkp_type=self.bkp_type,
                                        prefix=self.prefix,
                                        vacuum=self.vacuum)
        self.logger.debug(Msg.CL_BACKER_VARS_INTRO)
        self.logger.debug(msg)

    def backup_all(self, bkps_dir):
        '''
        Target:
            - make a backup of a cluster.
        Parameters:
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        success = True
        # Get date and time of the zone
        init_ts = DateTools.get_date()
        # Get current year
        year = str(DateTools.get_year(init_ts))
        # Get current month
        month = str(DateTools.get_month(init_ts))
        # Create new directories with the year and the month of the backup
        bkp_dir = bkps_dir + year + '/' + month + '/'
        Dir.create_dir(bkp_dir, self.logger)

        # Set backup's name
        file_name = self.prefix + 'ht_' + self.connecter.server + \
            str(self.connecter.port) + '_cluster_' + init_ts + '.' + \
            self.bkp_type

        # Build the command to run, depending on the backup type
        if self.bkp_type == 'gz':  # Compress with gzip
            command = 'pg_dumpall -U {} -h {} -p {} | gzip > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'bz2':  # Compress with bzip2
            command = 'pg_dumpall -U {} -h {} -p {} | bzip2 > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'zip':  # Compress with zip
            command = 'pg_dumpall -U {} -h {} -p {} | zip > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        else:  # Do not compress
            command = 'pg_dumpall -U {} -h {} -p {} > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        try:
            # Execute the command in console
            result = subprocess.call(command, shell=True)
            if result != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_all": {}.'.format(
                str(e)))
            success = False

        return success

    def backup_cl(self):
        '''
        Target:
            - vacuum if necessary and make a backup of a cluster.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Create a new directory with the name of the group
        bkps_dir = self.bkp_path + self.group + Default.CL_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        # Vacuum the databases before the backup process if necessary
        if self.vacuum:
            vacuumer = Vacuumer(connecter=self.connecter, logger=self.logger)
            dbs_all = vacuumer.connecter.get_pg_dbs_data(
                vacuumer.ex_templates, vacuumer.db_owner)
            vacuumer.vacuum_dbs(dbs_all)

        self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

        start_time = DateTools.get_current_datetime()
        # Make the backup of the cluster
        success = self.backup_all(bkps_dir)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        if success:
            msg = Msg.CL_BACKER_DONE.format(diff=diff)
            self.logger.highlight('info', msg, 'green', effect='bold')
        else:
            self.logger.highlight('warning',
                                  Msg.CL_BACKER_FAIL,
                                  'yellow',
                                  effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
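For orientation, backup_cl is the entry point: it prepares the group directory, optionally vacuums every database, and delegates the actual pg_dumpall call to backup_all. A minimal usage sketch, with hypothetical values:

# Minimal usage sketch for BackerCluster (hypothetical values).
logger = Logger()
connecter = Connecter(server='localhost', user='postgres', port=5432,
                      logger=logger)

cl_backer = BackerCluster(connecter=connecter,
                          bkp_path='/var/backups/pg/',
                          group='nightly',
                          bkp_type='gz',  # pipe pg_dumpall through gzip
                          vacuum=True,
                          logger=logger)
cl_backer.backup_cl()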
Example #16
class Backer:

    bkp_path = ''  # The path where the backups are stored
    group = ''  # The name of the subdirectory where the backups are stored
    bkp_type = ''  # The type of the backups' files
    prefix = ''  # The prefix of the backups' names
    in_dbs = []  # List of databases to be included in the process
    in_regex = ''  # Regular expression which must match the included databases
    # Flag which determines whether inclusion conditions predominate over the
    # exclusion ones
    in_priority = False
    ex_dbs = []  # List of databases to be excluded in the process
    ex_regex = ''  # Regular expression which must match the excluded databases
    # Flag which determines whether the templates must be excluded
    ex_templates = True
    # Flag which determines whether the included databases must be vacuumed
    # before the backup process
    vacuum = True
    # Use other PostgreSQL user during the backup process (only for superusers)
    db_owner = ''
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self,
                 connecter=None,
                 bkp_path='',
                 group='',
                 bkp_type='dump',
                 prefix='',
                 in_dbs=[],
                 in_regex='',
                 in_priority=False,
                 ex_dbs=['postgres'],
                 ex_regex='',
                 ex_templates=True,
                 vacuum=True,
                 db_owner='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        # If backup directory is not specified, create a default one to store
        # the backups
        if bkp_path:
            self.bkp_path = bkp_path
        else:
            self.bkp_path = Default.BKP_PATH
            Dir.create_dir(self.bkp_path, self.logger)

        if group:
            self.group = group
        else:
            self.group = Default.GROUP

        if bkp_type is None:
            self.bkp_type = Default.BKP_TYPE
        elif Checker.check_compress_type(bkp_type):
            self.bkp_type = bkp_type
        else:
            self.logger.stop_exe(Msg.INVALID_BKP_TYPE)

        self.prefix = prefix

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if Checker.check_regex(in_regex):
            self.in_regex = in_regex
        else:
            self.logger.stop_exe(Msg.INVALID_IN_REGEX)

        if isinstance(in_priority, bool):
            self.in_priority = in_priority
        elif Checker.str_is_bool(in_priority):
            self.in_priority = Casting.str_to_bool(in_priority)
        else:
            self.logger.stop_exe(Msg.INVALID_IN_PRIORITY)

        if isinstance(ex_dbs, list):
            self.ex_dbs = ex_dbs
        else:
            self.ex_dbs = Casting.str_to_list(ex_dbs)

        if Checker.check_regex(ex_regex):
            self.ex_regex = ex_regex
        else:
            self.logger.stop_exe(Msg.INVALID_EX_REGEX)

        if isinstance(ex_templates, bool):
            self.ex_templates = ex_templates
        elif Checker.str_is_bool(ex_templates):
            self.ex_templates = Casting.str_to_bool(ex_templates)
        else:
            self.logger.stop_exe(Msg.INVALID_EX_TEMPLATES)

        if isinstance(vacuum, bool):
            self.vacuum = vacuum
        elif Checker.str_is_bool(vacuum):
            self.vacuum = Casting.str_to_bool(vacuum)
        else:
            self.logger.stop_exe(Msg.INVALID_VACUUM)

        if db_owner:
            self.db_owner = db_owner
        else:
            self.db_owner = Default.DB_OWNER

        msg = Msg.DB_BACKER_VARS.format(server=self.connecter.server,
                                        user=self.connecter.user,
                                        port=self.connecter.port,
                                        bkp_path=self.bkp_path,
                                        group=self.group,
                                        bkp_type=self.bkp_type,
                                        prefix=self.prefix,
                                        in_dbs=self.in_dbs,
                                        in_regex=self.in_regex,
                                        in_priority=self.in_priority,
                                        ex_dbs=self.ex_dbs,
                                        ex_regex=self.ex_regex,
                                        ex_templates=self.ex_templates,
                                        vacuum=self.vacuum,
                                        db_owner=self.db_owner)
        self.logger.debug(Msg.DB_BACKER_VARS_INTRO)
        self.logger.debug(msg)

    def backup_db(self, dbname, bkps_dir):
        '''
        Target:
            - make a backup of a specified database.
        Parameters:
            - dbname: name of the database which is going to be backed up.
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        success = True
        # Get date and time of the zone
        init_ts = DateTools.get_date()
        # Get current year
        year = str(DateTools.get_year(init_ts))
        # Get current month
        month = str(DateTools.get_month(init_ts))
        # Create new directories with the year and the month of the backup
        bkp_dir = bkps_dir + year + '/' + month + '/'
        Dir.create_dir(bkp_dir, self.logger)
        # Set backup's name
        file_name = self.prefix + 'db_' + dbname + '_' + init_ts + '.' + \
            self.bkp_type

        # Build the command to run, depending on the backup type
        if self.bkp_type == 'gz':  # Compress with gzip
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | gzip > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'bz2':  # Compress with bzip2
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | bzip2 > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'zip':  # Compress with zip
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | zip > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        else:  # Do not compress
            command = 'pg_dump {} -Fc -U {} -h {} -p {} > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)

        try:
            # Execute the command in console
            result = subprocess.call(command, shell=True)
            if result != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_db": {}.'.format(
                str(e)))
            success = False

        return success

    def backup_dbs(self, dbs_all):
        '''
        Target:
            - make a backup of some specified databases.
        Parameters:
            - dbs_all: names of the databases which are going to be backed up.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Create a new directory with the name of the group
        bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

        if dbs_all:
            for db in dbs_all:

                dbname = db['datname']
                msg = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', msg, 'cyan')

                # Let the user know whether the database connection is allowed
                if not db['datallowconn']:
                    msg = Msg.FORBIDDEN_DB_CONNECTION.format(dbname=dbname)
                    self.logger.highlight('warning',
                                          msg,
                                          'yellow',
                                          effect='bold')
                    success = False

                else:
                    # Vacuum the database before the backup process if
                    # necessary
                    if self.vacuum:
                        self.logger.info(
                            Msg.PRE_VACUUMING_DB.format(dbname=dbname))
                        vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                            self.in_regex, self.in_priority,
                                            self.ex_dbs, self.ex_regex,
                                            self.ex_templates, self.db_owner,
                                            self.logger)

                        # Vacuum the database
                        success = vacuumer.vacuum_db(dbname)
                        if success:
                            msg = Msg.PRE_VACUUMING_DB_DONE.format(
                                dbname=dbname)
                            self.logger.info(msg)
                        else:
                            msg = Msg.PRE_VACUUMING_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                    self.logger.info(
                        Msg.BEGINNING_DB_BACKER.format(dbname=dbname))

                    start_time = DateTools.get_current_datetime()
                    # Make the backup of the database
                    success = self.backup_db(dbname, bkps_dir)
                    end_time = DateTools.get_current_datetime()
                    # Get and show the process' duration
                    diff = DateTools.get_diff_datetimes(start_time, end_time)

                if success:
                    msg = Msg.DB_BACKER_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')
                else:
                    msg = Msg.DB_BACKER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning',
                                          msg,
                                          'yellow',
                                          effect='bold')
        else:
            self.logger.highlight('warning',
                                  Msg.BACKER_HAS_NOTHING_TO_DO,
                                  'yellow',
                                  effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
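Backer layers inclusion/exclusion filters on top of the per-database dump. A minimal usage sketch with hypothetical values; dbs_all would normally come from the connecter's catalog query, whose rows carry the 'datname' and 'datallowconn' keys that backup_dbs reads:

# Minimal usage sketch for Backer (hypothetical values).
logger = Logger()
connecter = Connecter(server='localhost', user='postgres', port=5432,
                      logger=logger)

backer = Backer(connecter=connecter,
                bkp_path='/var/backups/pg/',
                group='nightly',
                bkp_type='gz',
                in_regex='^app_',     # only databases starting with "app_"
                ex_dbs=['postgres'],  # skip the maintenance database
                vacuum=True,
                logger=logger)

dbs_all = connecter.get_pg_dbs_data(backer.ex_templates, backer.db_owner)
backer.backup_dbs(dbs_all)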
Example #17
from common import jsonGetter
from logger.logger import Logger
from pageObjects.pages import MainPage
from utils import LinkOperations, GetUrl, GetText
import time

logger = Logger(logger="BaseTest").getlog()

LOCAL = jsonGetter.GetJson.getConfig("LOCAL")
SITE = jsonGetter.GetJson.getConfig("SITE")
actualBrowser = jsonGetter.GetJson.getConfig("actualBrowser")
ruLang = jsonGetter.GetJson.getData("ruLang")
enLang = jsonGetter.GetJson.getData("enLang")

logger.info("\n" + "Browser : " + actualBrowser + "\n" + "Language is: " + LOCAL)


class TestRunbrowser:
    def test_runbrowser(self):
        # BaseElement.RunBrowser(actualBrowser)
        logger.info("Trying to open url: " + SITE)
        LinkOperations.OpenLink(SITE)

    #def test_lang(self):
    #    assert SITE == GetUrl.Get().CurrentUrl()
    #    logger.info("Trying to set language")
    #    MainPage.MainPage().setLang()
    #    #language = GetText.GetText().byXpath(MainPage.MainPage().DropDownXpath)

Example #18
class Informer:

    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages
    connpids = []
    dbnames = []  # List of databases to get some info about
    usernames = []  # List of users to get some info about

    def __init__(self,
                 connecter=None,
                 connpids=[],
                 dbnames=[],
                 usernames=[],
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        self.connpids = connpids
        self.dbnames = dbnames
        self.usernames = usernames

    def show_pg_dbnames(self):
        '''
        Target:
            - show the names of every PostgreSQL database.
        '''
        msg_len = len(Messenger.SHOWING_DBS_NAME)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_DBS_NAME
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        # Get all the names of PostgreSQL databases and show them
        dbnames = self.connecter.get_pg_dbnames()

        if dbnames:
            for dbname in dbnames:
                self.logger.info(dbname)
        else:
            message = Messenger.NO_DB_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_usernames(self):
        '''
        Target:
            - show the names of every PostgreSQL user.
        '''
        msg_len = len(Messenger.SHOWING_USERS_NAME)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_USERS_NAME
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        # Get all the names of PostgreSQL users and show them
        usernames = self.connecter.get_pg_usernames()

        if usernames:
            for username in usernames:
                self.logger.info(username)
        else:
            message = Messenger.NO_USER_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_connpids(self):
        '''
        Target:
            - show the PIDs of every PostgreSQL backend.
        '''
        msg_len = len(Messenger.SHOWING_CONNS_PID)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_CONNS_PID
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        # Get all the PIDs of PostgreSQL backends and show them
        connpids = self.connecter.get_pg_connpids()

        if connpids:
            for connpid in connpids:
                self.logger.info(str(connpid))
        else:
            message = Messenger.NO_CONN_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_dbs_data(self):
        '''
        Target:
            - show some info about every PostgreSQL database.
        '''
        msg_len = len(Messenger.SHOWING_DBS_DATA)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_DBS_DATA
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        dbs_data = []
        # Get every PostgreSQL database if no list specified, otherwise, keep
        # the specified list (given by console arguments)
        if self.dbnames == []:
            self.dbnames = self.connecter.get_pg_dbnames()

        for dbname in self.dbnames:  # Get data of each selected database
            result = self.connecter.get_pg_db_data(dbname)
            if result:
                dbs_data.append(result)

        if dbs_data:
            for db in dbs_data:
                message = Messenger.DATNAME + str(db['datname'])
                self.logger.highlight('info', message, 'cyan')
                message = Messenger.OWNER + str(db['owner'])
                self.logger.info(message)
                message = Messenger.ENCODING + str(db['encoding'])
                self.logger.info(message)
                message = Messenger.DATSIZE + str(db['size'])
                self.logger.info(message)
                message = Messenger.DATCOLLATE + str(db['datcollate'])
                self.logger.info(message)
                message = Messenger.DATCTYPE + str(db['datctype'])
                self.logger.info(message)
                message = Messenger.DATISTEMPLATE + str(db['datistemplate'])
                self.logger.info(message)
                message = Messenger.DATALLOWCONN + str(db['datallowconn'])
                self.logger.info(message)
                message = Messenger.DATCONNLIMIT + str(db['datconnlimit'])
                self.logger.info(message)
                message = Messenger.DATLASTSYSOID + str(db['datlastsysoid'])
                self.logger.info(message)
                message = Messenger.DATFROZENXID + str(db['datfrozenxid'])
                self.logger.info(message)
                message = Messenger.DATTABLESPACE + str(db['dattablespace'])
                self.logger.info(message)
                message = Messenger.DATACL + str(db['datacl'])
                self.logger.info(message)
        else:
            message = Messenger.NO_DB_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_users_data(self):
        '''
        Target:
            - show some info about every PostgreSQL user.
        '''
        msg_len = len(Messenger.SHOWING_USERS_DATA)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_USERS_DATA
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        users_data = []
        # Get every PostgreSQL user if no list specified, otherwise, keep
        # the specified list (given by console arguments)

        if self.usernames == []:
            self.usernames = self.connecter.get_pg_usernames()

        for username in self.usernames:  # Get data of each selected user
            result = self.connecter.get_pg_user_data(username)
            if result:
                users_data.append(result)

        pg_version = self.connecter.get_pg_version()  # Get PostgreSQL version

        if users_data:
            for user in users_data:
                message = Messenger.USENAME + str(user['usename'])
                self.logger.highlight('info', message, 'cyan')
                message = Messenger.USESYSID + str(user['usesysid'])
                self.logger.info(message)
                message = Messenger.USECREATEDB + str(user['usecreatedb'])
                self.logger.info(message)
                message = Messenger.USESUPER + str(user['usesuper'])
                self.logger.info(message)
                message = Messenger.USECATUPD + str(user['usecatupd'])
                self.logger.info(message)
                if pg_version >= self.connecter.PG_PID_VERSION_THRESHOLD:
                    message = Messenger.USEREPL + str(user['userepl'])
                    self.logger.info(message)
                message = Messenger.PASSWD + str(user['passwd'])
                self.logger.info(message)
                message = Messenger.VALUNTIL + str(user['valuntil'])
                self.logger.info(message)
                message = Messenger.USECONFIG + str(user['useconfig'])
                self.logger.info(message)
        else:
            message = Messenger.NO_USER_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_conns_data(self):
        '''
        Target:
            - show some info about every PostgreSQL backend.
        '''
        msg_len = len(Messenger.SHOWING_CONNS_DATA)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_CONNS_DATA
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        conns_data = []
        # Get every PostgreSQL connection if no list specified, otherwise, keep
        # the specified list (given by console arguments)
        if self.connpids == []:
            self.connpids = self.connecter.get_pg_connpids()

        for connpid in self.connpids:  # Get data of each selected backend
            result = self.connecter.get_pg_conn_data(connpid)
            if result:
                conns_data.append(result)

        pg_version = self.connecter.get_pg_version()  # Get PostgreSQL version

        if conns_data:
            for conn in conns_data:
                if pg_version >= self.connecter.PG_PID_VERSION_THRESHOLD:
                    message = Messenger.PID + str(conn['pid'])
                else:
                    message = Messenger.PROCPID + str(conn['procpid'])
                self.logger.highlight('info', message, 'cyan')
                message = Messenger.DATID + str(conn['datid'])
                self.logger.info(message)
                message = Messenger.DATNAME + str(conn['datname'])
                self.logger.info(message)
                message = Messenger.USESYSID + str(conn['usesysid'])
                self.logger.info(message)
                message = Messenger.USENAME + str(conn['usename'])
                self.logger.info(message)
                message = Messenger.APPLICATION_NAME + str(
                    conn['application_name'])
                self.logger.info(message)
                message = Messenger.CLIENT_ADDR + str(conn['client_addr'])
                self.logger.info(message)
                message = Messenger.CLIENT_HOSTNAME + str(
                    conn['client_hostname'])
                self.logger.info(message)
                message = Messenger.CLIENT_PORT + str(conn['client_port'])
                self.logger.info(message)
                message = Messenger.BACKEND_START + str(conn['backend_start'])
                self.logger.info(message)
                message = Messenger.XACT_START + str(conn['xact_start'])
                self.logger.info(message)
                message = Messenger.QUERY_START + str(conn['query_start'])
                self.logger.info(message)
                if pg_version >= self.connecter.PG_PID_VERSION_THRESHOLD:
                    message = Messenger.STATE_CHANGE + str(
                        conn['state_change'])
                    self.logger.info(message)
                message = Messenger.WAITING + str(conn['waiting'])
                self.logger.info(message)
                if pg_version >= self.connecter.PG_PID_VERSION_THRESHOLD:
                    message = Messenger.STATE + str(conn['state'])
                    self.logger.info(message)
                    message = Messenger.QUERY + str(conn['query'])
                    self.logger.info(message)
        else:
            message = Messenger.NO_CONN_DATA_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_version(self):
        '''
        Target:
            - show PostgreSQL version.
        '''
        msg_len = len(Messenger.SHOWING_PG_VERSION)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_PG_VERSION
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        pretty_pg_version = self.connecter.get_pretty_pg_version()
        if pretty_pg_version:
            self.logger.info(pretty_pg_version)
        else:
            message = Messenger.NO_PG_VERSION_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_nversion(self):
        '''
        Target:
            - show PostgreSQL version in numeric format.
        '''
        pg_version = self.connecter.get_pg_version()
        print(pg_version)

    def show_pg_time_start(self):
        '''
        Target:
            - show when PostgreSQL was started.
        '''
        msg_len = len(Messenger.SHOWING_PG_TIME_START)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_PG_TIME_START
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        pg_time_start = self.connecter.get_pg_time_start()
        if pg_time_start:
            self.logger.info(str(pg_time_start))
        else:
            message = Messenger.NO_PG_TIME_START_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')

    def show_pg_time_up(self):
        '''
        Target:
            - show how long PostgreSQL has been working.
        '''
        msg_len = len(Messenger.SHOWING_PG_TIME_UP)
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')
        message = Messenger.SHOWING_PG_TIME_UP
        self.logger.highlight('info', message, 'white')
        message = '*' * msg_len
        self.logger.highlight('info', message, 'white')

        pg_time_up = self.connecter.get_pg_time_up()
        if pg_time_up:
            self.logger.info(str(pg_time_up))
        else:
            message = Messenger.NO_PG_TIME_UP_TO_SHOW
            self.logger.highlight('warning', message, 'yellow', effect='bold')
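A minimal usage sketch for Informer, with hypothetical connection values; when the dbnames/usernames/connpids lists are left empty, the corresponding show_* methods fall back to querying everything from the catalog:

# Minimal usage sketch for Informer (hypothetical connection values).
logger = Logger()
connecter = Connecter(server='localhost', user='postgres', port=5432,
                      logger=logger)

informer = Informer(connecter=connecter, logger=logger)
informer.show_pg_version()   # pretty version string
informer.show_pg_dbnames()   # every database name
informer.show_pg_dbs_data()  # per-database catalog details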
Example #19
def main():
    args = get_arguments()
    myargs = []  # getopts(sys.argv)
    now = datetime.datetime.now()
    cwd = os.getcwd()
    if len(myargs) > 0 and 'c' in myargs:
        config_file = myargs['c']
    else:
        config_file = 'config/trainer_config.yml'

    config = OmegaConf.load(os.path.join(cwd, config_file))['trainer']
    config.cwd = str(cwd)
    reproducibility(config)
    dt_string = now.strftime("%d_%m_%Y_%H.%M.%S")
    cpkt_fol_name = os.path.join(
        config.cwd,
        f'checkpoints/model_{config.model.name}/dataset_{config.dataset.name}/date_{dt_string}'
    )

    log = Logger(path=cpkt_fol_name, name='LOG').get_logger()

    best_pred_loss = 1000.0
    log.info(f"Checkpoint folder {cpkt_fol_name}")
    log.info(f"date and time = {dt_string}")

    log.info(f'pyTorch VERSION: {torch.__version__}')
    log.info(f'CUDA VERSION: {torch.version.cuda}')

    log.info(f'CUDNN VERSION:{torch.backends.cudnn.version()}')
    log.info(f'Number CUDA Devices: {torch.cuda.device_count()}')

    if args.tensorboard:

        writer_path = os.path.join(cpkt_fol_name, 'runs')

        writer = SummaryWriter(os.path.join(writer_path, util.datestr()))
    else:
        writer = None

    use_cuda = torch.cuda.is_available()

    device = torch.device("cuda:0" if use_cuda else "cpu")
    log.info(f'device: {device}')

    training_generator, val_generator, test_generator, class_dict = select_dataset(
        config)
    n_classes = len(class_dict)
    model = select_model(config, n_classes)

    log.info(f"{model}")

    if config.load:

        pth_file, _ = load_checkpoint(config.pretrained_cpkt,
                                      model,
                                      strict=True,
                                      load_seperate_layers=False)

    else:
        pth_file = None
    if config.cuda and use_cuda:
        if torch.cuda.device_count() > 1:
            log.info(f"Let's use {torch.cuda.device_count()} GPUs!")

            model = torch.nn.DataParallel(model)
    model.to(device)

    optimizer, scheduler = select_optimizer(model, config['model'], None)
    log.info(f'{model}')
    log.info(f"Checkpoint Folder {cpkt_fol_name} ")
    shutil.copy(os.path.join(config.cwd, config_file), cpkt_fol_name)

    trainer = Trainer(config,
                      model=model,
                      optimizer=optimizer,
                      data_loader=training_generator,
                      writer=writer,
                      logger=log,
                      valid_data_loader=val_generator,
                      test_data_loader=test_generator,
                      class_dict=class_dict,
                      lr_scheduler=scheduler,
                      checkpoint_dir=cpkt_fol_name)
    trainer.train()
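main() only dereferences a handful of keys from the 'trainer' section of that YAML file: model.name, dataset.name, load, pretrained_cpkt and cuda (plus whatever reproducibility and the select_* helpers need). A hedged sketch of that minimal shape, built with OmegaConf.create purely for illustration; the real trainer_config.yml is project-specific:

# Hedged sketch of the config shape main() expects; every value below
# is illustrative, only the key names are taken from main() itself.
from omegaconf import OmegaConf

config = OmegaConf.create({
    'trainer': {
        'model': {'name': 'resnet18'},
        'dataset': {'name': 'cifar10'},
        'load': False,          # restore pretrained_cpkt when True
        'pretrained_cpkt': '',  # checkpoint path used when load=True
        'cuda': True,           # allow GPU / DataParallel usage
    }
})['trainer']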
Example #20
class Mailer:

    level = 1  # Verbosity level of the email
    from_info = {}  # Information about the sender's email account
    to_infos = []  # List with the destination emails
    cc_infos = []  # List with the destination emails (carbon copy)
    bcc_infos = []  # List with the destination emails (blind carbon copy)
    server_tag = ''  # Alias of the sender's machine
    external_ip = ''  # External IP of the sender's machine
    op_type = ''  # Executed action
    group = None  # Affected group
    bkp_path = None  # Affected path of backups
    logger = None  # Logger to show and log some messages

    # Definition of constants

    OP_TYPES = {
        'u': 'Undefined method',
        'a': 'Alterer',
        'B': 'Backer',
        'd': 'Dropper',
        'r': 'Replicator',
        'R': 'Restorer',
        'T': 'Trimmer',
        't': 'Terminator',
        'v': 'Vacuumer',
    }

    OP_RESULTS = {
        0: ('<h2>{op_type}: <span style="color: green;">OK</span> at '
            '"{server_tag}"</h2>Date: <span style="font-weight: bold">{date}'
            '</span><br/>Time: <span style="font-weight: bold">{time}</span>'
            '<br/>Time zone: <span style="font-weight: bold">{zone}</span>'
            '<br/>Host name: <span style="font-weight: bold">{server}</span>'
            '<br/>Netifaces IPs: <span style="font-weight: bold">'
            '{internal_ips}</span><br/>External IP: <span style="font-weight: '
            'bold">{external_ip}</span><br/>Group: <span style="font-weight: '
            'bold">{group}</span><br/>Path: <span style="font-weight: bold">'
            '{bkp_path}</span><br/><br/><br/>The process has been executed '
            'successfully.<br/><br/>You can see its log file at the following '
            'path:<br/><br/>{log_file}.'),
        1: ('<h2>{op_type}: <span style="color: orange;">WARNING</span> at '
            '"{server_tag}"</h2>Date: <span style="font-weight: bold">{date}'
            '</span><br/>Time: <span style="font-weight: bold">{time}</span>'
            '<br/>Time zone: <span style="font-weight: bold">{zone}</span>'
            '<br/>Host name: <span style="font-weight: bold">{server}</span>'
            '<br/>Netifaces IPs: <span style="font-weight: bold">'
            '{internal_ips}</span><br/>External IP: <span style="font-weight: '
            'bold">{external_ip}</span><br/>Group: <span style="font-weight: '
            'bold">{group}</span><br/>Path: <span style="font-weight: bold">'
            '{bkp_path}</span><br/><br/><br/>There were some warnings during '
            'the process, but not critical errors. Anyway, please check it, '
            'because its behaviour is not bound to have been the expected '
            'one.<br/><br/>You can see its log file at the following path:'
            '<br/><br/>{log_file}.'),
        2: ('<h2>{op_type}: <span style="color: red;">ERROR</span> at '
            '"{server_tag}"</h2>Date: <span style="font-weight: bold">{date}'
            '</span><br/>Time: <span style="font-weight: bold">{time}</span>'
            '<br/>Time zone: <span style="font-weight: bold">{zone}</span>'
            '<br/>Host name: <span style="font-weight: bold">{server}</span>'
            '<br/>Netifaces IPs: <span style="font-weight: bold">'
            '{internal_ips}</span><br/>External IP: <span style="font-weight: '
            'bold">{external_ip}</span><br/>Group: <span style="font-weight: '
            'bold">{group}</span><br/>Path: <span style="font-weight: bold">'
            '{bkp_path}</span><br/><br/><br/>There were some errors during '
            'the process, and they prevented some operations, because the '
            'execution was truncated. Please check immediately.<br/><br/>You '
            'can see its log file at the following path:<br/><br/>'
            '{log_file}.'),
        3: ('<h2>{op_type}: <span style="color: purple;">CRITICAL</span> at '
            '"{server_tag}"</h2>Date: <span style="font-weight: bold">{date}'
            '</span><br/>Time: <span style="font-weight: bold">{time}</span>'
            '<br/>Time zone: <span style="font-weight: bold">{zone}</span>'
            '<br/>Host name: <span style="font-weight: bold">{server}</span>'
            '<br/>Netifaces IPs: <span style="font-weight: bold">'
            '{internal_ips}</span><br/>External IP: <span style="font-weight: '
            'bold">{external_ip}</span><br/>Group: <span style="font-weight: '
            'bold">{group}</span><br/>Path: <span style="font-weight: bold">'
            '{bkp_path}</span><br/><br/><br/>There were some critical errors '
            'during the process. The execution could not be carried out. '
            'Please check immediately.<br/><br/>You can see its log file at '
            'the following path:<br/><br/>{log_file}.'),
    }

    OP_RESULTS_NO_HTML = {
        0: ('{op_type}: OK at "{server_tag}"\n'
            'Date: {date}\n'
            'Time: {time}\n'
            'Time zone: {zone}\n'
            'Host name: {server}\n'
            'Netifaces IPs: {internal_ips}\n'
            'External IP: {external_ip}\n'
            'Group: {group}\n'
            'Path: {bkp_path}\n'
            'The process has been executed successfully.\n'
            'You can see its log file at the following path:\n'
            '{log_file}.\n'),
        1: ('{op_type}: WARNING at "{server_tag}"\n'
            'Date: {date}\n'
            'Time: {time}\n'
            'Time zone: {zone}\n'
            'Host name: {server}\n'
            'Netifaces IPs: {internal_ips}\n'
            'External IP: {external_ip}\n'
            'Group: {group}\n'
            'Path: {bkp_path}\n'
            'There were some warnings during the process, but not critical\n'
            'errors. Anyway, please check it, because its behaviour is not\n'
            'bound to have been the expected one. You can see its\n'
            'log file at the following path: {log_file}.\n'),
        2: ('{op_type}: ERROR at "{server_tag}"\n'
            'Date: {date}\n'
            'Time: {time}\n'
            'Time zone: {zone}\n'
            'Host name: {server}\n'
            'Netifaces IPs: {internal_ips}\n'
            'External IP: {external_ip}\n'
            'Group: {group}\n'
            'Path: {bkp_path}\n'
            'There were some errors during the process, and they prevented\n'
            'some operations, because the execution was truncated. Please\n'
            'check immediately. You can see its log file at the\n'
            'following path: {log_file}.\n'),
        3: ('{op_type}: CRITICAL at "{server_tag}"\n'
            'Date: {date}\n'
            'Time: {time}\n'
            'Time zone: {zone}\n'
            'Host name: {server}\n'
            'Netifaces IPs: {internal_ips}\n'
            'External IP: {external_ip}\n'
            'Group: {group}\n'
            'Path: {bkp_path}\n'
            'There were some critical errors during the process. The\n'
            'execution could not be carried out. Please check immediately.\n'
            'You can see its log file at the following path:\n'
            '{log_file}.\n'),
    }

    def __init__(self, level=1, username='', email='', password='',
                 to_infos=[], cc_infos=[], bcc_infos=[], server_tag='',
                 external_ip='', op_type='', logger=None):

        if logger:
            self.logger = logger
        else:
            from logger.logger import Logger
            self.logger = Logger()

        if isinstance(level, int) and level in Default.MAIL_LEVELS:
            self.level = level
        elif Checker.str_is_int(level):
            self.level = Casting.str_to_int(level)
        else:
            self.level = Default.MAIL_LEVEL

        self.from_info['email'] = email
        if not Checker.str_is_valid_mail(email):
            message = Messenger.INVALID_FROM_MAIL.format(
                email=email)
            self.logger.highlight('warning', message, 'yellow')

        self.from_info['name'] = username
        if not username:
            message = Messenger.INVALID_FROM_USERNAME
            self.logger.highlight('warning', message, 'yellow')

        self.from_info['pwd'] = password
        if not password:
            message = Messenger.INVALID_FROM_PASSWORD
            self.logger.highlight('warning', message, 'yellow')

        to_infos = Casting.str_to_list(to_infos)
        self.to_infos = self.get_mail_infos(to_infos)

        cc_infos = Casting.str_to_list(cc_infos)
        self.cc_infos = self.get_mail_infos(cc_infos)

        bcc_infos = Casting.str_to_list(bcc_infos)
        self.bcc_infos = self.get_mail_infos(bcc_infos)

        if op_type in self.OP_TYPES.keys():
            self.op_type = op_type
        else:
            self.op_type = 'u'

        self.server_tag = server_tag
        self.external_ip = external_ip

    def add_group(self, group):
        '''
        Target:
            - add a group to the information sent by the email. It will be used
              in case of "Backer" being executed.
        Parameters:
            - group: the group's name.
        '''
        self.group = group

    def add_bkp_path(self, bkp_path):
        '''
        Target:
            - add a path to the information sent by the email. It will be used
              in case of "Trimmer" being executed.
        Parameters:
            - bkp_path: the path where the involved backups are stored.
        '''
        self.bkp_path = bkp_path

    def get_mail_infos(self, mail_infos):
        '''
        Target:
            - takes a list of strings with mail data and a "username <email>"
              format, splits it into parts and gives the same data stored and
              classified in a dictionary.
        Parameters:
            - mail_infos: the list of strings to be converted.
        Return:
            - a list of dictionaries with the username and the address of some
              mail accounts.
        '''
        temp_list = []

        for record in mail_infos:

            if Checker.str_is_valid_mail_info(record):

                mail_info = Casting.str_to_mail_info(record)

                if Checker.str_is_valid_mail(mail_info['email']):
                    temp_list.append(mail_info)
                else:
                    message = Messenger.INVALID_TO_MAIL.format(
                        email=mail_info['email'])
                    self.logger.highlight('warning', message, 'yellow')

            else:
                message = Messenger.INVALID_TO_MAIL_INFO.format(
                    mail_info=record)
                self.logger.highlight('warning', message, 'yellow')

        return temp_list

    def send_mail(self, detected_level):
        '''
        Target:
            - send an email to the specified email addresses.
        '''
        message = Messenger.BEGINNING_MAILER
        self.logger.highlight('info', message, 'white')

        # Get current date
        date = DateTools.get_date(fmt='%d-%m-%Y')
        time = DateTools.get_date(fmt='%H:%M:%S')
        zone = DateTools.get_date(fmt='%Z')

        # Get server name and IP addresses data
        server = IpAddress.get_hostname(self.logger)

        internal_ips = ''
        netifaces = IpAddress.get_netifaces_ips(self.logger)
        if netifaces:
            last_index = len(netifaces) - 1
            for index, netiface in enumerate(netifaces):
                internal_ips += '{} > {}'.format(netiface['netiface'],
                                                 netiface['ip'])
                if index != last_index:
                    internal_ips += ', '

        # Email full info template, for: John Doe <*****@*****.**>
        ADDR_TMPLT = '{} <{}>'

        # Sender and recipients email addresses (needed for sending the email)
        from_email_str = self.from_info['email']
        to_emails_list = [info['email'] for info in self.to_infos]
        cc_emails_list = [info['email'] for info in self.cc_infos]
        bcc_emails_list = [info['email'] for info in self.bcc_infos]
        all_emails_list = to_emails_list + cc_emails_list + bcc_emails_list

        # Sender and recipients full info (used in email message header)
        from_info_str = ADDR_TMPLT.format(self.from_info['name'],
                                          self.from_info['email'])
        to_infos_str = ', '.join(ADDR_TMPLT.format(
            info['name'], info['email']) for info in self.to_infos)
        cc_infos_str = ', '.join(ADDR_TMPLT.format(
            info['name'], info['email']) for info in self.cc_infos)

        # Build a plain-text alternative for mail clients which cannot
        # render HTML

        html = self.OP_RESULTS[detected_level].format(
            op_type=self.OP_TYPES[self.op_type], server_tag=self.server_tag,
            date=date, time=time, zone=zone, server=server,
            internal_ips=internal_ips, external_ip=self.external_ip,
            group=self.group, bkp_path=self.bkp_path,
            log_file=str(self.logger.log_file))

        text = self.OP_RESULTS_NO_HTML[detected_level].format(
            op_type=self.OP_TYPES[self.op_type], server_tag=self.server_tag,
            date=date, time=time, zone=zone, server=server,
            internal_ips=internal_ips, external_ip=self.external_ip,
            group=self.group, bkp_path=self.bkp_path,
            log_file=str(self.logger.log_file))

        # Specifying other email data (used in email message header)
        mail = MIMEMultipart('alternative')
        mail['From'] = from_info_str
        mail['To'] = to_infos_str
        mail['Cc'] = cc_infos_str
        mail['Subject'] = '[INFO] {op_type} results'.format(
            op_type=self.OP_TYPES[self.op_type].upper())

        # Record the MIME types of both parts - text/plain and text/html.
        part1 = MIMEText(text, 'plain')
        part2 = MIMEText(html, 'html')

        # Attach parts into message container. According to RFC 2046, the last
        # part of a multipart message, in this case the HTML message, is best
        # and preferred.
        mail.attach(part1)
        mail.attach(part2)

        msg_full = mail.as_string().encode()

        if all_emails_list:

            for email in all_emails_list:
                self.logger.info(Messenger.MAIL_DESTINY.format(email=email))

            # Sending the mail
            try:
                server = smtplib.SMTP('smtp.gmail.com:587')
                server.starttls()
                server.login(self.from_info['email'], self.from_info['pwd'])
                server.sendmail(from_email_str, all_emails_list, msg_full)
                server.quit()

            except smtplib.SMTPException as e:
                message = Messenger.SEND_MAIL_FAIL
                self.logger.highlight('info', message, 'yellow')
                self.logger.debug('Error en la función "send_mail": '
                                  '{}'.format(str(e)))

        else:
            message = Messenger.MAILER_HAS_NOTHING_TO_DO
            self.logger.highlight('info', message, 'yellow')

        message = Messenger.SEND_MAIL_DONE
        self.logger.highlight('info', message, 'green')
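A minimal usage sketch for Mailer; every address and credential below is a placeholder, op_type maps through OP_TYPES ('B' is Backer), and the argument to send_mail is the OP_RESULTS key (0 OK, 1 WARNING, 2 ERROR, 3 CRITICAL):

# Minimal usage sketch for Mailer (all addresses are placeholders).
mailer = Mailer(level=1,
                username='Backup Bot',
                email='sender@example.com',
                password='app-password',
                to_infos=['Admin <admin@example.com>'],
                server_tag='db-host-01',
                op_type='B',  # 'B' -> Backer, see OP_TYPES
                logger=Logger())
mailer.add_group('nightly')
mailer.send_mail(0)  # 0 == OK in OP_RESULTS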
Example #21
class abstract_model(torch.nn.Module):
    def __init__(self,
                 data_name="ml-1m",
                 model_name="SASRec",
                 min_user_number=5,
                 min_item_number=5):
        super(abstract_model, self).__init__()
        self.data_name = data_name
        self.model_name = model_name
        self.min_user_number = min_user_number
        self.min_item_number = min_item_number

        road = os.path.abspath(os.getcwd())
        localtime = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
        self.logger = Logger(os.path.join(road, "log", self.model_name,
                                          str(localtime)))

    def init_weights(self, module):
        if isinstance(module, nn.Embedding):
            xavier_normal_(module.weight.data)
        elif isinstance(module, nn.Linear):
            xavier_normal_(module.weight.data)
            if module.bias is not None:
                constant_(module.bias.data, 0)

    def logging(self):
        self.logger.info("------------------" + str(self.model_name) +
                         "--------------------")
        self.logger.info("learning_rate:" + str(self.learning_rate))
        self.logger.info("batch_size:" + str(self.batch_size))
        self.logger.info("embedding_size:" + str(self.embedding_size))
        self.logger.info("number_of_epochs:" + str(self.episodes))
        self.logger.info("verbose:" + str(self.verbose))
        self.logger.info("data_name: " + str(self.data_name))
        self.logger.info("num_user:"******"num_items:" + str(self.item_number))
Example #22
class BackerCluster:

    bkp_path = ''  # The path where the backups are stored
    group = ''  # The name of the subdirectory where the backups are stored
    bkp_type = ''  # The type of the backups' files
    prefix = ''  # The prefix of the backups' names
    # Flag which determines whether the databases must be vacuumed before the
    # backup process
    vacuum = True
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self, connecter=None, bkp_path='', group='',
                 bkp_type='dump', prefix='', vacuum=True, logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        # If backup directory is not specified, create a default one to store
        # the backups
        if bkp_path:
            self.bkp_path = bkp_path
        else:
            self.bkp_path = Default.BKP_PATH
            Dir.create_dir(self.bkp_path, self.logger)

        if group:
            self.group = group
        else:
            self.group = Default.GROUP

        if bkp_type is None:
            self.bkp_type = Default.BKP_TYPE
        elif Checker.check_compress_type(bkp_type):
            self.bkp_type = bkp_type
        else:
            self.logger.stop_exe(Msg.INVALID_BKP_TYPE)

        self.prefix = prefix

        if isinstance(vacuum, bool):
            self.vacuum = vacuum
        elif Checker.str_is_bool(vacuum):
            self.vacuum = Casting.str_to_bool(vacuum)
        else:
            self.logger.stop_exe(Msg.INVALID_VACUUM)

        msg = Msg.CL_BACKER_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, bkp_path=self.bkp_path, group=self.group,
            bkp_type=self.bkp_type, prefix=self.prefix, vacuum=self.vacuum)
        self.logger.debug(Msg.CL_BACKER_VARS_INTRO)
        self.logger.debug(msg)

    def backup_all(self, bkps_dir):
        '''
        Target:
            - make a backup of a cluster.
        Parameters:
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        success = True
        # Get the date and time of the time zone
        init_ts = DateTools.get_date()
        # Get current year
        year = str(DateTools.get_year(init_ts))
        # Get current month
        month = str(DateTools.get_month(init_ts))
        # Create new directories with the year and the month of the backup
        bkp_dir = bkps_dir + year + '/' + month + '/'
        Dir.create_dir(bkp_dir, self.logger)

        # Set backup's name
        file_name = self.prefix + 'ht_' + self.connecter.server + \
            str(self.connecter.port) + '_cluster_' + init_ts + '.' + \
            self.bkp_type

        # Store the command to do depending on the backup type
        if self.bkp_type == 'gz':  # Zip with gzip
            command = 'pg_dumpall -U {} -h {} -p {} | gzip > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'bz2':  # Zip with bzip2
            command = 'pg_dumpall -U {} -h {} -p {} | bzip2 > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'zip':  # Zip with zip
            command = 'pg_dumpall -U {} -h {} -p {} | zip > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        else:  # Do not zip
            command = 'pg_dumpall -U {} -h {} -p {} > {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        try:
            # Execute the command in console
            result = subprocess.call(command, shell=True)
            if result != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_all": {}.'.format(
                str(e)))
            success = False

        return success

    def backup_cl(self):
        '''
        Target:
            - vacuum if necessary and make a backup of a cluster.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Create a new directory with the name of the group
        bkps_dir = self.bkp_path + self.group + Default.CL_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        # Vacuum the databases before the backup process if necessary
        if self.vacuum:
            vacuumer = Vacuumer(connecter=self.connecter, logger=self.logger)
            dbs_all = vacuumer.connecter.get_pg_dbs_data(vacuumer.ex_templates,
                                                         vacuumer.db_owner)
            vacuumer.vacuum_dbs(dbs_all)

        self.logger.highlight('info', Msg.BEGINNING_CL_BACKER, 'white')

        start_time = DateTools.get_current_datetime()
        # Make the backup of the cluster
        success = self.backup_all(bkps_dir)
        end_time = DateTools.get_current_datetime()
        # Get and show the process' duration
        diff = DateTools.get_diff_datetimes(start_time, end_time)

        if success:
            msg = Msg.CL_BACKER_DONE.format(diff=diff)
            self.logger.highlight('info', msg, 'green', effect='bold')
        else:
            self.logger.highlight('warning', Msg.CL_BACKER_FAIL,
                                  'yellow', effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green',
                              effect='bold')
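
A hypothetical usage sketch (the Connecter signature is inferred from the
Alterer example further below; host, port and paths are made up):

connecter = Connecter(server='localhost', user='postgres', port=5432)
backer = BackerCluster(connecter=connecter, bkp_path='/var/backups/pg/',
                       group='nightly', bkp_type='gz', prefix='pre_',
                       vacuum=True)
# Vacuums every database if requested, then runs pg_dumpall | gzip
backer.backup_cl()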
Example #23
class Terminator:

    target_all = None  # Flag which determines whether to terminate every connection
    target_user = None  # Terminate every connection of a specific user
    target_dbs = []  # Terminate every connection to the listed databases
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self, connecter, target_all=False, target_user='',
                 target_dbs=[], logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if target_all is None or isinstance(target_all, bool):
            self.target_all = target_all
        elif Checker.str_is_bool(target_all):
            self.target_all = Casting.str_to_bool(target_all)
        else:
            self.logger.stop_exe(Messenger.INVALID_TARGET_ALL)

        self.target_user = target_user

        if target_dbs is None:
            self.target_dbs = []
        elif isinstance(target_dbs, list):
            self.target_dbs = target_dbs
        else:
            self.target_dbs = Casting.str_to_list(target_dbs)

        message = Messenger.TERMINATOR_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, target_all=self.target_all,
            target_user=target_user, target_dbs=self.target_dbs)
        self.logger.debug(Messenger.TERMINATOR_VARS_INTRO)
        self.logger.debug(message)

    def terminate_backend_user(self):
        '''
        Target:
            - terminate every connection of a specific user to PostgreSQL
              (unless the target user is the one who is running the program).
        '''
        message = Messenger.BEGINNING_TERMINATE_USER_CONN.format(
            target_user=self.target_user)
        self.logger.highlight('info', message, 'white')

        try:
            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            sql = Queries.GET_CURRENT_PG_USER
            self.connecter.cursor.execute(sql)
            current_pg_user = self.connecter.cursor.fetchone()[0]

            if self.target_user == current_pg_user:
                message = Messenger.TARGET_USER_IS_CURRENT_USER.format(
                    target_user=self.target_user)
                self.logger.highlight('warning', message, 'yellow')

            else:
                formatted_sql = Queries.BACKEND_PG_USER_EXISTS.format(
                    pg_pid=pg_pid, target_user=self.target_user)
                self.connecter.cursor.execute(formatted_sql)
                result = self.connecter.cursor.fetchone()

                if result:
                    formatted_sql = Queries.TERMINATE_BACKEND_PG_USER.format(
                        pg_pid=pg_pid, target_user=self.target_user)
                    self.connecter.cursor.execute(formatted_sql)
                else:
                    message = Messenger.NO_USER_CONNS.format(
                        target_user=self.target_user)
                    self.logger.info(message)

            message = Messenger.TERMINATE_USER_CONN_DONE.format(
                target_user=self.target_user)
            self.logger.highlight('info', message, 'green')

        except Exception as e:
            self.logger.debug('Error en la función "terminate_backend_user": '******'{}.'.format(str(e)))
            message = Messenger.TERMINATE_USER_CONN_FAIL.format(
                target_user=self.target_user)
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')

    def terminate_backend_db(self, target_db):
        '''
        Target:
            - terminate every connection to a PostgreSQL database (except the
              current one, if it is connected to the target database).
        '''
        try:
            # The variable "target_db" sometimes could be a string or a list
            # of list, so it is necessary to check it first
            if not isinstance(target_db, str):
                target_db = target_db['datname']

            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            formatted_sql = Queries.BACKEND_PG_DB_EXISTS.format(
                pg_pid=pg_pid, target_db=target_db)

            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:

                formatted_sql = Queries.TERMINATE_BACKEND_PG_DB.format(
                    pg_pid=pg_pid, target_db=target_db)

                self.connecter.cursor.execute(formatted_sql)

                message = Messenger.TERMINATE_DB_CONN_DONE.format(
                    target_dbname=target_db)
                self.logger.info(message)

            else:
                message = Messenger.NO_DB_CONNS.format(target_db=target_db)
                self.logger.info(message)

        except Exception as e:
            self.logger.debug('Error en la función "terminate_backend_db": '
                              '{}.'.format(str(e)))
            message = Messenger.TERMINATE_DB_CONN_FAIL.format(
                target_dbname=target_db)
            self.logger.highlight('warning', message, 'yellow')

    def terminate_backend_dbs(self, ter_list):
        '''
        Target:
            - terminate every connection to some PostgreSQL databases (except
              the current one, if it is connected to one of the target
              databases).
        Parameters:
            - ter_list: the list of databases whose connections are going to be
              terminated.
        '''
        message = Messenger.BEGINNING_TERMINATE_DBS_CONN
        self.logger.highlight('info', message, 'white')

        if ter_list:
            for target_db in ter_list:
                self.terminate_backend_db(target_db)
        else:
            self.logger.highlight('warning',
                                  Messenger.TERMINATOR_HAS_NOTHING_TO_DO,
                                  'yellow')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')

    def terminate_backend_all(self):
        '''
        Target:
            - remove every connection to PostgreSQL (except the current one).
        '''
        try:
            message = Messenger.BEGINNING_TERMINATE_ALL_CONN
            self.logger.highlight('info', message, 'white')

            pg_pid = self.connecter.get_pid_str()  # Get PID variable's name

            formatted_sql = Queries.BACKEND_PG_ALL_EXISTS.format(pg_pid=pg_pid)

            self.connecter.cursor.execute(formatted_sql)
            result = self.connecter.cursor.fetchone()

            if result:
                formatted_sql = Queries.TERMINATE_BACKEND_PG_ALL.format(
                    pg_pid=pg_pid)
                self.connecter.cursor.execute(formatted_sql)
            else:
                self.logger.info(Messenger.NO_CONNS)

            self.logger.highlight('info', Messenger.TERMINATE_ALL_CONN_DONE,
                                  'green')

        except Exception as e:
            self.logger.debug('Error en la función "terminate_backend_all": '
                              '{}.'.format(str(e)))
            message = Messenger.TERMINATE_ALL_CONN_FAIL
            self.logger.highlight('warning', message, 'yellow', effect='bold')

        self.logger.highlight('info', Messenger.TERMINATOR_DONE, 'green')
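
The Queries constants above are project-specific, but the underlying mechanism
is PostgreSQL's pg_terminate_backend. A plausible shape of the per-database
query, assuming {pg_pid} resolves to the backend PID column ("pid" on
PostgreSQL >= 9.2, "procpid" before):

TERMINATE_BACKEND_PG_DB = '''
    SELECT pg_terminate_backend({pg_pid})
    FROM pg_stat_activity
    WHERE datname = '{target_db}'
      AND {pg_pid} <> pg_backend_pid();
'''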
Example #24
class Alterer:

    in_dbs = []  # List of databases to be included in the process
    old_role = ''  # Current owner of the database's tables
    new_role = ''  # New owner for the database and its tables
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self, connecter=None, in_dbs=[], old_role='', new_role='',
                 logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if old_role:
            self.old_role = old_role
        else:
            self.logger.stop_exe(Msg.NO_OLD_ROLE)

        if not new_role:
            self.logger.stop_exe(Msg.NO_NEW_ROLE)
        # First check whether the user exists in PostgreSQL or not
        self.connecter.cursor.execute(Queries.PG_USER_EXISTS, (new_role, ))
        # Do not alter database if the user does not exist
        result = self.connecter.cursor.fetchone()
        if result:
            self.new_role = new_role
        else:
            msg = Msg.USER_DOES_NOT_EXIST.format(user=new_role)
            self.logger.stop_exe(msg)

        msg = Msg.ALTERER_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, in_dbs=self.in_dbs,
            old_role=self.old_role, new_role=self.new_role)
        self.logger.debug(Msg.ALTERER_VARS_INTRO)
        self.logger.debug(msg)

    def alter_db_owner(self, db):
        '''
        Target:
            - change the owner of a database and its tables.
        Parameters:
            - db: database which is going to be altered.
        Return:
            - a boolean which indicates the success of the process.
        '''
        msg = Msg.ALTERER_FEEDBACK.format(old_role=self.old_role,
                                          new_role=self.new_role)
        self.logger.info(msg)

        success = True
        dbname = db['datname']

        if db['owner'] != 'postgres':  # Do not alter databases owned by postgres

            if db['datallowconn'] == 1:  # Check if the db allows connections

                try:
                    # Change the owner of the database
                    self.connecter.cursor.execute(
                        Queries.CHANGE_PG_DB_OWNER.format(
                            dbname=dbname, new_role=self.new_role))

                except Exception as e:
                    success = False
                    self.logger.debug('Error en la función "alter_db_owner": '
                                      '{}'.format(str(e)))
                    msg = Msg.CHANGE_PG_DB_OWNER_FAIL
                    self.logger.highlight('warning', msg, 'yellow')

                # Start another connection to the target database to be able to
                # apply the next query
                own_connecter = Connecter(server=self.connecter.server,
                                          user=self.connecter.user,
                                          port=self.connecter.port,
                                          database=dbname, logger=self.logger)

                # Disallow connections to the database during the process
                result = self.connecter.disallow_db_conn(dbname)
                if not result:
                    msg = Msg.DISALLOW_CONN_TO_PG_DB_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

                try:
                    # Change the owner of the database's tables
                    own_connecter.cursor.execute(
                        Queries.REASSIGN_PG_DB_TBLS_OWNER.format(
                            old_role=self.old_role, new_role=self.new_role))

                except Exception as e:
                    success = False
                    self.logger.debug('Error en la función "alter_db_owner": '
                                      '{}'.format(str(e)))
                    msg = Msg.REASSIGN_PG_DB_TBLS_OWNER_FAIL
                    self.logger.highlight('warning', msg, 'yellow')

                # Allow connections to the database at the end of the process
                result = self.connecter.allow_db_conn(dbname)
                if not result:
                    msg = Msg.ALLOW_CONN_TO_PG_DB_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow')

                # Close cursor and connection to the target database
                own_connecter.pg_disconnect()

            else:
                success = False
                msg = Msg.DB_DOES_NOT_ALLOW_CONN.format(dbname=dbname)
                self.logger.highlight('warning', msg, 'yellow')

        else:
            success = False
            msg = Msg.DB_OWNED_BY_POSTGRES_NOT_ALLOWED
            self.logger.highlight('warning', msg, 'yellow')

        return success

    def alter_dbs_owner(self, alt_list):
        '''
        Target:
            - change the owner of a group of databases and their tables.
        Parameters:
            - alt_list: names of the databases which are going to be altered.
        '''
        self.logger.highlight('info', Msg.PROCESSING_ALTERER, 'white')

        if alt_list:

            for db in alt_list:

                dbname = db['datname']

                msg = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', msg, 'cyan')

                start_time = DateTools.get_current_datetime()
                # Change the owner of the database
                success = self.alter_db_owner(db)
                end_time = DateTools.get_current_datetime()
                # Get and show the process' duration
                diff = DateTools.get_diff_datetimes(start_time, end_time)

                if success:
                    msg = Msg.DB_ALTERER_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')
                else:
                    msg = Msg.DB_ALTERER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow',
                                          effect='bold')
        else:
            self.logger.highlight('warning', Msg.ALTERER_HAS_NOTHING_TO_DO,
                                  'yellow', effect='bold')

        self.logger.highlight('info', Msg.ALTERER_DONE, 'green', effect='bold')
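
Plausible shapes of the two queries used above (the project's Queries constants
are not shown here); both are standard PostgreSQL statements, and REASSIGN
OWNED only affects the current database, which is why the code opens a separate
connection to each target database:

CHANGE_PG_DB_OWNER = 'ALTER DATABASE "{dbname}" OWNER TO "{new_role}";'
REASSIGN_PG_DB_TBLS_OWNER = 'REASSIGN OWNED BY "{old_role}" TO "{new_role}";'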
Example #25
class RestorerCluster:

    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages
    cluster_backup = ''  # Absolute path of the backup file (of a cluster)

    def __init__(self, connecter=None, cluster_backup='', logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Messenger.NO_CONNECTION_PARAMS)

        if cluster_backup and os.path.isfile(cluster_backup):
            self.cluster_backup = cluster_backup
        else:
            self.logger.stop_exe(Messenger.NO_BKP_TO_RESTORE)

        message = Messenger.CL_RESTORER_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, cluster_backup=self.cluster_backup)
        self.logger.debug(Messenger.CL_RESTORER_VARS_INTRO)
        self.logger.debug(message)

    def restore_cluster_backup(self):
        '''
        Target:
            - restore a cluster's backup in PostgreSQL. The cluster must have
              been created before this process.
        '''
        # Regular expression which must match the backup's name
        regex = r'.*ht_(.+_cluster)_(\d{8}_\d{6}_.+)\.(dump|bz2|gz|zip)$'
        regex = re.compile(regex)

        if re.match(regex, self.cluster_backup):
            # Store the parts of the backup's name (servername, date, ext)
            parts = regex.search(self.cluster_backup).groups()
            # Store only the extension to know the type of file
            ext = parts[2]
        else:
            self.logger.stop_exe(Messenger.NO_BACKUP_FORMAT)

        message = Messenger.BEGINNING_CL_RESTORER.format(
            cluster_backup=self.cluster_backup)
        self.logger.highlight('info', message, 'white')
        self.logger.info(Messenger.WAIT_PLEASE)

        # TODO: suppress the lines the operation prints to the console
        if ext == 'gz':
            command = 'gunzip -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'bz2':
            command = 'bunzip2 -c {} -k | psql postgres -U {} -h {} ' \
                      '-p {}'.format(
                          self.cluster_backup, self.connecter.user,
                          self.connecter.server, self.connecter.port)
        elif ext == 'zip':
            command = 'unzip -p {} | psql postgres -U {} -h {} -p {}'.format(
                self.cluster_backup, self.connecter.user,
                self.connecter.server, self.connecter.port)
        else:
            command = 'psql postgres -U {} -h {} -p {} < {}'.format(
                self.connecter.user, self.connecter.server,
                self.connecter.port, self.cluster_backup)

        try:
            start_time = DateTools.get_current_datetime()
            # Perform the restoration of the cluster
            result = subprocess.call(command, shell=True)
            end_time = DateTools.get_current_datetime()
            # Get and show the process' duration
            diff = DateTools.get_diff_datetimes(start_time, end_time)

            if result != 0:
                raise Exception()

            message = Messenger.RESTORE_CL_DONE.format(
                cluster_backup=self.cluster_backup, diff=diff)
            self.logger.highlight('info', message, 'green')

            self.logger.highlight('info', Messenger.RESTORER_DONE, 'green',
                                  effect='bold')

        except Exception as e:
            self.logger.debug('Error en la función "restore_cluster_backup": '
                              '{}.'.format(str(e)))
            message = Messenger.RESTORE_CL_FAIL.format(
                cluster_backup=self.cluster_backup)
            self.logger.stop_exe(message)
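
A hypothetical usage sketch (connection details and the backup path are made
up; the file name must match the regex shown above):

connecter = Connecter(server='localhost', user='postgres', port=5432)
restorer = RestorerCluster(
    connecter=connecter,
    cluster_backup='/var/backups/pg/ht_localhost5432_cluster_'
                   '20240101_120000_all.gz')
restorer.restore_cluster_backup()  # gunzip -c ... | psql postgres ...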
Example #26
class Backer:

    bkp_path = ''  # The path where the backups are stored
    group = ''  # The name of the subdirectory where the backups are stored
    bkp_type = ''  # The type of the backups' files
    prefix = ''  # The prefix of the backups' names
    in_dbs = []  # List of databases to be included in the process
    in_regex = ''  # Regular expression which must match the included databases
    # Flag which determines whether inclusion conditions take precedence over
    # the exclusion ones
    in_priority = False
    ex_dbs = []  # List of databases to be excluded in the process
    ex_regex = ''  # Regular expression which must match the excluded databases
    # Flag which determines whether the template databases must be excluded
    ex_templates = True
    # Flag which determines whether the included databases must be vacuumed
    # before the backup process
    vacuum = True
    # Use other PostgreSQL user during the backup process (only for superusers)
    db_owner = ''
    # An object with connection parameters to connect to PostgreSQL
    connecter = None
    logger = None  # Logger to show and log some messages

    def __init__(self, connecter=None, bkp_path='', group='',
                 bkp_type='dump', prefix='', in_dbs=[], in_regex='',
                 in_priority=False, ex_dbs=['postgres'], ex_regex='',
                 ex_templates=True, vacuum=True, db_owner='', logger=None):

        if logger:
            self.logger = logger
        else:
            self.logger = Logger()

        if connecter:
            self.connecter = connecter
        else:
            self.logger.stop_exe(Msg.NO_CONNECTION_PARAMS)

        # If backup directory is not specified, create a default one to store
        # the backups
        if bkp_path:
            self.bkp_path = bkp_path
        else:
            self.bkp_path = Default.BKP_PATH
            Dir.create_dir(self.bkp_path, self.logger)

        if group:
            self.group = group
        else:
            self.group = Default.GROUP

        if bkp_type is None:
            self.bkp_type = Default.BKP_TYPE
        elif Checker.check_compress_type(bkp_type):
            self.bkp_type = bkp_type
        else:
            self.logger.stop_exe(Msg.INVALID_BKP_TYPE)

        self.prefix = prefix

        if isinstance(in_dbs, list):
            self.in_dbs = in_dbs
        else:
            self.in_dbs = Casting.str_to_list(in_dbs)

        if Checker.check_regex(in_regex):
            self.in_regex = in_regex
        else:
            self.logger.stop_exe(Msg.INVALID_IN_REGEX)

        if isinstance(in_priority, bool):
            self.in_priority = in_priority
        elif Checker.str_is_bool(in_priority):
            self.in_priority = Casting.str_to_bool(in_priority)
        else:
            self.logger.stop_exe(Msg.INVALID_IN_PRIORITY)

        if isinstance(ex_dbs, list):
            self.ex_dbs = ex_dbs
        else:
            self.ex_dbs = Casting.str_to_list(ex_dbs)

        if Checker.check_regex(ex_regex):
            self.ex_regex = ex_regex
        else:
            self.logger.stop_exe(Msg.INVALID_EX_REGEX)

        if isinstance(ex_templates, bool):
            self.ex_templates = ex_templates
        elif Checker.str_is_bool(ex_templates):
            self.ex_templates = Casting.str_to_bool(ex_templates)
        else:
            self.logger.stop_exe(Msg.INVALID_EX_TEMPLATES)

        if isinstance(vacuum, bool):
            self.vacuum = vacuum
        elif Checker.str_is_bool(vacuum):
            self.vacuum = Casting.str_to_bool(vacuum)
        else:
            self.logger.stop_exe(Msg.INVALID_VACUUM)

        if db_owner:
            self.db_owner = db_owner
        else:
            self.db_owner = Default.DB_OWNER

        msg = Msg.DB_BACKER_VARS.format(
            server=self.connecter.server, user=self.connecter.user,
            port=self.connecter.port, bkp_path=self.bkp_path, group=self.group,
            bkp_type=self.bkp_type, prefix=self.prefix, in_dbs=self.in_dbs,
            in_regex=self.in_regex, in_priority=self.in_priority,
            ex_dbs=self.ex_dbs, ex_regex=self.ex_regex,
            ex_templates=self.ex_templates, vacuum=self.vacuum,
            db_owner=self.db_owner)
        self.logger.debug(Msg.DB_BACKER_VARS_INTRO)
        self.logger.debug(msg)

    def backup_db(self, dbname, bkps_dir):
        '''
        Target:
            - make a backup of a specified database.
        Parameters:
            - dbname: name of the database which is going to be backed up.
            - bkps_dir: directory where the backup is going to be stored.
        Return:
            - a boolean which indicates the success of the process.
        '''
        success = True
        # Get the date and time of the time zone
        init_ts = DateTools.get_date()
        # Get current year
        year = str(DateTools.get_year(init_ts))
        # Get current month
        month = str(DateTools.get_month(init_ts))
        # Create new directories with the year and the month of the backup
        bkp_dir = bkps_dir + year + '/' + month + '/'
        Dir.create_dir(bkp_dir, self.logger)
        # Set backup's name
        file_name = self.prefix + 'db_' + dbname + '_' + init_ts + '.' + \
            self.bkp_type

        # Store the command to do depending on the backup type
        if self.bkp_type == 'gz':  # Zip with gzip
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | gzip > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'bz2':  # Zip with bzip2
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | bzip2 > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        elif self.bkp_type == 'zip':  # Zip with zip
            command = 'pg_dump {} -Fc -U {} -h {} -p {} | zip > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)
        else:  # Do not zip
            command = 'pg_dump {} -Fc -U {} -h {} -p {} > {}'.format(
                dbname, self.connecter.user, self.connecter.server,
                self.connecter.port, bkp_dir + file_name)

        try:
            # Execute the command in console
            result = subprocess.call(command, shell=True)
            if result != 0:
                raise Exception()

        except Exception as e:
            self.logger.debug('Error en la función "backup_db": {}.'.format(
                str(e)))
            success = False

        return success

    def backup_dbs(self, dbs_all):
        '''
        Target:
            - make a backup of some specified databases.
        Parameters:
            - dbs_all: names of the databases which are going to be backed up.
        '''
        self.logger.highlight('info', Msg.CHECKING_BACKUP_DIR, 'white')

        # Create a new directory with the name of the group
        bkps_dir = self.bkp_path + self.group + Default.DB_BKPS_DIR
        Dir.create_dir(bkps_dir, self.logger)

        self.logger.info(Msg.DESTINY_DIR.format(path=bkps_dir))

        self.logger.highlight('info', Msg.PROCESSING_DB_BACKER, 'white')

        if dbs_all:
            for db in dbs_all:

                dbname = db['datname']
                msg = Msg.PROCESSING_DB.format(dbname=dbname)
                self.logger.highlight('info', msg, 'cyan')

                # Let the user know whether the database connection is allowed
                if not db['datallowconn']:
                    msg = Msg.FORBIDDEN_DB_CONNECTION.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow',
                                          effect='bold')
                    success = False

                else:
                    # Vacuum the database before the backup process if
                    # necessary
                    if self.vacuum:
                        self.logger.info(Msg.PRE_VACUUMING_DB.format(
                            dbname=dbname))
                        vacuumer = Vacuumer(self.connecter, self.in_dbs,
                                            self.in_regex, self.in_priority,
                                            self.ex_dbs, self.ex_regex,
                                            self.ex_templates, self.db_owner,
                                            self.logger)

                        # Vacuum the database
                        success = vacuumer.vacuum_db(dbname)
                        if success:
                            msg = Msg.PRE_VACUUMING_DB_DONE.format(
                                dbname=dbname)
                            self.logger.info(msg)
                        else:
                            msg = Msg.PRE_VACUUMING_DB_FAIL.format(
                                dbname=dbname)
                            self.logger.highlight('warning', msg, 'yellow')

                    self.logger.info(Msg.BEGINNING_DB_BACKER.format(
                        dbname=dbname))

                    start_time = DateTools.get_current_datetime()
                    # Make the backup of the database
                    success = self.backup_db(dbname, bkps_dir)
                    end_time = DateTools.get_current_datetime()
                    # Get and show the process' duration
                    diff = DateTools.get_diff_datetimes(start_time, end_time)

                if success:
                    msg = Msg.DB_BACKER_DONE.format(dbname=dbname, diff=diff)
                    self.logger.highlight('info', msg, 'green')
                else:
                    msg = Msg.DB_BACKER_FAIL.format(dbname=dbname)
                    self.logger.highlight('warning', msg, 'yellow',
                                          effect='bold')
        else:
            self.logger.highlight('warning', Msg.BACKER_HAS_NOTHING_TO_DO,
                                  'yellow', effect='bold')

        self.logger.highlight('info', Msg.BACKER_DONE, 'green', effect='bold')
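
A hypothetical end-to-end sketch of the database backer (connection details
are made up; get_pg_dbs_data is inferred from the BackerCluster example above):

connecter = Connecter(server='localhost', user='postgres', port=5432)
backer = Backer(connecter=connecter, bkp_path='/var/backups/pg/',
                group='nightly', bkp_type='gz', in_regex='^app_.*',
                ex_dbs=['postgres'], vacuum=True)
# Resolve the candidate databases, then back each one up
dbs_all = connecter.get_pg_dbs_data(backer.ex_templates, backer.db_owner)
backer.backup_dbs(dbs_all)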