Example No. 1
def initEvent(sFile: str, oLog: logging.Logger = None, isSilent: bool = False) -> None:
    msg = "({0}) Initialisation".format(getFileName(sFile))
    if oLog:
        oLog.debug(msg, outConsole=True)
    elif not isSilent:
        print(msg)
    return
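getFileName is an external helper not shown in this example; a minimal sketch, assuming it returns the file's base name without its extension:

import os

def getFileName(sFile: str) -> str:
    # Hypothetical implementation: strip the directory and the extension.
    return os.path.splitext(os.path.basename(sFile))[0]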
Example No. 2
def malware_get_reputation_ip(ip: str, loggers: logging.Logger) -> int:
    """
    Queries the reputation of an IP address: returns the number of attacks
    received from that IP, -2 if none have been received, or -1 if the request fails.
    :param ip: IP address to check
    :param loggers: logger used to report failures
    :return: reputation value, or a negative error code
    """
    url_api = "http://127.0.0.1:8080"
    url = f'{url_api}/getReputationIp?ip={ip}'
    headers = {'Accept': 'application/json'}
    try:
        r = requests.get(url, headers=headers, timeout=5)
        if r.status_code == 200:
            return json.loads(r.text)['reputation']
        else:
            return -1
    except (requests.exceptions.ConnectionError,
            requests.exceptions.ReadTimeout):
        # print(f"No se ha podido comprobar la reputacion para {ip}")  # no se tiene acceso a logger
        loggers.warning(f"No se ha podido comprobar la reputacion para {ip}"
                        )  # fixme traducir
        return -1
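A minimal usage sketch (the snippet itself assumes 'import requests' and 'import json'; the logger name below is illustrative):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("reputation-checker")  # illustrative logger name

reputation = malware_get_reputation_ip("198.51.100.7", logger)
print(f"Reputation: {reputation}")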
Example No. 3
def get_expired_certificates(acm_client: object,
                             logger: logging.Logger) -> defaultdict:
    """
    Retrieves all expired certificates from the ACM client based on a specified span of days.
    :param acm_client: ACM client
    :param logger: Logging object
    :return: A defaultdict mapping certificate ARNs to their expiry dates
    """
    # defaultdict of all certificate ARNs and expiry dates
    certs = get_all_certificates(acm_client)
    expired_certs = defaultdict(default_value)
    for arn in certs:
        # Skip entries that carry no real expiry datetime
        if certs[arn] == default_value:
            continue
        expiry_date = certs[arn]
        # Log whether this certificate falls inside the expiry window
        logger.info(
            f'Certificate will expire in {DAYS_EXP} days: {time_interval >= expiry_date}'
        )
        if time_interval >= expiry_date:
            expired_certs[arn] = expiry_date
        else:
            logger.info(
                f'Certificate {arn} is still valid. The expiry date is {expiry_date}'
            )
    return expired_certs
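DAYS_EXP, time_interval, default_value, and get_all_certificates are module-level names not shown here; a sketch of plausible definitions, stated as assumptions only:

import datetime
from collections import defaultdict

DAYS_EXP = 30  # assumed: flag certificates expiring within 30 days

# Assumed cutoff: anything expiring before this datetime is treated as expired.
time_interval = datetime.datetime.now() + datetime.timedelta(days=DAYS_EXP)

def default_value() -> str:
    # Assumed defaultdict factory doubling as a "no expiry date" sentinel.
    return 'N/A'

def get_all_certificates(acm_client) -> defaultdict:
    # Assumed: maps each certificate ARN to its 'NotAfter' expiry datetime.
    certs = defaultdict(default_value)
    for summary in acm_client.list_certificates()['CertificateSummaryList']:
        arn = summary['CertificateArn']
        detail = acm_client.describe_certificate(CertificateArn=arn)
        certs[arn] = detail['Certificate'].get('NotAfter', default_value())
    return certs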
Example No. 4
def execute(log: logging.Logger, config: dict):
    params = config['params']
    with FTP(params['ftp_host']) as ftp:
        log.info("connect success")
        # Log in once per connection; repeating login per log type would fail
        ftp.login(params['ftp_user'], params['ftp_password'])
        log.info("auth success")
        ts: datetime.datetime = config['ts']
        ts_from = config['ts_from']
        ts_to = config['ts_to']
        log_types = params['log_types']
        for log_type in log_types:
            in_path = ts.strftime(params['in_dir'].format(log_type))
            out_path = ts.strftime(params['out_dir'].format(log_type))
            chdir(ftp, in_path)
            files = ftp.nlst()
            bio = io.BytesIO()
            for in_file in files:
                s = re.search(r"(\d+)", in_file)
                if s:
                    cur = datetime.datetime.strptime(
                        s.group(), '%H%M%S').replace(day=ts.day,
                                                     month=ts.month,
                                                     year=ts.year)
                    if (cur < ts_to) and (cur >= ts_from):
                        ftp.retrbinary('RETR {0}'.format(in_file), bio.write)
                        bio.write(b'\n')
            file_name = ts.strftime(params['in_file_template'])
            os.makedirs(out_path, exist_ok=True)
            out_file = os.path.join(out_path, file_name)
            with open(out_file, 'wb') as f:
                f.write(bio.getvalue())
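The expected shape of config can be read off the lookups above; an illustrative example (all values are placeholders):

import datetime

config = {
    'ts': datetime.datetime(2024, 1, 15, 12, 0),       # run timestamp
    'ts_from': datetime.datetime(2024, 1, 15, 11, 0),  # window start
    'ts_to': datetime.datetime(2024, 1, 15, 12, 0),    # window end
    'params': {
        'ftp_host': 'ftp.example.com',
        'ftp_user': 'user',
        'ftp_password': 'secret',
        'log_types': ['access', 'error'],
        'in_dir': '/logs/{0}/%Y/%m/%d',   # formatted with log_type, then strftime
        'out_dir': '/data/{0}/%Y/%m/%d',
        'in_file_template': '%Y%m%d.log',
    },
}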
Example No. 5
    def __init__(self, log_dir: Union[str, Path], logger: logging.Logger, enable: bool = True):
        self.writer = None
        if enable:
            log_dir = str(log_dir)
            try:
                self.writer = importlib.import_module("tensorboardX").SummaryWriter(
                    log_dir
                )
            except ImportError:
                message = (
                    "Warning: TensorboardX visualization is configured to be used, but it is "
                    "not currently installed on this machine. Install it with the "
                    "'pip install tensorboardx' command, or turn off the option in the "
                    "'config.json' file."
                )
                logger.warning(message)
        self.step = 0
        self.mode = ""

        self.tb_writer_ftns = [
            "add_scalar",
            "add_scalars",
            "add_image",
            "add_images",
            "add_audio",
            "add_text",
            "add_histogram",
            "add_pr_curve",
            "add_embedding",
        ]
        self.tag_mode_exceptions = ["add_histogram", "add_embedding"]
        self.timer = Timer()
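Assuming this __init__ belongs to a TensorBoard writer wrapper (the enclosing class name is not shown; TensorboardWriter below is an assumption), usage looks like:

import logging
from pathlib import Path

logger = logging.getLogger("trainer")
writer = TensorboardWriter(Path("saved/log"), logger, enable=True)
if writer.writer is not None:
    writer.writer.add_scalar("loss", 0.42, writer.step)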
Example No. 6
def evalimage(net: Yolact,
              path: str,
              save_path: str = None,
              logger: logging.Logger = None,
              detections: Detections = None,
              image_id=None):
    frame = torch.from_numpy(cv2.imread(path)).float()
    if args.cuda:
        frame = frame.cuda().float()
    batch = FastBaseTransform()(frame.unsqueeze(0))

    if cfg.flow.warp_mode != 'none':
        assert False, "Evaluating an image with a video-based model. If you believe this is a problem, please report an issue on GitHub."

    extras = {
        "backbone": "full",
        "interrupt": False,
        "keep_statistics": False,
        "moving_statistics": None
    }

    time_start = time.time()
    preds = net(batch, extras=extras)["pred_outs"]
    if logger is not None:
        logger.info('Inference cost: %.3fs' % (time.time() - time_start))

    img_numpy = prep_display(preds,
                             frame,
                             None,
                             None,
                             args,
                             undo_transform=False)

    if args.output_coco_json:
        with timer.env('Postprocess'):
            _, _, h, w = batch.size()
            classes, scores, boxes, masks = \
                postprocess(preds, w, h, crop_masks=args.crop, score_threshold=args.score_threshold)

        with timer.env('JSON Output'):
            boxes = boxes.cpu().numpy()
            masks = masks.view(-1, h, w).cpu().numpy()
            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] -
                                                  boxes[i, 0]) > 0:
                    detections.add_bbox(image_id, classes[i], boxes[i, :],
                                        scores[i])
                    detections.add_mask(image_id, classes[i], masks[i, :, :],
                                        scores[i])

    if save_path is None:
        # Convert BGR (OpenCV channel order) to RGB for matplotlib display
        img_numpy = img_numpy[:, :, (2, 1, 0)]
        plt.imshow(img_numpy)
        plt.title(path)
        plt.show()
    else:
        cv2.imwrite(save_path, img_numpy)
Example No. 7
def log_exception(message: str = None, logger: logging.Logger = None):
    if logger is None:
        logger = logging.getLogger(__name__)

    if message is None:
        message = "An exception occurred"

    formatted_message = f"Component version: {MEROSS_CLOUD_VERSION}, Message: \"{message}\""
    logger.exception(formatted_message)
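A usage sketch: logger.exception records the active traceback, so the helper is meant to be called from inside an except block (connect_to_device is a hypothetical operation):

try:
    connect_to_device()
except Exception:
    log_exception("Failed to reach the Meross cloud")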
Example No. 8
    def __init__(self, conn: sqlite3.Connection, logs: logging.Logger,
                 schema_file: TextIO):
        self.conn = conn
        self.logs = logs
        self.schema = schema_file.read()
        try:
            conn.executescript(self.schema)
            logs.info("Database initialized from schema.sql")
        except sqlite3.Error:
            logs.error("Failed creating database from schema.sql")
Example No. 9
    def un_register_multiprocess_handlers(logger: logging.Logger = None) -> None:
        """
        Unregister the multiprocess handlers.
        :param logger: logger whose handlers to close and remove; the root logger by default
        :return: None
        """
        if logger is None:
            logger = logging.getLogger()

        # Iterate over a copy, since handlers are removed during the loop
        for handler in list(logger.handlers):
            handler.close()
            logger.removeHandler(handler)
Example No. 10
    def verify(self, logger: logging.Logger):
        sum_collected = float(0)
        sum_net = float(0)
        for player in self.players:
            sum_net += float(player.net)
            sum_collected += float(player.collected_amount)
        if abs(sum_collected - (float(self.pot) - float(self.rake))) > 0.001:
            logger.error('collected != pot - rake for hand: ' + self.hand_id)
            return False
        if abs(sum_net + float(self.rake)) > 0.001:
            logger.error('sum(net) != -rake for hand: ' + self.hand_id)
            return False
        return True
Example No. 11
    def register_multiprocess_handlers(logger: logging.Logger = None) -> None:
        """
        Wraps the handlers of the given logger with a MultiProcessingHandler.
        :param logger: logger whose handlers to wrap; the root logger by default
        :return: None
        """
        if logger is None:
            logger = logging.getLogger()

        for i, orig_handler in enumerate(list(logger.handlers)):
            handler = MultiProcessingHandler('mp-handler-{0}'.format(i), sub_handler=orig_handler)
            logger.removeHandler(orig_handler)
            logger.addHandler(handler)
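A sketch of how the two helpers pair up, assuming MultiProcessingHandler comes from the multiprocessing-logging package (the import is not shown in the snippet, and run_worker_pool is a hypothetical workload):

import logging

logging.basicConfig(level=logging.INFO)
register_multiprocess_handlers()         # wrap the root handlers before starting workers
try:
    run_worker_pool()                    # hypothetical multiprocessing workload
finally:
    un_register_multiprocess_handlers()  # close and detach the wrapped handlers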
Example No. 12
def check_link(
        match_tuple: MatchTuple,
        http_session: requests.Session,
        logger: logging.Logger = None) -> Tuple[MatchTuple, bool, Optional[str]]:
    reason: Optional[str] = None
    if match_tuple.link.startswith('http'):
        result_ok, reason = check_url(match_tuple, http_session)
    else:
        result_ok = check_path(match_tuple)
    if logger is None:
        print(f"  {'✓' if result_ok else '✗'} {match_tuple.link}")
    else:
        logger.info(f"  {'✓' if result_ok else '✗'} {match_tuple.link}")
    return match_tuple, result_ok, reason
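MatchTuple, check_url, and check_path are defined elsewhere; a minimal sketch of a plausible MatchTuple, as an assumption only:

from typing import NamedTuple

class MatchTuple(NamedTuple):
    # Assumed fields: the file the link was found in, the link text, and the target.
    source: str
    name: str
    link: str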
Example No. 13
    def dump(self, logger: logging.Logger = None, header: str = None, footer: str = None, indent: int = 0):
        """
        :param logger: logger to which the dump is written
        :param header: text to write before the dump
        :param footer: text to write after the dump
        :param indent: number of leading spaces (for recursive calls)
        """
        if not logger:
            # Not in debug mode
            return

        if hasattr(self, "__slots__"):
            attr_list = sorted((attr, getattr(self, attr)) for attr in self.__slots__)
        else:
            attr_list = sorted(self.__dict__.items())

        pad = " " * indent

        if header:
            logger.debug(header)

        for attr, value in attr_list:
            if getattr(value, 'dump', None) and attr != 'book':
                obj_name = value.__class__.__name__
                value.dump(logger, header=f"{pad}{attr} ({obj_name} object):", indent=indent + 4)

            elif attr not in self._repr_these and isinstance(value, (list, dict)):
                logger.debug(f"{pad}{attr}: {type(value)}, len = {len(value)}")

            else:
                logger.debug(f"{pad}{attr}: {value}")

        if footer:
            logger.debug(footer)
Example No. 14
    def print_send_survey_command(logger: logging.Logger,
                                  chat_id: int,
                                  condition: int,
                                  survey_type: SurveyType) -> None:
        """
        Logs information about sent surveys.

        :param logger: logger instance
        :param chat_id: chat id of the user
        :param condition: condition of the user
        :param survey_type: current survey type
        :return: None
        """
        logger.info("Send %s survey to %d with condition %d" % (survey_type.name, chat_id, condition))
Example No. 15
def yml_reader(yml_filepath: str, logger: logging.Logger = None):
    """
    Read a YAML file.

    :param yml_filepath: path to the YAML file
    :return: dictionary of the YAML file contents, or None if the file does not exist
    """
    logger = logger if logger is not None else logging.getLogger(__name__)
    if os.path.exists(yml_filepath):
        with open(yml_filepath) as stream:
            yml = YAML(typ="safe")
            yml_dict = yml.load(stream)
        return yml_dict
    else:
        logger.info(f"yml_filepath ({yml_filepath}) doesn't exisit")
Example No. 16
    def _reader_func(stream: IO, queue: Queue, logger: logging.Logger):
        try:
            while True:
                line = stream.readline()
                if len(line) > 0:
                    queue.put(line)
                    if logger is not None:
                        logger.debug(
                            "LiveRNAfold, _reader_func ({}) [{}]".format(
                                queue.qsize(), line))
                else:
                    break
        except Exception as e:
            if logger is not None:
                logger.debug(
                    "LiveRNAfold, _reader_func THREAD DEAD [{}]".format(e))
            queue.put(END_SEQUENCE)
Example No. 17
def execute(spark: SparkSession, log: logging.Logger, config: dict):
    log.info("extract")
    params = config['params']
    ps_conf = config['postgres']

    ts: datetime.datetime = params['ts']
    in_path = ts.strftime(params['in_path'])
    ts_from = config['ts_from']
    ts_to = config['ts_to']
    df = spark.read.csv(in_path, header=True, sep=';')
    df = df.select(
        F.col('FROM_PHONE_NUMBER'), F.col('TO_PHONE_NUMBER'),
        F.to_timestamp(df['START_TIME'], 'dd/MM/yyyy HH:mm:ss').alias('START_TIME'),
        F.col('CALL_DURATION').cast('long'), F.col('IMEI'), F.col('LOCATION')
    ).withColumn("TS", F.date_format(F.date_trunc("hour", "START_TIME"), "yyyy-MM-dd-HH"))
    df.write.partitionBy("TS").mode('append').format('hive').saveAsTable('task_02')
    df = spark.sql("select * from task_02 where TS >= '{}' AND TS < '{}'".format(ts_from, ts_to)).drop_duplicates()
    df.cache()
    ts = df.select("TS").rdd.map(lambda x: x[0]).first()
    # Number of call, total call duration.
    num_call = df.count()
    total_call_duration = list(df.select(F.sum(df['CALL_DURATION'])).first().asDict().values())[0]

    # Number of call in working hour (8am to 5pm)
    num_call_working_hour = df.filter("hour(START_TIME) >= 8 AND hour(START_TIME) <= 17").count()

    # Find the IMEI which make most call.
    imei_most = df.groupBy('IMEI').count().sort(F.col("count").desc()).first().asDict()

    # Find top 2 locations which make most call.
    locations = list(map(lambda x: x.asDict(), df.groupBy('LOCATION').count().sort(F.col("count").desc()).head(2)))

    rs = (ts, num_call, total_call_duration, num_call_working_hour, imei_most, locations)
    with get_postgres_cli(ps_conf) as ps_cli:
        with ps_cli.cursor() as cur:
            sql = """
            INSERT INTO metric_hour(
                ts, num_call, total_call_duration, 
                num_call_working_hour, imei_most, locations
            ) VALUES(%s, %s, %s, %s, %s, %s) 
            ON CONFLICT (ts) 
            DO UPDATE SET(
                num_call, total_call_duration, num_call_working_hour, imei_most, locations) =
                (EXCLUDED.num_call, EXCLUDED.total_call_duration, EXCLUDED.num_call_working_hour,
                 EXCLUDED.imei_most, EXCLUDED.locations)
            """
            cur.execute(sql, rs)
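The INSERT statement implies a metric_hour table with the columns below; a plausible DDL sketch (column types are assumptions, and the dict/list values would need a JSON adapter such as psycopg2.extras.Json to be insertable):

METRIC_HOUR_DDL = """
CREATE TABLE IF NOT EXISTS metric_hour (
    ts                    TEXT PRIMARY KEY,  -- 'yyyy-MM-dd-HH' bucket
    num_call              BIGINT,
    total_call_duration   BIGINT,
    num_call_working_hour BIGINT,
    imei_most             JSONB,
    locations             JSONB
);
"""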
Example No. 18
def get_number_lines_file(file: str, loggers: logging.Logger) -> int:
    try:
        command = f'wc -l {file} | cut -d " " -f1'
        execute = subprocess.Popen(command,
                                   shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE)
        stdout, stderr = execute.communicate()
        count_lines = int(stdout)
        return count_lines
    except Exception as e:
        loggers.warning(e)
        loggers.warning('Method readlines')
        with open(file, 'r') as fp:
            count_lines = len(fp.readlines())
        return count_lines
Example No. 19
def knobs_ranking(knob_data: dict, metric_data: dict, mode: str,
                  logger: logging.Logger) -> list:
    """
    knob_data : knob data to be ranked by knobs_ranking
    metric_data : metric data pruned by metric simplification
    mode : knob-identification method to select (e.g. lasso, xgb, rf)
    logger : logger used to report feature importance
    """
    knob_matrix: list = knob_data['data']
    knob_columnlabels: list = knob_data['columnlabels']

    metric_matrix: list = metric_data['data']
    #metric_columnlabels = metric_data['columnlabels']

    encoded_knob_columnlabels = knob_columnlabels
    encoded_knob_matrix = knob_matrix

    # standardize values in each column to N(0, 1)
    #standardizer = RobustScaler()
    standardizer = StandardScaler()
    # standardizer = MinMaxScaler()
    standardized_knob_matrix = standardizer.fit_transform(encoded_knob_matrix)
    standardized_metric_matrix = standardizer.fit_transform(metric_matrix)

    # shuffle rows (note: same shuffle applied to both knob and metric matrices)
    shuffle_indices = get_shuffle_indices(standardized_knob_matrix.shape[0],
                                          seed=17)
    shuffled_knob_matrix = standardized_knob_matrix[shuffle_indices, :]
    shuffled_metric_matrix = standardized_metric_matrix[shuffle_indices, :]

    model = Ranking(mode)
    model.fit(shuffled_knob_matrix, shuffled_metric_matrix,
              encoded_knob_columnlabels)
    encoded_knobs = model.get_ranked_features()
    feature_imp = model.get_ranked_importance()
    if feature_imp is not None:
        logger.info('Feature importance')
        logger.info(feature_imp)

    consolidated_knobs = consolidate_columnlabels(encoded_knobs)

    return consolidated_knobs
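The expected input shape follows from the dictionary lookups above; an illustrative call with placeholder data:

import logging
import numpy as np

knob_data = {
    'data': np.random.rand(100, 5),  # one row per workload sample
    'columnlabels': ['knob_a', 'knob_b', 'knob_c', 'knob_d', 'knob_e'],
}
metric_data = {
    'data': np.random.rand(100, 3),
    'columnlabels': ['tps', 'latency', 'cpu'],
}
ranked = knobs_ranking(knob_data, metric_data, 'lasso', logging.getLogger(__name__))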
Example No. 20
def download_file(file_url: str, file_name: str, log: logging.Logger) -> str:
    """
    Download file_url to file_name; do nothing if file_name already exists.

    :param log: logger to use
    :param file_url: URL of the file to download
    :param file_name: file name where the data will be saved
    :return: name of the file if it could be downloaded, otherwise None
    """
    if os.path.isfile(file_name):
        return file_name

    if log is None:
        log = logging  # fall back to the module-level logging functions

    remaining_download_tries = REMAINING_DOWNLOAD_TRIES
    downloaded_file = None
    while remaining_download_tries > 0:
        try:
            downloaded_file, _ = request.urlretrieve(
                file_url, file_name)
            log.debug("File downloaded -- " + downloaded_file)
            if downloaded_file.endswith('.gz'):
                extracted_file = downloaded_file.replace('.gz', '')
                # Decompress with properly closed file handles
                with open(downloaded_file, 'rb') as infile, \
                        open(extracted_file, 'w') as outfile:
                    outfile.write(gzip.decompress(infile.read()).decode('utf-8'))
                os.remove(downloaded_file)
                downloaded_file = extracted_file
                log.debug("File extracted -- " + downloaded_file)
            break
        except (
                HTTPError,
                URLError,
                ContentTooShortError,
        ) as error:
            log.error(
                "Error downloading -- Incorrect URL or file not found: " +
                file_url + " on trial no: " +
                str(REMAINING_DOWNLOAD_TRIES - remaining_download_tries))
            log.error("Error code: " + str(error))
            remaining_download_tries = remaining_download_tries - 1
            downloaded_file = None
            continue
        except Exception as error:
            remaining_download_tries = remaining_download_tries - 1
            log.error("Error code: " + str(error))
            downloaded_file = None

    return downloaded_file
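The names used here come from urllib and a module-level retry constant; a sketch of the assumed imports (the retry budget value is an assumption):

import gzip
import logging
import os
from urllib import request
from urllib.error import ContentTooShortError, HTTPError, URLError

REMAINING_DOWNLOAD_TRIES = 4  # assumed retry budget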
Example No. 21
def init_logger(logger: logging.Logger = None):
    """binds the logger for GraphQL view to show the stacktrace on stdout

    - This adds a hook into the "GraphQLView.format_error" function in
        flask_graphql to also log the exception in the GraphQLView to stdout
    - once the exception is logged, call the default error handler for the
        graphql_server package

    Keyword Arguments:
        logger {logging.Logger} -- user-provided initialized logger, if available
                                   (default: {None})
    """
    if not logger:
        # initialize logger if not provided
        logger = logging.getLogger("graphql-error-logger")
        logger.setLevel(logging.ERROR)

    @staticmethod
    def log_and_format_exception(error):
        """captures the exception catch in graphql_server and log to the stdout
        with the stacktrace included.

        once thats done, call the default_format_error for the  graphql_server
        package

        Arguments:
            error {[type]} -- error catch by the graphql server

        Returns:
            [str] -- constaining the string representation of the error
                     encountered by the graphql endpoint
        """
        if hasattr(error, "original_error") and isinstance(
            error.original_error, Exception
        ):
            logger.error("error in GraphQL view", exc_info=error.original_error)
        return default_format_error(error)

    GraphQLView.format_error = log_and_format_exception
Example No. 22
    def trace(self, logger: logging.Logger):
        logger.debug('\t\t\t\tname: ' + self.name)
        s = '['
        for c in self.community_cards:
            s += c
            s += ', '
        s += ']'
        logger.debug('\t\t\t\tcommunity_cards: ' + s)
        s = '['
        for c in self.cards:
            s += c
            s += ', '
        s += ']'
        logger.debug('\t\t\t\tcards: ' + s)
        s = '['
        for c in self.discards:
            s += c
            s += ', '
        s += ']'
        logger.debug('\t\t\t\tdiscards: ' + s)
        logger.debug('\t\t\t\tactions:')
        for action in self.actions:
            action.trace(logger)
Example No. 23
def log_exception(message: str = None,
                  logger: logging.Logger = None,
                  device: AbstractMerossDevice = None):
    if logger is None:
        logger = logging.getLogger(__name__)

    if message is None:
        message = "An exception occurred"

    device_info = "<Unavailable>"
    if device is not None:
        device_info = f"\tName: {device.name}\n" \
                      f"\tUUID: {device.uuid}\n" \
                      f"\tType: {device.type}\n\t" \
                      f"HW Version: {device.hwversion}\n" \
                      f"\tFW Version: {device.fwversion}"

    formatted_message = f"Error occurred.\n" \
                        f"-------------------------------------\n" \
                        f"Component version: {MEROSS_CLOUD_VERSION}\n" \
                        f"Device info: \n" \
                        f"{device_info}\n" \
                        f"Error Message: \"{message}\""
    logger.exception(formatted_message)
Example No. 24
def inject_link(html: str, href: str, page: Page, logger: logging.Logger) -> str:
    """Add a PDF view button to the navigation bar (Material theme)."""
    def _pdf_icon():
        _ICON = '''
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512">
<path d="M128,0c-17.6,0-32,14.4-32,32v448c0,17.6,14.4,32,32,32h320c17.6,0,32-14.4,32-32V128L352,0H128z" fill="#E2E5E7"/>
<path d="m384 128h96l-128-128v96c0 17.6 14.4 32 32 32z" fill="#B0B7BD"/>
<polygon points="480 224 384 128 480 128" fill="#CAD1D8"/>
<path d="M416,416c0,8.8-7.2,16-16,16H48c-8.8,0-16-7.2-16-16V256c0-8.8,7.2-16,16-16h352c8.8,0,16,7.2,16,16  V416z" fill="#F15642"/>
<g fill="#fff">
<path d="m101.74 303.15c0-4.224 3.328-8.832 8.688-8.832h29.552c16.64 0 31.616 11.136 31.616 32.48 0 20.224-14.976 31.488-31.616 31.488h-21.36v16.896c0 5.632-3.584 8.816-8.192 8.816-4.224 0-8.688-3.184-8.688-8.816v-72.032zm16.88 7.28v31.872h21.36c8.576 0 15.36-7.568 15.36-15.504 0-8.944-6.784-16.368-15.36-16.368h-21.36z"/>
<path d="m196.66 384c-4.224 0-8.832-2.304-8.832-7.92v-72.672c0-4.592 4.608-7.936 8.832-7.936h29.296c58.464 0 57.184 88.528 1.152 88.528h-30.448zm8.064-72.912v57.312h21.232c34.544 0 36.08-57.312 0-57.312h-21.232z"/>
<path d="m303.87 312.11v20.336h32.624c4.608 0 9.216 4.608 9.216 9.072 0 4.224-4.608 7.68-9.216 7.68h-32.624v26.864c0 4.48-3.184 7.92-7.664 7.92-5.632 0-9.072-3.44-9.072-7.92v-72.672c0-4.592 3.456-7.936 9.072-7.936h44.912c5.632 0 8.96 3.344 8.96 7.936 0 4.096-3.328 8.704-8.96 8.704h-37.248v0.016z"/>
</g>
<path d="m400 432h-304v16h304c8.8 0 16-7.2 16-16v-16c0 8.8-7.2 16-16 16z" fill="#CAD1D8"/>
</svg>
'''  # noqa: E501
        return BeautifulSoup(_ICON, 'html.parser')

    logger.info('(hook on inject_link: %s)', page.title)
    soup = BeautifulSoup(html, 'html.parser')

    nav = soup.find(class_='md-header-nav')
    if not nav:
        # after 7.x
        nav = soup.find('nav', class_='md-header__inner')
    if nav:
        a = soup.new_tag('a',
                         href=href,
                         title='PDF',
                         **{'class': 'md-header-nav__button md-icon'})
        a.append(_pdf_icon())
        nav.append(a)
        return str(soup)

    return html
Example No. 25
def log_section(text: str, logger: logging.Logger) -> None:
    """
    Logs a section banner.
    :param text: text to log
    :param logger: logger object
    """
    logger.info(
        "==============================================================")
    logger.info(text)
    logger.info(
        "==============================================================")
Example No. 26
def execute(spark: SparkSession, log: logging.Logger, config: dict):
    log.info("extract")
    in_path = config['params']['in_path']
    out_path = config['params']['out_path']
    df = spark.read.csv(in_path, header=True).repartition(120, "PHONE_NUMBER").na.fill(
        {'DEACTIVATION_DATE': '9999-12-31'})

    log.info("transform")
    df_norm = df.sort(df.DEACTIVATION_DATE.desc()).groupby(
        ['PHONE_NUMBER']
    ).agg(
        F.collect_list(df['ACTIVATION_DATE']).alias('ACTIVATION_DATE'),
        F.collect_list(df['DEACTIVATION_DATE']).alias('DEACTIVATION_DATE')
    ).withColumn(
        'ACTUAL_ACTIVE_DATE',
        udf_actual_active_date(F.col('ACTIVATION_DATE'), F.col('DEACTIVATION_DATE'))
    ).select(['PHONE_NUMBER', 'ACTUAL_ACTIVE_DATE']).withColumn(
        "TS", F.date_format(F.date_trunc("month", "ACTUAL_ACTIVE_DATE"), "yyyy-MM"))

    log.info("load")
    df_norm.write.partitionBy("TS").parquet(out_path, mode="overwrite")
    spark.read.parquet(out_path)  # sanity-check that the written output is readable
Example No. 27
def download_file(file_url: str, file_name: str, log: logging.Logger) -> str:
    """
    Download file_url to file_name; do nothing if file_name already exists.

    :param log: logger to use
    :param file_url: URL of the file to download
    :param file_name: file name where the data will be saved
    :return: name of the file if it could be downloaded, otherwise None
    """
    if os.path.isfile(file_name):
        return file_name

    if log is None:
        log = logging  # fall back to the module-level logging functions

    remaining_download_tries = REMAINING_DOWNLOAD_TRIES
    downloaded_file = None
    while remaining_download_tries > 0:
        try:
            downloaded_file, _ = request.urlretrieve(
                file_url, file_name)
            log.debug("File downloaded -- " + downloaded_file)
            break
        except (HTTPError, URLError, ContentTooShortError) as error:
            log.error(
                "Error downloading -- Incorrect URL or file not found: " +
                file_url + " on trial no: " +
                str(REMAINING_DOWNLOAD_TRIES - remaining_download_tries))
            log.error("Error code: " + str(error))
            remaining_download_tries = remaining_download_tries - 1
            downloaded_file = None
            continue
        except Exception as error:
            remaining_download_tries = remaining_download_tries - 1
            log.error("Error code: " + str(error))
            downloaded_file = None

    return downloaded_file
Example No. 28
    def trace(self, logger: logging.Logger):
        logger.debug('\t\tname: ' + self.name)
        logger.debug('\t\t\tante: ' + str(self.ante))
        logger.debug('\t\t\tseat: ' + str(self.seat))
        logger.debug('\t\t\tposition: ' + str(self.position))
        logger.debug('\t\t\tblindAmount: ' + str(self.blind_amount))
        logger.debug('\t\t\tsmallBlind: ' + str(self.small_blind))
        logger.debug('\t\t\tbigBlind: ' + str(self.big_blind))
        logger.debug('\t\t\tbothBlinds: ' + str(self.both_blinds))
        logger.debug('\t\t\treturnedAmount: ' + str(self.returned_amount))
        logger.debug('\t\t\tcollectedAmount: ' + str(self.collected_amount))
        logger.debug('\t\t\tnet: ' + str(self.net))
        logger.debug('\t\t\tnetBigBets: ' + str(self.net_big_bets))
        logger.debug('\t\t\tnetBigBlinds: ' + str(self.net_big_blinds))
        logger.debug('\t\t\tsawShowdown: ' + str(self.saw_showdown))
        s = '['
        for c in self.final_hand:
            s += c
            s += ', '
        s += ']'
        logger.debug('\t\t\tfinalHand: ' + s)
        logger.debug('\t\t\tstreets:')
        for street in self.streets:
            street.trace(logger)
Example No. 29
def log_and_print(message, logger: logging.Logger = None) -> None:
    if logger is not None:
        logger.info(message)
    print(message)
Example No. 30
def show_message_log_console(message: str, logger: logging.Logger = None) -> None:
    if logger is not None:
        logger.debug(message)
    else:
        print(message)
Example No. 31
def log(l: logging, filename):
    # NOTE: 'l' is expected to be the logging module itself, since basicConfig
    # is a module-level function rather than a Logger method.
    l.basicConfig(filename=filename,
                  level=logging.INFO,
                  format=LOG_FORMAT,
                  datefmt=DATE_FORMAT)
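A usage sketch; LOG_FORMAT and DATE_FORMAT are module-level constants not shown, so the values below are assumptions:

import logging

LOG_FORMAT = "%(asctime)s %(levelname)s %(message)s"  # assumed
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"                     # assumed

log(logging, "app.log")  # the logging module itself is passed as 'l'
logging.info("logging configured")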
Example No. 32
    def trace(self, logger: logging.Logger):
        logger.debug('handId: ' + str(self.hand_id))
        logger.debug('\ttimeStamp: ' + self.time_stamp)
        logger.debug('\tsessionStartTime: ' + self.session_start_time)
        logger.debug('\tsessionEndTime: ' + self.session_end_time)
        logger.debug('\ttable: ' + self.table_name)
        logger.debug('\tmix: ' + self.mix)
        logger.debug('\tgame: ' + self.game)
        logger.debug('\tgameSize: ' + str(self.game_size))
        logger.debug('\tstakes: $' + str(self.small_bet) + '/$' + str(self.big_bet))
        logger.debug('\tsmallBlind: ' + str(self.small_blind))
        logger.debug('\tbigBlind: ' + str(self.big_blind))
        logger.debug('\tante: ' + str(self.ante))
        logger.debug('\tcurrency: ' + self.currency)
        logger.debug('\tbuttonSeat: ' + str(self.button_seat))
        logger.debug('\tpot: ' + str(self.pot))
        logger.debug('\trake: ' + str(self.rake))
        s = '['
        for c in self.flop:
            s += c
            s += ', '
        s += ']'
        logger.debug('\tflop: ' + s)
        s = '['
        for c in self.turn:
            s += c
            s += ', '
        s += ']'
        logger.debug('\tturn: ' + s)
        s = '['
        for c in self.river:
            s += c
            s += ', '
        s += ']'
        logger.debug('\triver: ' + s)

        logger.debug('\tplayers:')
        for p in self.players:
            p.trace(logger)
Example No. 33
    def trace(self, logger: logging.Logger):
        logger.debug('\t\t\t\t\tindex: ' + str(self.index))
        logger.debug('\t\t\t\t\taction: ' + self.action)
        logger.debug('\t\t\t\t\tamount: ' + str(self.amount))
        logger.debug('\t\t\t\t\tto_amount: ' + str(self.to_amount))
Example No. 34
def verify(p_parameter_type: tuple, p_logger: logging.Logger, p_param_value=None):

    p_param_value_sent_type = type(p_param_value)

    param_key_name: str = p_parameter_type[0]
    param_val_expected_type: str = p_parameter_type[1]
    param_val_expected_list_of_values: list = []

    if ',' in param_val_expected_type:
        # parameter has distinct values
        param_val_expected_list_of_values = param_val_expected_type.split(
            ',')[1].split('|')
        param_val_expected_type = param_val_expected_type.split(',')[0]
        print()

    if ('LIST[STR]' in param_val_expected_type.strip().upper()
            and 'LIST[STR]' in param_val_expected_type.strip()):
        # parameter value must be a list that exists, and it can't be []
        print()

    elif ('LIST[str]' in param_val_expected_type.strip().upper()
          and 'LIST[str]' in param_val_expected_type.strip()):
        # parameter value must be a list that exists, and it can be []
        print()

    elif 'list[STR]' in param_val_expected_type.strip().upper():
        # parameter must be a list if it exists, and it can't be []
        print()

    elif 'list[str]' in param_val_expected_type.strip().upper():
        # parameter must be a list if it exists, and it can be []
        print()

    elif param_val_expected_type.replace(' ', '').strip().upper() == 'ANY':
        # parameter value must exist
        print()

    elif param_val_expected_type.strip() == 'any':
        # parameter value may exist
        print()

    elif (param_val_expected_type.replace(' ', '').strip().upper() == 'STR'
          and param_val_expected_type.strip() == 'STR'):
        p_logger.debug(
            f"Verify parameter ({param_key_name}) is a string and its value {p_param_value} exists"
        )

    elif param_val_expected_type.strip().upper() == 'STR':
        # parameter value must be a string if it exists
        print()

    elif (param_val_expected_type.strip().upper() == 'INT'
          and param_val_expected_type.strip() == 'INT'):
        # parameter value must be an int and exist
        print()

    elif param_val_expected_type.strip().upper() == 'INT':
        # parameter value must be an int if it exists
        print()

    elif (param_val_expected_type.strip().upper() == 'FLOAT'
          and param_val_expected_type.strip() == 'FLOAT'):
        # parameter value must be a float and exist
        print()

    elif param_val_expected_type.strip().upper() == 'FLOAT':
        # parameter value must be a float if it exists
        print()
Example No. 35
def ResetDebugLevel(p_logger: logging.Logger, p_log_level_name: str) -> None:

    log_level: int = GetDebugLevelForName(p_log_level_name)
    p_logger.setLevel(log_level)
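GetDebugLevelForName is not shown; a minimal sketch, assuming it maps a level name onto logging's numeric levels:

import logging

def GetDebugLevelForName(p_log_level_name: str) -> int:
    # Assumed mapping: 'debug' -> logging.DEBUG (10), 'info' -> 20, and so on.
    return getattr(logging, p_log_level_name.strip().upper(), logging.INFO)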