コード例 #1
0
def test_default_logger_console_formatter_parameters():
    """Check every configuration detail of the default console formatter.

    The default logger must carry a colorlog ColoredFormatter using the
    library's default console format, no date format, the standard color
    map, and no secondary colors.
    """
    main_log = simplelogging.get_logger("__main__")
    expected_colors = {
        "DEBUG": "blue",
        "INFO": "black,bg_green",
        "WARNING": "black,bg_yellow",
        "ERROR": "white,bg_red",
        "CRITICAL": "red,bg_white",
    }
    expected_secondary_colors = {}

    assert main_log.handlers
    for candidate in main_log.handlers:
        if not isinstance(candidate, colorlog.StreamHandler):
            continue
        formatter = candidate.formatter
        if not isinstance(formatter, colorlog.ColoredFormatter):
            continue
        assert formatter._fmt == simplelogging.DEFAULT_CONSOLE_FORMAT
        assert formatter.datefmt is None
        assert formatter.log_colors == expected_colors
        assert formatter.secondary_log_colors == expected_secondary_colors
        # The library defaults to %-style formatting.
        assert isinstance(formatter._style, logging._STYLES["%"][0])
        break
    else:
        assert False, "No good console formatter found"
コード例 #2
0
def test_default_logger_file():
    """Ensure no rotating file handler exists when no file_name is given."""
    main_log = simplelogging.get_logger("__main__")
    assert main_log.handlers
    file_handlers = [
        h for h in main_log.handlers
        if isinstance(h, logging.handlers.RotatingFileHandler)
    ]
    assert not file_handlers, "A file handler is found, but no file_name provided"
コード例 #3
0
def main():
    """Entry point: parse CLI options and convert each markdown file to PDF."""
    global log

    arg_parser = argparse.ArgumentParser(description="Markdown to PDF converter")
    arg_parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="increase verbosity level",
    )
    arg_parser.add_argument(
        "-t", "--title", help="title of the document", default=""
    )
    arg_parser.add_argument(
        "-a", "--author", help="author of the document", default=""
    )
    arg_parser.add_argument(
        "-s",
        "--size",
        help="size of the document",
        default="A4",
        choices=["A4", "A5"],
    )
    sources = arg_parser.add_mutually_exclusive_group(required=True)
    sources.add_argument(
        "-i",
        "--input-path",
        metavar="PATH",
        type=str,
        help="path of the markdown files to convert",
        default=[],
        # allow the user to provide no path at all,
        # this helps writing scripts
        nargs="+",
    )
    # TODO file on the web, with requests
    sources.add_argument("--version", action="store_true", help="return version")
    args = arg_parser.parse_args()

    if args.version:
        print(pkg_resources.get_distribution("md_to_pdf").version)
        sys.exit(0)

    log = simplelogging.get_logger("__main__")

    # --verbose is a counter: 0 → reduced, 1 → normal, 2+ → full logging.
    verbosity = args.verbose
    if verbosity >= 2:
        log.full_logging()
    elif verbosity == 1:
        log.normal_logging()
    else:
        log.reduced_logging()

    for source_path in args.input_path:
        md_to_pdf(
            source_path,
            author=args.author,
            title=args.title,
            size=args.size,
        )
コード例 #4
0
def test_default_logger_console():
    """Verify that at least one colorlog stream handler is installed."""
    main_log = simplelogging.get_logger("__main__")
    assert main_log.handlers
    has_console = any(
        isinstance(h, colorlog.StreamHandler) for h in main_log.handlers
    )
    assert has_console, "No console handler found"
コード例 #5
0
def test_default_logger_console_level():
    """Verify a console handler exists at the default INFO level."""
    main_log = simplelogging.get_logger("__main__")
    assert main_log.handlers
    matching = [
        h for h in main_log.handlers
        if isinstance(h, colorlog.StreamHandler)
        and h.level == simplelogging.INFO
    ]
    assert matching, "Not good console level"
コード例 #6
0
def test_console_format():
    """Test that a custom console_format reaches the console handler.

    A recognisable format string is passed to ``get_logger``; at least one
    colorlog stream handler must carry exactly that format.
    """
    # Renamed from `format`, which shadowed the builtin of the same name.
    console_format = "Vincent Poulailleau"
    main_log = simplelogging.get_logger("__main__", console_format=console_format)
    assert main_log.handlers
    for handler in main_log.handlers:
        if isinstance(handler, colorlog.StreamHandler):
            if isinstance(handler.formatter, colorlog.ColoredFormatter):
                if handler.formatter._fmt == console_format:
                    break
    else:
        assert False, "no handler found with the good format"
コード例 #7
0
ファイル: __main__.py プロジェクト: cigani/tdms-reader
import pandas as pd
import simplelogging
from bokeh.models import (
    ColumnDataSource,
    Whisker,
    HoverTool,
    Band,
)
from bokeh.plotting import figure, output_file, show
from nptdms import TdmsFile
from scipy import integrate

# Logging: colourised console output that shows only the message text.
CONSOLE_FORMAT = " %(log_color)s%(message)s%(reset)s"
# Module-wide logger shared by the functions below.
log = simplelogging.get_logger(console_format=CONSOLE_FORMAT)

def create_arg_parser():
    """Create and return the ArgumentParser object.

    The parser accepts a single optional ``--data`` argument pointing at
    the directory that contains the TDMS CO2 data files.
    """
    # The original docstring opened with four quotes (`""""Creates`),
    # leaking a stray quote character into the docstring text.
    parser = argparse.ArgumentParser(
        description="Computation and Graphing of TDMS CO2 Data."
    )
    parser.add_argument("--data", type=str, help="Path to the data directory.")
    return parser


def get_output(path):
    ABS_PATH = os.path.abspath(path)
    CSV_PATH = os.path.join(ABS_PATH, "CSV DATA")
    PLOT_PATH = os.path.join(ABS_PATH, "PLOT DATA")
コード例 #8
0
import simplelogging

# Module-level logger; presumably the importing application configures
# the handlers — TODO confirm against simplelogging's get_logger docs.
log = simplelogging.get_logger()


def log_some_messages():
    """Emit one sample message at each standard level (debug to error)."""
    log.debug("## some debug ##")
    log.info("## some info ##")
    log.warning("## some warning ##")
    log.error("## some error ##")
コード例 #9
0
import argparse
import datetime
import hashlib
import os
import pathlib
import re
import shutil
import subprocess
from multiprocessing import Pool

import simplelogging

# Module-level logger with console output at INFO level.
logger = simplelogging.get_logger(console_level=simplelogging.INFO)


def compute_sha1(filepath):
    """Return the hex-encoded SHA-1 digest of the file at *filepath*.

    The file is read in fixed-size chunks so arbitrarily large files can
    be hashed without loading them fully into memory.
    """
    chunk_size = 65536
    digest = hashlib.sha1()
    with open(filepath, "rb") as stream:
        # iter(callable, sentinel) stops once read() returns b"" at EOF.
        for chunk in iter(lambda: stream.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


def extract_date(filepath):
    date = None
    if filepath.lower().endswith((".jpg", ".jpeg")):
        date = extract_date_image(filepath)
コード例 #10
0
def main():
    """Convert an input spreadsheet into output/incidence workbooks.

    Expects five command-line arguments: <process type> <process id>
    <input file> <output directory> <equivalences file>.  Rows are
    extracted according to the formulas configured in the equivalences
    workbook, merged with the historical output workbook, and ambiguous
    or invalid rows are routed to an incidence workbook.  Progress and
    per-row results are echoed to stdout and to a per-run log file.
    """
    # Abort the whole process if it runs longer than 20 minutes.
    timeout = 1200
    wd = Watchdog(timeout)
    wd.start()

    IS_SEDOL = False

    incidence_file = None
    output_file = None
    input_file = None
    output_dir = None

    process_type = None
    process_id = None
    fields = None
    formulas = []
    field_types = []

    configExcel = None

    error_format_match = False

    # sys.argv[5] is read below, so the script needs five real arguments
    # (len(sys.argv) >= 6); the previous `< 5` check let an IndexError
    # escape when exactly four arguments were supplied.
    if len(sys.argv) < 6:
        print(
            "ERROR : faltan argumentos..  <Tipo Proceso> <ID Proceso> <Fichero> <Directorio Destino>"
        )
    else:
        start_time = time.time()
        process_type = sys.argv[1]  # sheet/page within the config file
        process_id = sys.argv[2]  # id of the process type
        input_file = sys.argv[3]  # file to process
        output_dir = sys.argv[4]  # destination directory
        config_file = sys.argv[5]  # equivalences (configuration) file

        if not os.path.exists(output_dir):
            print("ERROR : No se encuentra la carpeta de destino: " +
                  output_dir)
            return
        if not os.path.exists(input_file):
            print("ERROR : No se encuentra el fichero a procesar: " +
                  input_file)
            return

        # One log file per run, named after the input file and a timestamp.
        now = datetime.datetime.now()
        input_file_name = os.path.splitext(ntpath.basename(input_file))
        log_name = "log_" + input_file_name[0] + "_" + now.strftime(
            "%d%m%Y%H%M%S") + ".txt"
        global log
        log = simplelogging.get_logger(file_name=output_dir + log_name,
                                       console=logger_console)

        # SEDOL processes are detected from the process id naming convention.
        IS_SEDOL = True if not pd.isna(re.search("SEDOL",
                                                 str(process_id))) else False

        output_file = "output_" + process_type + ".xlsx" if not IS_SEDOL else "output_SEDOL_" + process_type + ".xlsx"
        incidence_file = "incidence_" + process_type + ".xlsx" if not IS_SEDOL else "incidence_SEDOL_" + process_type + ".xlsx"

        log.info("Iniciando Proceso")
        log.info("PID: " + str(os.getpid()))
        log.info(process_type + " - ( " + process_id + " )")

        try:
            configExcel = pd.read_excel(config_file, process_type)
            configExcel.columns = configExcel.columns.astype(str)

            if set(['Campos', process_id]).issubset(configExcel.columns):
                campos = configExcel["Campos"]
                tipo = configExcel["Tipo"]
                # Row 0 holds the filter; field definitions start at row 1.
                fields = campos[1:]
                fields = fields.dropna(how='all')
                fields = fields.apply(lambda x: x.strip())

                allFormulas = configExcel[process_id]
                formulas = allFormulas[1:len(fields) + 1]
                field_types = tipo[1:len(fields) + 1]
                field_types = field_types.apply(lambda x: x.lower()
                                                if not pd.isna(x) else x)

                filter_func = allFormulas[0]

                validate_field_types(field_types)
                validate_formulas(formulas)
            else:
                raise Exception(
                    "No se han encontrado las columnas necesarias : (Campos ,"
                    + process_id + ")")
        except OSError as e:
            log.error("Error: " + str(e))
            print("ERROR : Fichero de equivalencias no encontrado")
            log.error("ERROR : Fichero de equivalencias no encontrado")
            return
        except Exception as e:
            log.error("Error: " + str(e))
            print("ERROR : Fichero de equivalencias ERRONEO")
            log.error("ERROR : Fichero de equivalencias ERRONEO")
            return

        log.info("Procesando fichero: " + input_file)

        file_extension = os.path.splitext(input_file)
        dataExcel = None
        try:
            if (file_extension[1] == ".csv"):
                dataExcel = pd.read_csv(input_file,
                                        header=None,
                                        encoding='latin-1',
                                        sep=None,
                                        engine='python')
            else:
                # Open the workbook once with Excel only to discover the
                # active sheet name; pandas does the actual reading.
                app = xw.App(visible=False)
                wb = xw.Book(input_file, ignore_read_only_recommended=True)
                active_sheet_name = wb.sheets.active.name
                wb.close()
                app.quit()

                dataExcel = pd.read_excel(input_file,
                                          sheet_name=active_sheet_name,
                                          header=None,
                                          parse_dates=True,
                                          dayfirst=False)
                # Strip embedded newlines that would corrupt the output.
                dataExcel = dataExcel.replace('\n', '',
                                              regex=True).replace('\r',
                                                                  '',
                                                                  regex=True)
        except Exception as e:
            print("ERROR : No se pudo abrir el fichero a procesar. " + str(e))
            log.error("ERROR : No se pudo abrir el fichero a procesar. " +
                      str(e))
            return

        # Dry run on the first line only: verifies the configured format
        # matches the file before processing everything.
        firstLineExists = gather_data_from_excel(dataExcel, filter_func,
                                                 fields, formulas, field_types,
                                                 IS_SEDOL, True)
        outputData = []
        incidenceData = []
        if ((len(firstLineExists["outputData"]) > 0
             or len(firstLineExists["incidenceData"]) > 0)
                and firstLineExists["error_format_match"] == False):
            if (not pd.isna(filter_func)):
                filter_data_excel(filter_func, dataExcel)
                log.info("Filtro aplicado.")
            dataFromExcel = gather_data_from_excel(dataExcel, filter_func,
                                                   fields, formulas,
                                                   field_types, IS_SEDOL,
                                                   False)

            outputData = dataFromExcel["outputData"]
            incidenceData = dataFromExcel["incidenceData"]
            error_format_match = False
        else:
            error_format_match = True

        try:
            flag_ok = False
            if (len(outputData) > 0 or len(incidenceData) > 0):
                resultExcel = pd.DataFrame(outputData, columns=fields)
                resultExcel.drop_duplicates(keep='first', inplace=True)

                # Round 'number' fields to 2 decimals, 'ratio' fields to 12.
                number_fields = {}
                for i in range(len(field_types)):
                    if (field_types[i + 1] == 'number'):
                        number_fields[fields[i + 1]] = 2
                resultExcel = resultExcel.round(number_fields)

                ratio_fields = {}
                for i in range(len(field_types)):
                    if (field_types[i + 1] == 'ratio'):
                        ratio_fields[fields[i + 1]] = 12

                resultExcel = resultExcel.round(ratio_fields)

                # Merge with the historical output, keeping only new rows.
                historyExcel = load_excel(output_dir + output_file, fields)
                finalExcel = None
                equalRows = pd.DataFrame(columns=['ISIN'])
                if (not historyExcel.empty and not IS_SEDOL):
                    historyExcel = historyExcel.dropna(how='all')
                    match_dataframes_types(resultExcel, historyExcel)
                    resultExcel = dataframe_difference(resultExcel,
                                                       historyExcel,
                                                       "left_only")
                    finalExcel = pd.concat([historyExcel, resultExcel])
                else:
                    finalExcel = resultExcel

                finalExcel.drop_duplicates(keep='first', inplace=True)
                finalExcel.reset_index(drop=True, inplace=True)

                # Rows sharing the same identifier column are ambiguous:
                # collect them so they go to the incidence workbook instead.
                columnId = fields[1] if not IS_SEDOL else fields[2]
                equalRows = finalExcel[finalExcel.duplicated(columnId,
                                                             keep=False)]
                equalRows.reset_index(drop=True, inplace=True)

                finalExcel.drop_duplicates(columnId, inplace=True, keep=False)
                finalExcel.reset_index(drop=True, inplace=True)

                # Rows of this run that survived into the final output.
                newsExcel = dataframe_difference(resultExcel, finalExcel,
                                                 "both")

                flag_data = False

                if (save_excel(output_dir + output_file, finalExcel)):
                    log.info("Guardando fichero: " + output_dir + output_file)
                    log.info("Proceso finalizado...")

                    if (not finalExcel.empty):
                        # Identifier column: first for ISIN, second for SEDOL.
                        res_ids = newsExcel.iloc[:,
                                                 0] if not IS_SEDOL else newsExcel.iloc[:,
                                                                                        1]
                        if (len(res_ids) > 0):
                            print("OK")
                            log.info("OK")
                            flag_ok = True
                            for ids in res_ids:
                                print(str(ids) + ",OK," + output_file)
                                log.info(str(ids) + ",OK," + output_file)
                            flag_data = True
                else:
                    print(
                        "ERROR : Fallo en la escritura del fichero de salida.")
                    log.error(
                        "ERROR : Fallo en la escritura del fichero de salida.")
                    return

                if (len(incidenceData) > 0 or not equalRows.empty):
                    incidenceExcel = pd.DataFrame(incidenceData,
                                                  columns=fields)
                    incidenceExcel = pd.concat([incidenceExcel, equalRows])

                    historyIncidenceExcel = load_excel(
                        output_dir + incidence_file, fields)
                    finalIncidenceExcel = None
                    if (not historyIncidenceExcel.empty and not IS_SEDOL):
                        incidenceExcel = incidenceExcel.where(
                            ~incidenceExcel.notnull(),
                            incidenceExcel.astype('str'))
                        historyIncidenceExcel = historyIncidenceExcel.where(
                            ~historyIncidenceExcel.notnull(),
                            historyIncidenceExcel.astype('str'))
                        finalIncidenceExcel = historyIncidenceExcel.append(
                            incidenceExcel)
                    else:
                        finalIncidenceExcel = incidenceExcel

                    # Identical incidence rows are ignored.
                    finalIncidenceExcel.drop_duplicates(inplace=True)
                    finalIncidenceExcel.reset_index(drop=True, inplace=True)

                    # Incidence rows that are new in this run.
                    newsIncidenceExcel = finalIncidenceExcel.append(
                        historyIncidenceExcel)
                    newsIncidenceExcel.drop_duplicates(keep=False,
                                                       inplace=True)

                    if (save_excel(output_dir + incidence_file,
                                   finalIncidenceExcel)):
                        log.info("Guardando fichero: " + output_dir +
                                 incidence_file)
                        err_isins = newsIncidenceExcel.iloc[:, 0]
                        if (len(err_isins) > 0):
                            if (flag_ok == False):
                                print("OK")
                                log.info("OK")
                            for isin in err_isins:
                                if (not pd.isna(isin)):
                                    print(isin + ",ERROR," + incidence_file)
                                    log.info(isin + ",ERROR," + incidence_file)
                            flag_data = True
                    else:
                        print(
                            "ERROR : Fallo en la escritura del fichero de incidencias."
                        )
                        log.error(
                            "ERROR : Fallo en la escritura del fichero de incidencias."
                        )

                if (flag_data == False):
                    print("NO DATA: No se encontraron datos para procesar")
                    log.info("NO DATA: No se encontraron datos para procesar")
            else:
                if (error_format_match == True):
                    print(
                        "ERROR : El formato para este excel no es correcto: " +
                        process_type + " - " + process_id)
                    log.error(
                        "ERROR : El formato para este excel no es correcto: " +
                        process_type + " - " + process_id)
                elif (len(outputData) == 0 and len(incidenceData) == 0):
                    print("NO DATA: No se encontraron datos para procesar")
                    log.error("NO DATA: No se encontraron datos para procesar")
                else:
                    print("ERROR")
                    log.error("ERROR")
        except (InvalidFormat) as e:
            print("ERROR : " + str(e))
            log.error("ERROR : " + str(e))
        except Exception as e:
            log.error(e)
            print("ERROR : Ha ocurrido un error inesperado.")
            log.error("ERROR : Ha ocurrido un error inesperado.")

        # Moved inside the else-branch: previously this ran even when the
        # arguments were missing, raising NameError because `start_time`
        # and `log` were never defined on that path.
        elapsed_time = time.time() - start_time
        log.info("Tiempo de ejecución: " + str(elapsed_time) + " segundos")
コード例 #11
0
    # auto refresing csv of results open and retain past runs
    copyfile(ffname, "data/pcy_results/pcy_result_0.csv")


#Generating items-singletons
#def __main__():
if __name__ == '__main__':
    # global itemCountDict
    # global hash
    # cls()

    # data_lines = open(fileName).readlines()

    # simplelogger
    # NOTE(review): ``console_level=-simplelogging.DEBUG`` negates the
    # level constant, producing a negative logging level — this looks
    # like a typo for ``simplelogging.DEBUG``; confirm. Console output
    # appears disabled anyway (console=False), so presumably only the
    # file log at log/pcy_log.log matters here.
    log = simplelogging.get_logger(console_level=-simplelogging.DEBUG,
                                   file_name="log/pcy_log.log",
                                   console=False)

    # Testing sets
    # chunk_percent = [0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    # Fractions of the dataset to process in each experiment run.
    chunk_percent = [
        0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1
    ]
    # Candidate support thresholds for the runs.
    thresholds = [0.01, 0.05, 0.1]

    log.debug("START debug session")

    # basket count
    # NOTE(review): `fileName` is defined elsewhere in this file; each
    # input line counts as one basket.
    data_lines = open(fileName).readlines()
    basket_count = len(data_lines)
コード例 #12
0
ファイル: padpo.py プロジェクト: christopheNan/padpo
def main():
    """Entry point.

    Parses the command line, configures the global logger (with or
    without color), resolves the set of *.po files to check (a local
    path, a GitHub pull request, or a python-docs-fr pull request ID),
    runs every registered checker and exits with status 1 when errors
    were reported.
    """
    global log

    parser = argparse.ArgumentParser(description="Linter for *.po files.")
    parser.add_argument("-v", "--verbose", action="count", default=0)
    # Exactly one input source must be chosen among the options below.
    files = parser.add_mutually_exclusive_group(required=True)
    files.add_argument(
        "-i",
        "--input-path",
        metavar="PATH",
        type=str,
        help="path of the file or directory to check",
        default=[],
        # allow the user to provide no path at all,
        # this helps writing scripts
        nargs="*",
    )
    files.add_argument(
        "-g",
        "--github",
        metavar="python/python-docs-fr/pull/978",
        type=str,
        help="path of pull request in GitHub to check",
        default="",
    )
    files.add_argument(
        "-p",
        "--python-docs-fr",
        metavar="978",
        type=int,
        help="ID of pull request in python-docs-fr repository",
        default=0,
    )
    files.add_argument("--version", action="store_true", help="Return version")
    parser.add_argument("-c",
                        "--color",
                        action="store_true",
                        help="color output")

    # Let every registered checker contribute its own CLI options.
    for checker in checkers:
        checker.add_arguments(parser)

    args = parser.parse_args()

    if args.version:
        print(pkg_resources.get_distribution("padpo").version)
        sys.exit(0)

    # Colored output uses colorlog-style placeholders; plain output keeps
    # a grep-friendly "file:line: level: message" layout.
    if args.color:
        console_format = ("%(log_color)s[%(levelname)-8s]%(reset)s "
                          "%(green)s%(pofile)s:%(poline)s: "
                          "%(cyan)s[%(checker)s] %(message)s%(reset)s")
    else:
        console_format = "%(pofile)s:%(poline)s: %(leveldesc)s: %(message)s"
    log = simplelogging.get_logger("__main__", console_format=console_format)

    # -v is a counter: 0 → reduced, 1 → normal, 2+ → full logging.
    if args.verbose < 1:
        log.reduced_logging()
    elif args.verbose < 2:
        log.normal_logging()
    else:
        log.full_logging()

    # A pull request (GitHub fragment or python-docs-fr ID) is downloaded
    # first; otherwise the user-supplied paths are checked directly.
    if args.github or args.python_docs_fr:
        pull_request = ""
        if args.github:
            pull_request = args.github
        if args.python_docs_fr:
            pull_request = f"python/python-docs-fr/pull/{args.python_docs_fr}"
        pull_request_info = pull_request_files(pull_request)
        path = [pull_request_info.download_directory]
    else:
        path = args.input_path
        pull_request_info = None

    for checker in checkers:
        checker.configure(args)

    # NOTE(review): warnings are collected but do not affect the exit code.
    errors, warnings = check_paths(path, pull_request_info=pull_request_info)
    if errors:
        sys.exit(1)
コード例 #13
0
import simplelogging
import json
import pandas
from datetime import datetime
from shutil import copy


# TODO: to remove the URL column and make API column linkable I'd have to build the indivdiual tables by hand.
# TODO: or maybe use soup to edit every place a <tr><td><ADD LINK HERE>name</a> occurs
# TODO: or use the formmater in to_html to edit on export
# TODO: add group by api
# TODO: Integrate `API Value` and `Description` from https://www.dynatrace.com/support/help/shortlink/api-authentication#token-permissions

# When True, work from previously saved local HTML instead of fetching
# the documentation pages again.
USE_LOCAL_OBJECTS = False

# Verbose logger: DEBUG level on both the console and the log.log file.
log = simplelogging.get_logger(
    logger_level=simplelogging.DEBUG, console=True, console_level=simplelogging.DEBUG, file_name="log.log")


def main():

    startTime = datetime.utcnow()

    # Get the inital page (used for scraping the menu)
    if USE_LOCAL_OBJECTS:
        with open('dynatrace_soup.html', 'r') as fp:
            dynatrace_soup = BeautifulSoup(fp.read(), features="html.parser")
        log.debug(
            f"Loaded soup from local storage. Object type: {type(dynatrace_soup)}")
    else:
        dynatrace_soup = get_api_start_doc()
コード例 #14
0
def test_default_imported_logger():
    """A logger obtained without a name must carry no handlers."""
    module_log = simplelogging.get_logger()
    assert not module_log.handlers
コード例 #15
0
# Application revision string.
revision = '1.1'

# Free-form "about" text.
# NOTE(review): this says "Revision: 1.00" while ``revision`` is '1.1' —
# confirm which value is current.
about = """
Revision: 1.00
License: GPL-3
"""

# When True, log records are also written to 'log.log'.
DEBUG = False

# Console lines are colourised; the file format is the same layout
# without the color escape placeholders.
LOG_CONSOLE_FORMAT = "%(log_color)s%(asctime)s [%(levelname)-8s] %(filename)20s(%(lineno)3s):: %(message)s%(reset)s"
LOG_FILE_FORMAT = "%(asctime)s [%(levelname)-8s] %(filename)20s(%(lineno)3s):: %(message)s"

if DEBUG:
    log = simplelogging.get_logger(file_format=LOG_FILE_FORMAT,
                                   console_format=LOG_CONSOLE_FORMAT,
                                   file_name='log.log')
else:
    log = simplelogging.get_logger(file_format=LOG_FILE_FORMAT,
                                   console_format=LOG_CONSOLE_FORMAT)

# Module-level NordVPN instance used by the application window.
nord = NordVPN()


class AppWindow(QMainWindow, Ui_Dialog):
    def center(self):
        """Move the window so it is centred on the available screen area."""
        geometry = self.frameGeometry()
        screen_centre = QDesktopWidget().availableGeometry().center()
        geometry.moveCenter(screen_centre)
        self.move(geometry.topLeft())
コード例 #16
0
def test_disable_console():
    """Any stream handler left after console=False must not be registered."""
    main_log = simplelogging.get_logger("__main__", console=False)
    stream_handlers = (
        h for h in main_log.handlers if isinstance(h, colorlog.StreamHandler)
    )
    for h in stream_handlers:
        assert h._name not in logging._handlers
コード例 #17
0
def test_default_logger_level():
    """The default logger's effective level must be DEBUG."""
    logger = simplelogging.get_logger("__main__")
    effective_level = logger.getEffectiveLevel()
    assert effective_level == simplelogging.DEBUG
コード例 #18
0
def test_logger_is_from_logging():
    """get_logger must return an instance of the stdlib Logger class."""
    logger = simplelogging.get_logger()
    assert isinstance(logger, logging.Logger)