Example #1
def generate_metadata_dataset(study_id, center_id, sample_ids, sample_delim):
    mcfg = cm2.get_main_config()
    mlog, mlog_handler = cm2.get_logger(cm2.get_client_ip())
    process_name = inspect.stack()[1][3]  # name of the calling function, used for logging
    dataset_name = 'sql_sp_metadata'

    if mlog:
        mlog.info(
            'Processing request from "{}" for generating metadata dataset.'.
            format(process_name))

    # get the dataset from the database
    result, columns, err = rp.get_dataset(mcfg,
                                          mlog,
                                          dataset_name,
                                          study_id=study_id,
                                          center_id=center_id,
                                          sample_ids=sample_ids,
                                          sample_delim=sample_delim)

    # check for errors and create an output
    if err and not err.exist():
        if mlog:
            mlog.info(
                'Received response from DB, proceeding to render the api response.'
            )
            cm2.stop_logger(mlog, mlog_handler)
        return jsonify(data=result, status=200)
    else:
        if mlog:
            mlog.info('Proceeding to report an error.')
            cm2.stop_logger(mlog, mlog_handler)
        return jsonify(message='Error retrieving data', status=400)
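A minimal sketch of how a handler like this might be exposed as a Flask endpoint. The route path, query-parameter names, and the local app object are assumptions for illustration, not taken from the original project.

# Hypothetical route registration; the path and parameter names are assumptions.
from flask import Flask, request

app = Flask(__name__)


@app.route('/api/metadata', methods=['GET'])
def metadata_endpoint():
    # pass the expected arguments from the query string to the handler above
    return generate_metadata_dataset(
        study_id=request.args.get('study_id'),
        center_id=request.args.get('center_id'),
        sample_ids=request.args.get('sample_ids'),
        sample_delim=request.args.get('sample_delim', ','))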
Example #2
def generate_view(view_name):
    mcfg = cm2.get_main_config()
    mlog, mlog_handler = cm2.get_logger(cm2.get_client_ip())
    process_name = inspect.stack()[1][3]

    if mlog:
        mlog.info(
            'Processing request from "{}" for generating "{}" view.'.format(
                process_name, view_name))

    # get the dataset from the database
    result, columns, err = rp.get_veiw_data(mcfg, mlog, view_name)

    # check for errors and create an output
    if err and not err.exist():
        if mlog:
            mlog.info(
                'Received response from DB, proceeding to render the api response.'
            )
            cm2.stop_logger(mlog, mlog_handler)
        return jsonify(data=result, status=200)
    else:
        if mlog:
            mlog.info('Proceeding to report an error.')
            cm2.stop_logger(mlog, mlog_handler)
        return jsonify(message='Error retrieving data', status=400)
Example #3
def index():
    # verify main app settings and get config and logging references
    mcfg = cm2.get_main_config()
    env_validated = cm2.check_env_variables(__file__)

    if mcfg and env_validated:
        request_datetime = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        return jsonify(
            message='SealfonLab SampleInfo API Up and Running. Date: {}. '
            'For more details navigate to {}/api/docs'.format(
                request_datetime, request.base_url),
            status=200)
    else:
        return jsonify(
            message='SealfonLab SampleInfo API - Errors encountered while '
            'retrieving data.',
            status=400)
Example #4
def __init__(self):
    # load the main application config
    self.cfg = cm2.get_main_config()
    # build the DB connection string (the original hints at
    # self.cfg.get_item_by_key(gc.CFG_DB_CONN).strip())
    self.s_conn = self.prepare_conn_string()
    self.conn = None  # no open connection yet
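prepare_conn_string is not shown in this example; here is a hedged sketch built from the commented hint above, where only the get_item_by_key call and the gc.CFG_DB_CONN key come from the original and the rest is an assumption.

def prepare_conn_string(self):
    # Hypothetical sketch: read the raw connection string from the main config
    # (get_item_by_key and gc.CFG_DB_CONN come from the original commented-out hint).
    raw_conn = self.cfg.get_item_by_key(gc.CFG_DB_CONN)
    return raw_conn.strip() if raw_conn else None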
Example #5
def handle_error(error):
    code = 500
    msg = 'Internal error has occurred'

    if isinstance(error, HTTPException):
        code = error.code
    # URL of the request that initiated this error
    url = request.url

    if code == 500:
        # if 500 error has occurred, record error in log and send email
        # define config and log objects
        mcfg = cm2.get_main_config()
        mlog, mlog_handler = cm2.get_logger(cm2.get_client_ip())

        err = WebError('UNHANDLED ERROR', mcfg, mlog)
        _str = 'UNEXPECTED UNHANDLED ERROR "{}" occurred while processing the following URL request: "{}"; ' \
               'the original exception that triggered the error is "{}". ' \
               'Here is the traceback: \n{}'.format(
                   error.name, url, error.original_exception, traceback.format_exc())
        err.add_error(_str, code, send_email=True)
        if mlog:
            mlog.critical(_str)
    else:
        # non-critical HTTP error: return its own description to the caller
        msg = error.description

    return jsonify(message=msg, status=code)
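A minimal sketch of how such a handler is typically registered with Flask; the app object and the choice of exception classes are assumptions about this project's setup.

from flask import Flask
from werkzeug.exceptions import HTTPException

app = Flask(__name__)  # assumption: the project's real app object is defined elsewhere

# register handle_error for both HTTP errors and unexpected exceptions
app.register_error_handler(HTTPException, handle_error)
app.register_error_handler(Exception, handle_error)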
Example #6
def _run_on_start():
    # get reference for the main config file
    mcfg = cm2.get_main_config()
    # get a config value defining if the custom logging should be used
    custom_logging = mcfg.get_value('Logging/custom_logging')
    if isinstance(custom_logging, bool):
        gc.custom_logging = custom_logging
    else:
        # assign False as a default
        gc.custom_logging = False

    # start scheduler to delete old log files
    scheduler.init_scheduler()
    # run the first cleaning event on startup
    cm2.clean_log_directory()
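How this startup routine is invoked is not shown in the original; one plausible wiring, assuming app is the project's Flask instance, is to call it once inside an application context.

# Hypothetical call site; the app object and the module-level invocation are assumptions.
with app.app_context():
    _run_on_start()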
Example #7
def init_scheduler():
    app.config.from_object(Scheduler_Config())

    mcfg = cm2.get_main_config()
    interval_day = mcfg.get_value('Logging/clean_log_frequency_days')
    if isinstance(interval_day, (int, float)):
        interval_sec = 86400 * interval_day  # assign number of days as per config file
    else:
        interval_sec = 86400  # assign 1 day as default

    # define and start scheduler
    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.add_job(id='scheduler_clean_log_files',
                      func=scheduler_clean_log_files,
                      trigger="interval",
                      seconds=interval_sec)  # 1 day: 86400
    scheduler.start()
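Scheduler_Config is referenced above but not shown; here is a minimal sketch of what it might contain, assuming the APScheduler class used above comes from the flask_apscheduler package, whose SCHEDULER_API_ENABLED setting is standard even though its use in this project is an assumption.

class Scheduler_Config:
    # Hypothetical sketch of the object passed to app.config.from_object() above;
    # SCHEDULER_API_ENABLED is a standard flask_apscheduler setting.
    SCHEDULER_API_ENABLED = True  # expose the scheduler's built-in REST endpoints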