Example 1
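# Configure a logger in error_dir, then loop forever: refresh the list of active errors from new data and follow up on any that remain.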
async def main():
    try:
        from summit_core import configure_logger
        from summit_core import error_dir as rundir
        logger = configure_logger(rundir, __name__)
    except Exception as e:
        print(f'Error {e.args} prevented logger configuration.')
        send_processor_email(PROC, exception=e)
        return False

    errors = []

    while True:
        errors = await asyncio.create_task(
            check_for_new_data(logger, active_errors=errors))
        await asyncio.create_task(
            check_existing_errors(logger, active_errors=errors))
Example 2
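# Load any new Picarro data; if it contains calibration events, build master cals from them, then plot the new data.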
async def main():
    try:
        from summit_core import picarro_dir as rundir
        from summit_core import configure_logger
        logger = configure_logger(rundir, __name__)
    except Exception as e:
        print(f'Error {e.args} prevented logger configuration.')
        send_processor_email(PROC, exception=e)
        return

    try:
        if await asyncio.create_task(check_load_new_data(logger)):

            if await asyncio.create_task(find_cal_events(logger)):
                await asyncio.create_task(create_mastercals(logger))

            await asyncio.create_task(plot_new_data(logger))

        return True
    except Exception as e:
        logger.error(f'Exception {e.args} occurred in Picarro main()')
        send_processor_email(PROC, exception=e)
        return False
Example 3
async def main():
    """
    Configure a logger and run processes in order, only proceeding if new data is created that warrants running the next
    processes.

    :return: Boolean, True if successful
    """

    try:
        from summit_core import methane_dir as rundir
        from summit_core import configure_logger
        logger = configure_logger(rundir, __name__)
    except Exception as e:
        print(f'Error {e.args} prevented logger configuration.')
        send_processor_email(PROC, exception=e)
        return

    try:
        new_pas = await asyncio.create_task(check_load_pa_log(logger))
        new_logs = await asyncio.create_task(check_load_run_logs(logger))

        if new_pas or new_logs:
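            # Only run the matching, quantification and plotting chain when either source yielded new records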
            if await asyncio.create_task(match_runs_to_lines(logger)):
                if await asyncio.create_task(match_peaks_to_samples(logger)):
                    await asyncio.create_task(add_one_standard(logger))
                    if await asyncio.create_task(quantify_samples(logger)):
                        await asyncio.create_task(plot_new_data(logger))
                    await asyncio.create_task(update_excel_sheet(logger))

        return True

    except Exception as e:
        logger.critical(
            f'Exception {e.args} caused a complete failure of the CH4 processing.'
        )
        send_processor_email(PROC, exception=e)
        return False
Example 4
import os
import asyncio
from pathlib import Path

from summit_daily import check_load_dailies

if __name__ == '__main__':
    from summit_core import configure_logger, core_dir

    logger = configure_logger(core_dir, __name__)

    loop = asyncio.get_event_loop()
    loop.run_until_complete(check_load_dailies(logger))
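
The same one-shot pattern can be written more compactly on Python 3.7+ with asyncio.run(), which creates the event loop, runs the coroutine to completion, and then closes the loop. A minimal sketch, reusing the imports from the example above:

import asyncio

from summit_daily import check_load_dailies

if __name__ == '__main__':
    from summit_core import configure_logger, core_dir

    logger = configure_logger(core_dir, __name__)

    # asyncio.run() replaces the explicit get_event_loop()/run_until_complete() pair
    asyncio.run(check_load_dailies(logger))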
Example 5
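        # One pass of the polling loop: refresh the active error list, follow up on remaining errors, then sleep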
        errors = await asyncio.create_task(
            check_for_new_data(logger, active_errors=errors))

        if errors:
            errors = await asyncio.create_task(
                check_existing_errors(logger, active_errors=errors))

        print('Sleeping...')
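        # Pause for roughly 20 minutes (40 iterations of 30 s) before polling again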
        for i in range(40):
            await asyncio.sleep(30)


if __name__ == '__main__':

    try:
        from summit_core import methane_dir as rundir
        from summit_core import configure_logger

        logger = configure_logger(rundir, __name__)

    except Exception as e:
        print(f'Error {e.args} prevented logger configuration.')
        send_processor_email('MAIN', exception=e)
        raise e

    loop = asyncio.get_event_loop()
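    # Schedule both long-running coroutines on the same loop, then block in run_forever()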
    loop.create_task(move_log_files(logger))
    loop.create_task(main(logger))

    loop.run_forever()
Example 6
                    logger.warning(
                        f"Peak with name {peak_corr.name} or retention time of {peak_corr.rt} from "
                        f"NmhcCorrection {correction.date} not found in NmhcLine for {line.date}"
                    )
                    continue

            if peak.pa != peak_corr.pa:
                peak.pa = peak_corr.pa
                peak.rt = peak_corr.rt
                peak.rev = peak.rev + 1  # Sqlite *does not* like using += notation

        correction.status = 'applied'

        line.nmhc_corr_con = correction
        correction.correction_id = line

        session.merge(correction)
        session.merge(line)
        logger.info(f'Successful peak corrections made to {line.date}')
        session.commit()


if __name__ == '__main__':
    from summit_core import voc_dir as rundir
    from summit_core import configure_logger

    logger = configure_logger(rundir, 'voc_corrections')

    loop = asyncio.get_event_loop()
    loop.run_until_complete(load_excel_corrections(logger))