Example 1
def release_annos_on_session_timeout():
    lostconfig = config.LOSTConfig()
    dbm = DBMan(lostconfig)
    c_imgs, c_2dannos = release_annos_by_timeout(dbm,
                                                 lostconfig.session_timeout)
    dbm.close_session()
    return c_imgs, c_2dannos
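Every example in this collection follows the same lifecycle: build a LOSTConfig, open a DBMan session, do the work, then call close_session(). Below is a minimal, hedged sketch of wrapping that lifecycle in a context manager so the session is closed even when the work raises; the stub classes stand in for LOSTConfig and DBMan and are assumptions for illustration, not the project's API.

from contextlib import contextmanager

# Hypothetical stand-ins for LOSTConfig / DBMan, used only so this
# sketch runs on its own; the real objects come from the LOST codebase.
class StubConfig:
    session_timeout = 30

class StubDBMan:
    def __init__(self, config):
        self.config = config

    def close_session(self):
        print('session closed')

@contextmanager
def db_session(config):
    """Open a database manager and guarantee close_session() on exit."""
    dbm = StubDBMan(config)
    try:
        yield dbm
    finally:
        dbm.close_session()

if __name__ == '__main__':
    with db_session(StubConfig()) as dbm:
        pass  # query / release annotations here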
Example 2
def delete_pipe(pipe_id):
    # logger = get_task_logger(__name__)
    # logger.info("DELETED BY CELERY {}".format(pipe_id))
    lostconfig = LOSTConfig()
    dbm = DBMan(lostconfig)
    pipeline.delete(dbm, pipe_id)
    dbm.close_session()
Example 3
def exec_pipe():
    lostconfig = get_args()
    dbm = DBMan(lostconfig)
    pipe_list = dbm.get_pipes_to_process()
    # For each pipe that needs to be processed
    for p in pipe_list:
        pipe_man = cron.PipeEngine(dbm=dbm, pipe=p, lostconfig=lostconfig)
        pipe_man.process_pipeline()
    dbm.close_session()
Example 4
def celery_exec_script(pipe_element_id):
    try:
        # Collect context information for celery task
        logger = get_task_logger(__name__)
        lostconfig = LOSTConfig()
        dbm = DBMan(lostconfig)
        pipe_e = dbm.get_pipe_element(pipe_e_id=pipe_element_id)
        worker = CurrentWorker(dbm, lostconfig)
        if not worker.enough_resources(pipe_e.script):
            logger.warning(
                'Not enough resources! Rejected {} (PipeElement ID {})'.format(
                    pipe_e.script.path, pipe_e.idx))
            return
        pipe_e.state = state.PipeElement.IN_PROGRESS
        dbm.save_obj(pipe_e)
        file_man = FileMan(lostconfig)
        pipe = pipe_e.pipe

        cmd = gen_run_cmd("pudb3", pipe_e, lostconfig)
        debug_script_path = file_man.get_instance_path(pipe_e)
        debug_script_path = os.path.join(debug_script_path, 'debug.sh')
        with open(debug_script_path, 'w') as sfile:
            sfile.write(cmd)

        cmd = gen_run_cmd("python3", pipe_e, lostconfig)
        start_script_path = file_man.get_instance_path(pipe_e)
        start_script_path = os.path.join(start_script_path, 'start.sh')
        with open(start_script_path, 'w') as sfile:
            sfile.write(cmd)
        p = subprocess.Popen('bash {}'.format(start_script_path),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             shell=True)
        logger.info("{} ({}): Started script\n{}".format(
            pipe.name, pipe.idx, cmd))
        worker.add_script(pipe_e, pipe_e.script)
        out, err = p.communicate()
        worker.remove_script(pipe_e, pipe_e.script)
        if p.returncode != 0:
            raise Exception(err.decode('utf-8'))
        logger.info('{} ({}): Executed script successfully: {}'.format(
            pipe.name, pipe.idx, pipe_e.script.path))
        dbm.close_session()

    except Exception:
        pipe = pipe_e.pipe
        logger.info('{} ({}): Exception occurred in script: {}'.format(
            pipe.name, pipe.idx, pipe_e.script.path))
        msg = traceback.format_exc()
        logger.error(msg)
        script_api.report_script_err(pipe_e, pipe, dbm, msg)
        dbm.close_session()
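The core of celery_exec_script is a generic pattern: render a shell command, write it to a start.sh file, run it through bash, and fail loudly on a non-zero exit code. Below is a self-contained sketch of just that pattern; the function name and the use of a temporary directory are illustrative assumptions, not part of the LOST API.

import os
import subprocess
import tempfile

def run_start_script(cmd, workdir=None):
    # Write the command to start.sh, mirroring celery_exec_script above.
    workdir = workdir or tempfile.mkdtemp()
    script_path = os.path.join(workdir, 'start.sh')
    with open(script_path, 'w') as sfile:
        sfile.write(cmd)
    # Execute with bash, capture output, and raise on failure.
    p = subprocess.Popen('bash {}'.format(script_path),
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         shell=True)
    out, err = p.communicate()
    if p.returncode != 0:
        raise RuntimeError(err.decode('utf-8'))
    return out.decode('utf-8')

if __name__ == '__main__':
    print(run_start_script('echo hello from start.sh'))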
Example 5
def process_pipes(log_name, client):
    lostconfig = config.LOSTConfig()
    dbm = DBMan(lostconfig)
    pipe_list = dbm.get_pipes_to_process()
    # For each pipe that needs to be processed
    for p in pipe_list:
        pipe_man = cron.PipeEngine(dbm=dbm,
                                   pipe=p,
                                   lostconfig=lostconfig,
                                   client=client,
                                   logger_name=log_name)
        pipe_man.process_pipeline()
    dbm.close_session()
Example 6
def send_life_sign():
    logger = get_task_logger(__name__)
    lostconfig = LOSTConfig()
    dbm = DBMan(lostconfig)
    worker = dbm.get_worker(lostconfig.worker_name)
    if worker is None:
        register_worker(dbm, lostconfig)
        logger.info('Registered worker: {}'.format(lostconfig.worker_name))
    else:
        worker.timestamp = datetime.utcnow()
        dbm.add(worker)
        dbm.commit()
        logger.info('Sent lifesign: {}'.format(worker.worker_name))
    dbm.close_session()
Example 7
def init_worker_on_startup():
    lostconfig = LOSTConfig()
    dbm = DBMan(lostconfig)
    worker = dbm.get_worker(lostconfig.worker_name)
    if worker is None:
        register_worker(dbm, lostconfig)
        print('Registered worker: {}'.format(lostconfig.worker_name))
    else:
        worker.timestamp = datetime.utcnow()
        worker.resources = '[]'
        worker.in_progress = '{}'
        dbm.add(worker)
        dbm.commit()
        print('Reset worker on startup: {}'.format(worker.worker_name))
    dbm.close_session()
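send_life_sign and init_worker_on_startup share a register-or-refresh pattern: look the worker up by name, register it if missing, otherwise update its timestamp (and, on startup, reset its bookkeeping) and commit. The sketch below shows that logic with an in-memory registry standing in for the worker table; all names and fields here are illustrative assumptions, not LOST's schema.

from datetime import datetime, timezone

# In-memory stand-in for the worker table; keys are worker names.
_registry = {}

def register_or_refresh(worker_name, reset=False):
    worker = _registry.get(worker_name)
    if worker is None:
        # First contact: register the worker.
        worker = {'name': worker_name,
                  'timestamp': datetime.now(timezone.utc),
                  'resources': '[]',
                  'in_progress': '{}'}
        _registry[worker_name] = worker
        print('Registered worker: {}'.format(worker_name))
    else:
        # Known worker: refresh its heartbeat timestamp.
        worker['timestamp'] = datetime.now(timezone.utc)
        if reset:
            # Startup path: clear tracked resources and running scripts.
            worker['resources'] = '[]'
            worker['in_progress'] = '{}'
        print('Sent life sign: {}'.format(worker_name))
    return worker

if __name__ == '__main__':
    register_or_refresh('worker-1')               # registers
    register_or_refresh('worker-1')               # refreshes timestamp
    register_or_refresh('worker-1', reset=True)   # startup-style reset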
Example 8
def exec_pipe():
    lostconfig = config.LOSTConfig()
    dbm = DBMan(lostconfig)
    pipe_list = dbm.get_pipes_to_process()
    if lostconfig.worker_management != 'dynamic':
        client = Client('{}:{}'.format(lostconfig.scheduler_ip,
                                       lostconfig.scheduler_port))
    else:
        client = None
    # For each pipe that needs to be processed
    for p in pipe_list:
        pipe_man = cron.PipeEngine(dbm=dbm,
                                   pipe=p,
                                   lostconfig=lostconfig,
                                   client=client)
        pipe_man.process_pipeline()
    dbm.close_session()
Example 9
def remove_empty_annos():
    lostconfig = config.LOSTConfig()
    dbm = DBMan(lostconfig)
    c_2dannos = remove_empty_annos_by_timeout(dbm, lostconfig.session_timeout)
    dbm.close_session()
    return c_2dannos
Example 10
def release_annos():
    lostconfig = get_args()
    dbm = DBMan(lostconfig)
    __release_project_annos(dbm)
    dbm.close_session()