def classify_studies(studies):
    """
    Classify each batch of studies, grouped by DICOM modality.

    Any modality other than 'XR' is treated as a CT scan: each study is
    saved with type 'CT' and a ready-notification is sent. 'XR' studies
    are instead run through a classifier model for that modality.

    Args:
        studies: mapping of modality string -> iterable of orthanc study ids
    """
    for modality, study_paths in studies.items():
        # NOTE(review): the original condition was
        #   modality == 'CT' or modality != 'XR'
        # which is logically equivalent to `modality != 'XR'` — everything
        # except XR gets labelled CT. Simplified here without changing
        # behavior; confirm the intent wasn't e.g. modality in ('CT', 'DX').
        if modality != 'XR':
            for orthanc_id in study_paths:
                study_db.save_study_type(orthanc_id, 'CT')
                messaging_service.send_notification(f'Study {orthanc_id} ready', 'study_ready')
            continue

        # look up the classifier configured for this modality;
        # fall back to the default model when none is set
        classifier_model = classifier_db.get_classifier_model(modality)
        if classifier_model is None:
            classifier_model = eval_db.get_default_model()

        # run the studies through the classifier under a fresh eval id
        eval_service.evaluate(classifier_model, study_paths, str(uuid.uuid4()))
# --- Example #2 ---
def fail_dicom_eval(eval_id):
    """
    Mark a single DICOM evaluation as failed and broadcast the failure.

    Intended to be called from an exception handler: the active traceback
    is printed and logged alongside the error message.
    """
    traceback.print_exc()

    msg = f'evaluation for study {eval_id} failed'
    logger_service.log_error(msg, traceback.format_exc())

    # flip the eval's status to FAILED, then tell the frontend
    eval_db.fail_eval(eval_id)
    messaging_service.send_notification(msg, 'eval_failed')
# --- Example #3 ---
def on_classifier_result(ch, method, properties, body):
    """
    Message-queue callback fired when a classifier finishes a study.

    Parses the JSON payload, persists the classification result, and
    notifies the frontend that the study is ready.
    """
    print(f'received classifier result {body}')

    payload = json.loads(body)
    orthanc_id = payload['id']

    classifier_service.save_classification(orthanc_id, payload['output'])
    messaging_service.send_notification(f'Study {orthanc_id} ready', 'new_result')
# --- Example #4 ---
def fail_evals(model_id, eval_ids):
    """
    Mark every evaluation in *eval_ids* as failed after a model-level error.

    Intended to be called from an exception handler: prints and logs the
    active traceback, flips each eval's status to FAILED, then sends a
    single failure notification to the frontend.
    """
    traceback.print_exc()

    msg = f'evaluation using model {model_id} failed'
    logger_service.log_error(msg, traceback.format_exc())

    for failed_id in eval_ids:
        eval_db.fail_eval(failed_id)

    messaging_service.send_notification(msg, 'eval_failed')
def fail_experiment(experiment):
    """
    Mark *experiment* as failed, notify the frontend, and log the error.

    Intended to be called from an exception handler: the active traceback
    is printed and logged.

    Args:
        experiment: dict-like row with at least 'id' and 'name' keys
    """
    messaging_service.send_notification(
        f'Failed experiment {experiment["name"]}', 'experiment_failed')
    experiment_db.set_experiment_failed(experiment['id'])
    traceback.print_exc()
    experiment_id = experiment['id']
    logger_service.log_error(f'experiment {experiment_id} failed',
                             traceback.format_exc())
# --- Example #6 ---
def on_eval_result(ch, method, properties, body):
    """
    Message-queue callback fired when an evaluation finishes.

    Parses the JSON payload; on a 'FAIL' result delegates to the eval
    failure handler, otherwise persists the result and notifies the
    frontend.
    """
    print(f'received eval result {body}')

    message = json.loads(body)
    eval_id = message['id']
    result = message['output']
    # renamed from `type` to avoid shadowing the builtin
    result_type = message['type']

    if result_type == 'FAIL':
        eval_service.fail_dicom_eval(eval_id)
        return

    # write result to db
    eval_service.write_eval_results(result, eval_id)

    # send notification to frontend
    messaging_service.send_notification(f'Finished evaluation {eval_id}', 'new_result')
# --- Example #7 ---
def run_experiments(batch_size: int):
    """
    Poll the db for running experiments and kick each one off.

    For every running experiment: announce the start, restart any failed
    evaluations, then run the first *batch_size* of its studies through
    the experiment's model.
    """
    print('running experiments')

    for exp in experiment_service.get_running_experiments():
        messaging_service.send_notification(
            f'Started experiment {exp["name"]}', 'experiment_started')

        # restart failed evaluations before queueing new work
        eval_service.restart_failed_by_exp(exp['id'])

        # gather the experiment's studies and its model
        studies = experiment_service.get_experiment_studies(exp['id'])
        model = model_service.get_model(exp['modelId'])

        # only the first batch_size studies are run this pass
        experiment_service.run_experiment(
            studies[:batch_size], dict(model), dict(exp))
# --- Example #8 ---
def start_study_evaluations(studies: List[object], model_id: int) -> List[int]:
    """
    inserts entries into the study_evaluation table and sets them to 'RUNNING'

    Args:
        studies (List[object]): study db entries (dicts with at least
            'id' and 'orthancStudyId' keys) to evaluate
        model_id (int): the id of the model to use in evaluation

    Returns:
        List[int]: a list of ids of the db entries that were inserted
    """
    # nothing to insert — bail out before logging or sending notifications
    if len(studies) == 0:
        return []

    logger_service.log(f'starting study evaluations for {studies}')

    for study in studies:
        messaging_service.send_notification(
            f"Started evaluation of study {study['orthancStudyId']}",
            'eval_started')

    # build the VALUES tuples for a single multi-row insert.
    # SECURITY(review): study ids are interpolated directly into the SQL
    # string; if they can ever contain untrusted text this is injectable.
    # query_and_fetchall's visible interface takes only a SQL string, so
    # this is flagged rather than rewritten — TODO parameterize upstream.
    values = [
        f'(\'{study["id"]}\', null, \'RUNNING\', {model_id})'
        for study in studies
    ]

    # join the insert tuples so one statement inserts every row
    reduced = ','.join(values)

    # run the insert and collect the generated ids
    sql = f'''
    INSERT INTO study_evaluation ("studyId", "modelOutput", status, "modelId")
    VALUES {reduced}
    RETURNING id;
    '''
    query_result = query_and_fetchall(sql)

    return [evaluation['id'] for evaluation in query_result]
def finish_experiment(experiment):
    """
    Mark *experiment* as complete and notify the frontend.

    Args:
        experiment: dict-like row with at least 'id' and 'name' keys
    """
    experiment_db.set_experiment_complete(experiment['id'])
    messaging_service.send_notification(
        f'Completed experiment {experiment["name"]}', 'experiment_finished')
# --- Example #10 ---
def fail_model(model_id):
    """
    Log and broadcast a model-level evaluation failure.

    Intended to be called from an exception handler: the active traceback
    is printed and logged alongside the error message.
    """
    traceback.print_exc()

    msg = f'evaluation using model {model_id} failed'

    logger_service.log_error(msg, traceback.format_exc())
    messaging_service.send_notification(msg, 'eval_failed')