Example #1
def evaluate_dicom(model_id: int, orthanc_id: str):
    """
    takes in a image, path to a dicom and a eval id
    from the db and evaluates the dicom using the image

    Args:
        model_id (int): the id of the database model
        dicom_path (str): the path on the disk to the directory containing the DICOMDIR file
        eval_id (int): the database id of the study evaluation that has already been saved by caller
    """
    try:
        print('evaluating dicom')
        eval_id = eval_service.add_evals_to_db(orthanc_id, model_id)
        print('getting model')
        # get the model from the database
        model = model_service.get_model(model_id)

        print('downloading dicom')
        # download the study from orthanc
        study_path, _, _, _, _ = orthanc_service.get_study(orthanc_id)
        print('evaluating...')
        # evaluate study
        eval_service.evaluate(model, [study_path], str(uuid.uuid4()), [eval_id])

        
    except Exception as e:
        # log the error and mark the evaluation as failed
        print(f'failed to evaluate dicom: {e}')
        eval_service.fail_dicom_eval(eval_id)
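
A minimal usage sketch for the example above; both identifiers are placeholders, and the call assumes the service modules used in the function are importable in the caller's context.

# Hypothetical invocation, e.g. from a worker that was handed a newly received study.
# Both identifiers below are placeholders, not real database or Orthanc ids.
evaluate_dicom(model_id=1, orthanc_id='example-orthanc-study-id')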
Example #2
def evaluate_studies(model_id: int, batch_size: int):
    """
    Gets the applicable studies from orthanc and evaluates them using the given model

    Args:
        model_id (int): the database id of the model to evaluate with
        batch_size (int): the number of studies to process at a time
    """
    try:
        print(f'evaluating studies for {model_id}')

        # get all studies
        studies = study_service.get_studies_for_model(model_id, batch_size)

        # exit if no studies
        if len(studies) < 1:
            return

        # get the appropriate evaluating model
        model = model_service.get_model(model_id)

        # get ids of study evaluations
        eval_ids = eval_service.get_eval_ids(model, studies) + eval_service.get_failed_eval_ids(model)

        eval_service.evaluate_studies(studies, model, eval_ids)
            
    except Exception as e:
        # log the error and mark the model's evaluations as failed
        print(f'failed to evaluate studies: {e}')
        eval_service.fail_model(model_id)
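
A brief usage sketch; the model ids and batch size below are placeholders and assume those models already exist in the database.

# Hypothetical: evaluate pending studies for a couple of known models.
for mid in (1, 2):  # placeholder model ids
    evaluate_studies(model_id=mid, batch_size=10)  # placeholder batch size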
Example #3
def run_experiments(batch_size: int):
    """
    Monitors db for active experiments and runs them
    """
    print('running experiments')

    # get experiments
    experiments = experiment_service.get_running_experiments()

    # run experiments
    for experiment in experiments:
        messaging_service.send_notification(f'Started experiment {experiment["name"]}', 
                                             'experiment_started')
        # restart failed evaluations
        eval_service.restart_failed_by_exp(experiment['id'])
        # get experiment studies
        studies = experiment_service.get_experiment_studies(experiment['id'])
        # get model
        model = model_service.get_model(experiment['modelId'])
        batch = studies[:batch_size]
        experiment_service.run_experiment(batch, dict(model), dict(experiment))
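
run_experiments makes a single pass over the currently running experiments, so a caller would typically invoke it on a schedule; below is a hedged sketch of such a polling loop, where the interval and batch size are assumptions rather than values from the source.

import time

# Hypothetical polling loop; the 30-second interval and batch size are assumptions.
while True:
    run_experiments(batch_size=8)
    time.sleep(30)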
from services.exception import TrainingInProgressError
from services.exception import ClassifierNotReadyError

# Globals
classifier_ready = False
is_training = False
is_cleaning = False
MODEL_TYPE = 'STOCKV2'

# Try to load the tokenizer & classifier
classifier = None
tokenizer = None

model_id = model_service.find_current_model_by_model_type(MODEL_TYPE)
if model_id is not None:
    model = model_service.get_model(model_id)

    classifier = model.predictor
    tokenizer = model.tokenizer

    classifier_ready = True


# Begin stockv2 functions --------------------------------------------------------------------------------------------------
def predict(texts):
    """Runs the loaded classifier on the given texts; raises ClassifierNotReadyError if no model is loaded."""
    global classifier_ready

    if not classifier_ready:
        raise ClassifierNotReadyError()