Code Example #1
def generate_test_result(fm: FineModel,
                         key,
                         lr_list,
                         save=True,
                         verbose=1,
                         workers=4,
                         use_multiprocessing=False,
                         load_weights=True,
                         total_epochs=None):
    """
    Generates test results using all test images from db_index=1 (CAP TEST)

    If the weights are already loaded, set load_weights=False
    """
    params = parse_key(key)
    if 'epochs' not in params:
        params['epochs'] = total_epochs
    exp = params['exp']
    del params['exp']
    descriptions = {
        1: 'Failed Experiment',
        2: 'Original (seed=1)',
        3: 'Original (seed=2)',
        4: 'Failed Experiment',
        5: 'No Crop (seed=1)',
    }
    description = descriptions.get(exp, '')

    lr_index = params['lr']
    params['lr'] = lr_list[lr_index]

    dt = datetime.datetime.now()
    description += '\n'
    description += 'Analyzed on: {}'.format(dt.strftime("%Y-%m-%d %H:%M:%S"))

    if load_weights:
        fm.load_weights(key)
    fm.compile_model()
    test = cri.CrCollection.load().filter_by(
        dataset_index=1).tri_label().labeled()
    result = fm.generate_test_result(test,
                                     verbose=verbose,
                                     save_to_key=key if save else None,
                                     params=params,
                                     workers=workers,
                                     use_multiprocessing=use_multiprocessing,
                                     description=description)

    if verbose:
        print(key.center(80, '-'))
        print(result.describe())

    return result
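
Code Examples #1 and #5 rely on a `parse_key` helper that is not shown on this page. Below is a minimal sketch of what such a parser might look like, assuming instance keys encode their fields in the style of `EXP02_D00_L01_F03_E025` (experiment, freeze depth, learning-rate index, fold, epochs); the actual key format used in the project may differ.

import re


def parse_key(key):
    """Hypothetical key parser: extract integer fields from an instance key.

    Only fields that appear in the key end up in the returned dict, so
    'epochs' may be absent, as generate_test_result() above expects.
    """
    patterns = {
        'exp': r'EXP(\d+)',   # experiment number
        'lr': r'L(\d+)',      # learning-rate index
        'fold': r'F(\d+)',    # fold index
        'epochs': r'E(\d+)',  # number of epochs trained
    }
    params = {}
    for name, pattern in patterns.items():
        match = re.search(pattern, key)
        if match:
            params[name] = int(match.group(1))
    return params


# parse_key('EXP02_D00_L01_F03_E025')
# -> {'exp': 2, 'lr': 1, 'fold': 3, 'epochs': 25}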
Code Example #2
File: main.py  Project: itsnamgyu/cardiac-research
def run_by_lr(model_key,
              train_folds,
              test_collection,
              lr_index=None,
              fold_index=None):
    print(' MODEL: {} '.format(model_key).center(100, '#'))
    keras.backend.clear_session()
    models = FineModel.get_dict()
    fm = models[model_key]()
    train_gens, val_gens = fm.get_train_val_generators(train_folds)
    test_gen = fm.get_test_generator(test_collection)

    enumerated_learning_rates = list(enumerate(LEARNING_RATES))
    if lr_index is not None:
        try:
            elr = enumerated_learning_rates[lr_index]
            enumerated_learning_rates = [elr]
        except IndexError as e:
            raise IndexError('Invalid lr_index: {}'.format(lr_index))
        print('Learning rate #{} ({}) specified'.format(elr[0], elr[1]))

    for i, lr in enumerated_learning_rates:
        print('Starting training {} lr={}'.format(fm.get_name(),
                                                  lr).center(100, '-'))
        run_by_fold(fm, 0, i, EPOCHS, train_gens, val_gens, test_gen,
                    fold_index)
Code Example #3
def generate_test_result(fm: FineModel,
                         key: str,
                         lr: float,
                         epochs: int,
                         save=True,
                         verbose=1,
                         workers=4,
                         params=None,
                         use_multiprocessing=False,
                         load_weights=True,
                         description=''):
    """
    Generates test results using all test images from db_index=1 (CAP TEST)

    If the weights are already loaded, set load_weights=False
    """
    if params is None:
        params = dict()
    params['lr'] = lr
    params['epochs'] = epochs

    description = str(description)
    description += '\n'
    description += 'Analyzed on: {}'.format(
        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    if load_weights:
        fm.load_weights(key)
    fm.compile_model()
    test = cri.CrCollection.load().filter_by(
        dataset_index=1).tri_label().labeled()

    save_to_instance_key = key if save else None
    result = fm.generate_test_result(test,
                                     verbose=verbose,
                                     save_to_instance_key=save_to_instance_key,
                                     params=params,
                                     workers=workers,
                                     use_multiprocessing=use_multiprocessing,
                                     description=description)
    if verbose:
        print(key.center(80, '-'))
        print(result.describe())

    return result
Code Example #4
def run_all_lrs(model_key,
                train_folds,
                depth_index=0,
                lr_index=None,
                fold_index=None,
                reverse=False):
    print(" MODEL: {} ".format(model_key).center(100, "#"))
    keras.backend.clear_session()
    models = FineModel.get_dict()
    fm: FineModel = models[model_key]()
    train_gens, val_gens = fm.get_train_val_generators(train_folds,
                                                       augment_factor=BALANCE)

    enumerated_learning_rates = list(enumerate(LEARNING_RATES))
    if lr_index is not None:
        try:
            elr = enumerated_learning_rates[lr_index]
            enumerated_learning_rates = [elr]
        except IndexError as e:
            raise IndexError("Invalid lr_index: {}".format(lr_index))
        print("Learning rate #{} ({}) specified".format(elr[0], elr[1]))

    if reverse:
        enumerated_learning_rates.reverse()

    # use a separate loop variable so the lr_index argument is not shadowed
    # (it is checked below to determine the run mode)
    for i, lr in enumerated_learning_rates:
        print("Starting training {} lr={}".format(fm.get_key(),
                                                  lr).center(100, "-"))
        start = time.time()
        run_all_folds(fm,
                      depth_index,
                      i,
                      EPOCHS,
                      train_gens,
                      val_gens,
                      fold_index,
                      generate_plots=False)
        end = time.time()
        if not os.path.exists(RECORD_FILE):
            with open(RECORD_FILE, "w") as f:
                f.write("Learning Rate,Experiment Date,Duration,Mode\n")

        mode = 'Normal'
        if reverse:
            mode = 'Reverse'
        elif lr_index is not None:
            mode = 'Individual'

        with open(RECORD_FILE, "a") as f:
            now = datetime.datetime.now()
            fmt = "{lr},{now},{duration:04.4f},{mode}\n"
            line = fmt.format(lr=lr,
                              now=str(now),
                              duration=end - start,
                              mode=mode)
            f.write(line)
Code Example #5
def main():
    # set LR_LIST based on the specifications of the experiment
    EXP_LIST = [1, 2]
    LR_LIST = [0.0001, 0.00001]
    MODEL_KEY = 'mobileneta25'

    print('Generating results for existing weights (model={}) (exp={}) ...'.
          format(MODEL_KEY, EXP_LIST))

    fm = FineModel.get_dict()[MODEL_KEY]()
    fm.get_weight_keys()

    for key in fm.get_weight_keys():
        params = parse_key(key)
        if params['exp'] in EXP_LIST:
            generate_test_result(fm, key, lr_list=LR_LIST)
Code Example #6
def run_all_lrs(model_key,
                train_folds,
                depth_index,
                lr_index=None,
                fold_index=None):
    print(" MODEL: {} ".format(model_key).center(100, "#"))
    keras.backend.clear_session()
    models = FineModel.get_dict()
    fm: FineModel = models[model_key]()
    train_gens, val_gens = fm.get_train_val_generators(train_folds,
                                                       batch_size=BATCH_SIZE,
                                                       augment_factor=BALANCE)

    enumerated_learning_rates = list(enumerate(LEARNING_RATES))
    if lr_index is not None:
        try:
            elr = enumerated_learning_rates[lr_index]
            enumerated_learning_rates = [elr]
        except IndexError as e:
            raise IndexError("Invalid lr_index: {}".format(lr_index))
        print("Learning rate #{} ({}) specified".format(elr[0], elr[1]))

    for lr_index, lr in enumerated_learning_rates:
        print("Starting training {} lr={}".format(fm.get_key(),
                                                  lr).center(100, "-"))
        run_all_folds(fm,
                      depth_index,
                      lr_index,
                      EPOCHS,
                      train_gens,
                      val_gens,
                      fold_index,
                      generate_plots=False)

    for metric_key in analysis.metric_names:
        analysis.analyze_depth(fm,
                               fm.get_key(),
                               depth_index,
                               metric_key,
                               lr_list=LEARNING_RATES)
Code Example #7
File: result.py  Project: itsnamgyu/cardiac-research
def generate_result_from_weights() -> Result:
    """Interactive method
    """
    def _has_weights_but_no_result(e, m, i):
        weights = paths.get_weights_path(e, m, i)
        result = paths.get_test_result_path(e, m, i)
        return os.path.exists(weights) and not os.path.exists(result)

    key = paths.select_output(_has_weights_but_no_result)
    if not key:
        return None

    print('Generating results for {}'.format(key))
    from core.fine_model import FineModel
    e, m, i = key
    fm = FineModel.load_by_key(m)
    fm.load_weights(exp_key=e, instance_key=i)
    test = cri.CrCollection.load().filter_by(
        dataset_index=1).tri_label().labeled()
    result = fm.generate_test_result(test, save_to_instance_key=i, exp_key=e)
    print('Complete!')

    return result
Code Example #8
def optimize_all_models(train_collection, test_collection):
    for fm_class in FineModel.get_list():
        optimize_full_model(train_collection, test_collection, fm_class())
Code Example #9
def run_all_folds(
    fm: FineModel,
    depth_index,
    lr_index,
    epochs,
    train_gens,
    val_gens,
    fold_index=None,
    generate_plots=True,
):
    """
    Train the model (frozen at some depth) for all five folds OR a specific
    fold. Weights, history and results are saved using instance keys in the
    following format:

    D01_L03_F01:
    1st freeze depth, 3rd learning rate, fold 1
    D01_L03_F01_E025:
    1st freeze depth, 3rd learning rate, fold 1, trained until the 25th epoch

    :param fm:
    FineModel to train, i.e., the base network to train on

    :param depth_index:
    The INDEX of the "freeze depth" for the given FineModel

    :param lr_index:
    The INDEX of the learning rate, i.e., lr = LEARNING_RATES[lr_index]

    :param epochs:
    Number of epochs to train. MUST BE MULTIPLE OF 5.

    :param train_gens:
    List of train ImageDataGenerators for each fold

    :param val_gens:
    List of validation ImageDataGenerators for each fold

    :param fold_index:
    If specified, will only run the specific fold index
    """
    _depth_key = "D{:02}"
    _final_key = _depth_key + "_FINAL"
    _fold_key = _depth_key + "_L{:02}_F{:02}"
    _epoch_key = _fold_key + "_E{:03}"

    lr = LEARNING_RATES[lr_index]

    folds = range(K)
    if fold_index is not None:
        if fold_index < 0 or K <= fold_index:
            raise IndexError("Invalid fold_index: {}".format(fold_index))
        folds = [fold_index]
        print("Fold index {} specified".format(fold_index))

    # Train model K times, one for each fold
    for i in folds:
        fold_key = _fold_key.format(depth_index, lr_index, i)

        # Load model at previous state
        previous_depth_index = depth_index - 1
        if previous_depth_index < 0:
            fm.reload_model()
        else:
            fm.load_weights(_final_key.format(previous_depth_index))
        fm.set_depth(depth_index)
        fm.compile_model(lr=lr)
        model = fm.get_model()

        print("[DEBUG] Batch size: {}".format(BATCH_SIZE))
        print("[DEBUG] Number of images: {}".format(train_gens[i].n))
        print("[DEBUG] Steps: {}".format(len(train_gens[i])))

        # Train T epochs at a time
        start_epoch = 0
        save_interval = T
        # Reset training history
        ch.reset_history(fm.get_key(), fold_key)
        while start_epoch < epochs:
            print("[DEBUG] Starting epoch {}".format(start_epoch))
            target_epoch = start_epoch + save_interval
            if target_epoch > epochs:
                target_epoch = epochs
            result = model.fit_generator(
                train_gens[i],
                validation_data=val_gens[i],
                steps_per_epoch=len(train_gens[i]),
                validation_steps=len(val_gens[i]),
                workers=MULTIPROCESSING_WORKERS,
                use_multiprocessing=USE_MULTIPROCESSING,
                shuffle=True,
                epochs=target_epoch,
                initial_epoch=start_epoch,
            )
            start_epoch = target_epoch

            # Update training history every T epochs
            ch.append_history(result.history, fm.get_key(), fold_key)

            # Save intermediate weights every T epochs
            if SAVE_ALL_WEIGHTS:
                epoch_key = _epoch_key.format(depth_index, lr_index, i,
                                              target_epoch)
                fm.save_weights(epoch_key)

        # Save final weights
        fm.save_weights(fold_key)

    if fold_index is None and generate_plots:
        # Only generate analysis when running all K folds
        print("[debug] generating analysis of training process")
        for metric_key in analysis.metric_names:
            analysis.analyze_lr(fm, fm.get_key(), depth_index, lr_index, lr,
                                metric_key)
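
As a quick sanity check of the key naming scheme described in the docstring, here is what the format strings above produce; the index values are made up for illustration.

# depth_index=0, lr_index=2, fold i=3
"D{:02}_L{:02}_F{:02}".format(0, 2, 3)              # -> 'D00_L02_F03'
# intermediate checkpoint saved after epoch 25
"D{:02}_L{:02}_F{:02}_E{:03}".format(0, 2, 3, 25)   # -> 'D00_L02_F03_E025'
# final weights of the previous freeze depth
"D{:02}_FINAL".format(0)                            # -> 'D00_FINAL'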
Code Example #10
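(This example starts mid-function: the first lines appear to be the tail of a plotting helper, likely analysis.analyze_lr, which averages the per-fold histories and saves the curve as an EPS figure.)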
        if history is not None and not history.empty:
            histories.append(history)
    name = 'Fold {} [{}D{}@{:.1E}].eps'.format(metric.upper(), model_name,
                                               depth_index, lr_value)
    path = os.path.join(DIR, name)
    os.makedirs(DIR, exist_ok=True)
    ax = plot_average_by_fold(histories, title=title, metric=metric)
    ax.get_figure().savefig(path, format='eps', dpi=320, bbox_inches='tight')


def analyze_all(fm, verbose_model_name, depth_index, lr_list=None):
    if lr_list is None:
        warnings.warn('You should specify lr_list for analyze_all')
        lr_list = default_lr_list

    metrics = metric_names.keys()
    for metric in metrics:
        print('Analyzing {} metric={}, depth_index={}'.format(
            verbose_model_name, metric, depth_index))
        analyze_depth(fm,
                      verbose_model_name,
                      lr_list=lr_list,
                      depth_index=depth_index,
                      metric=metric)


if __name__ == '__main__':
    fm = FineModel.get_dict()['mobileneta25']()
    verbose_model_name = 'MobileNet(a=25)'
    analyze_all(fm, verbose_model_name, depth_index=0)
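
The analysis helpers above iterate over `analysis.metric_names`, which Code Example #10 treats as a dict of metric keys. A hypothetical stand-in is sketched below, assuming the keys mirror Keras training-history entries; the actual mapping in the project is not shown here and may differ.

# Hypothetical stand-in for analysis.metric_names (assumed, not from the project)
metric_names = {
    'acc': 'Accuracy',
    'val_acc': 'Validation Accuracy',
    'loss': 'Loss',
    'val_loss': 'Validation Loss',
}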
Code Example #11
def main():
    global metadata, results, predictions, percentages, image_collection, LABELS, show_cam, cam_fm, show_predictions, index

    metadata = cri.load_metadata()
    for p in metadata:
        if 'label' in p:
            print(p['label'])

    parser = argparse.ArgumentParser()
    description = 'Start in prediction mode. Note that in prediction mode, ' \
        'you can press the spacebar to use the predictions to label the images'
    parser.add_argument('-P',
                        '--predictions',
                        help=description,
                        action='store_true')
    description = 'Show class activation maps in prediction mode'
    parser.add_argument('-C', '--cam', help=description, action='store_true')
    description = 'Export all plots'
    parser.add_argument('-E',
                        '--export',
                        help=description,
                        action='store_true')
    args = parser.parse_args()

    show_cam = args.cam
    show_predictions = args.predictions or args.cam

    if show_predictions:
        if args.cam:

            def _output_filter(e, m, i):
                result = paths.get_test_result_path(e, m, i)
                weights = paths.get_weights_path(e, m, i)
                return os.path.exists(result) and os.path.exists(weights)
        else:

            def _output_filter(e, m, i):
                result = paths.get_test_result_path(e, m, i)
                return os.path.exists(result)

    if show_predictions:
        output_key = paths.select_output(_output_filter)
        if not output_key:
            return None
        e, m, i = output_key
        result = Result.load(exp_key=e, model_key=m, instance_key=i)
        result_dict = result.data

        p = result_dict['predictions']
        import json
        print('Predictions: {}'.format(json.dumps(p, indent=4)))

        # hotfix
        if cri.is_tri_label_result(result_dict):
            LABELS = [None, 'oap', 'in', 'obs']

        predictions = {}
        percentages = {}
        for basename, result in p.items():
            cr_code = cri.extract_cr_code(basename)
            predictions[cr_code] = result['prediction']
            percentages[cr_code] = result['percentages']

        image_collection = {}
        for cr_code in predictions:
            cr = cri.parse_cr_code(cr_code, match=False)
            image_collection[tuple(cr[:3])] = []

        # get list of patients then add all of their images (not just from predictions)
        for cr_code in metadata.keys():
            cr = cri.parse_cr_code(cr_code)
            if tuple(cr[:3]) in image_collection:
                image_collection[tuple(cr[:3])].append(cr_code)
    else:
        image_collection = collections.defaultdict(list)
        for cr_code in metadata.keys():
            cr = cri.parse_cr_code(cr_code)
            image_collection[tuple(cr[:3])].append(cr_code)

    if show_cam:
        try:
            print('Loading {} for CAM analysis'.format(output_key))
            fm = FineModel.load_by_key(m)
            fm.load_weights(exp_key=e, instance_key=i)
        except Exception:
            raise RuntimeError('Failed to load corresponding model weights')
        cam_fm = fm

    image_collection = sorted(image_collection.items())

    fig.canvas.mpl_connect('key_press_event', on_key_press)
    fig.canvas.mpl_connect('button_press_event', on_button_press)

    plt.subplots_adjust(top=0.95,
                        bottom=0.05,
                        right=1,
                        left=0,
                        hspace=0.2,
                        wspace=0)

    if args.export:
        export_dir = os.path.abspath('labeler_exports')
        os.makedirs(export_dir, exist_ok=True)
        print('Exporting all images to {}'.format(export_dir))
        for i in tqdm(range(len(image_collection))):
            index = i
            update()
            patient = image_collection[i]
            basename = '[{:03d}] D{:02d}_P{:08d}.png'.format(
                i, patient[0][0], patient[0][1])
            path = os.path.join(export_dir, basename)
            plt.savefig(path,
                        dpi=320,
                        transparent=False,
                        bbox_inches=None,
                        pad_inches=0.1)
    else:
        update()
        plt.show()

    cri.save_metadata(metadata)
Code Example #12
def run(fm: FineModel,
        training_set: cri.CrCollection,
        epochs=EPOCHS,
        depth_index=DEPTH_INDEX,
        batch_size=BATCH_SIZE,
        augment_factor=BALANCE,
        learning_rate=LEARNING_RATE,
        save_interval=T,
        use_multiprocessing=USE_MULTIPROCESSING,
        workers=MULTIPROCESSING_WORKERS):
    """
    Train model and evaluate results. Output files are saved to
    `output/<model_key>/D00_FINAL/`. These include:

    - Intermediate model weights
    - Final model weights
    - Test set result
    - Training history
    """
    _depth_key = 'D{:02d}_FINAL'
    instance_key = _depth_key.format(depth_index)
    _epoch_key = instance_key + "_E{:03}"

    if depth_index >= 1:
        fm.load_weights(_depth_key.format(depth_index - 1))
    fm.set_depth(depth_index)
    fm.compile_model(lr=learning_rate)
    model = fm.get_model()

    gen = fm.get_directory_iterator(training_set,
                                    'train',
                                    augment=True,
                                    augment_factor=augment_factor,
                                    shuffle=True,
                                    batch_size=batch_size,
                                    verbose=1,
                                    title='final training set')

    print("[DEBUG] Batch size: {}".format(batch_size))
    print("[DEBUG] Number of images: {}".format(gen.n))
    print("[DEBUG] Steps: {}".format(len(gen)))

    # Train T epochs at a time
    start_epoch = 0
    # Reset training history
    ch.reset_history(fm.get_key(), instance_key)
    while start_epoch < epochs:
        print("[DEBUG] Starting epoch {}".format(start_epoch))
        target_epoch = start_epoch + save_interval
        if target_epoch > epochs:
            target_epoch = epochs
        result = model.fit_generator(
            gen,
            steps_per_epoch=len(gen),
            shuffle=True,
            epochs=target_epoch,
            use_multiprocessing=use_multiprocessing,
            workers=workers,
            initial_epoch=start_epoch,
        )
        start_epoch = target_epoch

        # Update training history every T epochs
        ch.append_history(result.history, fm.get_key(), instance_key)

        # Save intermediate weights every T epochs
        if SAVE_ALL_WEIGHTS:
            epoch_key = _epoch_key.format(target_epoch)
            fm.save_weights(epoch_key)

    # Save final weights
    fm.save_weights(instance_key)

    # Generate test results
    print("[DEBUG] Generating test results...")
    results.generate_test_result(
        fm,
        instance_key,
        learning_rate,
        epochs,
        load_weights=False,
        workers=MULTIPROCESSING_WORKERS,
        use_multiprocessing=USE_MULTIPROCESSING,
    )
Code Example #13
def main():
    fm = FineModel.get_dict()[MODEL_KEY]()
    training_set = load_training_set()
    run(fm, training_set)
Code Example #14
import cr_interface as cri
from core.fine_model import FineModel
from functions import optimize_full_model

train = cri.CrCollection.load().filter_by(
    dataset_index=0).tri_label().labeled()
test = cri.CrCollection.load().filter_by(dataset_index=1).tri_label().labeled()

models = FineModel.get_dict()
models.keys()
# dict_keys(['xception', 'mobileneta25', 'mobilenetv2a35', 'vgg16', 'resnet50v2',
#            'inception_v3', 'inception_resnet_v2', 'densenet121', 'nasnet_mobile'])

fm = models['mobileneta25']()
optimize_full_model(train, test, fm)