def evaluate(conf, epoch=None):
    """Evaluate a trained model on the test set and return metric results."""
    # TODO: implement evaluation for the cityscapes dataset
    if conf['data']['name'] == 'cityscapes':
        raise ValueError(
            'Evaluation is not supported for the cityscapes dataset yet.')

    # Create the model, loading either the last or a specific epoch's weights
    if epoch is None:
        m = model.Model(conf, load_weights='last')
    else:
        m = model.Model(conf, load_weights='epoch', epoch=epoch)

    # Create the dataset
    d = dataset.Dataset(conf)
    test_x, test_y = d.create_test_dataset()

    # Run the prediction
    pred = m.predict_and_process(test_x)
    pred_segm, scores = zip(*pred)  # unzip

    results = {}
    for evalu in conf['evaluation']:
        ev_name = evalu['name']
        if ev_name == 'ap_segm_interpolated':
            res = mean_ap.ap_segm_interpolated(pred_segm, test_y, scores,
                                               **evalu['arguments'])
        elif ev_name == 'ap_dsb2018':
            res = mean_ap.ap_dsb2018(pred_segm, test_y, **evalu['arguments'])
        else:
            raise ValueError('Unknown evaluation "{}".'.format(ev_name))

        # Store each metric under a '<evaluation name>#<metric name>' key
        for n, r in zip(evalu['names'], res):
            results[ev_name + '#' + n] = r

    return results
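# A minimal usage sketch for evaluate(). The config keys shown are inferred
# from the function body above; the real schema lives in the project's YAML
# config files, so treat the path and values here as illustrative assumptions:
#
#     conf = yaml_load(open('configs/example.yaml'))
#     # conf['evaluation'] is expected to be a list of entries such as
#     # {'name': 'ap_dsb2018', 'arguments': {...}, 'names': [...]}
#     results = evaluate(conf)            # evaluate the last saved weights
#     results = evaluate(conf, epoch=42)  # or a specific epoch's checkpoint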
def _model_features(model_config: dict,
                    data_configs: list,
                    feature_layer_name: str,
                    num_samples: int = 20) -> list:
    """Compute features of the given layer for each data config."""
    # Create a model truncated at the requested feature layer
    full_model = model.Model(model_config, load_weights='last')
    feature_model = utils.utils.model_up_to_layer(full_model.model,
                                                  feature_layer_name)

    # Compute the features for each data config
    return [
        _model_features_for_data(feature_model, d, num_samples)
        for d in data_configs
    ]
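# _model_features_for_data is defined elsewhere in this module. As a rough
# sketch of its expected contract, assuming it samples a few test inputs and
# runs the truncated feature model on them (names and details here are
# assumptions, not the project's actual implementation):
def _model_features_for_data_sketch(feature_model, data_config, num_samples):
    d = dataset.Dataset(data_config)
    test_x, _ = d.create_test_dataset()
    # Assumed: test_x is array-like, so the first num_samples inputs can be
    # sliced off and fed through the truncated model
    return feature_model.predict(test_x[:num_samples])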
def main(arguments):
    """Predict on the test set and write a submission CSV file."""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('configfile',
                        help='Config file',
                        type=argparse.FileType('r'))
    args = parser.parse_args(arguments)

    # Load the config yaml
    conf = yaml_load(args.configfile)

    # Load the model
    m = model.Model(conf, load_weights='last')

    # Create dataset
    d = dataset.Dataset(conf)
    test_x, _ = d.create_test_dataset()

    # Get the image ids from the test image filenames
    image_paths = sorted(
        glob.glob(
            os.path.join(conf['data']['data_dir'], 'test', 'images', '*.tif')))
    ids = [os.path.splitext(os.path.basename(p))[0] for p in image_paths]

    # Run the prediction
    pred, _ = zip(*m.predict_and_process(test_x))  # keep masks, drop scores

    # Create the submission file
    with open(os.path.join('models', conf['name'], 'submission.csv'),
              'w') as submission_file:

        # Add the header
        submission_file.write('ImageId,EncodedPixels\n')

        # Loop over images and write one encoded-pixels row per instance
        for image_id, p in tqdm.tqdm(list(zip(ids, pred))):
            added_one = False
            # Instance labels start at 1; label 0 is background
            for i in range(1, np.max(p) + 1):
                segment = p == i
                if np.any(segment):
                    enc_pixels = get_encoded_pixels(segment)
                    submission_file.write(image_id + ',' + enc_pixels + '\n')
                    added_one = True
            if not added_one:
                print('WARNING: No nuclei found for image ' + image_id)

        submission_file.flush()
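# get_encoded_pixels is imported from elsewhere in the project. For context,
# the 'ImageId,EncodedPixels' format used above is a run-length encoding:
# pixels are numbered top-to-bottom, then left-to-right (column-major order),
# starting at 1, and each run is written as a 'start length' pair. A minimal
# sketch of such an encoder (an illustration, not the project's code):
def _rle_encode_sketch(mask):
    # Flatten in column-major (Fortran) order, as the format requires
    pixels = mask.flatten(order='F')
    # Pad with zeros so runs touching the borders are detected uniformly
    pixels = np.concatenate([[0], pixels, [0]])
    # +1 turns change indices into 1-based pixel positions; the result
    # alternates run starts and (exclusive) run ends
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    # Convert each (start, end) pair into (start, length)
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)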
def train(conf: dict, epochs: int, initial_epoch: int = 0):
    """Train the model described by conf, optionally resuming training."""
    print('\n\nStarting training of model {}\n'.format(conf['name']))
    print('Creating the model...')
    # Start a fresh run from pretrained weights; otherwise resume from the
    # last checkpoint
    if initial_epoch == 0:
        load_weights = 'pretrained'
    else:
        load_weights = 'last'
    m = model.Model(conf, load_weights=load_weights, epoch=initial_epoch)
    m.model.summary(line_length=140)

    m.prepare_for_training()

    # Prepare the data generators
    print('Preparing data...')
    d = dataset.Dataset(conf)
    train_generator, val_generator = d.create_data_generators()

    # Create the callbacks
    training_callbacks = _create_callbacks(conf, m.model_dir)

    # Prepare the model directory
    if initial_epoch == 0:
        m.create_model_dir()

    # Train the model
    print('Training the model...')
    history = m.model.fit_generator(train_generator,
                                    validation_data=val_generator,
                                    epochs=epochs,
                                    initial_epoch=initial_epoch,
                                    callbacks=training_callbacks)

    # Save the history
    print('Saving the history...')
    history_df = pd.DataFrame(history.history)
    history_df.to_csv(os.path.join(m.model_dir, 'history.csv'))

    # Save the final weights
    print('Saving the final weights...')
    m.model.save_weights(os.path.join(m.model_dir, 'weights_final.h5'))
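# _create_callbacks is defined elsewhere in this module. As a hedged sketch
# of what such a helper typically returns with the older Keras API used
# above (fit_generator), assuming the standalone keras package; the filename
# pattern and choice of callbacks are illustrative assumptions:
def _create_callbacks_sketch(conf, model_dir):
    import keras

    return [
        # Save a weights checkpoint after every epoch
        keras.callbacks.ModelCheckpoint(
            os.path.join(model_dir, 'weights_{epoch:04d}.h5'),
            save_weights_only=True),
        # Append per-epoch metrics to a CSV log
        keras.callbacks.CSVLogger(
            os.path.join(model_dir, 'training_log.csv'), append=True),
    ]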
def main(args):
    """Load the model with its last weights and export it."""
    conf = yaml_load(args.configfile)
    m = model.Model(conf, load_weights='last', for_exporting=True)
    m.model.summary()
    m.export_to(args.outfile.name)
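# Example invocation for this export entry point, assuming an argument
# parser (not shown in this section) that takes a config file and an output
# file; the script and file names below are hypothetical:
#
#     python export.py models/my_model/config.yaml my_model_export.h5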