# Imports shared by the examples below. The originals were excerpted without
# their import sections, so this block is a reconstruction: the third-party
# modules are certain from usage, while the project-local helpers
# (build_unet, build_densenet, ModelMGPU, get_number_of_gpus, the jaccard
# losses, the generators, etc.) live elsewhere in the repository and are
# assumed importable from there.
import argparse
import os

import cv2
import numpy as np
import tensorflow as tf
from PIL import Image
from tqdm import tqdm
from keras import backend as K
from keras import layers
from keras.models import Model
from keras.layers import (Activation, BatchNormalization, Conv2D, Cropping2D,
                          Dropout, MaxPooling2D, UpSampling2D, concatenate)
from keras.applications.vgg16 import VGG16
from keras.callbacks import (EarlyStopping, ModelCheckpoint,
                             ReduceLROnPlateau, TensorBoard)
from keras.optimizers import Adagrad, Adam


def run():
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights-path", type=str)
    parser.add_argument("--input-images", type=str, default="")
    parser.add_argument("--output-path", type=str, default="")
    parser.add_argument("--input-size", type=int, default=713)
    parser.add_argument("--model-name", type=str, default="standard")
    parser.add_argument("--save-imgs", type=bool, default=True)
    args = parser.parse_args()

    model_name = args.model_name
    images_path = args.input_images
    input_size = args.input_size
    save_imgs = args.save_imgs

    model_choices = {
        'densenet': build_densenet,
        'unet': build_unet,
        'unet-old': build_unet_old
    }

    model_choice = model_choices[model_name]
    model = model_choice((input_size, input_size), 1)

    gpus = get_number_of_gpus()
    if gpus > 1:
        model = ModelMGPU(model, gpus)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=binary_soft_jaccard_loss,
                  metrics=['acc', binary_jaccard_distance_rounded])

    model.load_weights(args.weights_path)

    probs = []
    for i, filename in enumerate(tqdm(os.listdir(images_path))):
        imgpath = os.path.join(images_path, filename)
        if not os.path.isfile(imgpath):
            continue
        img = np.array(Image.open(imgpath))
        if not img.shape[0] == img.shape[1]:
            continue
        img = np.expand_dims(img, axis=0)
        prob = model.predict(img, verbose=1)
        probs.append(prob[0])  # drop the batch dimension -> (H, W, 1)
    probs = np.round(probs)
    if not save_imgs:
        return
    for i, prob in enumerate(probs):
        prob = (prob[:, :, 0] * 255.).astype(np.uint8)
        pred_name = "pred-{}.tif".format(i)
        pred_save_path = "{}/{}".format(args.output_path, pred_name)
        cv2.imwrite(pred_save_path, prob)
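
# Usage sketch for the prediction entry point above (the script name and all
# paths are illustrative assumptions, not taken from the repository):
#
#   python predict.py \
#       --weights-path weights_train/weights.unet-buildings.h5 \
#       --input-images /data/buildings/test/examples \
#       --output-path /data/preds \
#       --model-name unet \
#       --input-size 512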
def lrtest_densenet(data_dir, logdir, weights_dir, weights_name, input_size,
                    nb_classes, batch_size, config, initial_epoch,
                    pre_trained_weight, augment):
    session_config()
    model = build_densenet(input_size, nb_classes, config=config)

    gpus = get_number_of_gpus()
    print('Found {} gpus'.format(gpus))
    if gpus > 1:
        model = ModelMGPU(model, gpus)

    binary = nb_classes == 1
    if binary:
        loss = binary_soft_jaccard_loss
    else:
        loss = soft_jaccard_loss

    model.compile(optimizer=Adam(lr=1e-3),
                  loss=loss,
                  metrics=['acc', binary_jaccard_distance_rounded])

    train_generator, num_samples = create_generator(os.path.join(
        data_dir, 'train'),
                                                    input_size,
                                                    batch_size,
                                                    nb_classes,
                                                    rescale_masks=False,
                                                    binary=binary,
                                                    augment=augment)
    steps_per_epoch = num_samples // batch_size
    if augment:
        steps_per_epoch = steps_per_epoch * 4

    # Override the computed value: use a fixed step count for the LR range test.
    steps_per_epoch = 1000

    clr = CyclicLR(base_lr=0,
                   max_lr=1e-1,
                   step_size=10 * steps_per_epoch,
                   mode='triangular')
    model.fit_generator(generator=train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=10,
                        verbose=True,
                        workers=8,
                        callbacks=[clr])

    h = clr.history
    lr = h['lr']
    acc = h['acc']
    print(lr)
    print()
    print(acc)
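
# The LR range test above records one (lr, acc) pair per batch. A minimal
# sketch for reading off the CLR bounds (this helper is not part of the
# original code and assumes matplotlib is installed): pick base_lr roughly
# where accuracy first starts to climb and max_lr just before it plateaus
# or diverges.
def plot_lr_range_test(lr, acc):
    import matplotlib.pyplot as plt
    plt.plot(lr, acc)
    plt.xlabel('learning rate')
    plt.ylabel('accuracy')
    plt.title('CLR range test')
    plt.show()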

# Example 3

def _main():
    annotation_path = 'annotations_updated/annotations/4000_train_updated_final.txt'
    log_dir = 'logs/4000_adagrad_e1e2_50x2/'
    classes_path = 'annotations_updated/annotations/4000_train_updated_classes.txt'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path='model_data/yolo_weights-tiny.h5')
    else:
        #with tf.device('/cpu:0'):
        model = create_model(input_shape,
                             anchors,
                             num_classes,
                             freeze_body=2,
                             weights_path='model_data/trained_weights_final.h5'
                             )  # make sure you know what you freeze
        #model = multi_gpu_model(model, gpus=2)

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if True:
        gpus = get_number_of_gpus()
        print('Found {} gpus'.format(gpus))
        #if gpus > 1:
        #model = ModelMGPU(model, gpus)

        model.compile(
            optimizer=Adagrad(lr=1e-1),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        #model.compile(optimizer=Adam(lr=1e-3), losses={'concatenate_1': lambda y_true, y_pred: y_pred, 'concatenate_2': lambda y_true, y_pred: y_pred})

        #model.compile(optimizer=Adam(lr=1e-3), loss=categorical_crossentropy(y_true, y_pred))
        batch_size = 64
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adagrad(lr=1e-2),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 64  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=50,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
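
# _main() relies on data_generator_wrapper from the upstream keras-yolo3
# train.py, which is not shown in this excerpt. For reference, the upstream
# wrapper only guards against empty inputs before delegating to
# data_generator (reproduced here as a sketch; data_generator itself is
# assumed to be in scope from keras-yolo3):
def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors,
                           num_classes):
    n = len(annotation_lines)
    if n == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors,
                          num_classes)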

# Example 4

def run():
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights-path", type=str)
    parser.add_argument("--test-image", type=str, default="")
    parser.add_argument("--sample-images", type=str, default="")
    parser.add_argument("--output-path", type=str, default="")
    parser.add_argument("--input-size", type=int, default=713)
    parser.add_argument("--model-name", type=str, default="")
    parser.add_argument("--name", type=str, default="")

    args = parser.parse_args()
    model_name = args.model_name
    image_path = args.test_image
    input_size = args.input_size
    output_path = args.output_path
    output_name = args.name

    model_choices = {
        'unet': build_unet,
        'densenet': build_densenet
    }

    model_choice = model_choices[model_name]
    model = model_choice((input_size, input_size), 1)
    gpus = get_number_of_gpus()
    print('Found {} gpus'.format(gpus))
    if gpus > 1:
        model = ModelMGPU(model, gpus)
    model.compile(
        optimizer=Adam(lr=1e-4),
        loss=binary_soft_jaccard_loss,
        metrics=['acc', binary_jaccard_distance_rounded])

    model.load_weights(args.weights_path)
    generator = get_generator()
    # load the image
    image = Image.open(image_path)
    size = image.size[0]
    factor = input_size / 512
    image = image.resize((int(size * factor), int(size * factor)))
    image = np.array(image)

    pred = predict_img_with_smooth_windowing(
        image,
        window_size=input_size,
        subdivisions=2,  # Minimal amount of overlap for windowing. Must be an even number.
        nb_classes=1,
        pred_func=(
            lambda img_batch_subdiv: model.predict(
                image_to_neural_input(img_batch_subdiv, generator), verbose=True
            )
        )
    )
    pred = np.round(pred)
    pred = (pred[:, :, 0] * 255.).astype(np.uint8)

    pred = Image.fromarray(pred, 'L')
    pred = pred.resize((size, size))
    pred = np.array(pred)
    out_path = os.path.join(output_path, output_name)
    print(cv2.imwrite(out_path, pred))

    cheap = cheap_tiling_prediction(image, window_size=input_size, nb_classes=1, pred_func=(
        lambda img_batch_subdiv: model.predict(
            image_to_neural_input(np.array(img_batch_subdiv), generator), verbose=True
        )
    ))
    cheap = np.round(cheap)
    cheap = (cheap[:, :, 0] * 255.).astype(np.uint8)

    cheap = Image.fromarray(cheap, 'L')
    cheap = cheap.resize((size, size))
    cheap = np.array(cheap)
    out_path = os.path.join(output_path, '{}-cheap.tif'.format(output_name))
    print(cv2.imwrite(out_path, cheap))
def run():
    np.random.seed(2)
    tf.set_random_seed(2)
    data_dir = '/data/{}/'
    weights_dir = 'weights_train'

    for i, run in enumerate(runs):
        base_lr = run['base_lr']
        max_lr = run['max_lr']
        input_size = (run['input_size'], run['input_size'])
        weights_name = run['name']
        logs_dir = 'logs/{}'.format(run['name'])
        batch_size = run['batch_size']

        print("Running for config {}".format(run))

        for j, dataset in enumerate(run['datasets']):

            binary = dataset != 'multiclass'
            nb_classes = 1 if binary else 5

            print('Running training for {}'.format(dataset))

            train_generator, num_samples = create_generator(
                os.path.join(data_dir.format(dataset), 'train'),
                input_size,
                batch_size,
                nb_classes=nb_classes,
                rescale_masks=run['rescale_masks'],
                binary=binary,
                augment=False,
                mean=np.array([[[0.36654497, 0.35386439, 0.30782658]]]),
                std=np.array([[[0.19212837, 0.19031791, 0.18903286]]]))

            val_generator, val_samples = create_generator(
                os.path.join(data_dir.format(dataset), 'val'),
                input_size,
                batch_size,
                nb_classes=nb_classes,
                rescale_masks=run['rescale_masks'],
                binary=binary,
                augment=False,
                mean=np.array([[[0.36654497, 0.35386439, 0.30782658]]]),
                std=np.array([[[0.19212837, 0.19031791, 0.18903286]]]))

            if run['network'] == 'unet':
                model = build_unet(input_size, nb_classes=nb_classes)
            else:
                model = build_densenet(input_size, nb_classes, 67)

            model.summary()
            gpus = get_number_of_gpus()
            print('Found {} gpus'.format(gpus))
            if gpus > 1:
                model = ModelMGPU(model, gpus)

            if binary:
                loss = binary_soft_jaccard_loss
            else:
                loss = soft_jaccard_loss
            # No explicit lr: the cyclic-LR callback configured below drives it.
            model.compile(optimizer=Adam(),
                          loss=loss,
                          metrics=['acc', binary_jaccard_distance_rounded])

            if run['pre_weights_name']:
                pre_weights_name = run['pre_weights_name'].format(dataset)
                weight = 'weights_train/weights.{}.h5'.format(pre_weights_name)
                print('Loading weights: {}'.format(weight))
                model.load_weights(weight)

            steps_per_epoch = num_samples // batch_size
            cyclic = 'triangular2'

            cb = [
                ValidationCallback(
                    val_samples // batch_size, val_generator, binary=binary)
            ] + callbacks(logs_dir.format(dataset),
                          filename=weights_name.format(dataset),
                          weightsdir=weights_dir,
                          monitor_val='mIOU',
                          base_lr=base_lr,
                          max_lr=max_lr,
                          steps_per_epoch=steps_per_epoch,
                          cyclic=cyclic)
            model.fit_generator(generator=train_generator,
                                steps_per_epoch=steps_per_epoch,
                                epochs=100,
                                verbose=True,
                                workers=8,
                                callbacks=cb)
            K.clear_session()
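
# Shape of the `runs` configuration consumed by run() above, inferred from
# the keys it accesses; every value below is an illustrative assumption:
runs = [
    {
        'name': 'unet-{}',
        'network': 'unet',
        'input_size': 512,
        'batch_size': 8,
        'base_lr': 2e-5,
        'max_lr': 5.5e-4,
        'rescale_masks': True,
        'pre_weights_name': None,  # or e.g. 'unet-{}-pretrain'
        'datasets': ['buildings', 'roads', 'multiclass'],
    },
]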

# Example 6

def build_unet16(input_shape, nb_classes, lr=1e-4):
    """
    Build a Unet16 with a VGG16 encoder pretrained on the imagenet dataset
    """
    base_model = VGG16(weights='imagenet', include_top=False)
    inputs = layers.Input((input_shape[0], input_shape[1], 3))

    # Block 1
    x = base_model.get_layer('block1_conv1')(inputs)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block1_conv2')(x)
    x1 = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x1)

    # Block 2
    x = base_model.get_layer('block2_conv1')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block2_conv2')(x)
    x2 = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x2)

    # Block 3
    x = base_model.get_layer('block3_conv1')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block3_conv2')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block3_conv3')(x)
    x3 = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x3)

    # Block 4
    x = base_model.get_layer('block4_conv1')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block4_conv2')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block4_conv3')(x)
    x4 = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x4)

    # Block 5
    x = base_model.get_layer('block5_conv1')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block5_conv2')(x)
    x = BatchNormalization(axis=-1)(x)
    x = base_model.get_layer('block5_conv3')(x)
    x5 = BatchNormalization(axis=-1)(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(x5)

    # Bottleneck
    x = down_block(x, 1024, bottleneck=True)

    x = up_block(x, x5, 512)

    x = up_block(x, x4, 512)

    x = up_block(x, x3, 256)

    x = up_block(x, x2, 128)

    x = up_block(x, x1, 64)

    x = layers.Conv2D(nb_classes, (1, 1))(x)

    if nb_classes == 1:
        activation = 'sigmoid'
    else:
        activation = 'softmax'

    act = Activation(activation)(x)
    model = Model(inputs=inputs, outputs=act)

    gpus = get_number_of_gpus()
    if gpus > 1:
        model = ModelMGPU(model, gpus)
    return model
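
# Usage sketch for build_unet16 (loss and metric names as used elsewhere in
# this file; the 512x512 input size is an illustrative assumption):
# model = build_unet16((512, 512), nb_classes=1)
# model.compile(optimizer=Adam(lr=1e-4),
#               loss=binary_soft_jaccard_loss,
#               metrics=['acc', binary_jaccard_distance_rounded])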

# Example 7

def build_unet_old(input_shape, nb_classes, lr=1e-4):
    concat_axis = 3
    inputs = layers.Input((input_shape[0], input_shape[1], 3))
    conv1 = Conv2D(32, (3, 3), padding="same", name="conv1_1", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(inputs)
    conv1 = BatchNormalization(axis=-1)(conv1)
    conv1 = Conv2D(32, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv1)
    conv1 = BatchNormalization(axis=-1)(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(conv1)

    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(pool1)
    conv2 = BatchNormalization(axis=-1)(conv2)
    conv2 = Conv2D(64, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv2)
    conv2 = BatchNormalization(axis=-1)(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(conv2)

    conv3 = Conv2D(128, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(pool2)
    conv3 = BatchNormalization(axis=-1)(conv3)
    conv3 = Conv2D(128, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv3)
    conv3 = BatchNormalization(axis=-1)(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(conv3)

    conv4 = Conv2D(256, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(pool3)
    conv4 = BatchNormalization(axis=-1)(conv4)
    conv4 = Conv2D(256, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv4)
    conv4 = BatchNormalization(axis=-1)(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(conv4)

    conv5 = Conv2D(512, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(pool4)
    conv5 = BatchNormalization(axis=-1)(conv5)
    conv5 = Conv2D(512, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv5)
    conv5 = BatchNormalization(axis=-1)(conv5)
    pool5 = MaxPooling2D(pool_size=(2, 2), data_format="channels_last")(conv5)

    convbott = Conv2D(1024, (3, 3), padding="same", activation="relu", data_format="channels_last",
                      kernel_initializer='he_uniform')(pool5)
    convbott = BatchNormalization(axis=-1)(convbott)
    convbott = Conv2D(1024, (3, 3), padding="same", activation="relu", data_format="channels_last",
                      kernel_initializer='he_uniform')(convbott)
    convbott = BatchNormalization(axis=-1)(convbott)
    up_convbott = UpSampling2D(size=(2, 2), data_format="channels_last")(convbott)

    ch, cw = get_crop_shape(conv5, up_convbott)
    crop_conv5 = Cropping2D(cropping=(ch, cw), data_format="channels_last")(conv5)
    upbott = concatenate([up_convbott, crop_conv5], axis=concat_axis)
    convbott1 = Dropout(0.2)(upbott)
    convbott1 = Conv2D(512, (3, 3), padding="same", activation="relu", data_format="channels_last",
                       kernel_initializer='he_uniform')(convbott1)
    convbott1 = Conv2D(512, (3, 3), padding="same", activation="relu", data_format="channels_last",
                       kernel_initializer='he_uniform')(convbott1)
    up_convbott1 = UpSampling2D(size=(2, 2), data_format="channels_last")(convbott1)

    ch, cw = get_crop_shape(conv4, up_convbott1)
    crop_conv4 = Cropping2D(cropping=(ch, cw), data_format="channels_last")(conv4)
    up6 = concatenate([up_convbott1, crop_conv4], axis=concat_axis)
    conv6 = Dropout(0.2)(up6)
    conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv6)
    conv6 = Conv2D(256, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv6)
    up_conv6 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv6)

    ch, cw = get_crop_shape(conv3, up_conv6)
    crop_conv3 = Cropping2D(cropping=(ch, cw), data_format="channels_last")(conv3)
    up7 = concatenate([up_conv6, crop_conv3], axis=concat_axis)
    conv7 = Dropout(0.2)(up7)
    conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv7)
    conv7 = Conv2D(128, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv7)
    up_conv7 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv7)

    ch, cw = get_crop_shape(conv2, up_conv7)
    crop_conv2 = Cropping2D(cropping=(ch, cw), data_format="channels_last")(conv2)
    up8 = concatenate([up_conv7, crop_conv2], axis=concat_axis)
    conv8 = Dropout(0.2)(up8)
    conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv8)
    conv8 = Conv2D(64, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv8)
    up_conv8 = UpSampling2D(size=(2, 2), data_format="channels_last")(conv8)

    ch, cw = get_crop_shape(conv1, up_conv8)
    crop_conv1 = Cropping2D(cropping=(ch, cw), data_format="channels_last")(conv1)
    up9 = concatenate([up_conv8, crop_conv1], axis=concat_axis)
    conv9 = Dropout(0.2)(up9)
    conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv9)
    conv9 = Conv2D(32, (3, 3), padding="same", activation="relu", data_format="channels_last",
                   kernel_initializer='he_uniform')(conv9)

    ch, cw = get_crop_shape(inputs, conv9)
    conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0], cw[1])))(conv9)
    conv10 = layers.Conv2D(nb_classes, (1, 1))(conv9)

    activation = 'softmax'
    if nb_classes == 1:
        activation = 'sigmoid'

    act = Activation(activation)(conv10)
    model = Model(inputs=inputs, outputs=act)
    gpus = get_number_of_gpus()
    if gpus > 1:
        model = ModelMGPU(model, gpus)
    return model
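
# build_unet_old crops each skip connection down to the upsampled tensor's
# spatial size before concatenating. The helper is not shown in this excerpt;
# below is a minimal sketch of the standard implementation, assumed from how
# it is called above: compute how much to crop `target` to match `refer`,
# returning ((top, bottom), (left, right)) for Cropping2D/ZeroPadding2D.
def get_crop_shape(target, refer):
    # width difference along axis 2
    cw = int(target.get_shape()[2]) - int(refer.get_shape()[2])
    assert cw >= 0
    cw1, cw2 = cw // 2, cw - cw // 2  # split, putting the extra pixel on one side
    # height difference along axis 1
    ch = int(target.get_shape()[1]) - int(refer.get_shape()[1])
    assert ch >= 0
    ch1, ch2 = ch // 2, ch - ch // 2
    return (ch1, ch2), (cw1, cw2)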

# Example 8

def pred():
    image_path = '/data/{}/test'
    for dataset in datasets:
        im_path = image_path.format(dataset['name'])

        for model in models:
            input_size = model['input_size']
            generator, _ = create_generator(
                im_path,
                (input_size, input_size),
                dataset['size'],
                1,
                rescale_masks=True,
                with_file_names=True,
                binary=True,
                mean=np.array([[[0.36654497, 0.35386439, 0.30782658]]]),
                std=np.array([[[0.19212837, 0.19031791, 0.18903286]]])
            )
            images, masks, file_names = next(generator)
            m = model['method']((input_size, input_size), 1)
            gpus = get_number_of_gpus()
            if gpus > 1:
                m = ModelMGPU(m, gpus)

            m.compile(
                optimizer=Adam(lr=1e-4),
                loss=binary_soft_jaccard_loss,
                metrics=['acc', binary_jaccard_distance_rounded])

            weights_path = 'weights_train/weights.{}-{}-final-finetune.h5'.format(model['name'], dataset['name'])
            m.load_weights(weights_path)

            probs = m.predict(images, verbose=1)
            probs = np.round(probs)
            iou = batch_general_jaccard(masks, probs)
            f1 = f1_score(masks, probs)
            print('Mean IOU for {} on {}: {}'.format(model['name'], dataset['name'], np.mean(iou)))
            print('F1 score for {} on {}: {}'.format(model['name'], dataset['name'], f1))

            # wow such hack
            from keras_utils.prediction import get_real_image, get_geo_frame, geo_reference_raster

            for i, (prob, mask) in enumerate(zip(probs, masks)):
                if i > 200:
                    break
                raster = get_real_image(im_path, file_names[i], use_gdal=True)
                R = raster.GetRasterBand(1).ReadAsArray()
                G = raster.GetRasterBand(2).ReadAsArray()
                B = raster.GetRasterBand(3).ReadAsArray()
                img = np.zeros((512, 512, 3))
                img[:, :, 0] = B
                img[:, :, 1] = G
                img[:, :, 2] = R
                prob = np.round(prob)
                prob = (prob[:, :, 0] * 255.).astype(np.uint8)
                mask = (mask[:, :, 0] * 255.).astype(np.uint8)
                pred_name = "pred-{}.tif".format(i)

                out_path = '/data/finalpreds/{}/{}'.format(model['name'], dataset['name'])
                pred_save_path = "{}/{}".format(out_path, pred_name)

                cv2.imwrite(pred_save_path, prob)
                cv2.imwrite("{}/image-{}.tif".format(out_path, i), img)
                cv2.imwrite("{}/mask-{}.tif".format(out_path, i), mask)

                try:
                    # Get coordinates for corresponding image
                    ulx, scalex, skewx, uly, skewy, scaley = get_geo_frame(raster)

                    # Geo reference newly created raster
                    geo_reference_raster(
                        pred_save_path,
                        [ulx, scalex, skewx, uly, skewy, scaley]
                    )
                except ValueError as e:
                    print("Could not geo-reference image at path {}: {}".format(pred_save_path, e))
            K.clear_session()

# Example 9

def run():
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights-path", type=str)
    parser.add_argument("--epoch-number", type=int, default=5)
    parser.add_argument("--test-images", type=str, default="")
    parser.add_argument("--output-path", type=str, default="")
    parser.add_argument("--input-size", type=int, default=713)
    parser.add_argument("--batch-size", type=int, default=713)
    parser.add_argument("--model-name", type=str, default="standard")
    parser.add_argument("--save-imgs", type=bool, default=True)
    args = parser.parse_args()

    model_name = args.model_name
    images_path = args.test_images
    input_size = args.input_size
    batch_size = args.batch_size
    save_imgs = args.save_imgs
    all_probs = {}

    model_choices = {
        'densenet': build_densenet,
        'unet': build_unet
    }

    model_choice = model_choices[model_name]

    generator, _ = create_generator(
        images_path,
        (input_size, input_size),
        batch_size,
        5,
        rescale_masks=True,
        with_file_names=True,
        binary=False,
        mean=np.array([[[0.36654497, 0.35386439, 0.30782658]]]),
        std=np.array([[[0.19212837, 0.19031791, 0.18903286]]])
    )
    images, masks, file_names = next(generator)

    for dataset in datasets:
        model = model_choice((input_size, input_size), 1)

        gpus = get_number_of_gpus()
        print('Found {} gpus'.format(gpus))
        if gpus > 1:
            model = ModelMGPU(model, gpus)

        model.compile(
            optimizer=Adam(lr=1e-4),
            loss=binary_soft_jaccard_loss,
            metrics=['acc', binary_jaccard_distance_rounded])

        model.load_weights(args.weights_path.format(dataset))

        probs = model.predict(images, verbose=1)
        probs = np.round(probs)
        # iou = batch_general_jaccard(masks, probs, binary=True)
        # f1 = K.eval(f1_score(K.variable(masks), K.variable(probs)))
        # print('Mean IOU for {}: {}'.format(dataset, np.mean(iou)))
        # print('F1 score for {}: {}'.format(dataset, f1))

        all_probs[dataset] = probs

    final_prob = None

    for i, key in enumerate(all_probs):
        prob = all_probs[key]
        prob[prob == 1] = scores[key]

        if i == 0:  # First iteration
            final_prob = prob
            continue

        final_prob = np.maximum.reduce([final_prob, prob])

    masks = np.argmax(masks, axis=-1)  # collapse the class channel: (B, H, W)
    iou = batch_general_jaccard(masks, final_prob)
    f1 = K.eval(f1_score(K.variable(masks), K.variable(final_prob)))
    print('Mean IOU for {}: {}'.format('multiclass', np.mean(iou)))
    print('F1 score for {}: {}'.format('multiclass', f1))

    if not save_imgs:
        return

    # wow such hack
    from keras_utils.prediction import get_real_image, get_geo_frame, geo_reference_raster

    for i, prob in enumerate(final_prob):
        mask = masks[i]  # masks were already argmax-ed above
        raster = get_real_image(os.path.join(images_path, 'multiclass', 'test'), file_names[i], use_gdal=True)
        R = raster.GetRasterBand(1).ReadAsArray()
        G = raster.GetRasterBand(2).ReadAsArray()
        B = raster.GetRasterBand(3).ReadAsArray()
        img = np.zeros((512, 512, 3))
        img[:, :, 0] = B
        img[:, :, 1] = G
        img[:, :, 2] = R

        seg_pred = np.zeros((input_size, input_size, 3))
        seg_mask = np.zeros((input_size, input_size, 3))

        for c in range(5):
            seg_pred[:, :, 0] += ((prob[:, :, 0] == c) * (class_color_map[c][2])).astype('uint8')
            seg_pred[:, :, 1] += ((prob[:, :, 0] == c) * (class_color_map[c][1])).astype('uint8')
            seg_pred[:, :, 2] += ((prob[:, :, 0] == c) * (class_color_map[c][0])).astype('uint8')

            seg_mask[:, :, 0] += ((mask[:, :] == c) * (class_color_map[c][2])).astype('uint8')
            seg_mask[:, :, 1] += ((mask[:, :] == c) * (class_color_map[c][1])).astype('uint8')
            seg_mask[:, :, 2] += ((mask[:, :] == c) * (class_color_map[c][0])).astype('uint8')

        pred_name = "pred-{}.tif".format(i)
        pred_save_path = "{}/{}".format(args.output_path, pred_name)

        cv2.imwrite(pred_save_path, seg_pred)
        cv2.imwrite("{}/mask-{}.tif".format(args.output_path, i), seg_mask)
        cv2.imwrite("{}/image-{}.tif".format(args.output_path, i), img)

        try:
            # Get coordinates for corresponding image
            ulx, scalex, skewx, uly, skewy, scaley = get_geo_frame(raster)

            # Geo reference newly created raster
            geo_reference_raster(
                pred_save_path,
                [ulx, scalex, skewx, uly, skewy, scaley]
            )
        except ValueError as e:
            print("Could not geo-reference image at path {}: {}".format(pred_save_path, e))

# Example 10

def run():
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights-path", type=str)
    parser.add_argument("--test-image", type=str, default="")
    parser.add_argument("--sample-images", type=str, default="")
    parser.add_argument("--output-path", type=str, default="")
    parser.add_argument("--input-size", type=int, default=713)
    parser.add_argument("--model-name", type=str, default="")
    parser.add_argument("--name", type=str, default="")
    parser.add_argument("--mask", type=str, default="")

    args = parser.parse_args()
    model_name = args.model_name
    image_path = args.test_image
    input_size = args.input_size
    output_path = args.output_path
    file_name = args.name
    mask_path = args.mask

    nb_classes = 5
    model_choices = {'unet': build_unet, 'densenet': build_densenet}

    model_choice = model_choices[model_name]
    model = model_choice((input_size, input_size), nb_classes)
    gpus = get_number_of_gpus()
    print('Found {} gpus'.format(gpus))
    if gpus > 1:
        model = ModelMGPU(model, gpus)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=binary_soft_jaccard_loss,
                  metrics=['acc', binary_jaccard_distance_rounded])

    model.load_weights(args.weights_path)
    generator = get_generator()
    # load the image
    image = Image.open(image_path)
    size = image.size[0]
    factor = input_size / 512
    image = image.resize((int(size * factor), int(size * factor)))
    image = np.array(image)

    pred = predict_img_with_smooth_windowing(
        image,
        window_size=input_size,
        subdivisions=2,  # Minimal amount of overlap for windowing. Must be an even number.
        nb_classes=nb_classes,
        pred_func=(lambda img_batch_subdiv: model.predict(
            image_to_neural_input(img_batch_subdiv, generator), verbose=True)))
    pred = np.argmax(pred, axis=2)
    pred_color = np.zeros((pred.shape[0], pred.shape[1], 3))

    if mask_path:
        mask = Image.open(mask_path).convert('L')
        mask = mask.resize((pred.shape[1], pred.shape[0]))  # PIL expects (width, height)
        mask = np.array(mask)
        mask = np.reshape(mask, (1, mask.shape[0], mask.shape[1]))
        p = np.reshape(pred, (1, pred.shape[0], pred.shape[1]))
        print('mIOU:', batch_general_jaccard(mask, p))
        print('F1:', batch_classwise_f1_score(mask, p))

    class_color_map = {
        0: [237, 237, 237],  # Empty
        1: [254, 241, 179],  # Roads
        2: [116, 173, 209],  # Water
        3: [193, 235, 176],  # Grass
        4: [170, 170, 170]  # Buildings
    }

    for c in range(nb_classes):
        pred_color[:, :, 0] += ((pred[:, :] == c) *
                                (class_color_map[c][2])).astype('uint8')
        pred_color[:, :, 1] += ((pred[:, :] == c) *
                                (class_color_map[c][1])).astype('uint8')
        pred_color[:, :, 2] += ((pred[:, :] == c) *
                                (class_color_map[c][0])).astype('uint8')

    out_path = os.path.join(output_path, file_name)
    print(cv2.imwrite(out_path, pred_color))

    cheap = cheap_tiling_prediction(
        image,
        window_size=input_size,
        nb_classes=nb_classes,
        pred_func=(lambda img_batch_subdiv: model.predict(
            image_to_neural_input(np.array(img_batch_subdiv), generator),
            verbose=True)))

    cheap = np.argmax(cheap, axis=2)
    cheap_color = np.zeros((cheap.shape[0], cheap.shape[1], 3))

    if mask_path:
        mask = Image.open(mask_path).convert('L')
        mask = mask.resize((cheap.shape[1], cheap.shape[0]))  # PIL expects (width, height)
        mask = np.array(mask)
        mask = np.reshape(mask, (1, mask.shape[0], mask.shape[1]))
        p = np.reshape(cheap, (1, cheap.shape[0], cheap.shape[1]))
        print('mIOU:', batch_general_jaccard(mask, p))
        print('F1:', batch_classwise_f1_score(mask, p))

    for c in range(nb_classes):
        cheap_color[:, :, 0] += ((cheap[:, :] == c) *
                                 (class_color_map[c][2])).astype('uint8')
        cheap_color[:, :, 1] += ((cheap[:, :] == c) *
                                 (class_color_map[c][1])).astype('uint8')
        cheap_color[:, :, 2] += ((cheap[:, :] == c) *
                                 (class_color_map[c][0])).astype('uint8')

    out_path = os.path.join(output_path, 'cheap-{}'.format(file_name))
    print(cv2.imwrite(out_path, cheap_color))
def train_densenet(data_dir, logdir, weights_dir, weights_name, input_size,
                   nb_classes, batch_size, config, initial_epoch,
                   pre_trained_weight, augment):
    session_config()
    model = build_densenet(input_size, nb_classes, config=config)
    model.summary()
    gpus = get_number_of_gpus()
    print('Found {} gpus'.format(gpus))
    if gpus > 1:
        model = ModelMGPU(model, gpus)

    is_binary = nb_classes == 1
    if is_binary:
        loss = binary_soft_jaccard_loss
    else:
        loss = soft_jaccard_loss

    model.compile(optimizer=Adam(lr=1e-3),
                  loss=loss,
                  metrics=['acc', binary_jaccard_distance_rounded])

    train_generator, num_samples = create_generator(
        os.path.join(data_dir, 'train'),
        input_size,
        batch_size,
        nb_classes,
        rescale_masks=True,
        binary=is_binary,
        augment=augment,  # was hard-coded to False, contradicting the steps_per_epoch scaling below
        mean=np.array([[[0.42800662, 0.40565866, 0.3564895]]]),
        std=np.array([[[0.19446792, 0.1984272, 0.19501258]]]))

    val_generator, val_samples = create_generator(
        os.path.join(data_dir, 'val'),
        input_size,
        batch_size,
        nb_classes,
        rescale_masks=True,
        binary=is_binary,
        augment=False,
        mean=np.array([[[0.42800662, 0.40565866, 0.3564895]]]),
        std=np.array([[[0.19446792, 0.1984272, 0.19501258]]]))

    if pre_trained_weight:
        print('Loading weights: {}'.format(pre_trained_weight))
        model.load_weights(pre_trained_weight)

    steps_per_epoch = num_samples // batch_size

    if augment:
        steps_per_epoch = steps_per_epoch * 4

    base_lr = 0.00002
    max_lr = 0.00055

    cb = [
        ValidationCallback(val_samples // batch_size, val_generator, is_binary)
    ] + callbacks(logdir,
                  filename=weights_name,
                  weightsdir=weights_dir,
                  monitor_val='mIOU',
                  base_lr=base_lr,
                  max_lr=max_lr,
                  steps_per_epoch=steps_per_epoch,
                  cyclic='triangular2')

    model.fit_generator(generator=train_generator,
                        steps_per_epoch=steps_per_epoch,
                        epochs=100,
                        verbose=True,
                        workers=8,
                        callbacks=cb,
                        initial_epoch=initial_epoch)
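
# Example invocation of train_densenet (all values below are illustrative
# assumptions; config=67 mirrors the densenet config used elsewhere in this
# file):
# train_densenet(data_dir='/data/multiclass', logdir='logs/densenet',
#                weights_dir='weights_train',
#                weights_name='densenet-multiclass',
#                input_size=(320, 320), nb_classes=5, batch_size=8,
#                config=67, initial_epoch=0, pre_trained_weight=None,
#                augment=False)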
def run(args):
    n_classes = args.classes
    model_name = args.model_name
    images_path = args.test_images
    input_size = args.input_size
    batch_size = args.batch_size
    no_save_imgs = args.no_save_imgs

    model_choices = {
        'unet': build_unet,
        'densenet': build_densenet,
        'pspnet': build_pspnet
    }

    model_choice = model_choices[model_name]

    model = model_choice((input_size, input_size), n_classes)

    gpus = get_number_of_gpus()
    print('Found {} gpus'.format(gpus))
    if gpus > 1:
        model = ModelMGPU(model, gpus)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=binary_soft_jaccard_loss,
                  metrics=['acc', binary_jaccard_distance_rounded])

    model.load_weights(args.weights_path)

    binary = n_classes == 1
    generator, samples = create_generator(
        images_path, (input_size, input_size),
        batch_size,
        nb_classes=n_classes,
        rescale_masks=False,
        with_file_names=True,
        binary=binary,
        augment=False,
        mean=np.array([[[0.36654497, 0.35386439, 0.30782658]]]),
        std=np.array([[[0.19212837, 0.19031791, 0.18903286]]]))

    steps = samples // batch_size
    f1s = []
    ious = []
    for i in range(steps):
        images, masks, file_names = next(generator)
        probs = model.predict(images, verbose=1)

        probs = np.argmax(probs, axis=3)
        masks = np.argmax(masks, axis=3)
        iou = batch_general_jaccard(masks, probs)
        f1 = batch_classwise_f1_score(masks, probs)
        ious.append(iou)
        f1s.append(f1)

        if no_save_imgs:
            continue

        # Batch-local index j plus a global index, so that predictions from
        # later batches do not overwrite the files written by earlier ones
        # (the original reused i, shadowing the outer loop variable).
        for j, prob in enumerate(probs):
            result = prob
            mask_result = masks[j]
            idx = i * batch_size + j

            # img = get_real_image(images_path, file_names[j])
            raster = get_real_image(images_path, file_names[j], use_gdal=True)
            R = raster.GetRasterBand(1).ReadAsArray()
            G = raster.GetRasterBand(2).ReadAsArray()
            B = raster.GetRasterBand(3).ReadAsArray()
            img = np.zeros((512, 512, 3))
            img[:, :, 0] = B
            img[:, :, 1] = G
            img[:, :, 2] = R

            seg_img = np.zeros((input_size, input_size, 3))
            seg_mask = np.zeros((input_size, input_size, 3))

            for c in range(n_classes):
                seg_img[:, :, 0] += ((result[:, :] == c) *
                                     (class_color_map[c][2])).astype('uint8')
                seg_img[:, :, 1] += ((result[:, :] == c) *
                                     (class_color_map[c][1])).astype('uint8')
                seg_img[:, :, 2] += ((result[:, :] == c) *
                                     (class_color_map[c][0])).astype('uint8')

                seg_mask[:, :, 0] += ((mask_result[:, :] == c) *
                                      (class_color_map[c][2])).astype('uint8')
                seg_mask[:, :, 1] += ((mask_result[:, :] == c) *
                                      (class_color_map[c][1])).astype('uint8')
                seg_mask[:, :, 2] += ((mask_result[:, :] == c) *
                                      (class_color_map[c][0])).astype('uint8')

            pred_name = "pred-{}.tif".format(i)
            pred_save_path = "{}/{}".format(args.output_path, pred_name)

            cv2.imwrite(pred_save_path, seg_img)
            cv2.imwrite("{}/mask-{}.tif".format(args.output_path, i), seg_mask)
            cv2.imwrite("{}/image-{}.tif".format(args.output_path, i), img)

            try:
                # Get coordinates for corresponding image
                ulx, scalex, skewx, uly, skewy, scaley = get_geo_frame(raster)

                # Geo reference newly created raster
                geo_reference_raster(pred_save_path,
                                     [ulx, scalex, skewx, uly, skewy, scaley])
            except ValueError as e:
                print("Was not able to reference image at path: {}".format(
                    pred_save_path))

    print('Mean IOU: {}'.format(np.mean(ious)))
    print('F1 score: {}'.format(np.mean(f1s)))
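
# Sketch of the argument parsing this last entry point expects; the option
# names are inferred from the attributes accessed in run(args) above, and
# the defaults are assumptions:
def _parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--weights-path", type=str)
    parser.add_argument("--test-images", type=str, default="")
    parser.add_argument("--output-path", type=str, default="")
    parser.add_argument("--input-size", type=int, default=512)
    parser.add_argument("--batch-size", type=int, default=8)
    parser.add_argument("--classes", type=int, default=5)
    parser.add_argument("--model-name", type=str, default="unet")
    parser.add_argument("--no-save-imgs", action="store_true")
    return parser.parse_args()


if __name__ == '__main__':
    run(_parse_args())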