Example No. 1
    def U_net(self):
        # Build U-Net model
        transfer_model = Unet(backbone_name=backbone, input_shape=(None, None, 3), classes=1,
                              activation='relu', encoder_weights='imagenet', encoder_freeze=True)
        transfer_model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=[self.mean_iou])
        transfer_model.load_weights(self.modelPath)
        transfer_model.summary()
        return transfer_model
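Example No. 1 pairs a 'relu' activation with binary_crossentropy, which is unusual for a one-channel mask; below is a minimal sketch of the more common sigmoid head (the resnet34 backbone is an assumption, not taken from the example):

from segmentation_models import Unet

# Assumed backbone; any segmentation_models backbone name works here.
model = Unet(backbone_name='resnet34',
             input_shape=(None, None, 3),
             classes=1,
             activation='sigmoid',  # sigmoid matches binary_crossentropy
             encoder_weights='imagenet',
             encoder_freeze=True)
model.compile(optimizer='Adam', loss='binary_crossentropy')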
Example No. 2
def get_model(net_name,
              num_class,
              weight_path,
              input_shape=[],
              weighted_loss=False):
    number_class = num_class
    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=num_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=num_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=num_class)
    if weighted_loss:
        loss = my_class_weighted_loss
    else:
        loss = params.SEMANTIC_LOSS
    lr = params.LEARN_RATE
    optimizer = Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if (len(weight_path) > 2):
        model.load_weights(weight_path, True)
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss, metrics=[categorical_accuracy])

    model.summary()
    return model, input_shape
Example No. 3
def net_predict():
    model = Unet(backbone_name=backbone,
                 encoder_weights=None,
                 input_shape=(256, 256, 1))
    model.load_weights(checkpoint)
    preds_train = model.predict(X_train, verbose=1)
    preds_val = model.predict(X_valid, verbose=1)

    preds_train_t = (preds_train > 0.5).astype(np.uint8)
    preds_val_t = (preds_val > 0.5).astype(np.uint8)
    plot_sample(X_valid, y_valid, preds_val, preds_val_t, ix=None)
Example No. 4
def _load_pretrained_model(checkpoint_path):
    model = Unet(backbone_name='resnet34',
                 input_shape=(IMG_HEIGHT, IMG_WIDTH, 3),
                 classes=CLASS_NUM,
                 activation='softmax',
                 encoder_weights=None,
                 decoder_block_type='transpose')

    checkpoint_path = os.path.join(_get_resources_path(), checkpoint_path)
    model.load_weights(checkpoint_path)

    return model
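A possible inference step for the model returned above; the checkpoint name and image batch are assumptions:

import numpy as np

# 'unet_resnet34.h5' is a hypothetical checkpoint under _get_resources_path().
model = _load_pretrained_model('unet_resnet34.h5')
probs = model.predict(image_batch)        # (N, IMG_HEIGHT, IMG_WIDTH, CLASS_NUM) softmax scores
labels = np.argmax(probs, axis=-1)        # per-pixel class ids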
Example No. 5
def run_navigation_trials(args):
    # Load model if specified
    if args.model_path:
        K.set_learning_phase(1)
        model = Unet('resnet18',
                     input_shape=(256, 320, 4),
                     activation='sigmoid',
                     classes=1,
                     encoder_weights=None)
        if args.multi_gpu_model:
            model = multi_gpu_model(model)

        model.load_weights(args.model_path)
    else:
        model = None

    # Create save directories for results and vids
    if args.save_dir:
        os.makedirs(args.save_dir, exist_ok=True)
        if args.save_vid:
            vid_dir = join(args.save_dir, 'vids')
            os.makedirs(vid_dir, exist_ok=True)

    # Read saved experimental setup from disk
    with open(args.experiment_path, 'r') as fp:
        experiment_dict = json.load(fp)

    # Execute experiments for all maps
    result_dict = defaultdict(list)
    for wad_id, experiments in sorted(experiment_dict.items()):
        print('INFO: Testing on map {}'.format(wad_id))
        game = util.setup_game(args.wad_dir,
                               wad_id,
                               visible=args.viz_output,
                               localization_noise=args.localization_noise)

        # Execute individual experiments within map
        for exp_idx, experiment in enumerate(experiments):
            for i in range(args.iterations):
                util.setup_trial(game, experiment['start'])
                vid_path = join(vid_dir, '{}_{}_{}.mp4'.format(
                    wad_id, exp_idx, i)) if args.save_vid else None  # NOQA
                result, full_path = navigate(game, args.max_steps,
                                             experiment['goal'], model,
                                             vid_path)
                result_dict[wad_id].append(result)
                print('INFO: Trial complete {}'.format(result))

        # Save results from experiment
        if args.save_dir:
            result_path = join(args.save_dir, 'results.json')
            with open(result_path, 'w') as fp:
                json.dump(result_dict, fp, sort_keys=True, indent=4)
Example No. 6
def load_model(args):
    # Load trained seed segmentation model to use with active sampling
    K.set_learning_phase(0)
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)

    # Convert to multi-GPU model if necessary
    if args.multi_gpu_model:
        model = multi_gpu_model(model)

    model.load_weights(args.model_path)
    return model
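Here K.set_learning_phase(0) pins BatchNorm and Dropout to inference behavior before the weights are loaded; a sketch of the matching predict call, with the input batch assumed:

model = load_model(args)                      # args.model_path and args.multi_gpu_model assumed set
probs = model.predict(frames, batch_size=8)   # frames: assumed (N, 256, 320, 4) float batch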
Example No. 7
    def workflow(self):
        # define model
        model = Unet(backbone_name='resnet50', encoder_weights='imagenet')
        #model.compile('Adam', 'binary_crossentropy', ['binary_accuracy'])
        model.load_weights(
            os.path.join(self.cfgs["SAVE_DIR"],
                         "epoch" + str(self.epoch) + ".h5"))
        print("RETORE SUCCESSFULLY!")
        test_images, test_ulabels, test_elabels, test_rlabels, filelist = self.dl.get_test_data()
        # TEST:
        print('start')
        start = time.perf_counter()
        results = model.predict(test_images, batch_size=5, verbose=1)
        stop = time.perf_counter()
        print('Program run time:', str(stop - start), 'seconds')
        pmlabels = results[0]

        print(len(results))

        mkdirs(self.cfgs["SAVE_DIR"],
               ['images', 'labels_e', 'labels_r', 'preds', 'preds_threshold'])
        for ii in range(results[0].shape[0]):
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'images/{}'.format(filelist[ii][0])),
                test_images[ii, :] * 255)

            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'labels_e/{}'.format(filelist[ii][1])),
                test_elabels[ii, :] * 255)
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'labels_r/{}'.format(filelist[ii][1])),
                test_rlabels[ii, :] * 255)

            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'preds/{}'.format(filelist[ii][1])),
                results[-1][ii, :])
            pred_threshold = threshold(results[-1][ii, :])
            cv2.imwrite(
                os.path.join(self.cfgs["SAVE_DIR"],
                             'preds_threshold/{}'.format(filelist[ii][1])),
                pred_threshold * 255)
Example No. 8
def convert_model(args):
    if args.out_path is None:
        args.out_path = args.model_path.split('.h5')[0] + '_single.h5'

    # Load multi-GPU model weights
    K.set_learning_phase(1)
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)
    model = multi_gpu_model(model)
    model.load_weights(args.model_path)

    # Set weights in single-GPU model and save
    single_model = model.layers[-2]
    single_model.save(args.out_path)
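The layers[-2] indexing relies on multi_gpu_model keeping the original single-GPU model as one of the wrapper's layers (second-to-last in this setup); a sketch of loading the converted file back without the wrapper (file name assumed):

single_model = Unet('resnet18',
                    input_shape=(256, 320, 4),
                    activation='sigmoid',
                    classes=1,
                    encoder_weights=None)
single_model.load_weights('seg_model_single.h5')  # hypothetical output of convert_model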
Example No. 9
def main():
    with open('/home/rbuddhad/NIH-XRAY/test_sml.txt') as f1:
        lines1 = f1.readlines()

    test_datagen = ImageDataGenerator()
    test_batches = test_datagen.flow_from_directory(TEST_DATASET_PATH,
                                                    target_size=(1024, 1024),
                                                    shuffle=True,
                                                    class_mode=None,
                                                    batch_size=BATCH_SIZE)

    test_crops_orig = crop_generator(test_batches, CROP_LENGTH, lines1)  # 224

    model = Unet(backbone_name='resnet18', encoder_weights=None)
    model.load_weights('best_model1.h5')
    model.compile(optimizer='Adam',
                  loss='mean_squared_error',
                  metrics=['mae', 'mean_squared_error'])
    model.summary()

    # callbacks = [EarlyStopping(monitor='val_loss', patience=10),
    #              ModelCheckpoint(filepath='best_model1.h5', monitor='val_loss', save_best_only=True),
    #              TensorBoard(log_dir='./logs', histogram_freq=0, write_graph=True, write_images=True)]
    # model.fit_generator(generator=test_crops_orig,
    #                     steps_per_epoch=100,
    #                     validation_data=valid_crops_orig,
    #                     callbacks=callbacks,
    #                     validation_steps=200,
    #                     epochs=1000,
    #                     shuffle=True)
    # model.predict(generator=test_crops_orig,
    #               steps=2,
    #               verbose=1)

    # model.save('unet2.h5')
    predict = model.predict_generator(generator=test_crops_orig,
                                      steps=1,
                                      verbose=1)
    # predict = model.predict()
    print(predict.shape, 'predict_batch_size')
    for i in range(50):
        plt.imshow(predict[i, :, :, 0], cmap='gray', vmin=0, vmax=1)
        plt.show()
Example No. 10
    def get_model(self, net_name, input_shape, number_class, class_weight,
                  weight_path):
        from segmentation_models import pspnet  #PSPNet
        if net_name == 'psp':
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=number_class,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        if net_name == 'psp_50':
            input_shape = (473, 473, 9)
            model_name = 'pspnet50_ade20k'
            #output_mode='sigmoid'
            model = pspnet.PSPNet50(nb_classes=number_class,
                                    input_shape=input_shape,
                                    weights=model_name)
            model = model.model
        elif net_name == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=number_class)
        ##[1.0,10.0,10.0,20.,30.]
        weights = np.array(class_weight)
        #        loss = weighted_categorical_crossentropy(weights)
        loss = my_weighted_loss
        #        loss=params.SEMANTIC_LOSS
        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        if (len(weight_path) > 2):
            model.load_weights(weight_path)
        model.compile(optimizer, loss=loss)

        model.summary()
        return model
Example No. 11
    def resnet(self):
        """
        Load model and weights

        :return: neural network model
        """
        #define model
        # model = Unet(backbone_name='resnet34', input_shape=(None, None, 3), encoder_weights=None, classes=1, encoder_freeze=False)
        # model.load_weights(self.data_path + '/weights/true_weights.hdf5')
        # model.compile('Adam', 'dice_loss', metrics=['iou_score'])

        model = Unet(
            backbone_name='resnet18',
            input_shape=(None, None, 3),
            decoder_filters=(64, 32, 32, 16, 4),
            encoder_weights='imagenet',
            classes=1,
            encoder_freeze=True,
        )
        model.load_weights(self.data_path + '/weights/new.hdf5')
        model.compile('Adam', 'bce_jaccard_loss', metrics=['iou_score'])
        return model
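A possible inference step with the returned model, assuming `images` is a float batch and using an assumed 0.5 cut-off on the sigmoid output:

import numpy as np

probs = model.predict(images, verbose=1)  # values in [0, 1], shape (N, H, W, 1)
masks = (probs > 0.5).astype(np.uint8)    # binary masks; the 0.5 threshold is an assumption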
Example No. 12
    def train_track3(self, net='unet', check_folder=params.CHECKPOINT_DIR):
        os.environ["CUDA_VISIBLE_DEVICES"] = params.GPUS
        if not os.path.exists(check_folder):
            os.mkdir(check_folder)
        CHECKPOINT_DIR = check_folder
        CHECKPOINT_PATH = os.path.join(check_folder,
                                       'weights.{epoch:02d}.hdf5')

        data_folder = 'C:/TrainData/Track3/Train/patch_473/'
        img_train, dsm_train, label_train, img_val, dsm_val, label_val = load_all_data_files(
            data_folder)

        num_training_sample = len(img_train)
        batch_size = 1
        n_batch_per_epoch = num_training_sample // batch_size

        num_val_sample = len(img_val)
        n_batch_per_epoch_val = num_val_sample // batch_size

        nb_epoch = 200
        NUM_CATEGORIES = 5
        train_generator = input_generator_RGBH(img_train, dsm_train,
                                               label_train, batch_size)
        val_generator = input_generator_RGBH(img_val, dsm_val, label_val,
                                             batch_size)

        if net == 'psp':
            from segmentation_models import pspnet  #PSPNet
            model_name = 'pspnet101_cityscapes'
            input_shape = (473, 473, 9)
            model = pspnet.PSPNet101(nb_classes=NUM_CATEGORIES,
                                     input_shape=input_shape,
                                     weights=model_name)
            model = model.model
        elif net == 'unet':
            input_shape = [256, 256, 9]
            from keras.layers import Input
            input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                        input_shape[2]))
            model = Unet(input_shape=input_shape,
                         input_tensor=input_tensor,
                         backbone_name=params.BACKBONE,
                         encoder_weights=None,
                         classes=2)

            model.load_weights(
                os.path.join('./checkpoint_track3-1/', 'weights.80.hdf5'))

        from keras.optimizers import Adam, SGD
        from keras.callbacks import ModelCheckpoint, CSVLogger
        #loss=params.SEMANTIC_LOSS
        #   loss=my_weighted_loss
        weights = np.array([1.0, 10.0, 10.0, 20., 30.])
        loss = weighted_categorical_crossentropy(weights)

        optimizer = Adam(lr=1E-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
        model.compile(optimizer, loss=loss)
        model.summary()
        csv_logger = CSVLogger(os.path.join(CHECKPOINT_DIR, 'train.csv'))

        checkpoint = ModelCheckpoint(filepath=CHECKPOINT_PATH,
                                     monitor='loss',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='auto',
                                     period=params.MODEL_SAVE_PERIOD)
        callbacks = [csv_logger, checkpoint]

        model.fit_generator(train_generator,
                            steps_per_epoch=n_batch_per_epoch,
                            validation_data=val_generator,
                            validation_steps=n_batch_per_epoch_val,
                            epochs=nb_epoch,
                            callbacks=callbacks)
Example No. 13
log_fname = 'training_log.csv'
csv_logger = CSVLogger(filename=log_fname,
                       separator=',',
                       append=False)

callbacks_list = [checkpoint, earlystopper, csv_logger, reduce_lr]

history = model.fit_generator(train_gen, steps_per_epoch=train_steps, epochs=5,
                              validation_data=val_gen, validation_steps=val_steps,
                              verbose=1,
                              callbacks=callbacks_list)

#initialize the test generator
test_gen = test_generator(batch_size=1)

model.load_weights('model.h5')
predictions = model.predict_generator(test_gen, 
                                      steps=len(df_test),  
                                      verbose=1)


preds_test_thresh = (predictions >= 0.7).astype(np.uint8)
preds_test_thresh.shape

print(preds_test_thresh.min())
print(preds_test_thresh.max())

mask = preds_test_thresh[3,:,:,0]
plt.imshow(mask, cmap='Reds', alpha=0.3)
Example No. 14
if config.model == "Unet":
    model = Unet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
elif config.model == "Nestnet":
    model = Nestnet(backbone_name=config.backbone,
                    encoder_weights=config.weights,
                    decoder_block_type=config.decoder_block_type,
                    classes=config.nb_class,
                    activation=config.activation)
elif config.model == "Xnet":
    model = Xnet(backbone_name=config.backbone,
                 encoder_weights=config.weights,
                 decoder_block_type=config.decoder_block_type,
                 classes=config.nb_class,
                 activation=config.activation)
else:
    raise ValueError("Unsupported model: {}".format(config.model))
model.load_weights(os.path.join(model_path, config.exp_name + ".h5"))
model.compile(optimizer="Adam",
              loss=dice_coef_loss,
              metrics=["binary_crossentropy", mean_iou, dice_coef])
p_test = model.predict(x_test,
                       batch_size=config.batch_size,
                       verbose=config.verbose)
eva = model.evaluate(x_test,
                     y_test,
                     batch_size=config.batch_size,
                     verbose=config.verbose)
IoU = compute_iou(y_test, p_test)
print("\nSetup: {}".format(config.exp_name))
print(">> Testing dataset mIoU  = {:.2f}%".format(np.mean(IoU)))
print(">> Testing dataset mDice = {:.2f}%".format(eva[3] * 100.0))
Example No. 15
        X_test.append(temp)
    gc.collect()
    X_test = np.array(X_test)
    return X_test

TEST_PATH = '../s2_data/data/test/'
X_test = Test2Npy(TEST_PATH)



backbone_name = 'efficientnetb3'
weight = '20201122-170215_Unet_efficientnetb3_model.h5'
model = Unet(backbone_name, classes=1, activation='sigmoid')

model_path ='../user_data/model/'  + weight
model.load_weights(model_path)

# model summary
print(model.summary(line_length=120))



TEST_MASK_PATH = '../prediction_result/images/'
predicted_test = model.predict(X_test)




# Save test mask
print('Get img name and path')
each_test_name = []
Example No. 16
def test(gcp_bucket, dataset_id, model_id, batch_size):

    start_dt = datetime.now()

    assert "gs://" in gcp_bucket

    # clean up the tmp directory
    try:
        shutil.rmtree(tmp_directory.as_posix())
    except FileNotFoundError:
        pass
    tmp_directory.mkdir()

    local_dataset_dir = Path(tmp_directory, 'datasets')
    local_model_dir = Path(tmp_directory, 'models')

    copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'datasets', dataset_id), local_dataset_dir)

    copy_folder_locally_if_missing(os.path.join(gcp_bucket, 'models', model_id), local_model_dir)

    test_id = "{}_{}_{}".format(model_id, dataset_id, datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'))
    test_dir = Path(tmp_directory, 'tests', test_id)
    test_dir.mkdir(parents=True)

    with Path(local_dataset_dir, dataset_id, 'config.yaml').open('r') as f:
        dataset_config = yaml.safe_load(f)['dataset_config']

    with Path(local_model_dir, model_id, 'config.yaml').open('r') as f:
        train_config = yaml.safe_load(f)['train_config']

    target_size = dataset_config['target_size']

    test_generator = ImagesAndMasksGenerator(
        Path(local_dataset_dir, dataset_id, 'test').as_posix(),
        rescale=1./255,
        target_size=target_size,
        batch_size=batch_size,
        shuffle=True,
        seed=None)

    model = Unet('vgg16', input_shape=(None, None, 1), classes=len(test_generator.mask_filenames), encoder_weights=None)

    crossentropy = binary_crossentropy if len(test_generator.mask_filenames) == 1 else categorical_crossentropy
    loss_fn = crossentropy

    model.compile(optimizer=Adam(),
                  loss=loss_fn,
                  metrics=[accuracy, iou_score, jaccard_loss, dice_loss, crossentropy])

    model.load_weights(Path(local_model_dir, model_id, "model.hdf5").as_posix())

    results = model.evaluate_generator(test_generator)

    metric_names = [loss_fn.__name__, 'accuracy', 'iou_score', 'jaccard_loss', 'dice_loss', 'crossentropy']
    with Path(test_dir, 'metrics.csv').open('w') as f:
        f.write(','.join(metric_names) + '\n')
        f.write(','.join(map(str, results)))

    metadata = {
        'gcp_bucket': gcp_bucket,
        'dataset_id': dataset_id,
        'model_id': model_id,
        'batch_size': batch_size,
        'created_datetime': datetime.now(pytz.UTC).strftime('%Y%m%dT%H%M%SZ'),
        'git_hash': git.Repo(search_parent_directories=True).head.object.hexsha,
        'elapsed_minutes': round((datetime.now() - start_dt).total_seconds() / 60, 1),
        'dataset_config': dataset_config,
        'train_config': train_config
    }

    with Path(test_dir, metadata_file_name).open('w') as f:
        yaml.safe_dump(metadata, f)

    os.system("gsutil -m cp -r '{}' '{}'".format(Path(tmp_directory, 'tests').as_posix(), gcp_bucket))

    shutil.rmtree(tmp_directory.as_posix())
Example No. 17
def train_model(args):
    # Avoid Tensorflow eats up GPU memory
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    # Set file paths
    args.log_path = os.path.join(args.save_dir, 'logs')
    args.model_path = os.path.join(args.save_dir, 'seg_model.h5')
    args.data_path_x = os.path.join(args.data_dir, 'x.npy')
    args.data_path_y = os.path.join(args.data_dir, 'y.npy')
    args.data_path_weights = None
    if args.use_weights:
        args.data_path_weights = os.path.join(args.data_dir, 'weights.npy')

    # Set up training data generator
    training_generator = DataGenerator(args.data_path_x,
                                       args.data_path_y,
                                       args.data_path_weights,
                                       batch_size=args.batch_size,
                                       in_shape=(256, 320, 4),
                                       out_shape=(256, 320, 1))

    # Set up model
    model = Unet('resnet18',
                 input_shape=(256, 320, 4),
                 activation='sigmoid',
                 classes=1,
                 encoder_weights=None)
    adam = keras.optimizers.Adam(lr=1e-4,
                                 beta_1=0.9,
                                 beta_2=0.999,
                                 epsilon=1e-08,
                                 decay=0.0)
    callbacks_list = [
        TensorBoardImage(args.data_path_x, args.log_path),
        keras.callbacks.TensorBoard(log_dir=args.log_path,
                                    update_freq=1000,
                                    write_graph=True),
        keras.callbacks.ModelCheckpoint(args.model_path,
                                        save_weights_only=True)
    ]

    # Run model on multiple GPUs if available
    try:
        model = multi_gpu_model(model)
        print("Training model on multiple GPUs")
    except ValueError:
        print("Training model on single GPU")

    # Load weights if specified
    if args.saved_model_path is not None:
        model.load_weights(args.saved_model_path)

    # Set loss
    loss = mask_loss
    acc = mask_acc
    if args.use_weights:
        loss = mask_loss_weighted
        acc = mask_acc_weighted

    # Compile model and start training
    model.compile(loss=[loss], optimizer=adam, metrics=[acc])
    model.fit_generator(generator=training_generator,
                        epochs=args.epochs,
                        use_multiprocessing=True,
                        workers=8,
                        callbacks=callbacks_list)

    return model
Example No. 18
    num_train=5000,  #train_IMG_COUNT
    factor=0.5,
    epsilon=0.0001,
    cooldown=2,
    min_lr=train_config.min_lr,
    verbose=1)

from segmentation_models import Unet

model = Unet(train_config.BACKBONE,
             encoder_weights=train_config.encoder_weights,
             encoder_freeze=False)
step_count = min(train_config.MAX_TRAIN_STEPS, 5000 // train_config.BATCH_SIZE)

if train_config.pretrain:
    model.load_weights(train_config.pretrain_model_path)

from keras.utils import multi_gpu_model

muti_model = multi_gpu_model(model, gpus=4)
muti_model.compile(optimizer=keras.optimizers.Adam(0.001),
                   loss=train_config.loss,
                   metrics=train_config.metrics)

if train_config.if_train:
    loss_history = [
        muti_model.fit_generator(aug_gen,
                                 steps_per_epoch=step_count,
                                 epochs=train_config.NB_EPOCHS,
                                 validation_data=(valid_x, valid_y),
                                 callbacks=callbacks_list,
Example No. 19
crop_scale = 10
batch_size = len(os.listdir(source_data_folder + "/Broken")) + len(
    os.listdir(source_data_folder + "/Healthy"))
save_folder = "/home/isidor/Documents/keras/data/auto_generated_masks"
auto_cropped_folder = "/home/isidor/Documents/keras/data/auto_cropped"
# This is the average pixel value for the picture
N = 5

#Clear the folders from data
os.system("find " + save_folder + " -name '*.jpg' -delete")
os.system("find " + auto_cropped_folder + " -name '*.jpg' -delete")
# define model
model = Unet(BACKBONE, encoder_weights='imagenet')
opt = Adam(lr=0.001)
# model.compile(opt, loss=bce_jaccard_loss, metrics=[iou_score])
model.load_weights(net_file)

data_gen = ImageDataGenerator(rescale=1. / 255)
image_generator = data_gen.flow_from_directory(
    source_data_folder,
    batch_size=batch_size // batches,
    target_size=(img_width, img_height),
    class_mode='binary',
    seed=1)
crop_generator = data_gen.flow_from_directory(
    source_data_folder,
    batch_size=batch_size // batches,
    target_size=(img_width * crop_scale, img_height * crop_scale),
    class_mode='binary',
    seed=1)
Example No. 20
def generate_compiled_segmentation_model(
        model_name,
        model_parameters,
        num_classes,
        loss,
        optimizer,
        weights_to_load=None,
        optimizing_threshold_class_metric=None,
        optimizing_class_id=None,
        optimizing_input_threshold=None,
        optimized_class_thresholds=None):

    # These are the only model, loss, and optimizer currently supported
    assert model_name == 'Unet'
    assert loss == 'cross_entropy'
    assert optimizer == 'adam'

    loss_fn = BinaryCrossentropyL()

    all_metrics = []  # one-hot versions are generally preferred for a given metric
    # make first metric a copy of loss, to continually verify `val_loss` is correct
    if isinstance(loss_fn, BinaryCrossentropyL):
        all_metrics.append(BinaryCrossentropyM(name='binary_ce_metric'))
    else:
        all_metrics.append(CategoricalCrossentropyM(name='categ_ce_metric'))

    # standard thresholded version (default threshold is 0.5) also kept below, in case it's desired in certain scenario
    for class_num in range(num_classes + 1):
        if class_num == 0 and optimizing_threshold_class_metric is None:  # all class metrics
            # note, `loss_fn` for all classes placed before `all_metrics` in lineup of command window metrics and plots
            if not isinstance(loss_fn, BinaryCrossentropyL):
                all_metrics.extend([CategoricalCELoss()])
                all_metrics[1].name = str('categ_cross_entropy_sm')
            all_metrics.extend([
                AccuracyTfKeras(),
                # OneHotAccuracyTfKeras(),  # `global_threshold` built-in
                ClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                # OneHotClassBinaryAccuracyTfKeras(thresholds=global_threshold),
                ClassBinaryAccuracySM(threshold=global_threshold),
                # OneHotClassBinaryAccuracySM(threshold=global_threshold),
                BinaryAccuracy(threshold=global_threshold),
                CategoricalAccuracy(),
                FalseNegatives(name='false_neg', thresholds=global_threshold),
                # OneHotFalseNegatives(name='false_neg_1H', thresholds=global_threshold),
                TrueNegatives(name='true_neg', thresholds=global_threshold),
                # OneHotTrueNegatives(name='true_neg_1H', thresholds=global_threshold),
                FalsePositives(name='false_pos', thresholds=global_threshold),
                # OneHotFalsePositives(name='false_pos_1H', thresholds=global_threshold),
                TruePositives(name='true_pos', thresholds=global_threshold),
                # OneHotTruePositives(name='true_pos_1H', thresholds=global_threshold),
                Recall(name='recall', thresholds=global_threshold),
                # OneHotRecall(name='recall_1H', thresholds=global_threshold),
                Precision(name='precision', thresholds=global_threshold),
                # OneHotPrecision(name='precision_1H', thresholds=global_threshold),
                FBetaScore(name='f1_score',
                           beta=1,
                           thresholds=global_threshold),
                # OneHotFBetaScore(name='f1_score_1H', beta=1, thresholds=global_threshold),
                IoUScore(name='iou_score', thresholds=global_threshold),
                # OneHotIoUScore(name='iou_score_1H', thresholds=global_threshold)
            ])
        elif class_num == 0 and optimizing_threshold_class_metric is not None:  # all class metrics
            continue
        else:  # per class metrics
            if optimizing_threshold_class_metric is not None:
                class_threshold = optimizing_input_threshold
                class_num = optimizing_class_id + 1
            elif optimized_class_thresholds is None:
                class_threshold = global_threshold
            else:
                class_threshold = optimized_class_thresholds[str(
                    'class' + str(class_num - 1))]

            all_metrics.append(CategoricalCELoss(class_indexes=class_num - 1))
            all_metrics[-1].name = str('class' + str(class_num - 1) +
                                       '_binary_cross_entropy')
            all_metrics.append(
                ClassBinaryAccuracySM(name=str('class' + str(class_num - 1) +
                                               '_binary_accuracy_sm'),
                                      class_indexes=class_num - 1,
                                      threshold=class_threshold))
            all_metrics.append(
                ClassBinaryAccuracyTfKeras(
                    name=str('class' + str(class_num - 1) +
                             '_binary_accuracy_tfkeras'),
                    class_id=class_num - 1,
                    thresholds=class_threshold))
            all_metrics.append(
                IoUScore(name=str('class' + str(class_num - 1) + '_iou_score'),
                         class_id=class_num - 1,
                         thresholds=class_threshold))
            all_metrics.append(
                FBetaScore(name=str('class' + str(class_num - 1) +
                                    '_f1_score'),
                           class_id=class_num - 1,
                           beta=1,
                           thresholds=class_threshold))
            all_metrics.append(
                Precision(name=str('class' + str(class_num - 1) +
                                   '_precision'),
                          class_id=class_num - 1,
                          thresholds=class_threshold))
            all_metrics.append(
                Recall(name=str('class' + str(class_num - 1) + '_recall'),
                       class_id=class_num - 1,
                       thresholds=class_threshold))

            if optimizing_threshold_class_metric is not None:
                break

        if num_classes == 1:
            break

    # strategy = tf.distribute.MirroredStrategy()
    # with strategy.scope():
    model = Unet(input_shape=(None, None, 1),
                 classes=num_classes,
                 **model_parameters)
    model.compile(optimizer=Adam(), loss=loss_fn, metrics=all_metrics)

    if weights_to_load:
        model.load_weights(weights_to_load)

    if optimizing_threshold_class_metric is None:
        print(model.summary())

    return model
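A hypothetical call; model_parameters is unpacked straight into Unet(...), so the backbone and activation below are assumptions:

model = generate_compiled_segmentation_model(
    model_name='Unet',
    model_parameters={'backbone_name': 'vgg16', 'activation': 'sigmoid'},  # assumed values
    num_classes=1,
    loss='cross_entropy',
    optimizer='adam',
    weights_to_load=None)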
Example No. 21
app = Flask(__name__)

app.config['CACHE_TYPE'] = 'null'
cache.init_app(app)

# Load model pretrained
print('Loading the model')

model = Unet('resnet34')

graph = tf.get_default_graph()

model_filename = 'weights_model_IOU_DICE.h5'

model.load_weights(model_filename)

model.summary()


@app.route("/")
def index():
    return render_template('index.html')


@app.route("/", methods=['POST'])
def imagem_post():
    lat = request.form['lat']
    lon = request.form['lon']

    coor, contador = ML(lat, lon)
Example No. 22
def main():
    # with open('/home/kunal/Desktop/Feature-Learning-for-Disease-Classification/temp_patch.txt') as f:
    # 	lines = f.readlines()

    with open('/home/rbuddhad/NIH-XRAY/train_sml.txt') as f1:
        lines1 = f1.readlines()

    with open('/home/rbuddhad/NIH-XRAY/validation_sml.txt') as f2:
        lines2 = f2.readlines()

    # print((lines1))

    train_datagen = ImageDataGenerator()
    train_batches = train_datagen.flow_from_directory(TRAIN_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=True,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    valid_datagen = ImageDataGenerator()
    valid_batches = valid_datagen.flow_from_directory(VALID_DATASET_PATH,
                                                      target_size=(1024, 1024),
                                                      shuffle=False,
                                                      class_mode=None,
                                                      batch_size=BATCH_SIZE)

    train_crops_orig = crop_generator(train_batches, CROP_LENGTH,
                                      lines1)  # 224
    valid_crops_orig = crop_generator(valid_batches, CROP_LENGTH, lines2)

    # batch_x_random_crop, batch_y_targeted_crop = next(train_crops)
    # valid_x, valid_y = next(valid_crops)
    # print(train_crops_orig.shape)
    # train_crops_orig=np.reshape(train_crops_orig,(train_crops_orig.shape[0]*train_crops_orig.shape[1],224,224,3))
    # print(train_crops_orig.shape)
    # in_painted_x= out_painting_mask(train_crops_orig)
    # valid_in_x=in_painting_mask(valid_x,valid_y)

    # train_crops_1_ch=rgb2gray(train_crops_orig)
    # train_crops_1_ch=np.reshape(train_crops_1_ch,(train_crops_1_ch.shape[0],224,224,1))

    # valid_x=rgb2gray(valid_x)
    # valid_x=np.reshape(valid_x,(valid_x.shape[0],224,224,1))

    # model = Unet(backbone_name='resnet18', encoder_weights='imagenet', decoder_block_type='transpose') # build U-Net
    model = Unet(backbone_name='resnet18', encoder_weights=None)  # build U-Net
    model.load_weights('best_model.h5')
    model.compile(optimizer='Adam', loss='mean_squared_error')
    model.summary()
    # print('inpaited',in_painted_x.shape)
    # print('1 channel y',train_crops_1_ch.shape)
    # print(in_painted_x.shape)
    # print(train_crops_1_ch.shape)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=70),
        ModelCheckpoint(filepath='best_model70_withgray_finetuned.h5',
                        monitor='val_loss',
                        save_best_only=True),
        TensorBoard(log_dir='./logs',
                    histogram_freq=0,
                    write_graph=True,
                    write_images=True)
    ]
    model.fit_generator(generator=train_crops_orig,
                        steps_per_epoch=100,
                        validation_data=valid_crops_orig,
                        callbacks=callbacks,
                        validation_steps=200,
                        epochs=300)
    model.save('outpaint70_withgray_finetuned.h5')
Example No. 23
def get_model(net_name, num_class, weight_path, input_shape=[]):
    from segmentation_models import pspnet  #PSPNet
    number_class = num_class

    if net_name == 'psp':
        model_name = 'pspnet101_cityscapes'
        input_shape = (473, 473, 3)
        model = pspnet.PSPNet101(nb_classes=number_class,
                                 input_shape=input_shape,
                                 weights=model_name)
        model = model.model
    elif net_name == 'psp_50':
        input_shape = (473, 473, 3)
        model_name = 'pspnet50_ade20k'
        #output_mode='sigmoid'
        model = pspnet.PSPNet50(nb_classes=number_class,
                                input_shape=input_shape,
                                weights=model_name)
        model = model.model

    elif net_name[-1:] == 'c':
        if net_name == 'unet_rgbh_c' or net_name == 'unet_rgbc_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msih_c' or net_name == 'unet_msic_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = Unet(input_shape=input_shape,
                     input_tensor=input_tensor,
                     backbone_name=params.BACKBONE,
                     encoder_weights=None,
                     classes=number_class)
    elif net_name[-1:] == 'h':
        if net_name == 'unet_rgbh_h' or net_name == 'unet_rgbc_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 4]
        elif net_name == 'unet_rgb_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 3]
        elif net_name == 'unet_msi_h' or net_name == 'unet_msi_c':
            if len(input_shape) < 3:
                input_shape = [512, 512, 8]
        elif net_name == 'unet_msih_h' or net_name == 'unet_msic_h':
            if len(input_shape) < 3:
                input_shape = [512, 512, 9]
        from keras.layers import Input
        input_tensor = Input(shape=(input_shape[0], input_shape[1],
                                    input_shape[2]))
        model = UnetRegressor(input_shape=input_shape,
                              input_tensor=input_tensor,
                              backbone_name=params.BACKBONE)

    if net_name[-1:] == 'h':
        loss = no_nan_mse_evenloss
    elif number_class == 2:
        loss = my_weighted_loss
    elif number_class == 5:
        loss = my_weighted_loss_5_classes
    elif number_class == 3:
        loss = my_weighted_loss_3_classes
        #loss='categorical_crossentropy'
        #loss=my_tf_balanced_loss
    optimizer = Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    if (len(weight_path) > 2):
        model.load_weights(weight_path)
        print('use pre-trained weights', weight_path)
    model.compile(optimizer, loss=loss)

    model.summary()
    return model, input_shape
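A hypothetical call: an empty weight_path skips the checkpoint load, and num_class=2 selects my_weighted_loss for the 'c' (classification) variants:

model, input_shape = get_model('unet_rgbh_c', num_class=2, weight_path='')
# input_shape comes back as [512, 512, 4] for the RGB+height variant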
Example No. 24
                                validation_steps=val_pe,
                                steps_per_epoch=s_p_e,
                                epochs=n_epochs)

            times = time_callback.times
            dic_times = {}
            dic_times['times'] = times
            savemat(
                combinations + "_" + BACKBONE + '_' + name_model +
                '_times.mat', dic_times)
            model.save_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")
            ############END TRAINING#############

            # Load best model
            model.load_weights(combinations + "_" + BACKBONE + "_" +
                               name_model + "_model_wIoU" + str(size) + ".h5")

            #    model.evaluate(x_val, y_val, verbose=1)
            # Predict on train, val and test
            if name_model == "PSPNet":
                preds_train = model.predict(x_train2, verbose=1)
                preds_val = model.predict(x_val2, verbose=1)
                for k in range(0, x_val.shape[0], int(x_val.shape[0] / 100)):
                    x_val_1 = x_val2[k, :, :, :]
                    y_val_1 = y_val2[k, :, :, :]
                    pred_val_1 = preds_val[k, :, :, :]

                    ndvi = y_val_1.astype('float32')
                    im = comb[
                        combinations] + "_" + BACKBONE + '_' + name_model + '_target_' + str(
                            k) + '_wpatches' + str(size) + '.tif'
Example No. 25
                    steps_per_epoch=len(X_train) // batch_size, 
                    validation_steps=len(X_test) // batch_size, 
                    epochs=epoch_no, 
                    callbacks=backroom.callbacks)

elapsed_time = time.time()-start_time # measuring modelling time
print(time.strftime("%H:%M:%S", time.gmtime(elapsed_time))) # beautifying time format




#------------------------------------------------------------------------------
# 4. STEP
# Prediction 

model.load_weights(SSD_path + "model/weights/fin_M1_unet_best.h5")
y_pred = model.predict(X_test)


# cropping image vector extensions
y_pred = y_pred[:,16:80,16:80,:]
y_test = y_test[:,16:80,16:80]  # original y_test is stored above


# dimension reduction (uses 54x64 vector size)
y_pred_twodim = backroom.dimension_reduction(y_pred, mask_label_dict)




#------------------------------------------------------------------------------
Example No. 26
    model.fit_generator(
        generator=train_generator,
        # steps_per_epoch=2,
        steps_per_epoch=200,
        epochs=epochs,
        max_queue_size=100,
        validation_data=validation_generator,
        validation_steps=50,
        callbacks=[tensorboard, checkpoint, lrate],
        workers=1,
        use_multiprocessing=False)

elif status == 'Test':

    path = os.path.join('', '')
    model.load_weights(os.path.join(path, '', ''))
    import scipy.io as sio

    testData = sio.loadmat('Training.mat')
    images = testData['images']
    masks = testData['masks']
    N = images.shape[0]
    for i in range(N):
        oriimage = images[i, :, :].astype(np.float64) / 255
        s = oriimage.shape
        image = cv2.resize(oriimage, (256, 256))
        image = image[np.newaxis, :, :, np.newaxis]
        pred = model.predict(image)
        pred = pred[0, :, :, 0]
        finalPred = cv2.resize(pred, (s[1], s[0]))
        predMask = (finalPred > 0.05).astype(np.uint8)