Example No. 1
def train(image_path, mask_path):
    print('load data>>>>')
    image_train, image_valid, mask_train, mask_valid = preprocess_data_train(
        image_path, mask_path, size=64, replica=3, split=True)

    print('data loading complete!')

    print('building model>>>>')
    print('fitting model>>>>')
    # Select visible GPUs before creating the session; setting
    # CUDA_VISIBLE_DEVICES after the TF runtime is initialized has no effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(graph=tf.get_default_graph(), config=config) as sess:
        K.set_session(sess)
        sess.run(tf.global_variables_initializer())
        stop = EarlyStopping(patience=4)

        # checkpoint = ModelCheckpoint(filepath='/checkpoint-{epoch:02d}-{val_loss:.4f}.hdf5',
        #                             monitor='val_loss', verbose=1, save_best_only=True)

        model = unet(lr=1e-4)
        model.summary()
        model.fit_generator(
            generator=generator(image_train, mask_train),
            steps_per_epoch=len(image_train),
            epochs=10,
            validation_data=[image_valid, mask_valid],
            #validation_steps=64,
            verbose=1,
            callbacks=[stop])
        model.save_weights('./weight.h5')
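
The generator helper consumed by fit_generator is defined elsewhere. A minimal sketch of what it is assumed to do, given steps_per_epoch=len(image_train) (one sample per step); the name and behavior are assumptions, not the original helper:

import numpy as np

def generator(images, masks):
    # Assumed helper: cycle over the paired arrays forever, yielding one
    # (image, mask) pair per step as a batch of size 1.
    while True:
        for img, msk in zip(images, masks):
            yield img[np.newaxis, ...], msk[np.newaxis, ...]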
Example No. 2
    def gen_meteor_mask_from_folder(self, image_folder, output_folder):
        print("\nGenerating mask from Unet ...")

        if not os.path.exists(output_folder):
            os.mkdir(output_folder)

        # The image size supported is (256, 256)
        unet_model = model.unet(input_size=(settings.UNET_IMAGE_SIZE,
                                            settings.UNET_IMAGE_SIZE, 1))
        unet_model.load_weights(settings.UNET_SAVED_MODEL)

        test_image_list = os.listdir(image_folder)
        num_image = len(test_image_list)

        testGene = unet_proc.testGenerator(image_folder, as_gray=True)
        '''
        test_datagen = ImageDataGenerator(rescale=1. / 255)

        batch_size = 1

        # test_folder = os.path.dirname(image_folder)
        test_generator = test_datagen.flow_from_directory(
            image_folder,
            target_size=(256, 256),
            batch_size=batch_size,
            color_mode='grayscale',
            shuffle=False,
            class_mode=None)
        '''

        results = unet_model.predict_generator(testGene, num_image, verbose=1)
        # results = unet_model.predict_generator(test_generator, num_image, verbose=1)

        unet_proc.saveResult_V2(output_folder, results, test_image_list)
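
unet_proc.testGenerator is external to this snippet. A plausible minimal version, assuming it mirrors the common U-Net test generator (read grayscale, normalize to [0, 1], resize, add channel and batch axes); this is not the original implementation:

import os
import numpy as np
import skimage.io
import skimage.transform

def testGenerator(image_folder, as_gray=True, target_size=(256, 256)):
    # Assumed behavior, not the original unet_proc implementation.
    for name in sorted(os.listdir(image_folder)):
        img = skimage.io.imread(os.path.join(image_folder, name), as_gray=as_gray)
        if img.dtype == np.uint8:
            img = img / 255.0
        img = skimage.transform.resize(img, target_size)
        if as_gray:
            img = img[..., np.newaxis]
        yield img[np.newaxis, ...]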
Example No. 3
def prediction(im, is_pc, pretrained_weights=None):
    """
    Calculate the prediction of the label corresponding to image im
    Param:
        im: a numpy array image (numpy array), with max size 2048x2048
    Return:
        res: the predicted distribution of probability of the labels (numpy array)
    """
    # pad with zeros so that height and width are divisible by 16
    (nrow, ncol) = im.shape
    row_add = (16 - nrow % 16) % 16  # add nothing when already divisible
    col_add = (16 - ncol % 16) % 16
    padded = np.pad(im, ((0, row_add), (0, col_add)))

    if pretrained_weights is None:
        if is_pc:
            pretrained_weights = path_weights + 'unet_weights_batchsize_25_Nepochs_100_SJR0_10.hdf5'
        else:
            pretrained_weights = path_weights + 'weights_budding_BF_multilab_0_1.hdf5'

    if not os.path.exists(pretrained_weights):
        raise ValueError('Weights file not found: {}'.format(pretrained_weights))

    # WHOLE CELL PREDICTION
    model = unet(pretrained_weights=pretrained_weights,
                 input_size=(None, None, 1))

    results = model.predict(padded[np.newaxis, :, :, np.newaxis], batch_size=1)

    res = results[0, :, :, 0]
    return res[:nrow, :ncol]
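
A short usage sketch for prediction; the image path below is a placeholder, not from the original:

import numpy as np
from skimage import io

im = io.imread('cells.png', as_gray=True).astype(np.float32)  # placeholder path
probs = prediction(im, is_pc=True)     # probability map, cropped back to im.shape
mask = (probs > 0.5).astype(np.uint8)  # binary segmentation mask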
Example No. 4
def get_model(args, input_shape=(512, 512, 5)):
    if args.modelweights is not None:
        custom_objects = {
            'binary_accuracy': binary_accuracy,
            'recall': recall,
            'precision': precision,
            'dice_coefficient': dice_coefficient,
            'dice_coefficient_loss': dice_coefficient_loss,
            'weighted_binary_crossentropy_loss':
            weighted_binary_crossentropy_loss
        }
        return load_model(args.modelweights, custom_objects=custom_objects)
    #else:
    #if(args.model=="BVNet3D"):
    #train_data, label_data = get_training_patches(train_files, args.label, remove_only_background_patches=True)
    #val_data, val_label = get_training_patches(train_files, args.label)
    #model =  BVNet3D(input_size =train_data.shape[1:], loss=get_loss(args.loss))
    #return model, train_data, label_data, val_data, val_label

    #train_data, label_data = get_train_data_slices(train_files, tag=args.label)
    #print("Done geting training slices...")
    #val_data, val_label = get_slices(val_files, args.label)
    #print("Done geting validation slices...")
    else:
        if (args.model == "BVNet3D"):
            return BVNet3D(input_size=(64, 64, 64, 1),
                           loss=get_loss(args.loss, args.label))
        if (args.model == "BVNet"):
            return BVNet(input_size=input_shape,
                         loss=get_loss(args.loss, args.label))
        if (args.model == "unet"):
            return unet(input_size=input_shape,
                        loss=get_loss(args.loss, args.label))
Example No. 5
def main():
    model_dir = './checkpoints/seg2/DismapDiceLoss_k3/model_at_epoch_042.dat'
    save_dir = './test/1015/DismapDiceLoss_k3'
    test_txt_path = './data/seg1/test.txt'
    os.makedirs(save_dir, exist_ok=True)  # cv2.imwrite fails silently if save_dir is missing

    model = unet(in_channel=1, n_classes=1)
    model = load_model(model, model_dir)
    model = model.cuda()
    model.eval()

    test_dataset = GuideWireDataset(test_txt_path)
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=BATCH_SIZE,
                                              shuffle=False,
                                              num_workers=NUM_WORKERS)
    prefetcher = data_prefetcher(test_loader)
    input, _, _ = prefetcher.next()

    i = -1
    while input is not None:
        i += 1
        with torch.no_grad():
            output = model(input)

            output = torch.sigmoid(output).squeeze().data.cpu().numpy()
            output[output < 0.5] = 0
            output[output >= 0.5] = 1
            cv2.imwrite(os.path.join(save_dir,
                                     str(i) + '_output.jpg'), output * 255)
            print(str(i) + ' finish!')

        input, _, _ = prefetcher.next()
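
data_prefetcher is not shown; a minimal sketch of the usual CUDA-stream prefetching pattern its interface suggests (next() returns Nones when exhausted). This is an assumption, not the original class:

import torch

class data_prefetcher:
    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.preload()

    def preload(self):
        try:
            self.next_batch = next(self.loader)
        except StopIteration:
            self.next_batch = None
            return
        with torch.cuda.stream(self.stream):
            # copy the next batch to the GPU on a side stream
            self.next_batch = tuple(t.cuda(non_blocking=True) for t in self.next_batch)

    def next(self):
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.next_batch
        if batch is None:
            return None, None, None  # matches input, _, _ = prefetcher.next()
        self.preload()
        return batch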
Example No. 6
def main():
    targetSize = (256, 256)
    model = unet(input_size=targetSize + (3, ))
    model.load_weights("checkpoints/color_shapes/shapes_20_0.0000_1.000.hdf5")

    while True:
        shapeImage, _ = generate_shapes(img_shape=targetSize + (3, ),
                                        bgColor=[117, 122, 125],
                                        shapeColor=[180, 211, 250])
        batch = adjustImage(shapeImage, targetSize, asGray=False)
        results = model.predict(batch, batch_size=1, verbose=0)

        results = np.round(np.squeeze(results[0]) * 255, 0).astype(np.uint8)

        cv2.imshow('image', shapeImage)
        cv2.imshow('results', results)

        while True:
            key = cv2.waitKey()
            if key == 27:
                return
            if key == ord('d'):
                break
        cv2.imshow('image', np.zeros_like(shapeImage))
        cv2.imshow('results', np.zeros_like(shapeImage))
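
adjustImage is assumed to resize, normalize, and batch the input; a minimal sketch (hypothetical, not the original helper):

import cv2
import numpy as np

def adjustImage(img, targetSize, asGray):
    # Assumed behavior: resize, scale to [0, 1], shape to a (1, H, W, C) batch.
    if asGray:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.resize(img, (targetSize[1], targetSize[0]))  # cv2 wants (width, height)
    img = img.astype(np.float32) / 255.0
    if asGray:
        img = img[..., np.newaxis]
    return img[np.newaxis, ...]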
Example No. 7
def run(flags_obj):
    data_aug_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')

    train_gene = train_generator(flags_obj, data_aug_args)

    model = unet(flags_obj, n_filters=64)

    model.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=flags_obj.learning_rate),
                  loss=tf.keras.losses.BinaryCrossentropy(),
                  metrics=['accuracy'])

    example = load_example(flags_obj)
    example_img = imageio.imread('data/membrane/test/image/0.png')
    # Save first prediction without training.
    save_prediction(model, example_img, example, 0)

    test_ds = load_test_dataset()

    history = model.fit_generator(train_gene,
                                  epochs=flags_obj.epoch,
                                  steps_per_epoch=flags_obj.steps_per_epoch,
                                  validation_data=test_ds,
                                  callbacks=[DisplayCallback(model, example)])

    create_gif()
    plot_history(history, flags_obj.epoch)
Example No. 8
def train_model():

    (
        train_clean_paths,
        train_noisy_paths,
        test_clean_paths,
        test_noisy_paths,
    ) = split_data()

    print("Train data:", len(train_clean_paths))
    print("Test data:", len(test_clean_paths))
    train_generator = data_generator(train_clean_paths, train_noisy_paths)
    test_generator = data_generator(test_clean_paths, test_noisy_paths)

    print("Initialize the model...")
    model = unet(pretrained_weights=weights_path)

    print("Start training...")
    model.fit_generator(
        generator=train_generator,
        validation_data=test_generator,
        steps_per_epoch=cnf.steps_per_epoch,
        epochs=cnf.epochs,
        validation_steps=cnf.validation_steps,
    )
    print("Finishing training...")

    if SAVE_WEIGHTS:
        model.save_weights(f"weights/{str(datetime.now())}_unet_model.h5")

    return model
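
data_generator is defined elsewhere; a minimal sketch of the assumed pairing of noisy inputs with clean targets (grayscale, equally sized images assumed):

import numpy as np
from skimage import io

def data_generator(clean_paths, noisy_paths, batch_size=8):
    # Assumed helper: yield (noisy, clean) batches forever for a denoising U-Net.
    while True:
        idx = np.random.choice(len(clean_paths), batch_size, replace=False)
        noisy = np.stack([io.imread(noisy_paths[i]) for i in idx])
        clean = np.stack([io.imread(clean_paths[i]) for i in idx])
        yield (noisy[..., np.newaxis].astype(np.float32) / 255.0,
               clean[..., np.newaxis].astype(np.float32) / 255.0)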
Example No. 9
    def train(self):
        from model import unet
        from data import trainGenerator
        from keras.callbacks import ModelCheckpoint
        from keras.optimizers import Adam
        from keras.callbacks import ReduceLROnPlateau

        # myGene = trainGenerator(4, 'data', 'colorImage', 'label', self.data_gen_args, image_color_mode='rgb',
        #                         target_size=self.targetSize, flag_multi_class=True, num_class=6, save_to_dir=None)
        myGene = self.getTrainGenerator()

        model = unet(input_size=self.targetSize + (1, ))
        model.compile(optimizer=Adam(lr=1e-4),
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        chckPtsDir = 'checkpoints/membrane/multiclass'
        os.makedirs(chckPtsDir, exist_ok=True)
        chckPtsPath = os.path.join(
            chckPtsDir, 'unet_multiclass_{epoch}_{loss:.3f}_{acc:.3f}.hdf5')
        model_checkpoint = ModelCheckpoint(chckPtsPath,
                                           monitor='loss',
                                           verbose=1,
                                           save_best_only=True)
        # reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=4, min_lr=0.001)
        model.fit_generator(myGene,
                            steps_per_epoch=20,
                            epochs=3,
                            callbacks=[model_checkpoint])
Example No. 10
def unet_crf():
    model = unet()
    model.load_weights("./unet_membrane.hdf5")
    testGene = testGenerator("./dataset/membrane/test/img", 1)
    labelGene = labelGenerator("./dataset/membrane/test/label", 1)
    dataset = zip(testGene, labelGene)
    for idx, (data, target) in enumerate(dataset):
        pred = model.predict(data, 1, verbose=1)
        pred = pred.squeeze()
        print("pred", pred.shape)

        # original image, 0-255, 3 channel
        img = cv2.imread("./dataset/membrane/test/img/%s" % str(idx) + ".png")
        # perform dense crf
        final_mask = dense_crf(idx, np.array(img).astype(np.uint8), pred)
        # binarize the score map to get the U-Net prediction
        pred = np.uint8(pred >= 0.5) * 255
        pred = Image.fromarray(pred, 'L')
        pred.save('./{}.png'.format(idx))
        # draw result after denseCRF
        final_mask = np.uint8(final_mask)
        final_mask = final_mask * 255
        crf = Image.fromarray(final_mask, 'L')
        crf.save('./img/{}.png'.format(idx))
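
dense_crf is imported from elsewhere; a plausible minimal version using pydensecrf. The pairwise parameters here are illustrative assumptions, not the original settings:

import numpy as np
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax

def dense_crf(idx, img, prob):
    # idx is unused here; kept only to match the call site above.
    h, w = prob.shape
    softmax = np.stack([1.0 - prob, prob])  # (2, H, W) class probabilities
    d = dcrf.DenseCRF2D(w, h, 2)
    d.setUnaryEnergy(unary_from_softmax(softmax))
    d.addPairwiseGaussian(sxy=3, compat=3)  # location-only smoothness term
    d.addPairwiseBilateral(sxy=50, srgb=13, rgbim=np.ascontiguousarray(img), compat=10)
    q = np.array(d.inference(5)).reshape(2, h, w)
    return q.argmax(axis=0)  # 0/1 label map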
Example No. 11
def train():
    """
    A function to perform training by initializing the values of the model object.
    ModelCheckpoint is used to save the trained weights.
    fit_generator starts the training process.

    """
    model = unet(in_shape=(256, 512, 3),
                 num_classes=7,
                 lrate=1e-4,
                 decay_rate=5e-4,
                 vgg_path=None,
                 dropout_rate=0.5)

    # Define callbacks
    mod_save = ModelCheckpoint(filepath='gpu_exp_6classes_model_weight.h5',
                               save_weights_only=True)

    # training
    model.fit_generator(data_generator('data.h5', 4, 'train'),
                        steps_per_epoch=2975 // 4,
                        validation_data=data_generator('data.h5', 4, 'val'),
                        validation_steps=500 // 4,
                        callbacks=[mod_save],
                        epochs=100,
                        verbose=1)
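
data_generator('data.h5', 4, 'train') suggests batched reads from an HDF5 file; a minimal sketch assuming '<split>/images' and '<split>/labels' datasets (the layout is an assumption):

import h5py
import numpy as np

def data_generator(h5_path, batch_size, split):
    # Assumed helper: yield (image, mask) batches forever from an HDF5 file.
    f = h5py.File(h5_path, 'r')
    images, labels = f[split + '/images'], f[split + '/labels']
    n = images.shape[0]
    while True:
        for start in range(0, n - batch_size + 1, batch_size):
            sl = slice(start, start + batch_size)
            yield np.asarray(images[sl]), np.asarray(labels[sl])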
Example No. 12
def bn_update(tf_config, logger):
    dataset = data.Dataset(cfg.DATASET, cfg.RNG_SEED)
    cfg.MODEL.BN_MOMENTUM = 0.
    assert cfg.MODEL.BN_MOMENTUM == 0., 'BN_MOMENTUM should be 0. for update step'
    imgs, _ = dataset.preprocessing(training=True,
                                    augment=False,
                                    batch_size=dataset.train_num,
                                    num_epochs=1)

    net, _ = model.unet(imgs,
                        bn_training=True,
                        dropout_training=False,
                        dataset=cfg.DATASET)
    with tf.variable_scope('cls'):
        _ = tf.layers.conv2d(net, 1, 1, activation=tf.nn.relu)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

    saver = tf.train.Saver(max_to_keep=1000)
    with tf.Session(config=tf_config) as sess:
        weights_path = tf.train.latest_checkpoint(cfg.OUTPUT_DIR)
        logger.info('Restoring weights from {}'.format(weights_path))
        saver.restore(sess, weights_path)

        sess.run(update_ops)

        weights_path = saver.save(sess,
                                  os.path.join(cfg.OUTPUT_DIR, 'bn-model'),
                                  global_step=int(weights_path.split('-')[-1]))
        logger.info('Updated weights saved to {}'.format(weights_path))
    tf.reset_default_graph()
Example No. 13
File: train.py Project: Trevol/unet
def trainColor():
    from model import unet
    from data import trainGenerator
    from keras.callbacks import ModelCheckpoint
    from keras.optimizers import Adam
    from keras.callbacks import ReduceLROnPlateau

    data_gen_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')
    targetSize = (256, 256)
    myGene = trainGenerator(4, 'data/membrane/train', 'colorImage', 'label', data_gen_args, image_color_mode='rgb',
                            target_size=targetSize, save_to_dir=None)

    model = unet(input_size=targetSize + (3,))
    model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])

    chckPtsDir = 'checkpoints'
    os.makedirs(chckPtsDir, exist_ok=True)
    chckPtsPath = os.path.join(chckPtsDir, 'unet_membrane_{epoch}_{loss:.3f}_{acc:.3f}.hdf5')
    model_checkpoint = ModelCheckpoint(chckPtsPath, monitor='loss', verbose=1, save_best_only=True)
    # reduce_lr = ReduceLROnPlateau(monitor='loss', factor=0.2, patience=4, min_lr=0.001)
    model.fit_generator(myGene, steps_per_epoch=2000, epochs=17, callbacks=[model_checkpoint])
Example No. 14
def main():
    images_path = '/Users/ericfornaciari/Desktop/data/images/patches/raw/1024x1024/'
    masks_path = '/Users/ericfornaciari/Desktop/data/masks/patches/encoded/1024x1024/'

    images = np.zeros((len(os.listdir(images_path)), 1024, 1024, 3), np.uint16)
    masks = np.zeros((len(os.listdir(masks_path)), 1024, 1024, 1), bool)  # np.bool is removed in NumPy >= 1.24
    for i, filename in enumerate(os.listdir(images_path)):
        images[i, :, :, :] = imread(os.path.join(images_path, filename))
    for i, filename in enumerate(os.listdir(masks_path)):
        masks[i, :, :, :] = np.load(os.path.join(masks_path, filename),
                                    allow_pickle=True)

    unet = model.unet(input_size=(1024, 1024, 3))
    unet.compile(optimizer='adam',
                 loss='binary_crossentropy',
                 metrics=['accuracy'])

    earlystopper = EarlyStopping(patience=5, verbose=1)
    checkpointer = ModelCheckpoint('init.h5', verbose=1, save_best_only=True)
    unet.fit(images,
             masks,
             validation_split=0.1,
             batch_size=16,
             epochs=50,
             callbacks=[earlystopper, checkpointer])
Example No. 15
def train(args):
    # data augmentation configuration
    data_gen_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')
    # training set
    myGene = trainGenerator(2,
                            os.path.join(args.root, 'train'),
                            'image',
                            'label',
                            data_gen_args,
                            target_size=(512, 512),
                            save_to_dir=None)
    # model
    model = unet()
    model_checkpoint = ModelCheckpoint(args.save_path,
                                       monitor='loss',
                                       verbose=1,
                                       save_best_only=True)
    # train
    model.fit_generator(myGene,
                        steps_per_epoch=2000,
                        epochs=5,
                        callbacks=[model_checkpoint])
Example No. 16
def main():
    data_gen_args = dict(rotation_range=0.2,
                         width_shift_range=0.05,
                         height_shift_range=0.05,
                         shear_range=0.05,
                         zoom_range=0.05,
                         horizontal_flip=True,
                         fill_mode='nearest')
    img_gen_arg_dict = dict(directory=train_path,
                            classes=[image_folder],
                            class_mode=None,
                            color_mode=image_color_mode,
                            target_size=target_size,
                            batch_size=batch_size,
                            save_to_dir=save_to_dir,
                            save_prefix=image_save_prefix,
                            seed=seed)

    mask_gen_arg_dict = dict(directory=train_path,
                             classes=[mask_folder],
                             class_mode=None,
                             color_mode=mask_color_mode,
                             target_size=target_size,
                             batch_size=batch_size,
                             save_to_dir=save_to_dir,
                             save_prefix=mask_save_prefix,
                             seed=seed)
    val_img_gen_arg_dict = dict(directory=val_path,
                                classes=[val_image_folder],
                                class_mode=None,
                                color_mode=image_color_mode,
                                target_size=target_size,
                                batch_size=batch_size,
                                save_to_dir=save_to_dir,
                                save_prefix=image_save_prefix,
                                seed=seed)
    val_mask_gen_arg_dict = dict(directory=val_path,
                                 classes=[mask_folder],
                                 class_mode=None,
                                 color_mode=mask_color_mode,
                                 target_size=target_size,
                                 batch_size=batch_size,
                                 save_to_dir=save_to_dir,
                                 save_prefix=mask_save_prefix,
                                 seed=seed)
    myGene = trainGenerator(data_gen_args, img_gen_arg_dict, mask_gen_arg_dict)
    valGene = trainGenerator(data_gen_args, val_img_gen_arg_dict, val_mask_gen_arg_dict)

    history = LossHistory()
    model = unet()
    model_checkpoint = ModelCheckpoint(model_path, monitor='loss', verbose=1, save_best_only=True)
    model.fit_generator(myGene, steps_per_epoch=300, epochs=10, validation_data=valGene, validation_steps=30, callbacks=[model_checkpoint, history])

    history.loss_plot('epoch', result_image_path)
    print('result saved')

    testGene = testGenerator(test_path, test_image_type)
    results = model.predict_generator(testGene, 10, verbose=1)
    saveResult(result_save_path, results)
Example No. 17
def eval_and_save_result(dataset_dir, model_path, eval_result_dirpath,
                         eval_summary_name='eval_summary.yml',
                         files_2b_copied=None,
                         num_filters=64, num_maxpool=4,
                         modulo=16):
    '''
    Evaluate a trained U-Net on the train/valid/test splits under dataset_dir:
    compute per-image IoU, save a summary and result images, and copy the
    model file (plus any files_2b_copied) into eval_result_dirpath.
    '''
    #---- load ----
    train_dir = os.path.join(dataset_dir,'train')
    valid_dir = os.path.join(dataset_dir,'valid')
    test_dir = os.path.join(dataset_dir,'test')
    train_img_dir = os.path.join(train_dir,'image')
    train_label_dir = os.path.join(train_dir,'label')
    valid_img_dir = os.path.join(valid_dir,'image')
    valid_label_dir = os.path.join(valid_dir,'label')
    test_img_dir = os.path.join(test_dir, 'image')
    test_label_dir = os.path.join(test_dir, 'label')
    assert_exists(train_img_dir)
    assert_exists(train_label_dir)
    assert_exists(valid_img_dir)
    assert_exists(valid_label_dir)
    assert_exists(test_img_dir)
    assert_exists(test_label_dir)
    train_inputs = list(load_imgs(train_img_dir))
    train_answers = list(load_imgs(train_label_dir))
    valid_inputs = list(load_imgs(valid_img_dir))
    valid_answers = list(load_imgs(valid_label_dir))
    test_inputs = list(load_imgs(test_img_dir))
    test_answers = list(load_imgs(test_label_dir))

    #---- eval ----
    segnet = model.unet(model_path, (None,None,1), 
                        num_filters=num_filters, num_maxpool=num_maxpool)
    train_iou_arr, train_result_tuples = evaluate(segnet, train_inputs, train_answers, modulo)
    valid_iou_arr, valid_result_tuples = evaluate(segnet, valid_inputs, valid_answers, modulo)
    test_iou_arr, test_result_tuples = evaluate(segnet, test_inputs, test_answers, modulo)
    K.clear_session()
    print('Evaluation completed!')

    #---- save ----
    summary_path, train_path, valid_path, test_path = make_eval_directory(eval_result_dirpath,
                                                                          eval_summary_name)
    save_eval_summary(summary_path, train_iou_arr, valid_iou_arr, test_iou_arr)
    print('Evaluation summary is saved!')

    save_img_tuples(train_result_tuples, train_path)
    save_img_tuples(valid_result_tuples, valid_path)
    save_img_tuples(test_result_tuples, test_path)
    print('Evaluation result images are saved!')

    if files_2b_copied is None:
        files_2b_copied = [model_path]
    else:
        files_2b_copied.append(model_path)

    for file_path in files_2b_copied:
        file_name = os.path.basename(file_path)
        shutil.copyfile(file_path, os.path.join(eval_result_dirpath, file_name))
        print("file '%s' is copyed into '%s'" % (file_name,eval_result_dirpath))
Example No. 18
def test_unet():
    from utils import calc_params
    from model import unet
    import torch
    model = unet()
    calc_params(model)
    x = torch.randn(1, 3, 192, 128)
    print(model(x).shape)
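
calc_params is imported from utils; counting parameters is a standard PyTorch one-liner, so a sketch of the assumed behavior:

def calc_params(model):
    # Assumed behavior of utils.calc_params: report parameter counts.
    total = sum(p.numel() for p in model.parameters())
    trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('params: {:,} total, {:,} trainable'.format(total, trainable))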
Example No. 19
def train_and_predict(data_path, data_filename, batch_size, n_epoch):
    """
    Create a model, load the data, and train it.
    """
    """
    Step 1: Load the data
    """
    hdf5_filename = os.path.join(data_path, data_filename)
    print("-" * 30)
    print("Loading the data from HDF5 file ...")
    print("-" * 30)

    # use the explicit batch_size parameter rather than the global args value
    imgs_train, msks_train, imgs_validation, msks_validation, \
        imgs_testing, msks_testing = \
        load_data(hdf5_filename, batch_size,
                  [args.crop_dim, args.crop_dim],
                  args.channels_first, args.seed)

    print("-" * 30)
    print("Creating and compiling model ...")
    print("-" * 30)
    """
    Step 2: Define the model
    """

    unet_model = unet()
    model = unet_model.create_model(imgs_train.shape, msks_train.shape)

    model_filename, model_callbacks = unet_model.get_callbacks()

    # If there is a current saved file, then load weights and start from
    # there.
    saved_model = os.path.join(args.output_path, args.inference_filename)
    if os.path.isfile(saved_model):
        model.load_weights(saved_model)
    """
    Step 3: Train the model on the data
    """
    print("-" * 30)
    print("Fitting model with training data ...")
    print("-" * 30)

    model.fit(imgs_train,
              msks_train,
              batch_size=batch_size,
              epochs=n_epoch,
              validation_data=(imgs_validation, msks_validation),
              verbose=1,
              shuffle="batch",
              callbacks=model_callbacks)
    """
    Step 4: Evaluate the best model
    """
    print("-" * 30)
    print("Loading the best trained model ...")
    print("-" * 30)

    unet_model.evaluate_model(model_filename, imgs_testing, msks_testing)
Example No. 20
    def __init__(self, _train_list, _val_list, _inf_list, _dag_it=0, _input_shape=(256, 1024, 3),
                 _train_steps=500, _val_steps=200, _num_epochs=15, _batch_size=4, _gpu_num='0,1',
                 _no_inidices=True, _segnet=False):
        self.dag_it = _dag_it
        self.train_list = _train_list
        self.val_list = _val_list
        self.inf_list = _inf_list
        self.base_dir = '/media/localadmin/Test/11Nils/kitti/dataset/sequences/Data/'
        self.img_dir = 'images/'
        self.label_dir = 'labels/'
        self.inf_dir = 'inf/'
        self.dag_dir = 'dagger/'
        self.log_dir = 'log/'
        self.optimizer = 'adagrad'
        self.gpu_num = _gpu_num  # e.g. '1'
        os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_num
        self.untrained = 'store_true'
        self.loss = 'categorical_crossentropy'
        self.output_mode = 'softmax'
        self.pool_size = (2, 2)
        self.kernel = 3
        self.input_shape = _input_shape  # (128, 512, 3)
        self.n_labels = 3  # number of classes
        self.val_steps = _val_steps
        self.epoch_steps = _train_steps
        self.n_epochs = _num_epochs
        self.batch_size = _batch_size
        self.filters = 8
        self.b_pool_indices = _no_inidices
        self.b_use_segnet = _segnet
        if not self.b_pool_indices and not self.b_use_segnet:
            self.model = unet_wIndices(self.input_shape, self.n_labels, self.filters, self.kernel,
                                       self.pool_size, self.output_mode)
        elif not self.b_use_segnet:
            self.model = unet(self.input_shape, self.n_labels, self.filters, self.kernel,
                              self.pool_size, self.output_mode)
        else:
            self.model = segnet(self.input_shape, self.n_labels, self.filters, self.kernel,
                                self.pool_size, self.output_mode)
        self.model.summary()  # summary() prints itself; wrapping it in print() just prints None
        list_gpus_trained = [int(x) for x in self.gpu_num.split(',')]
        self.num_gpus = len(list_gpus_trained)
        if self.num_gpus > 1:
            trained_gpu_str = ', '.join(str(e) for e in list_gpus_trained)
            print('Training on GPUs: ' + trained_gpu_str)
            self.multi_model = multi_gpu_model(self.model, gpus=self.num_gpus)
        else:
            self.multi_model = self.model
        self.multi_model.compile(loss=self.loss, optimizer=self.optimizer, metrics=['accuracy'])
        plot_model(model=self.multi_model, to_file=self.base_dir + 'model.png')
        print_summary(self.multi_model)
        self.std = [0.32636853, 0.31895106, 0.30716496]
        self.mean = [0.39061851, 0.38151629, 0.3547171]
        self.es_cb = []
        self.tb_cb = []
        self.cp_cb = []
Example No. 21
def run():
    input_dim = (768, 768, 3)
    print("Instantiating model...")
    model = unet(input_dim)
    print(model.summary())
    print("Creating training generator...")
    val_generator = load_data_generator('data/val_images', 'data/val_masks', batch_size=2)
    train_generator = load_data_generator('data/images', 'data/masks', batch_size=2)

    plot_generator(val_generator)
Example No. 22
def create_unet(myGene):
    mdl = model.unet()
    model_checkpoint = ModelCheckpoint('../model/unet_membrane.hdf5', monitor='loss', verbose=1, save_best_only=True)
    history = mdl.fit_generator(myGene, steps_per_epoch=steps, epochs=epochs, callbacks=[model_checkpoint])
    print(history.history['loss'])
    # save the history
    #with open('../model/history.json', 'w') as f:
    #    json.dump(history.history, f)
    data.saveHist('../model/history.json', history)
    return mdl, history.history
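
data.saveHist is external; a minimal sketch that serializes a Keras History to JSON (assumed behavior, not the original implementation):

import json

def saveHist(path, history):
    # Convert numpy scalars to plain floats so json.dump succeeds.
    serializable = {k: [float(v) for v in vals] for k, vals in history.history.items()}
    with open(path, 'w') as f:
        json.dump(serializable, f)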
Example No. 23
def plotSavedModel(modeindex):

    # Import Dataset
    modes = DataSet.learningModes
    data = DataSet(modes[modeindex])
    data.print()

    # Network Parameters
    WIDTH = data.WIDTH
    HEIGHT = data.HEIGHT
    CHANNELS = data.CHANNELS_IN
    NUM_INPUTS = WIDTH * HEIGHT * CHANNELS
    NUM_OUTPUTS = data.CHANNELS_OUT

    # Network variables and placeholders
    X = tf.placeholder(tf.float32, [None, HEIGHT, WIDTH, CHANNELS])  # Input
    Y = tf.placeholder(
        tf.float32, [None, HEIGHT, WIDTH, NUM_OUTPUTS])  # Truth Data - Output
    global_step = tf.Variable(0,
                              dtype=tf.int32,
                              trainable=False,
                              name='global_step')

    # Define loss and optimizer
    prediction = model.unet(X, NUM_OUTPUTS)

    # Setup Saver
    saver = tf.train.Saver()

    # Initialize variables and run the network
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    ckpt = tf.train.get_checkpoint_state('./checkpoints/' + modes[modeindex])
    if (ckpt and ckpt.model_checkpoint_path):
        print('Restoring Prev. Model ....')
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Model Loaded....')

        # Show results
        prediction = sess.run(prediction,
                              feed_dict={
                                  X: data.x_test,
                                  Y: data.y_test
                              })

        # Compute metrics of prediction
        metrics = do_metrics(prediction, data)

        # index = np.random.randint(data.x_test.shape[0])
        index = 4
        print('Selecting Test Image #', index)
        plot(data, prediction, modeindex, index)
Example No. 24
def main():
    # Build the model.
    model = unet(input_shape=(320, 320, 3), num_cls=32)
    # Load the trained weights.
    model.load_weights("weights/unet_camvid_weights.h5")
    sumiou = 0
    sumacc = 0
    root = "dataset/test/"
    testList = os.listdir("dataset/test/")

    # Iterate over the test images.
    for file in testList:
        if file.endswith("png"):
            labelfile = "C:\\Users\\41648\\Downloads\\camvid-master\\camvid-master\\LabeledApproved_full\\" + \
                        file.split(".")[0] + "_L.png"
            gtfile = "dataset/testannot/" + file.split(".")[0] + "_P.png"
            image = cv2.imread(os.path.join(root, file))
            image = cv2.resize(image, (320, 320))
            image_ = np.float32(image) / 255.
            batch_test_img = np.expand_dims(image_, axis=0)
            pred = model.predict(batch_test_img)[0]

            gt = cv2.imread(gtfile, 0)
            gt = cv2.resize(gt, (320, 320))
            pr = pred.argmax(axis=2)
            sumacc += np.sum((gt == pr)) / (320 * 320)
            iouCls = np.zeros((32, 2))
            for cl in range(32):
                if np.sum((gt == cl)) > 0:
                    intersection = np.sum((gt == cl) * (pr == cl))
                    union = np.sum(np.maximum((gt == cl), (pr == cl)))
                    iou = float(intersection) / (union + EPS)
                    iouCls[cl][0] += 1
                    iouCls[cl][1] += iou
            # print(iouCls)
            sumiou += iouCls
            seg_img = visualize_segmentation(pr,
                                             image,
                                             n_classes=32,
                                             colors=class_colors,
                                             overlay_img=False,
                                             show_legends=False,
                                             class_names=class_names)

            seg_img = np.asarray(seg_img, np.uint8)
            seg_img = cv2.cvtColor(seg_img, cv2.COLOR_RGB2BGR)

            label = cv2.imread(labelfile)
            label = cv2.resize(label, (320, 320))

            out = np.hstack([label, image, seg_img])
            # cv2.imwrite("result/"+file,out)
            cv2.imshow("seg_img", out)
            cv2.waitKey(50)
Example No. 25
def main():
    model = unet(input_size=(512, 512, 3))
    model.load_weights("unet_pins.hdf5")

    framesDir = '/home/trevol/HDD_DATA/Computer_Vision_Task/Computer_Vision_Task/frames_6'
    savePath = '/home/trevol/HDD_DATA/Computer_Vision_Task/Computer_Vision_Task/frames_6_unet_pins_only'
    for image, fileName in yieldImages(framesDir):
        results = model.predict(image, verbose=0)
        result = (results[0, :, :, 0] * 255).astype(np.uint8)
        skimage.io.imsave(
            os.path.join(savePath, fileName.replace('.jpg', '.png')), result)
Example No. 26
def train(validate_input=False):
    training_generator = MyAugGenerator(BATCH_SIZE, train_path, 'training_images', 'training_labels', aug_dict, IMG_SIZE)
    val_generator = MyAugGenerator(BATCH_SIZE, train_path, 'training_images', 'training_labels', {}, IMG_SIZE)
    if validate_input:
        validate_generator(training_generator)
    model = unet(IMG_SIZE, dropout)
    model.compile(optimizer=Adam(lr=learning_rate), loss=wbce_loss, metrics=my_metrics)
    model_checkpoint = ModelCheckpoint('unet_checkpoint.hdf5', monitor='loss', save_best_only=True)
    model.fit(training_generator, 
              validation_data=val_generator, 
              epochs=EPOCHS, 
              callbacks=[model_checkpoint])
Example No. 27
def main_():
    image, originalImage = readImage('testData/f_0021_1400.00_1.40.jpg',
                                     as_gray=True)

    model = unet(input_size=(512, 512, 1))
    model.load_weights("checkpoints/unet_grayscale_pins_4_0.0021_0.999.hdf5")
    results = model.predict(image, batch_size=1, verbose=0)

    results = np.round(np.squeeze(results[0]) * 255, 0).astype(np.uint8)

    cv2.imshow('image', originalImage)
    cv2.imshow('results', results)
    cv2.waitKey()
Example No. 28
def draw(model_path, image_path):

    model = unet()
    model.load_weights(model_path)
    testGene = testGenerator(image_path, 5, target_size=(512, 512))
    results = model.predict_generator(testGene, 5, verbose=1)
    results = results > 0.5

    for i in range(results.shape[0]):
        array = np.uint8(results[i].squeeze())
        array = array * 255
        img = Image.fromarray(array, 'L')
        img.save('{}.png'.format(i))
Example No. 29
def main_():
    model = unet()
    model.load_weights("../unet_membrane_5_0.123_0.946.hdf5")
    # image, originalImage = readImage('../data/membrane/test/0.png', target_size=(256, 256), as_gray=True)
    image, originalImage = readImage('data/image/f_0350_23333.33_23.33.jpg',
                                     target_size=(256, 256),
                                     as_gray=True)
    results = model.predict(image, batch_size=1, verbose=0)

    results = np.round(np.squeeze(results[0]) * 255, 0).astype(np.uint8)

    cv2.imshow('image', originalImage)
    cv2.imshow('results', results)
    cv2.waitKey()
Example No. 30
def predict256x256x3_n_save():
    targetSize = (256, 256)
    model = unet(input_size=targetSize + (3, ))
    model.load_weights("checkpoints/rgb/unet_pins_20_0.001_1.000.hdf5")

    framesDir = '/home/trevol/HDD_DATA/Computer_Vision_Task/Computer_Vision_Task/frames_6'
    savePath = '/home/trevol/HDD_DATA/Computer_Vision_Task/Computer_Vision_Task/frames_6_unet_pins_only'
    for batch, originalImage, fileName in yieldImages(framesDir,
                                                      targetSize,
                                                      as_gray=False):
        results = model.predict(batch, verbose=0)
        results = np.round((results[0, :, :, 0] * 255), 0).astype(np.uint8)
        skimage.io.imsave(
            os.path.join(savePath, fileName.replace('.jpg', '.png')), results)
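
yieldImages is shared by Examples 25 and 30 but not shown (Example 25 unpacks two values, Example 30 three). A minimal sketch following Example 30's signature; names and behavior are assumptions:

import os
import cv2
import numpy as np

def yieldImages(framesDir, targetSize=None, as_gray=False):
    # Assumed helper: yield (batch, originalImage, fileName) per image file.
    for fileName in sorted(os.listdir(framesDir)):
        original = cv2.imread(os.path.join(framesDir, fileName))
        if original is None:
            continue  # skip non-image files
        img = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY) if as_gray else original
        if targetSize is not None:
            img = cv2.resize(img, (targetSize[1], targetSize[0]))
        batch = img.astype(np.float32) / 255.0
        if as_gray:
            batch = batch[..., np.newaxis]
        yield batch[np.newaxis, ...], original, fileName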