Example #1
def inference(input_img,
              path='/content/gdrive/My Drive/[epoch50]q1_part_e.pth'):
    '''
    input:  input_img is an RGB image
            path is where the checkpoint was saved

    output: a grayscale segmentation mask
    '''
    kernel = np.ones((5, 5), np.uint8)
    # Initialize U-Net for segmentation
    model = unet()
    use_cuda = torch.cuda.is_available()  # use GPU if available
    if use_cuda:
        print('using cuda')
        model = model.cuda()
    model.load_state_dict(
        torch.load(path, map_location='cuda' if use_cuda else 'cpu'))
    with torch.no_grad():
        model.eval()  # Set model to evaluate mode
        try:
            input_img = cv2.resize(input_img,
                                   dsize=(320, 256),
                                   interpolation=cv2.INTER_CUBIC)
            input_img = input_img.transpose((2, 0, 1))
            input_img = np.expand_dims(input_img, axis=0)
        except Exception:
            print('image format invalid')
        input_tensor = torch.from_numpy(input_img).type(
            torch.FloatTensor)  # change to a float torch tensor
        if use_cuda:
            input_tensor = input_tensor.cuda()  # match the model's device
        outputs = model(input_tensor)
        output = np.squeeze(outputs.cpu().numpy())
        output = reverse_one_hot_encoding(output)
        output = cv2.erode(output.astype('uint8'), kernel)

    return output
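For reference, a minimal call sketch for `inference`; the image file name is hypothetical, and `cv2`, `torch`, `unet`, `reverse_one_hot_encoding`, and the default checkpoint path are assumed to be available exactly as the snippet uses them:

import cv2

img = cv2.imread('sample.jpg')[:, :, ::-1]  # hypothetical file, BGR -> RGB
mask = inference(img)                       # grayscale label mask
cv2.imwrite('mask.png', mask)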
Example #2
def generator(inputs,
              layers,
              features_root=64,
              filter_size=3,
              pool_size=2,
              output_channel=3):
    # Stack two U-Nets: the output of the first is refined by a second
    # U-Net with the same configuration.
    im = unet.unet(inputs, layers, features_root, filter_size, pool_size,
                   output_channel)
    return unet.unet(im, layers, features_root, filter_size, pool_size,
                     output_channel)
Example #3
def main(argv=None):
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"
    input_images = tf.placeholder(tf.float32,
                                  shape=[None, None, None, 3],
                                  name="input")
    gt_maps = tf.placeholder(tf.float32,
                             shape=[None, None, None, 1],
                             name="input_gt")
    unet_output = unet.unet(name="UNET", input_data=input_images)
    loss = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(gt_maps, unet_output))
    train_ops = tf.train.AdamOptimizer(
        learning_rate=FLAGS.learning_rate).minimize(loss)
    saver = tf.train.Saver(tf.global_variables())

    summary_writer = tf.summary.FileWriter(FLAGS.checkpoint_path,
                                           tf.get_default_graph())
    init = tf.global_variables_initializer()

    with tf.Session(graph=tf.get_default_graph()) as sess:
        sess.run(init)
        data_generator = data.get_batch(num_workers=FLAGS.number_readers,
                                        batch_size=FLAGS.batch_size)
        for step in range(FLAGS.max_step):
            input_list = next(data_generator)
            step_loss, _ = sess.run([loss, train_ops],
                                    feed_dict={
                                        input_images: input_list[0],
                                        gt_maps: input_list[1]
                                    })
            print("step {}, model loss {}".format(step, step_loss))
            saver.save(sess=sess,
                       save_path=FLAGS.checkpoint_path + str(step) + ".ckpt",
                       global_step=step)
Example #4
def modelSelection(model_name, patch_shape, learnRate):

	nadam = Nadam(lr=learnRate, beta_1=0.9, beta_2=0.999, epsilon=None, schedule_decay=0.004)

	dataGenerFlow = 0

	##################################################

	if model_name == "sen2mt_net_Loss_mae":
		model = model_sep_cbam.sen2mt_net(input_size=patch_shape, flow = dataGenerFlow, taskAttation=1)
		model.compile(optimizer=nadam, loss="mean_absolute_error", metrics=['mae'])

	if model_name == "sen2mt_net_Loss_mse":
		model = model_sep_cbam.sen2mt_net(input_size=patch_shape, flow = dataGenerFlow, taskAttation=1)
		model.compile(optimizer=nadam, loss="mean_squared_error", metrics=['mae'])

	if model_name == "sen2mt_net":
		model = model_sep_cbam.sen2mt_net(input_size=patch_shape, flow = dataGenerFlow, taskAttation=0)
		model.compile(optimizer=nadam, loss=model_sep_cbam.mean_absolute_error_weight, metrics=['mae'])

	if model_name == "dlab":
		import deepLabV3_adapted
		model = deepLabV3_adapted.dl_net(input_size=patch_shape)
		model.compile(optimizer=nadam, loss=model_sep_cbam.mean_absolute_error_weight, metrics=['mae'])

	if model_name == "unet":
		import unet
		model = unet.unet(input_size=patch_shape)
		model.compile(optimizer=nadam, loss=model_sep_cbam.mean_absolute_error_weight, metrics=['mae'])

	return model, dataGenerFlow
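A brief usage sketch for `modelSelection`; the patch shape and learning rate below are illustrative assumptions, and `model_sep_cbam` must be importable as in the snippet:

model, flow = modelSelection("unet", patch_shape=(128, 128, 10), learnRate=1e-4)
model.summary()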
Example #5
    def build_model(self):
        self.G = unet().cuda()
        if self.parallel:
            self.G = nn.DataParallel(self.G)

        # print networks
        print(self.G)
Example #6
def main(argv=None):
    unet_graph = tf.Graph()
    with unet_graph.as_default():
        input_images = tf.placeholder(tf.float32,
                                      shape=[None, None, None, 3],
                                      name="input")
        unet_output = unet.unet(name="UNET", input_data=input_images)
        saver = tf.train.Saver(tf.global_variables())

    with tf.Session(graph=unet_graph) as sess:
        ckpt_state = tf.train.get_checkpoint_state(
            checkpoint_dir=FLAGS.ckpt_restore_path)
        model_path = os.path.join(
            FLAGS.ckpt_restore_path,
            os.path.basename(ckpt_state.model_checkpoint_path))
        saver.restore(sess=sess, save_path=model_path)

        image_list = get_images()
        for img_name in image_list:
            im = cv2.imread(img_name)[:, :, ::-1]
            output = sess.run(unet_output,
                              feed_dict={input_images: [im / 255.0]})
            classes = np.argmax(output[0], axis=-1)  # per-pixel class map
            image = np.where(classes != 0, 255, 0).astype(np.uint8)
            print(img_name, classes)
            # write one mask per input image
            cv2.imwrite(os.path.join(FLAGS.output_path,
                                     os.path.basename(img_name)), image)
Example #7
def main(init_channels, batch_size, num_epochs):
    """ Entry point for training U-Net model.
        init_channels:  Determines the size of the U-Net model.
                        The default is 64
        batch_size:     Default is 2. Since the image size is large
                        it's easy to get out-of-memory errors.
        num_epochs:     Default is 100. It's not uncommon for training
                        to move slowly in the beginning and then take
                        off after even 50 epochs.

        Data is loaded from data_generator.py
        U-Net model is defined in unet.py
        csv log and graph are saved in ./logs/
        Trained model is saved in ./models/ with format unet-{init_channels}_{start_time}.h5 """

    now = datetime.now().strftime('%Y-%m-%d_%H:%M:%S')
    model_name = 'unet-{}_'.format(init_channels) + now + '.h5'

    # Set up our model, which is defined in unet.py
    model = unet(init_channels)
    model.summary()

    # Extra data augmentation can be done by changing the data_generator.py
    # file. The data augmentation arguments used are returned and logged
    train_generator, img_gen_args, mask_gen_args = train_gen(batch_size)
    validation_generator = valid_gen(batch_size)

    num_train_samples = len(os.listdir(os.path.join(TRAIN_IMAGE_DIR, '0')))
    num_valid_samples = len(os.listdir(os.path.join(VALID_IMAGE_DIR, '0')))

    steps_per_epoch = num_train_samples // batch_size
    validation_steps = num_valid_samples // batch_size

    log(model_name, batch_size, num_epochs, img_gen_args, mask_gen_args)

    # Basic callbacks
    checkpoint = callbacks.ModelCheckpoint(filepath=MODEL_DIR + model_name,
                                           monitor='val_loss',
                                           save_best_only=True)

    early_stop = callbacks.EarlyStopping(monitor='val_loss', patience=50)

    csv_logger = callbacks.CSVLogger(LOG_DIR + model_name.split('.')[0] +
                                     '.csv')

    callback_list = [checkpoint, early_stop, csv_logger]

    # Training begins
    history = model.fit_generator(train_generator,
                                  steps_per_epoch=steps_per_epoch,
                                  epochs=num_epochs,
                                  verbose=1,
                                  callbacks=callback_list,
                                  validation_data=validation_generator,
                                  validation_steps=validation_steps)

    plot_history(history, model_name[:-2] + 'png')

    model.save('./models/' + model_name)
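With the documented defaults, exercising the entry point is a one-liner; the call below is hypothetical:

# train with the documented defaults (init_channels=64, batch_size=2, 100 epochs)
main(init_channels=64, batch_size=2, num_epochs=100)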
Example #8
def run_training(opts):

    train_dir = opts.train_dir
    train_dir_slices = os.path.join(train_dir, 'slices/imgs/')
    train_dir_masks = os.path.join(train_dir, 'masks/imgs/')

    val_dir = opts.val_dir
    val_dir_slices = os.path.join(val_dir, 'slices/imgs/')
    val_dir_masks = os.path.join(val_dir, 'masks/imgs/')

    test_dir = opts.test_dir
    test_dir_slices = os.path.join(test_dir, 'slices/imgs/')
    test_dir_masks = os.path.join(test_dir, 'masks/imgs/')

    BATCH_SIZE = opts.batch_size
    SEED = opts.seed
    lr = opts.lr
    loss = opts.loss
    epochs = opts.epochs
    n_levels = opts.n_levels

    IMG_SIZE = (40, 40)

    if loss == 'BCE':
        loss = 'binary_crossentropy'
    elif loss == 'DICE':
        loss = dice_coefficient_loss

    NUM_TRAIN = 386
    NUM_TEST = 158

    EPOCH_STEP_TRAIN = NUM_TRAIN // BATCH_SIZE
    EPOCH_STEP_TEST = NUM_TEST // BATCH_SIZE

    train_generator = create_segmentation_generator_train(
        train_dir + '/slices/', train_dir + '/masks/', BATCH_SIZE, SEED)
    val_generator = create_segmentation_generator_test(val_dir + '/slices/',
                                                       val_dir + '/masks/',
                                                       BATCH_SIZE, SEED)
    test_generator = create_segmentation_generator_test(
        test_dir + '/slices/', test_dir + '/masks/', BATCH_SIZE, SEED)

    model = unet(4, out_channels=1)
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=lr),
                  loss=loss,
                  metrics=['accuracy'])

    #model.summary()
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=EPOCH_STEP_TRAIN,
                                  validation_data=val_generator,
                                  validation_steps=EPOCH_STEP_TEST,
                                  epochs=epochs,
                                  shuffle=True)

    model.save('UNET_model.h5')
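A sketch of how `run_training` might be driven without a CLI, with a `SimpleNamespace` standing in for the parsed options; every path and hyperparameter value here is an assumption:

from types import SimpleNamespace

opts = SimpleNamespace(train_dir='data/train', val_dir='data/val',
                       test_dir='data/test', batch_size=16, seed=1,
                       lr=1e-4, loss='DICE', epochs=50, n_levels=4)
run_training(opts)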
Example #9
def train_(model_filename, EPOCH_NUMBER, STEP_PER_EPOCH, loss_loaded,
           BATCH_SIZE, NEW_IMAGES_FOLDER, norm):
    '''
    training function.

    :param model_filename: name of the model (to save as or to load)
    :param EPOCH_NUMBER: number of epochs
    :param STEP_PER_EPOCH: number of steps per epoch
    :param loss_loaded: loss to use during training
    :param BATCH_SIZE: batch size
    :param NEW_IMAGES_FOLDER: folder containing the generated training images
    :param norm: boolean, if true normalizes the data
    :return model: the trained model
    '''

    print("Loading images")
    x_train, y_train = load_images(NEW_IMAGES_FOLDER, norm)

    print(
        f"Beginning training using the following parameters:\n    Model filename: {model_filename},\n    Number of epoch: {EPOCH_NUMBER},\n    Steps per epochs: {STEP_PER_EPOCH},\n    Batch size: {BATCH_SIZE}"
    )

    model = unet(DIMS)

    cp = ModelCheckpoint(model_filename,
                         verbose=1,
                         monitor='val_loss',
                         save_best_only=True)
    lr = ReduceLROnPlateau(monitor='val_loss',
                           factor=0.5,
                           patience=7,
                           verbose=1,
                           mode='min',
                           min_lr=1e-9)
    es = EarlyStopping(monitor='val_loss', patience=25, mode='min')

    model.compile(optimizer='adam',
                  loss=loss_loaded,
                  metrics=['accuracy', f1_m])

    checkpoint_path = "train_checkpoints/{}.ckpt".format(model_filename)
    checkpoint_dir = os.path.dirname(checkpoint_path)

    cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path,
                                                     save_weights_only=True,
                                                     verbose=1)

    callbacks = [
        cp, lr, es,
        tf.keras.callbacks.TensorBoard(log_dir='./logs'), cp_callback
    ]
    results = model.fit(x_train,
                        y_train,
                        validation_split=0.2,
                        batch_size=BATCH_SIZE,
                        epochs=EPOCH_NUMBER,
                        shuffle=True,
                        callbacks=callbacks)

    return model
Example #10
def main():
    args = parse_params()
    tf.keras.backend.set_floatx(args.dtype)
    set_seed(args.seed)

    # get data
    if args.host_generated_data:
        X, y, X_test = generate_numpy_data(args)
    else:
        X, y, X_test = get_images_labels(args)

    ds_infer = predict_data_set(args, X_test)
    if args.eval and args.kfold > 1:
        # k fold cross validation
        kfold = KFold(n_splits=args.kfold, shuffle=True)
        # Define per-fold accuracy and loss
        loss_per_fold = []
        acc_per_fold = []
        fold_no = 0

        # Generate indices to split data into training and test set.
        for train, val in kfold.split(X, y):
            logger.info(f"Fold: {fold_no} ........")
            ds_train = tf_fit_dataset(args, X[train], y[train])
            ds_eval = tf_eval_dataset(args, X[val], y[val])
            # cross validation on UNet for each fold
            eval_accuracy, eval_loss = unet(args, ds_train, ds_eval, ds_infer)
            if eval_loss is not None and eval_accuracy is not None:
                loss_per_fold.append(eval_loss)
                acc_per_fold.append(eval_accuracy)

            fold_no += 1

        logger.info(f"{args.kfold}-fold cross validation results:")
        logger.info(
            f"Loss:\n {pretty_print_nested_list(loss_per_fold)}, \n mean:\n {np.mean(np.array(loss_per_fold), axis=0)}."
        )
        logger.info(
            f"Accuracy:\n {pretty_print_nested_list(acc_per_fold)}, \n mean:\n {np.mean(np.array(acc_per_fold), axis=0)}."
        )
    else:
        # no cross validation
        ds_train = tf_fit_dataset(args, X[:24], y[:24])
        ds_eval = tf_eval_dataset(args, X[24:], y[24:])

        unet(args, ds_train, ds_eval, ds_infer)
Example #11
def create_network(input_shape, name):
    output_path = "checkpoints/"
    if not os.path.exists(output_path):
        os.makedirs(output_path)

    tf.reset_default_graph()

    raw = tf.placeholder(tf.float32, shape=input_shape)
    raw_batched = tf.reshape(raw, (1, 1) + input_shape)

    out = unet(raw_batched, 12, 5, [[1, 2, 2], [2, 2, 2], [2, 2, 2]])

    logits_batched = conv_pass(out,
                               kernel_size=1,
                               num_fmaps=1,
                               num_repetitions=1,
                               activation=None)

    output_shape_batched = logits_batched.get_shape().as_list()
    output_shape = output_shape_batched[1:]  # strip the batch dimension

    logits = tf.reshape(logits_batched, output_shape)
    probs = tf.sigmoid(logits)
    gt_labels = tf.placeholder(tf.float32, shape=output_shape)

    loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=gt_labels,
                                           logits=logits,
                                           reduction=tf.losses.Reduction.MEAN)

    tf.summary.scalar('loss_total', loss)
    merged = tf.summary.merge_all()

    opt = tf.train.AdamOptimizer(learning_rate=1e-5,
                                 beta1=0.95,
                                 beta2=0.999,
                                 epsilon=1e-8)
    optimizer = opt.minimize(loss)

    tf.train.export_meta_graph(filename=os.path.join(output_path, name +
                                                     '.meta'))

    print("input shape : %s" % (input_shape, ))
    print("output shape: %s" % (output_shape, ))

    names = {
        'raw': raw.name,
        'logits': logits.name,
        'labels': probs.name,
        'gt_labels': gt_labels.name,
        'loss': loss.name,
        'optimizer': optimizer.name,
        'summary': merged.name,
        'input_shape': input_shape,
        'output_shape': output_shape
    }
    with open(os.path.join(output_path, name + '_config.json'), 'w') as f:
        json.dump(names, f)
Example #12
    def __init__(self, model_dir, sess):
        self.sess = sess
        self.images = tf.placeholder(tf.float32, [None, vh, vw, 3])
        _, self.pred = unet(self.images, False)

        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(model_dir)

        print("loading model: ", ckpt.model_checkpoint_path)
        saver.restore(self.sess, ckpt.model_checkpoint_path)
Example #13
 def build_model(self):
     self.G = unet().to(self.device)
     if self.parallel:
         self.G = nn.DataParallel(self.G)
     # load weights, mapping to CPU when CUDA is unavailable
     map_location = None if torch.cuda.is_available() else torch.device('cpu')
     self.G.load_state_dict(torch.load(
         os.path.join(self.model_save_path, self.model_name),
         map_location=map_location))
     self.G.eval() 
     # print networks
     print("Model Loaded!")
Example #14
def generateCNN(X, pretrained_weights=None):  # run the CNN over the data
    if pretrained_weights is None:
        print("Warning: no model has been loaded")
    mod = unet(pretrained_weights)  # load model

    # prepare the data: one flattened prediction per input image
    X_nu = np.zeros(shape=(X.shape[0], (X.shape[1])**2))
    print(X_nu.shape)
    for i in range(X.shape[0]):
        X_nu[i, :] = mod.predict(X[i, :, :])
    return X_nu
Example #15
def generator(inputs,
              layers,
              features_root=64,
              filter_size=3,
              pool_size=2,
              output_channel=3):
    print("layers {}\nfeatures_root {}\nfilter_size {}\npool_size {}\noutput_channel {}"
          .format(layers, features_root, filter_size, pool_size,
                  output_channel))
    return unet.unet(inputs, layers, features_root, filter_size, pool_size,
                     output_channel)
Example #16
 def __init__(self, lr=1e-3, pos_weight=1):
     self.unet = unet(is_deconv=False)
     
     #weight = torch.from_numpy(np.array(weight)).float()
     #self.loss = nn.CrossEntropyLoss(weight=weight)
     self.pos_weight = pos_weight
     self.loss = nn.BCELoss()
     self.optimizer = optim.Adam(self.unet.parameters(), lr=lr)
     
     if torch.cuda.is_available():
         self.loss.cuda()
         self.unet.cuda()
Example #17
    def baseline(devices: List[int]) -> Stuffs:
        B, C = 6, 72

        model = unet(depth=5,
                     num_convs=B,
                     base_channels=C,
                     input_channels=3,
                     output_channels=1)
        device = devices[0]
        model.to(device)

        return model, B, C, [torch.device(device)]
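A quick sanity-check sketch for this baseline configuration, assuming `baseline` is exposed as a callable and GPU 0 exists; the input resolution is a hypothetical choice:

import torch

model, B, C, devices = baseline([0])  # single-GPU baseline
x = torch.randn(1, 3, 192, 192, device=devices[0])  # resolution is an assumption
y = model(x)  # single-channel segmentation output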
Example #18
    def build_model(self):

        self.G = unet().cuda()
        if self.parallel:
            self.G = nn.DataParallel(self.G)

        # Optimizer over only the parameters that require gradients
        self.g_optimizer = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.G.parameters()),
            self.g_lr, [self.beta1, self.beta2])

        # print networks
        print(self.G)
Example #19
    def pipeline4(devices: List[int]) -> Stuffs:
        B, C = 24, 160
        balance = [472, 54, 36, 515]

        model: nn.Module = unet(depth=5,
                                num_convs=B,
                                base_channels=C,
                                input_channels=3,
                                output_channels=1)
        model = cast(nn.Sequential, model)
        model = GPipe(model, balance, devices=devices, chunks=32)

        return model, B, C, list(model.devices)
Example #20
    def pipeline8(devices: List[int]) -> Stuffs:
        B, C = 48, 160
        balance = [800, 140, 62, 36, 36, 36, 36, 987]

        model: nn.Module = unet(depth=5,
                                num_convs=B,
                                base_channels=C,
                                input_channels=3,
                                output_channels=1)
        model = cast(nn.Sequential, model)
        model = GPipe(model, balance, devices=devices, chunks=128)

        return model, B, C, list(model.devices)
Example #21
def run(model_filename, subname, train, EPOCH_NUMBER, STEP_PER_EPOCH, loss,
        IMAGES_GEN_NUMBER, NEW_IMAGES_FOLDER, BATCH_SIZE, norm):
    '''
    main function: either launches a new training or loads a saved model,
    then computes a submission file

    :param model_filename: name of the model (to save as or to load)
    :param subname: name of the submission file
    :param train: boolean, if true launch a new training
    :param EPOCH_NUMBER: number of epochs
    :param STEP_PER_EPOCH: number of steps per epoch
    :param loss: loss to use during training
    :param IMAGES_GEN_NUMBER: number of images to generate
    :param NEW_IMAGES_FOLDER: folder where the generated images are stored
    :param BATCH_SIZE: batch size
    :param norm: boolean, if true images are normalized
    :return: 1
    '''
    if (not train):
        list_models = os.listdir("../models/")
        list_models = re.findall(r'(\S+)\.index', ' '.join(list_models))
        if model_filename not in list_models:
            print(bcolors.FAIL + "[ERROR]" + bcolors.ENDC +
                  "  Model not found")
            print("Please choose a model in the following ones:", list_models)
            print("or train a new model by appending flag -train")
            exit()
    if (loss == 'binary_ce'):
        loss_loaded = tf.keras.losses.binary_crossentropy
    elif (loss == 'dice_bce'):
        loss_loaded = dice_and_binary_crossentropy
    elif (loss == 'jaccard_bce'):
        loss_loaded = jaccard_and_binary_crossentropy
    else:
        print(bcolors.FAIL + "[ERROR]" + bcolors.ENDC + "  Loss not found")
        print("possibles loss are: binary_ce, dice_bce, jaccard_bce")
        exit()

    if train is False:
        print("loading model")
        model = unet(DIMS)
        model.load_weights("../models/{}".format(model_filename))
    else:
        print("generating dataset in {} folder".format(NEW_IMAGES_FOLDER))
        generate_images(NEW_IMAGES_FOLDER, IMAGES_GEN_NUMBER)
        model = train_(model_filename, EPOCH_NUMBER, STEP_PER_EPOCH,
                       loss_loaded, BATCH_SIZE, NEW_IMAGES_FOLDER, norm)
    print("Creating Submission file")
    create_submission(model, subname, norm)
    print(bcolors.OKGREEN + "[Success]" + bcolors.ENDC +
          "  Submission file successfully created")

    return 1
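A hypothetical invocation of `run`; every argument value below is illustrative:

run(model_filename='unet_dice', subname='submission.csv', train=True,
    EPOCH_NUMBER=100, STEP_PER_EPOCH=200, loss='dice_bce',
    IMAGES_GEN_NUMBER=1000, NEW_IMAGES_FOLDER='../data/generated/',
    BATCH_SIZE=8, norm=True)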
Example #22
def get_model(model_name, input_size, one_hot_label, change=False):

    if model_name == "unet":
        model = unet.unet(input_size=input_size)
    elif model_name == "vgg_unet":
        model = vgg_unet.vgg10_unet(input_size,'imagenet',one_hot_label)
    elif model_name == "lanenet":
        h,w,c = input_size
        print('Input size ~~~~~', input_size)
        model = lanenet.build_lanenet(input_shape=input_size, input_shape1=[h/2, w/2, c], input_shape2=[h/4, w/4, c],
            input_shape3=[h/8, w/8, c],input_shape4=[h/16, w/16, c], one_hot_label=one_hot_label)
    elif model_name == "lanenet_att" or model_name == 'attention_lane':
        h,w,c = input_size
        print('Input size ~~~~~', input_size)
        model = lanenet_att.build_lanenet_att(input_shape=input_size, input_shape1=[h/2, w/2, c], input_shape2=[h/4, w/4, c],
            input_shape3=[h/8, w/8, c],input_shape4=[h/16, w/16, c], one_hot_label=one_hot_label)
    elif model_name == "lanenet2":
        h,w,c = input_size
        print('Input size ~~~~~', input_size)
        model = lanenet2.build_lanenet2(input_shape=input_size, input_shape1=[h/2, w/2, c], input_shape2=[h/4, w/4, c],
            input_shape3=[h/8, w/8, c],input_shape4=[h/16, w/16, c], one_hot_label=one_hot_label)
    elif model_name == "vgg_fusion":
        model = unetVgg.vgg10_fusion(input_size,'imagenet',one_hot_label)
    elif model_name == "gcn" or model_name == "fusionGCN":
        model = fusionGCN.build_fusionGCN(input_size,one_hot_label=one_hot_label)
    elif model_name == "ce" or model_name=='cenet':
        model = ce_net.build_ce(input_size,one_hot_label)
    elif model_name == "res_unet" or model_name=='resunet':
        model = res_unet.build_res_unet(input_size, one_hot_label)
    elif model_name == "multiunet" or model_name == "munet":
        model = multiUnet.build_multiUnet(input_size, one_hot_label)
    elif model_name == "fusionNet" or model_name == "fusionnet":
        model = fusionNet.build_fusion(input_size,one_hot_label=one_hot_label)
    elif model_name == "deep" or model_name == "deepunet":
        model = deepUnet.build_deep(input_size,one_hot_label=one_hot_label)
    elif model_name == "unetpp" or model_name == "unet++":
        model = unetpp.build_unetpp(input_size,one_hot_label=one_hot_label)
    elif model_name == "fusionNet2" or model_name == "fusionnet2":
        model = fusionNet2.build_fusion(input_size,one_hot_label=one_hot_label)
    elif model_name == "fusionnet_ppl":
        model = fusionnet_ppl.build_model(input_size,one_hot_label=one_hot_label)
    elif model_name == "temp":
        model = temp.build_temp(input_size,one_hot_label=one_hot_label)
    elif model_name == "fusionnet_atten":
        model = fusionnet_atten.build_model(input_size,one_hot_label=one_hot_label)
    else:
        model = None

    return model
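A short usage sketch for `get_model`; the input size is an assumption, and the `None` fallthrough for unknown names is worth guarding:

model = get_model("unet", input_size=(256, 256, 3), one_hot_label=False)
if model is None:
    raise ValueError("unknown model name")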
Example #23
def main():
    model = unet()
    model.load_weights('modelsave/model_weights_iou222test_agian.h5')

    print("weights loaded")

    x_test, y_test = getdata(1)
    score = model.evaluate(x_test, y_test, batch_size=32, verbose=1)
    # score = model.evaluate_generator(generator_data(4,1),steps=200)
    print("score = ", score)

    print("predict")
    data_test, name = predicted_data()
    result = model.predict(data_test, batch_size=32)
    resultsave("predicts", result, name)
Example #24
def get_unet(in_rows, in_col, in_ch):
    model = unet.unet((in_rows, in_col, in_ch),
                      out_ch=out_ch,
                      start_ch=16,
                      depth=3,
                      inc_rate=2.,
                      activation='relu',
                      dropout=.5,
                      batchnorm=False,
                      maxpool=True,
                      upconv=True,
                      residual=False)
    model.compile(optimizer=Adam(lr=1e-4),
                  loss=mean_squared_error,
                  metrics=[mean_squared_error, mean_absolute_error])
    model.summary()
    return model
Example #25
def predict_tiles(index, path='Images/000029.las'):
    model = unet(pretrained_weights="unet_turret.hdf5", input_size=(256, 256, 1))

    res = (256, 256)
    x, all_files = read_images_from_folder(path, 'images', thresholding=False, target_size=res, maxNImages=0,
                                           showImages=False)

    predicted_tiles = []
    file_names = []
    for i in range(0, x.shape[0], 3):
        images = x[i:i + 3]
        files = all_files[i:i + 3]

        p = model.predict(images)

        for pr in p:
            predicted_tiles.append(pr.reshape(res))
        for f in files:
            file_names.append(os.path.basename(f))

    return join_tiles(predicted_tiles, file_names, index, res=(256,256))
Example #26
def _get_model(model_type, sess, process_size):
  """ Return a model instance to use 

  """
  x_dims = [process_size, process_size, 3]
  if model_type == 'densenet':
    model = densenet(sess=sess, x_dims=x_dims)
  if model_type == 'densenet_s':
    model = densenet_s(sess=sess, x_dims=x_dims)
  if model_type == 'fcn8s':
    model = fcn8s(sess=sess, x_dims=x_dims)
  if model_type == 'fcn8s_s':
    model = fcn8s_s(sess=sess, x_dims=x_dims)
  if model_type == 'unet':
    model = unet(sess=sess, x_dims=x_dims)
  if model_type == 'unet_s':
    model = unet_s(sess=sess, x_dims=x_dims)

  return model
Example #27
def main(_):
    """ main func for train

    :return:
    """
    with tf.Session() as sess:

        name_val, image_val, label_val = batch_input(tfrecord_path_val, batch_size=16)

        label_val_ = tf.reshape(label_val, (-1, 3))

        model_val = unet(image_val, 'val')

        loss_val = total_loss(model_val['output'], label_val_, 'val')
        acc_val = accuracy(model_val['output'], label_val_, 'val')

        logging.info('variable initialization')

        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop():
                logging.info('running images through the network, please wait...')
                pc_bar = ShowProcess(iter_each_epoch, '')

                for epoch_i in range(epochs):
                    print('Epoch {}/{}'.format(epoch_i, epochs))
                    for j in range(1, iter_each_epoch + 1):
                        loss_val_, acc_val_ = evaluate(sess, loss_val, acc_val)

                        pc_bar.show_process(j, iter_each_epoch, loss_val_, acc_val_)

                coord.request_stop()
        except tf.errors.OutOfRangeError:
            print('done! limit epochs achieved.')
        finally:
            coord.request_stop()
            coord.join(threads)
Example #28
def main():
    model = unet()
    # model.load_weights('modelsave/my_model_weights.h5')

    print("loading data")
    x_train, y_train = getdata(0)
    x_valid, y_valid = getdata(1)
    x_test, y_test = getdata(2)
    print("start training")
    model.fit(x_train, y_train, validation_data=(x_valid, y_valid),
              epochs=120, batch_size=32, verbose=1)

    # model = load_model('modelsave/my_model.h5')
    # model.fit_generator(generator_data(4,0),steps_per_epoch=3200,epochs=10)
    print("training finish")

    score = model.evaluate(x_test, y_test, batch_size=32, verbose=1)
    # score = model.evaluate_generator(generator_data(4,1),steps=200)
    print("score = ", score)


    model.save("modelsave/model_iou_resUNET_nodrop.h5")
    model.save_weights("modelsave/unet_model_weights_iou_resUNET_nodrop.h5")
Example #29
def main(argv):
	with tf.Session() as sess:
		input_images = tf.placeholder(tf.float32, shape=[None, None, None, 3],
			name='input_images')
		input_score_maps = tf.placeholder(tf.float32, shape=[None, None, None, 1],
			name='input_score_maps')
		input_geo_maps = tf.placeholder(tf.float32, shape=[None, None, None, 5],
			name='input_geo_maps')
		input_training_masks = tf.placeholder(tf.float32, shape=[None, None, None, 1],
			name='input_training_masks')
		input_enhancement_mask = tf.placeholder(tf.float32, shape=[None, None, None, 3],
			name='input_enhancement_mask')

		output_images = tf.multiply(unet(tf.divide(input_images, 255.)), 255.)
		output_images = tf.multiply(input_enhancement_mask, output_images)
		global_step = tf.Variable(0, trainable=False)

		loss, f_score, f_geometry = east_loss(sess, output_images,
			input_score_maps, input_geo_maps, input_training_masks)

		restore_from_dir(sess, FLAGS.checkpoint_joint_model)
		
		data_generator = data_loader.get_batch(num_workers=FLAGS.num_readers,
			input_size=FLAGS.input_size, batch_size=FLAGS.batch_size_per_gpu,
			image_path=FLAGS.validation_data_path, validation=True)

		for i in range(400):
			data = next(data_generator)
			img_icdar = data[0][0]
			im_enhancement_mask = data[1][0]
			im, score, geometry = sess.run([output_images, f_score, f_geometry],
				feed_dict={input_images:[img_icdar],
					input_enhancement_mask:[im_enhancement_mask]})
			im = (im[0]).astype(np.int32)[:, :, ::-1]
			file_name = os.path.split(data[6][0])[-1]
			# cv2.imwrite(os.path.join('sample_outputs/enhancement_output/unet', file_name),
			# 	change_img(im))
			plt.imshow(im[:, :, ::-1])
			plt.show()
Example #30
 def init_model(self):
     """ Initialize model. """
     pool_size = (2, 2)
     conv_size = 3
     upconv_size = 2
     nb_conv_1 = 64
     nb_conv_2 = 128
     nb_conv_3 = 256
     nb_conv_4 = 512
     nb_conv_5 = 1024
     dropout_rate = self.dropout_rate
     # lk_alpha = 0.1
     inputs = Input(
         (self.window_size, self.window_size, self.channels_size))
     self.model = unet(
         inputs,
         dropout_rate,
         pool_size,
         conv_size,
         upconv_size,
         nb_conv_1,
         nb_conv_2,
         nb_conv_3,
         nb_conv_4,
         nb_conv_5,
         self.lk_alpha,
     )
     # select loss function and metrics, as well as optimizer
     self.model.compile(
         optimizer=Adam(lr=1e-4),
         loss="binary_crossentropy",
         metrics=[
             iou,
             f1,
             "accuracy",
         ],
     )