Example 1
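# Load each checkpoint from the weights directory, run prediction with it, and save the results per epoch.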
def save(self):
    for weights in sorted(os.listdir('weights')):
        epoch = weights[:3]
        model = Unet(f'weights/{weights}').create()
        results = model.predict_generator(PredictionGenerator(), verbose=1)
        os.makedirs(f'data/test/prediction{epoch}', exist_ok=True)
        self.save_epoch_results(f'data/test/prediction{epoch}', results)
Example 2
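# Evaluate a trained Unet checkpoint and collect per-layer activation statistics (mean/std) through forward hooks.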
def eval(cfg):
    # Setup seeds
    torch.manual_seed(cfg.get("seed", 1337))
    torch.cuda.manual_seed(cfg.get("seed", 1337))
    np.random.seed(cfg.get("seed", 1337))
    random.seed(cfg.get("seed", 1337))

    # Setup device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Setup evaluation data
    loader_source = get_loader(cfg, "train")
    # data_eval_labels = utils.recursive_glob(os.path.join(cfg["data"]["path"], 'labels'))

    # Setup model
    model = Unet(cfg).to(device)
    checkpoint = torch.load(cfg["training"]["checkpoint"])
    model.load_state_dict(checkpoint["model_state"])
    stats = None
    model.eval()
    for images, labels in tqdm.tqdm(loader_source):
        model.set_input(images, labels)
        model.forward()
        if stats is None:
            stats = [StatsRecorder() for i in range(len(model.hooks))]
        for i, hook in enumerate(model.hooks):
            activation = hook.output
            b, c, h, w = activation.shape
            activation = activation.transpose(0,
                                              1).reshape(c,
                                                         -1).transpose(0, 1)
            stats[i].update(activation.cpu().data.numpy())

    print([s.mean for s in stats])
    print([s.std for s in stats])
Example 3
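# Single-image inference: predict per-pixel classes and map them to a colour palette before saving the result.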
def main():
    input_size = (320, 480, 3)
    classes = 21
    weight_path = './checkpoint/final_stage.h5'
    filename = '../seg_train_images/seg_train_images/train_1118.jpg'
    color_index = [[0, 0, 255], [193, 214, 0], [180, 0, 129], [255, 121, 166],
                   [255, 0, 0], [65, 166, 1], [208, 149, 1], [255, 255, 0],
                   [255, 134, 0], [0, 152, 225], [0, 203, 151], [85, 255, 50],
                   [92, 136, 125], [69, 47, 142], [136, 45, 66], [0, 255, 255],
                   [215, 0, 255], [180, 131, 135], [81, 99, 0], [86, 62, 67]]
    net = Unet(input_size, classes)
    net.load_weights(weight_path)
    img = Image.open(filename).resize((input_size[1], input_size[0]))
    img = np.asarray(img, dtype='float32')
    img = img / 255.0
    img = np.expand_dims(img, axis=0)
    pred = net.predict(img)
    b, w, h, c = pred.shape
    res = []
    for i in range(w):
        tmp = []
        for j in range(h):
            tmp.append(color_index[np.argmax(pred[0, i, j, :])])
        res.append(tmp)
    img = Image.fromarray(np.asarray(res, dtype='uint8'))
    img.save('result.png')
Example 4
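# Build train and eval Unet instances that share a single TensorFlow graph and session.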
def createModel(hparam):
    graph = tf.Graph()
    with graph.as_default():
        train_input, test_input = prepare_dataset(hparam)
        sess = tf.Session()
        train_model = Unet(hparam, train_input, 'train')
        eval_model = Unet(hparam, test_input, 'eval')
    return (graph, sess, train_model, eval_model)
Example 5
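# Predict a binary mask for one grayscale image and write it to disk with OpenCV.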
def predict(image_path, checkpoint_path, save_path):
    model = Unet()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.load_state_dict(torch.load(checkpoint_path, map_location=device))

    image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    size = image.shape
    image = Compose([
        ToTensor(),
        Resize((512, 512)),
    ])(image)
    image = image.unsqueeze(0)

    model.eval()
    with torch.no_grad():
        mask = model(image)[0]
    mask[mask < 0.5] = 0
    mask[mask >= 0.5] = 255
    mask = Resize(size)(mask)
    mask = mask.numpy()

    cv2.imwrite(save_path, mask[0])
Example 6
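# Basic PyTorch training loop with BCE loss and Adam; weights are saved once training finishes.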
def train(args):
    my_dataset = MyDataset("../data/train", transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(my_dataset, batch_size=args.batch_size, shuffle=True, num_workers=1)
    model = Unet(3, 1).to(device)
    model.train()
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    num_epochs = args.epochs
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        data_size = len(dataloaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in dataloaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d, train_loss:%0.3f" % (step, (data_size - 1) // dataloaders.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss))
    torch.save(model.state_dict(), 'model_weights.pth')
    return model
Example 7
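# Construct a Unet from command-line flags and either train or test it.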
def main(_):
    pp.pprint(FLAGS.__flags)

    if FLAGS.height is None:
        FLAGS.height = FLAGS.width

    unet = Unet(width=FLAGS.width,
                height=FLAGS.height,
                learning_rate=FLAGS.learning_rate,
                data_set=FLAGS.data_set,
                test_set=FLAGS.test_set,
                result_name=FLAGS.result_name,
                ckpt_dir=FLAGS.ckpt_dir,
                logs_step=FLAGS.logs_step,
                restore_step=FLAGS.restore_step,
                hidden_num=FLAGS.hidden_num,
                epoch_num=FLAGS.epoch_num,
                batch_size=FLAGS.batch_size,
                num_gpu=FLAGS.num_gpu,
                is_train=FLAGS.is_train,
                w_bn=FLAGS.w_bn)

    show_all_variables()

    if FLAGS.is_train:
        unet.train()
    else:
        unet.test()
Example 8
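# Run test-set inference, refine each prediction with a dense CRF, and save image/label/prediction triplets.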
def main(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    d = dcrf.DenseCRF2D(512, 512, 2)

    down_method = args.down_method
    up_method = args.up_method
    separable = args.separable

    ds = DataSetWrapper(args.batch_size, args.num_workers, 0.2)
    test_dl = ds.get_data_loaders(train=False)

    model = Unet(input_dim=1,
                 separable=separable,
                 down_method=down_method,
                 up_method=up_method)
    model = nn.DataParallel(model).to(device)

    load_state = torch.load(
        f'./checkpoint/{down_method}_{up_method}_{separable}.ckpt')

    model.load_state_dict(load_state['model_state_dict'])

    model.eval()
    name = 0
    with torch.no_grad():
        for (img, label) in test_dl:
            imgs, labels = img.to(device), label.to(device)
            preds = model(imgs)
            name += 1
            for i in range(args.batch_size):
                img, label, pred = imgs[i, :], labels[i, :], preds[i, :]

                probs = torch.stack([1 - pred, pred], dim=0).cpu().numpy()
                img, label = img.cpu().numpy(), label.cpu().numpy()
                pairwise_energy = create_pairwise_bilateral(sdims=(10, 10),
                                                            schan=(0.01, ),
                                                            img=img,
                                                            chdim=0)
                U = unary_from_softmax(probs)
                d = dcrf.DenseCRF2D(512, 512, 2)
                d.setUnaryEnergy(U)
                d.addPairwiseEnergy(pairwise_energy, compat=10)

                Q = d.inference(100)
                map = np.argmax(Q, axis=0).reshape((512, 512))
                print(map.shape)

                img = (255. / img.max() * (img - img.min())).astype(np.uint8)
                label = (255. / label.max() * (label - label.min())).astype(
                    np.uint8)
                pred = (255. / map.max() * (map - map.min())).astype(np.uint8)

                img = Image.fromarray(img[0, :], mode='L')
                label = Image.fromarray(label[0, :], mode='L')
                pred = Image.fromarray(pred, mode='L')

                img.save(f'./results/{name}_{i}_i.png')
                label.save(f'./results/{name}_{i}_l.png')
                pred.save(f'./results/{name}_{i}_p.png')
Example 9
def main(FLAGS):
    """Train and validate the Unet model."""
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    #data directory
    data_dir = FLAGS.dataset_dir
    #log_directory
    log_dir = FLAGS.log_dir
    # Hyper and other parameters
    train_batch_size = FLAGS.train_batch_size
    val_batch_size = FLAGS.val_batch_size
    aug_flag = FLAGS.aug
    num_epochs = FLAGS.epochs
    num_classes = 2
    # get the train and validation dataloaders
    dataloaders = get_dataloaders(data_dir, train_batch_size, val_batch_size,
                                  aug_flag)
    model = Unet(3, num_classes)

    # Run training on multiple GPUs when available
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model, device_ids=[0, 1])
    else:
        print("No multiple GPUs found")
    model.to(device)
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=0.02,
                          momentum=0.9,
                          weight_decay=0.0005)
    #optimizer = optim.Adam(model.parameters(),lr = learning_rate)
    exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
    plotter = VisdomLinePlotter(env_name='Unet Train')
    # uncomment for learning rate scheduler
    train_val(dataloaders, model, criterion, optimizer, num_epochs, log_dir,
              device)
Example 10
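# Load and resize the test images to single-channel arrays, then predict masks with saved Unet weights.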
def make_predictions():

    test_ids = next(os.walk(TRAIN_PATH))[1]
    X_test = np.zeros((len(test_ids), IMG_HEIGHT, IMG_WIDTH, 1),
                      dtype=np.uint8)
    sizes_test = []
    print('Getting and resizing test images ... ')
    sys.stdout.flush()

    for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
        path = TRAIN_PATH + id_
        img = imread(path + '/images/' + id_ + '.png')
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.resize(gray, (IMG_WIDTH, IMG_HEIGHT))
        im_gray = np.expand_dims(gray, axis=-1)
        X_test[n] = im_gray
    print('Done!')
    unet = Unet()
    model = unet.build_model(IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH)
    checkpoint_path = "model_weights.h5"
    # model.save_weights(checkpoint_path)
    model.load_weights(checkpoint_path)
    predict_masks_X = model.predict(X_test)
    return predict_masks_X
Example 11
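# Set up data loaders, the Unet model, binary cross-entropy loss, an SGD optimizer, and a mean-IoU metric.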
def main(args):
    data = load_json(json_path)
    batch_size = args.batch_size
    lr = args.lr
    epochs = args.epochs

    test_loader = Dataset(data['test'], phase='Testing', batch_size=batch_size)
    train_loader = Dataset(data['train'],
                           phase='Training',
                           batch_size=batch_size)

    print(len(test_loader))

    model = Unet()

    losses = tf.keras.losses.BinaryCrossentropy()
    optimizer = tf.keras.optimizers.SGD(learning_rate=lr, momentum=0.9)
    metric = tf.keras.metrics.MeanIoU(num_classes=2)  # background + foreground
Example 12
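# Single-image inference followed by dense-CRF post-processing; the input and refined mask are saved to disk.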
def main(args):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    separable = args.separable
    up_method = args.up_method
    down_method = args.down_method
    img_src = args.s

    ######## Model Setting ########
    model = Unet(input_dim=1,
                 separable=separable,
                 down_method=down_method,
                 up_method=up_method)
    model = nn.DataParallel(model).to(device)
    load_state = torch.load(
        f'./checkpoint/{down_method}_{up_method}_{separable}.ckpt')
    model.load_state_dict(load_state['model_state_dict'])
    model.eval()
    ###############################

    d = dcrf.DenseCRF2D(512, 512, 2)

    img = Image.open(img_src).convert('L')
    img = pad_resize(img, 512)
    img = TF.to_tensor(img)
    img = img.unsqueeze(0)

    with torch.no_grad():
        img = img.to(device)
        pred = model(img)

        probs = torch.stack([1 - pred, pred], dim=0).cpu().numpy()
        img, pred = img.cpu().numpy(), pred.cpu().numpy()
        pairwise_energy = create_pairwise_bilateral(sdims=(10, 10),
                                                    schan=(0.01, ),
                                                    img=img,
                                                    chdim=0)
        U = unary_from_softmax(probs)
        d.setUnaryEnergy(U)
        d.addPairwiseEnergy(pairwise_energy, compat=10)

        Q = d.inference(100)
        map = np.argmax(Q, axis=0).reshape((512, 512))

        img = (255. / img.max() * (img - img.min())).astype(np.uint8)
        pred = (255. / map.max() * (map - map.min())).astype(np.uint8)

        img = Image.fromarray(np.squeeze(img), mode='L')
        pred = Image.fromarray(pred, mode='L')

        img.save(f'../similarity/{img_src[:-4]}_o.png')
        pred.save(f'../similarity/{img_src[:-4]}_p.png')
Example 13
def make_model(i_model, i_loss, img_size, nb_class, weights, lr):
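    # Build either an FCN-32s or one of several Unet variants, then compile it with plain or weighted cross-entropy.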
    FCN = FullyConvolutionalNetwork(img_height=img_size,
                                    img_width=img_size,
                                    FCN_CLASSES=nb_class)
    unet = Unet(img_height=img_size, img_width=img_size, FCN_CLASSES=nb_class)

    def crossentropy(y_true, y_pred):
        return K.mean(-K.sum(y_true * K.log(y_pred + 1e-7), axis=[3]),
                      axis=[1, 2])

    def weighted_crossentropy(y_true, y_pred):
        return K.mean(-K.sum(
            (y_true * weights) * K.log(y_pred + 1e-7), axis=[3]),
                      axis=[1, 2])

    if i_model == 0:
        model = FCN.create_fcn32s()
    elif i_model == 1:
        model = unet.create_unet()
    elif i_model == 2:
        model = unet.create_unet2()
    elif i_model == 12:
        model = unet.create_unet2_gray()
    elif i_model == 3:
        model = unet.create_pix2pix()
    elif i_model == 13:
        model = unet.create_pix2pix_gray()
    elif i_model == 4:
        model = unet.create_pix2pix_2()

    adam = Adam(lr)
    if i_loss == 0:
        model.compile(loss=crossentropy, optimizer=adam)
    if i_loss == 1:
        model.compile(loss=weighted_crossentropy, optimizer=adam)
    return model
Example 14
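# Run the model on the validation set and save/display each predicted mask.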
def test(args):
    my_dataset = MyDataset("../data/val", transform=x_transforms, target_transform=y_transforms)
    dataloaders = DataLoader(my_dataset, batch_size=1)
    model = Unet(3, 1)
    model.load_state_dict(torch.load(args.model_path, map_location='cpu'))
    model.eval()

    plt.ion()
    with torch.no_grad():
        count = 0
        for x, _ in dataloaders:
            y = model(x)
            img_y = torch.squeeze(y).numpy()
            predict_path = os.path.join("../data/predict/", "%03d_predict.png" % count)
            plt.imsave(predict_path, img_y)
            plt.imshow(img_y)
            plt.pause(0.1)
            count += 1
        plt.show()
Example 15
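    # Rebuild the state dict without the 'module.' prefix added by DataParallel, then load it.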
    new_state_dict = OrderedDict()
    for k, v in state_dict['net'].items():
        name = k[7:]  # remove `module.`
        new_state_dict[name] = v
    # load params
    model.load_state_dict(new_state_dict)
    return model


if __name__ == "__main__":

    matches = [100, 200, 300, 400, 500, 600, 700, 800]
    dir_checkpoint = 'checkpoints/'
    device = torch.device('cuda:1' if torch.cuda.is_available() else 'cpu')
    #net = Unet(n_channels=3, n_classes=8, bilinear=True)
    net = Unet(n_channels=3, n_classes=8)
    net.to(device=device)
    #net=load_GPUS(net, dir_checkpoint + 'best_score_model_res50_deeplabv3+.pth', kwargs)
    checkpoint = torch.load(dir_checkpoint + 'student_net.pth',
                            map_location=device)
    net.load_state_dict(checkpoint['net'])
    logging.info("Model loaded !")

    list_path = "data/test.lst"
    output_path = "data/results/"
    img_list = [line.strip('\n') for line in open(list_path)]
    for i, fn in tqdm(enumerate(img_list)):
        save_img = np.zeros((256, 256), dtype=np.uint16)
        logging.info("\nPredicting image {} ...".format(i))
        img = Image.open(fn)
        pre, _ = predict_img(net, img, device)
Example 16
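            # Inner training-loop body: backpropagation, optimizer step, and running-loss bookkeeping.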
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            print("%d/%d,train_loss:%0.3f" %
                  (step,
                   (dt_size - 1) // dataload.batch_size + 1, loss.item()))
        print("epoch %d loss:%0.3f" % (epoch, epoch_loss / step))
    torch.save(model.state_dict(), 'weights_%d.pth' % epoch)
    return model


# Train the model
model = Unet(3, 1).to(device)
batch_size = 1
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters())

dataset = MyDataset(image_path,
                    label_path,
                    train_data,
                    transform=x_transforms,
                    target_transform=y_transforms)
data_loader = data.DataLoader(dataset,
                              batch_size=1,
                              shuffle=True,
                              num_workers=0)

train_model(model, criterion, optimizer, data_loader)
Example 17
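# Compute simple dataset statistics, then threshold Unet predictions on the train/val/test splits.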
IMG_WIDTH = 128
IMG_HEIGHT = 128
IMG_CHANNELS = 3
TRAIN_PATH = './data/stage1_train/'
X, Y = preprocess_data.get_X_Y(IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH, TRAIN_PATH=TRAIN_PATH)
X_train, X_test, Y_train, Y_test = preprocess_data.split_data(X, Y)
img_avg = np.zeros((IMG_HEIGHT, IMG_WIDTH, 1), dtype=np.float32)
img_objects = np.zeros((len(Y), 1), dtype=int)
for i in range(len(Y)):
    img_avg += np.asarray(Y[i], dtype=np.float32)
    img_objects[i] = np.count_nonzero(Y[i])

img_avg = img_avg / len(Y)
print(img_objects)

unet = Unet()
model = unet.build_model(IMG_HEIGHT=IMG_HEIGHT, IMG_WIDTH=IMG_WIDTH)

checkpoint_path = "model_weights.h5"
model.load_weights(checkpoint_path)

# Predict on train, val and test
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
print(preds_test, type(preds_test), preds_test.shape)
# Threshold predictions
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
print(preds_train_t, preds_val_t, preds_test_t)
Example 18
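    # Build the datasets and loaders, restore a saved Unet, and optionally start training.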
    train_dir = "/home/huijian/exps/Data/building_UT/train/"
    test_dir = "/home/huijian/exps/Data/building_UT/test/"

    composed = transforms.Compose([RandomCrop(256)])

    file_path = "/home/huijian/exps/segmentation_unet/model/"
    train_data = BuildingDataset(root_dir=train_dir, transform=composed)
    test_data = BuildingDataset(root_dir=test_dir, transform=composed)

    train_loader = utils.data.DataLoader(train_data,
                                         batch_size=4,
                                         shuffle=True)
    test_loader = utils.data.DataLoader(test_data, batch_size=4, shuffle=True)

    # building the net
    unet = Unet(features=[32, 64])
    print(unet)

    trainer = Trainer(net=unet, file_path=file_path)

    # restore the model
    if True:
        trainer.restore_model()

    # begin training
    if False:
        print("begin training!")
        trainer.train_model(train_loader=train_loader,
                            test_loader=test_loader,
                            epoch=100)
Example 19
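# TensorFlow 1.x training loop with checkpoint restore, summary writing, and periodic model saving.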
def train():
    if FLAGS.load_model is not None:
        checkpoints_dir = "checkpoints/" + FLAGS.load_model.lstrip(
            "checkpoints/")
    else:
        current_time = datetime.now().strftime("%Y%m%d-%H%M")
        checkpoints_dir = "checkpoints/" + current_time.lstrip("checkpoints/")
        try:
            os.makedirs(checkpoints_dir)
        except os.error:
            pass

    graph = tf.Graph()
    with graph.as_default():
        u_net = Unet('Unet',
                     image_size=FLAGS.image_size,
                     norm=FLAGS.norm,
                     learning_rate=FLAGS.learning_rate)

        loss = u_net.loss
        optimizer = u_net.optimizer
        u_net.model()

        summary_op = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(checkpoints_dir, graph)
        saver = tf.train.Saver()
        reader = Reader(FLAGS.data, batch_size=FLAGS.batch_size)
        image_train, image_label = reader.feed()

    config = tf.ConfigProto(log_device_placement=True,
                            allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    with tf.Session(graph=graph, config=config) as sess:
        if FLAGS.load_model is not None:
            checkpoint = tf.train.get_checkpoint_state(checkpoints_dir)
            meta_graph_path = checkpoint.model_checkpoint_path + ".meta"
            restore = tf.train.import_meta_graph(meta_graph_path)
            restore.restore(sess, tf.train.latest_checkpoint(checkpoints_dir))
            step = int(meta_graph_path.split("-")[2].split(".")[0])
        else:
            sess.run(tf.global_variables_initializer())
            step = 0

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            while not coord.should_stop() and step < 100:
                _train, _label = sess.run([image_train, image_label])
                ls, op, summary = sess.run([loss, optimizer, summary_op],
                                           feed_dict={
                                               u_net.x: _train,
                                               u_net.y: _label
                                           })
                train_writer.add_summary(summary, step)
                train_writer.flush()

                if (step + 1) % 5 == 0:
                    logging.info('-----------Step %d:-------------' % step)
                    logging.info('    loss    :{}'.format(ls))

                if (step + 1) % 25 == 0:
                    save_path = saver.save(sess,
                                           checkpoints_dir + "/model.ckpt",
                                           global_step=step)
                    logging.info("Model saved in file: %s" % save_path)

                step += 1

        except KeyboardInterrupt:
            logging.info('Interrupted')
            coord.request_stop()
        except Exception as e:
            coord.request_stop(e)
        finally:
            save_path = saver.save(sess,
                                   checkpoints_dir + "/model.ckpt",
                                   global_step=step)
            logging.info("Model saved in file: %s" % save_path)
            # When done, ask the threads to stop.
            coord.request_stop()
            coord.join(threads)
Example 20
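    # Choose the embedding backbone and the Unet/Unet2 module according to the experiment flags.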
    if not os.path.exists('asset/checkpoints'):
        os.makedirs('asset/checkpoints')
    if not os.path.exists('asset/checkpoints/' + tt.arg.experiment):
        os.makedirs('asset/checkpoints/' + tt.arg.experiment)

    if tt.arg.backbone == 'wrn':
        enc_module = wrn(tt.arg.emb_size)
    elif tt.arg.backbone == 'rn':
        enc_module = ResNet(tt.arg.emb_size)
    else:
        enc_module = EmbeddingImagenet(emb_size=tt.arg.emb_size)
        dcompression = Dcompression(1024, tt.arg.emb_size)
    if tt.arg.transductive == False:
        if unet2_flag == False:
            unet_module = Unet(tt.arg.ks, tt.arg.in_dim, tt.arg.num_ways, 1)
        else:
            unet_module = Unet2(tt.arg.ks_1, tt.arg.ks_2, mode_1, mode_2,
                                tt.arg.in_dim, tt.arg.num_ways, 1)
    else:
        if unet2_flag == False:
            unet_module = Unet(tt.arg.ks, tt.arg.in_dim, tt.arg.num_ways,
                               tt.arg.num_queries)
        else:
            unet_module = Unet2(tt.arg.ks_1, tt.arg.ks_2, mode_1, mode_2,
                                tt.arg.in_dim, tt.arg.num_ways,
                                tt.arg.num_queries)

    if tt.arg.dataset == 'mini':
        train_loader = MiniImagenetLoader(root=tt.arg.dataset_root,
                                          partition='train')
                                std=(0.229, 0.224, 0.225))
Example 21
    # train_dataset.load_images()
    loaders = train_dataset.yield_dataloader(
        num_workers=0,
        batch_size=BATCH_SIZE,
        # auxiliary_df=TGS_Dataset.create_dataset_df(AUX_PATH)
    )

    for i, (train_loader, val_loader) in enumerate(loaders, 1):
        with timer('Fold {}'.format(i)):
            if i < 5:
                continue
            # net = NET(lr=LR, debug=DEBUG, pretrained=PRETRAINED, fold=i, activation=ACTIVATION, comment=COMMENT)
            # model = MODEL(classes=4)
            model = Unet("resnet34",
                         encoder_weights="imagenet",
                         classes=5,
                         activation=None)
            train = SegmentationNetwork(model,
                                        lr=LR,
                                        debug=DEBUG,
                                        fold=i,
                                        comment=COMMENT)
            train.define_criterion(LOSS)
            train.create_optmizer(optimizer=OPTIMIZER,
                                  use_scheduler=USE_SCHEDULER,
                                  milestones=MILESTONES,
                                  gamma=GAMMA,
                                  patience=PATIENCE,
                                  T_max=T_MAX,
                                  T_mul=T_MUL,
                                  lr_min=LR_MIN)
Example 22
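# Thin wrapper that applies the Unet to an input tensor.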
def network(self, x):
    pred = Unet(x)
    return pred
Example 23
        plt.xlim([min_v, max_v])
        plt.savefig(dst + '/prediction_hist.png')
        plt.close()


if __name__ == "__main__":
    # load data
    if 'SS_FPP_CNN' not in in_path:
        train_ds, validate_ds, test_ds, image_idx_validate, image_idx_test = load_digital_data(
            in_path)
    else:
        train_ds, validate_ds, test_ds, image_idx_test = load_SSFPP_data(
            in_path)

    # create an instance of neural network model
    unet = Unet()

    # Create checkpoint for the training of the neural network model.
    # Neural network models including trained parameters will be stored
    # in the checkpoint_dir.
    if not os.path.exists(checkpoint_dir):
        os.mkdir(checkpoint_dir)
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=unet)

    # create a csv file to record training loss
    if not os.path.exists(out_path):
        os.mkdir(out_path)
    with open(out_path + 'training_loss.csv', 'w') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        csv_writer.writerow([
Example 24
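# Build a three-class Unet and load pretrained weights from ./gan.hdf5.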
def load_model():
    num_class = 3
    model = Unet(num_class, input_size=(256, 256, 1), deep_supervision=False)
    model.model.load_weights('./gan.hdf5')
    return model.model
Example 25
                        default=1,
                        help='classes number')
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='Which GPU to use')
    # parser.add_argument('--test-filename', type=str, default='test', help='The test filenames from Benchmarks.')

    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    x_train = tf.placeholder(tf.float32,
                             shape=[None, None, None, 3],
                             name='x_train')
    unet = Unet(batch_size=args.batch_size,
                classes=args.classes,
                img_size=args.image_size)
    y_pred = unet.create_unet(x_train, train=False)

    # Create log folder
    if args.load and not args.name:
        log_path = os.path.dirname(args.load)
    else:
        log_path = build_log_dir(args, sys.argv)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

        benchmarks = [
            Benchmarks('./Benchmarks/retina_test/', name='retina image')
Example 26
    )
    parser.add_argument(
        "--lr",
        type=float,
        default=0.0001,
        help="learning rate",
    )
    parser.add_argument(
        "--logs",
        type=str,
        default="./logs",
        help="Log folder",
    )
    args = parser.parse_args()

    model = Unet()
    model.build(input_shape=(None, 256, 256, 3))
    model.compile(optimizer=optimizers.Adam(lr=args.lr),
                  loss=dice_coef_loss,
                  metrics=['accuracy', dice_coef])

    # Loading Dataset
    X_train, Y_train, X_test, Y_test = get_db(args.data_dir)
    # Set tf.keras.callbacks.ModelCheckpoint callback to automatically save the model
    checkpoint_path = "weight/ep{epoch:03d}-val_dice_coef{val_dice_coef:.3f}-val_acc{val_accuracy:.3f}.ckpt"
    modelCheckpoint = callbacks.ModelCheckpoint(
        filepath=checkpoint_path,  # Path to save the model
        verbose=1,  # Whether to output information
        save_weights_only=True,
        period=1,  # Save the model every few rounds
    )
Example 27
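 # Build the TF1 graph: placeholders, the Unet model, cross-entropy or Dice loss, a momentum optimizer, and summaries.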
 def __init__(self, args):
     #save args
     self.args = args
     #init coco utils
     self.coco_train = COCO("../annotations/instances_train2014.json")
     self.coco_val = COCO("../annotations/instances_val2014.json")
     #init tensorflow session
     tf.reset_default_graph()
     config = tf.ConfigProto(allow_soft_placement=True)
     config.gpu_options.allow_growth = True
     self.sess = tf.Session(config=config)
     #init model
     self.input_img = tf.placeholder(tf.float32,
                                     shape=(None, None, None, 3))
     self.label = tf.placeholder(tf.float32,
                                 shape=(None, None, None, args.nb_classes))
     self.model = Unet(input_img=self.input_img, nb_classes=args.nb_classes)
     #define loss : Cross Entropy and Dice
     with tf.variable_scope('optimization'):
         with tf.variable_scope('loss'):
             if args.loss == 'crossentropy':
                 """logits = tf.reshape(self.model.output_log, [-1, args.nb_classes])
                 labels = tf.reshape(self.label, [-1, args.nb_classes])"""
                 self.loss = -tf.reduce_mean(
                     tf.multiply(self.label, tf.log(
                         self.model.output_proba)))
             elif args.loss == "dice":
                 labels = self.label
                 proba = self.model.output_proba
                 intersection = tf.reduce_sum(proba * labels)
                 union = tf.reduce_sum(proba + labels)
                 self.loss = -intersection / union
         #Optimizer
         self.optimizer = tf.train.MomentumOptimizer(
             learning_rate=args.learning_rate, momentum=0.99)
         self.train_op = self.optimizer.minimize(self.loss)
     #summary file for tensorboard
     self.tf_train_loss = tf.Variable(0.0,
                                      trainable=False,
                                      name='Train_Loss')
     self.tf_train_loss_summary = tf.summary.scalar("Loss",
                                                    self.tf_train_loss)
     self.tf_train_accuracy = tf.Variable(0.0,
                                          trainable=False,
                                          name='Train_Accuracy')
     self.tf_train_accuracy_summary = tf.summary.scalar(
         "Train Accuracy", self.tf_train_accuracy)
     self.tf_train_dice = tf.Variable(0.0,
                                      trainable=False,
                                      name="Train_Dice_Coef")
     self.tf_train_dice_summary = tf.summary.scalar("Train Dice Coef",
                                                    self.tf_train_dice)
     self.tf_eval_accuracy = tf.Variable(0.0,
                                         trainable=False,
                                         name='Eval_accuracy')
     self.tf_eval_accuracy_summary = tf.summary.scalar(
         'Evaluation Accuracy', self.tf_eval_accuracy)
     self.tf_eval_dice = tf.Variable(0.0,
                                     trainable=False,
                                     name="Eval_Dice_Coef")
     self.tf_eval_dice_summary = tf.summary.scalar("Evaluation Dice Coef",
                                                   self.tf_eval_dice)
     self.writer = tf.summary.FileWriter('./graphs', self.sess.graph)
     #saver
     self.saver = tf.train.Saver()
     self.sess.run(tf.global_variables_initializer())
Example 28
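    # Inference loop: binarise each Unet output at 0.5 and write one result image per input.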
    model.eval()
    img_list = os.listdir(img_dir)
    with torch.no_grad():
        for img_name in img_list:
            img = cv2.imread(os.path.join(img_dir, img_name))
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            img = img.transpose((2, 0, 1))
            img = np.expand_dims(img, axis=0)
            img = np.array(img / 255, dtype=np.float32)
            img = torch.from_numpy(img)
            img = img.to(torch.device('cuda'))
            output = model(img)
            output[output >= 0.5] = 1
            output[output < 0.5] = 0
            output = output.cpu().numpy()
            output = np.squeeze(output, 0)
            output = np.transpose(output, (1, 2, 0))
            output = np.array(output * 255, dtype='uint8')
            cv2.imwrite('./result/' + img_name, output)


if __name__ == '__main__':
    unet = Unet().to(torch.device('cuda'))
    optimizer = optim.Adam(unet.parameters(), lr=0.00001)
    train_data = MyDataset(base_path='./data')
    train_loader = DataLoader(dataset=train_data, batch_size=1, shuffle=True)
    train(unet, train_loader, optimizer, 100)
    # model_path = './model/unet.pth'
    # img_dir = './test_img'
    # inference(unet, model_path, img_dir)
Example 29
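# Predict on the test set with a resnet18-encoder Unet and post-process each class mask before RLE encoding.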
min_size = 3500
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
df = pd.read_csv(sample_submission_path)
testset = DataLoader(
    TestDataset(test_data_folder, df, mean, std),
    batch_size=batch_size,
    shuffle=False,
    num_workers=num_workers,
    pin_memory=True
)

# Initialize mode and load trained weights
ckpt_path = "../input/res18ex6/model26.pth"
device = torch.device("cuda")
model = Unet("resnet18", encoder_weights=None, classes=4, activation=None)
model.to(device)
model.eval()
state = torch.load(ckpt_path, map_location=lambda storage, loc: storage)
model.load_state_dict(state["state_dict"])

# start prediction
predictions = []
for i, batch in enumerate(tqdm(testset)):
    fnames, images = batch
    batch_preds = torch.sigmoid(model(images.to(device)))
    batch_preds = batch_preds.detach().cpu().numpy()
    for fname, preds in zip(fnames, batch_preds):
        for cls, pred in enumerate(preds):
            pred, num = post_process(pred, best_threshold, min_size)
            rle = mask2rle(pred)
Example 30
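# Set up the GAN components (generator, discriminator, pretrained Unet) and their Adam optimizers.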
train_loader = data.DataLoader(dataset=train_data,
                               batch_size=batch_size,
                               shuffle=True,
                               drop_last=True,
                               num_workers=4)

generator = define_G(
    4,
    1,
    64,
    'unet_128',
    norm='instance',
)
discriminator = netD()
unet = Unet()
unet.load_state_dict(torch.load("./weight/unet_pretrained.pth"))

optimizer_g = torch.optim.Adam(generator.parameters(), lr=0.0002)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
optimizer_s = torch.optim.Adam(unet.parameters(), lr=0.0002)

generator.cuda()
discriminator.cuda()
unet.cuda()
EPOCH = 100
num_iter = len(train_loader)
D_LOSS = []
G_LOSS = []
# S_LOSS=[]
f = open("./loss_gan.txt", 'a')