Example #1
def trainAndTest():

    filePath = "/home/omer/gaitRec/persons/"
    X, y = getImages(filePath)
    print(np.asarray(X).shape, np.asarray(y).shape)

    sample, a1, a2, a3, a4, a5 = np.shape(X)
    X = np.array(X).reshape(sample * a1 * a2, a3, a4, a5)
    rand_state = np.random.randint(0, 100)
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=rand_state)

    datagen = ImageDataGenerator()

    datagen.fit(X_train)

    valgen = ImageDataGenerator()
    valgen.fit(X_test)

    model = unet(input_size=(64, 64, 3))

    batch_size = 32
    inputShape = (64, 64)
    #model = CNN(inputShape)

    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_acc',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=Adam(lr=1.0e-4),
                  metrics=['accuracy'])

    model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size),
                        steps_per_epoch=len(X_train) / batch_size,
                        validation_data=valgen.flow(X_test,
                                                    y_test,
                                                    batch_size=batch_size),
                        validation_steps=len(X_test) / batch_size,
                        epochs=30,
                        callbacks=[checkpoint])

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.fit(X, y, epochs=30, batch_size=batch_size, verbose=0)
    scores = model.evaluate(X, y, verbose=0)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1] * 100))

    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights("model.h5")
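# A minimal reload sketch for the files saved above (assumes the unet uses only
# standard Keras layers, so no custom_objects are needed when deserializing):
from keras.models import model_from_json

with open("model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model.h5")
loaded_model.compile(loss='sparse_categorical_crossentropy',
                     optimizer='adam',
                     metrics=['accuracy'])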
Example #2
def cnncheck():
    batch_size = 8
    epochs = 100
    data_augmentation = True

    trainable = True
    imagepath = './TCGA/'
    ids = ['5270_01_1']
    i = 0
    for id_i in ids:
        i = i + 1

        print('loading ' + id_i)
        if i == 1:
            image1, phi = load_imgphi(imagepath, id_i)
            continue
        image1temp, phitemp = load_imgphi(imagepath, id_i)
        image1 = np.concatenate((image1, image1temp), axis=0)
        phi = np.concatenate((phi, phitemp), axis=0)

    print(image1.shape, phi.shape)

    x1_train, x1_test, y_train, y_test = train_test_split(image1,
                                                          phi,
                                                          test_size=0.2,
                                                          shuffle=True)

    model = unet()

    sgd = optimizers.SGD(lr=0.001, momentum=0.9, nesterov=True)

    prior = sio.loadmat('./resized_prior.mat')['vol']
    model.compile(loss=[CustomedLoss(kernel=prior)],
                  optimizer=sgd,
                  metrics=['mse'])

    checkpoint = ModelCheckpoint('./weights/model.h5',
                                 verbose=1,
                                 monitor='val_loss',
                                 save_best_only=True,
                                 mode='min')
    print(model.summary())
    if trainable:
        history = model.fit(x1_train,
                            y_train,
                            batch_size=batch_size,
                            epochs=epochs,
                            validation_data=(x1_test, y_test),
                            shuffle=True,
                            callbacks=[checkpoint])

    return
Exemple #3
0
def test(args):

    # Data Load
    testset = dataset(args, mode='test')

    # Model Load
    model = unet(args)
    model.load_weights(args.ckpt_path)

    # Model Test
    results = model.predict_generator(testset, steps=1, verbose=1)

    # Save predictions
    save_result(args, results)
Exemple #4
0
def test(unet, criterion, testloader, write_mask=False, filename=""):
    test_loss = 0
    idx = 0
    for images, masks in tqdm(testloader):
        if torch.cuda.is_available():
            images = images.cuda()
            masks = masks.cuda()
        masks_pred = unet(images)
        if write_mask:
            write_image(images, masks_pred, masks, idx, filename)
        loss = criterion(masks_pred, masks)

        idx += 1
        test_loss += loss.item()
    print("Test Loss {}".format(test_loss / len(testloader)))
    print("Test Accuracy {}".format(1 - test_loss / len(testloader)))
Exemple #5
0
def train(args):

    # Data Load
    trainset = dataset(args, mode='train')
    validset = dataset(args, mode='valid')

    # Model Load
    model = unet(args)
    model_checkpoint = ModelCheckpoint(args.ckpt_path,
                                       monitor='val_loss',
                                       verbose=2,
                                       save_best_only=True)
    # Model Train
    model.fit_generator(trainset,
                        steps_per_epoch=500,
                        shuffle=True,
                        epochs=args.epoch,
                        validation_data=validset,
                        validation_steps=2000,
                        callbacks=[model_checkpoint],
                        workers=16)
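# The dataset(args, mode=...) helper used above is not shown in this listing; a
# minimal keras.utils.Sequence sketch of the kind of generator fit_generator /
# predict_generator expect (names, shapes and batch size below are assumptions):
import numpy as np
from keras.utils import Sequence


class ToySegmentationDataset(Sequence):
    def __init__(self, n=32, batch_size=4, shape=(256, 256, 1)):
        self.x = np.random.rand(n, *shape).astype('float32')
        self.y = (np.random.rand(n, *shape) > 0.5).astype('float32')
        self.batch_size = batch_size

    def __len__(self):
        # number of batches per epoch
        return len(self.x) // self.batch_size

    def __getitem__(self, idx):
        s = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[s], self.y[s]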
Exemple #6
0
def train(unet, criterion, optimizer, trainloader, epoch=EPOCH):
    for epoch in range(epoch):

        unet.train()

        epoch_loss = 0

        for images, masks in tqdm(trainloader):
            if torch.cuda.is_available():
                images = images.cuda()
                masks = masks.cuda()

            masks_pred = unet(images)

            loss = criterion(masks_pred, masks)

            epoch_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print("Epoch {}, loss {}".format(epoch, epoch_loss / len(trainloader)))
valid_label_folder = "lbl"
log_filepath = './log'
flag_multi_class = False
num_classes = 2
dp = data_preprocess(train_path=train_path,
                     image_folder=image_folder,
                     label_folder=label_folder,
                     valid_path=valid_path,
                     valid_image_folder=valid_image_folder,
                     valid_label_folder=valid_label_folder,
                     flag_multi_class=flag_multi_class,
                     num_classes=num_classes)

train_data = dp.trainGenerator(batch_size=2)
valid_data = dp.validLoad(batch_size=1)
model = unet(lrate=1e-4, ls=2)  # WBCE

model_checkpoint1 = keras.callbacks.ModelCheckpoint(
    'UnetWBCE_DL.hdf5',
    monitor='val_dice_loss',
    verbose=1,
    mode='min',
    save_best_only=True
)  # save the checkpoint with the lowest dice loss on validation
#steps_per_epoch = number of training samples / training batch size
#validation_steps = number of validation samples / validation batch size
csv_logger = CSVLogger('trainingUnetWBCE.log', append=True,
                       separator=';')  # back up the training history
history = model.fit_generator(train_data,
                              steps_per_epoch=1912,
                              epochs=32,
                              validation_data=valid_data,
                              validation_steps=20,  # assumed: validation samples / validation batch size
                              callbacks=[model_checkpoint1, csv_logger])
Example #8
def IoU(y_true, y_pred, eps=1e-6):
    if K.max(y_true) == 0.0:
        return IoU(1 - y_true, 1 - y_pred)  ## empty image; calc IoU of zeros
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    union = K.sum(y_true, axis=[1, 2, 3]) + K.sum(
        y_pred, axis=[1, 2, 3]) - intersection
    return K.mean((intersection + eps) / (union + eps), axis=0)
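# dice_coef_loss is referenced below via eval(loss_name) but its definition is
# not part of this snippet; a common Keras-backend version (an assumption, not
# necessarily the original implementation):
def dice_coef_loss(y_true, y_pred, eps=1e-6):
    intersection = K.sum(y_true * y_pred, axis=[1, 2, 3])
    sums = K.sum(y_true, axis=[1, 2, 3]) + K.sum(y_pred, axis=[1, 2, 3])
    dice = K.mean((2.0 * intersection + eps) / (sums + eps), axis=0)
    return 1.0 - dice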


train_images, train_labels = load_h5("../dataset/voc2012_train.h5")
print("traing shape:", train_images.shape, train_labels.shape)
val_images, val_labels = load_h5("../dataset/voc2012_val.h5")
print("valid shape:", val_images.shape, val_labels.shape)

from keras.models import *
model = unet(21, (224, 224, 3))
loss_names = ["dice_coef_loss"]  # ["binary_crossentropy" ,"dice_coef_loss"]
for loss_name in loss_names:
    model.compile(loss=eval(loss_name), optimizer=Adam(lr=0.03), metrics=[IoU])
    checkpoint = ModelCheckpoint(
        './weights/sigmoid_%s_weights.h5' % loss_name,  # model filename
        monitor='val_IoU',  # quantity to monitor
        verbose=1,  # verbosity - 0 or 1
        save_best_only=True,  # The latest best model will not be overwritten
        mode='max')  # The decision to overwrite model is m
    history = model.fit(train_images,
                        train_labels,
                        batch_size=8,
                        shuffle=True,
                        validation_data=(val_images, val_labels),
                        epochs=10,  # assumed; not specified in the snippet
                        callbacks=[checkpoint])
Example #9
def main(args):
    dataset = args.data
    gpu = args.gpu
    batch_size = args.batch_size
    model_path = args.model_path
    log_path = args.log_path
    num_epochs = args.num_epochs
    learning_rate = args.learning_rate
    start_epoch = args.start_epoch
    islocal = args.islocal

    # make directory for models saved when there is not.
    make_folder(model_path, dataset)  # for sampling model
    make_folder(log_path, dataset)  # for logpoint model
    make_folder(log_path, dataset + '/ckpt')  # for checkpoint model

    # see if gpu is on
    print("Running on gpu : ", gpu)
    cuda.set_device(gpu)

    # set the data-loaders
    train_dataset, train_loader, val_loader, imsize = Color_Dataloader(
        dataset, batch_size)

    # declare unet class
    unet = UNet(imsize, islocal)

    # make the class run on gpu
    unet.cuda()

    # Loss and Optimizer
    optimizer = torch.optim.Adam(unet.parameters(), lr=learning_rate)
    criterion = torch.nn.SmoothL1Loss()

    # optionally resume from a checkpoint
    if args.resume:
        ckpt_path = os.path.join(log_path, dataset, 'ckpt/local/model.ckpt')
        if os.path.isfile(ckpt_path):
            print("=> loading checkpoint")
            checkpoint = torch.load(ckpt_path)
            start_epoch = checkpoint['epoch']
            unet.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> Loaded checkpoint (epoch {})".format(
                checkpoint['epoch']))
            print("=> Meaning that start training from (epoch {})".format(
                checkpoint['epoch'] + 1))
        else:
            print("=> Sorry, no checkpoint found at '{}'".format(args.resume))

    # record time
    tell_time = Timer()
    iter = 0
    # Train the Model
    for epoch in range(start_epoch, num_epochs):

        unet.train()
        for i, (images, _) in enumerate(train_loader):

            batch = images.size(0)
            '''
            additional variables for later use.
            change the picture type from rgb to CIE Lab.
            def process_data, def process_global in util file
            '''
            if islocal:
                input, labels, _ = process_data(images, batch, imsize, islocal)
                local_ab, local_mask = process_local(labels, batch, imsize)
                side_input = torch.cat(
                    [local_ab, local_mask], 1
                )  # concat([batch x 2 x imsize x imsize , batch x 1 x imsize x imsize], 1) = batch x 3 x imsize x imsize
                random_expose = random.randrange(1, 101)
                if random_expose == 100:
                    print("Jackpot! expose the whole!")
                    local_mask = torch.ones(batch_size, 1, imsize, imsize)
                    side_input = torch.cat([labels, local_mask], 1)
            else:  # global hints (not islocal)
                input, labels, ab_for_global = process_data(
                    images, batch, imsize, islocal)
                side_input = process_global(images,
                                            ab_for_global,
                                            batch,
                                            imsize,
                                            hist_mean=0.03,
                                            hist_std=0.13)

            # make them all variable + gpu available

            input = Variable(input).cuda()
            labels = Variable(labels).cuda()
            side_input = Variable(side_input).cuda()

            # initialize gradients
            optimizer.zero_grad()
            outputs = unet(input, side_input)

            # make outputs and labels as a matrix for loss calculation
            outputs = outputs.view(batch, -1)  # 100 x 32*32*3(2048)
            labels = labels.contiguous().view(batch, -1)  # 100 x 32*32*3

            loss_train = criterion(outputs, labels)
            loss_train.backward()
            optimizer.step()

            if (i + 1) % 10 == 0:
                print(
                    'Epoch [%d/%d], Iter [%d/%d], Loss: %.10f, iter_time: %2.2f, aggregate_time: %6.2f'
                    % (epoch + 1, num_epochs, i + 1,
                       (len(train_dataset) // batch_size), loss_train.data[0],
                       (tell_time.toc() - iter), tell_time.toc()))
                iter = tell_time.toc()

        torch.save(
            unet.state_dict(),
            os.path.join(model_path, dataset, 'unet%d.pkl' % (epoch + 1)))

        # start evaluation
        print("-------------evaluation start------------")

        unet.eval()
        loss_val_all = Variable(torch.zeros(100), volatile=True).cuda()
        for i, (images, _) in enumerate(val_loader):

            # change the picture type from rgb to CIE Lab
            batch = images.size(0)

            if islocal:
                input, labels, _ = process_data(images, batch, imsize, islocal)
                local_ab, local_mask = process_local(labels, batch, imsize)
                side_input = torch.cat([local_ab, local_mask], 1)
                random_expose = random.randrange(1, 101)
                if random_expose == 100:
                    print("Jackpot! expose the whole!")
                    local_mask = torch.ones(batch_size, 1, imsize, imsize)
                    side_input = torch.cat([labels, local_mask], 1)
            else:  # global hints (not islocal)
                input, labels, ab_for_global = process_data(
                    images, batch, imsize, islocal)
                side_input = process_global(images,
                                            ab_for_global,
                                            batch,
                                            imsize,
                                            hist_mean=0.03,
                                            hist_std=0.13)

                # make them all variable + gpu available

            input = Variable(input).cuda()
            labels = Variable(labels).cuda()
            side_input = Variable(side_input).cuda()

            # initialize gradients
            optimizer.zero_grad()
            outputs = unet(input, side_input)

            # make outputs and labels as a matrix for loss calculation
            outputs = outputs.view(batch, -1)  # 100 x 32*32*3(2048)
            labels = labels.contiguous().view(batch, -1)  # 100 x 32*32*3

            loss_val = criterion(outputs, labels)

            logpoint = {
                'epoch': epoch + 1,
                'args': args,
            }
            checkpoint = {
                'epoch': epoch + 1,
                'args': args,
                'state_dict': unet.state_dict(),
                'optimizer': optimizer.state_dict(),
            }

            loss_val_all[i] = loss_val

            if i == 30:
                print(
                    'Epoch [%d/%d], Validation Loss: %.10f' %
                    (epoch + 1, num_epochs, torch.mean(loss_val_all).data[0]))
                torch.save(
                    logpoint,
                    os.path.join(
                        log_path, dataset, 'Model_e%d_train_%.4f_val_%.4f.pt' %
                        (epoch + 1, torch.mean(loss_train).data[0],
                         torch.mean(loss_val_all).data[0])))
                torch.save(checkpoint,
                           os.path.join(log_path, dataset, 'ckpt/model.ckpt'))
                break
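# A hedged driver sketch for main(); the flags mirror the attributes read above
# and every default value is an assumption.
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--data', required=True)
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--model_path', default='./models')
    parser.add_argument('--log_path', default='./logs')
    parser.add_argument('--num_epochs', type=int, default=50)
    parser.add_argument('--learning_rate', type=float, default=1e-4)
    parser.add_argument('--start_epoch', type=int, default=0)
    parser.add_argument('--islocal', action='store_true')
    parser.add_argument('--resume', action='store_true')
    main(parser.parse_args())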
Example #10
# Proposed network
Width = 320
Height = 320
batch_size = 1
nClass = 2

# Variables
I = tf.placeholder(tf.float32, shape=[None, None, None, 1])  # 0~1 input image
LR = tf.placeholder(tf.float32)

# initial base & detail layers
I_d = tf.placeholder(tf.float32, shape=[None, None, None, 1])  # 0~1 input image
I_b = tf.placeholder(tf.float32, shape=[None, None, None, 1])  # 0~1 input image

# DR-Net
I_dr = unet(I_d, outputChn=1, name='test_Dt_rstr', reuse=False)
I_dr = tf.nn.tanh(I_dr)
I_dgt = tf.placeholder(tf.float32, shape=[None, None, None, 1])
Loss_dr = tf.reduce_mean(tf.square(I_dr - I_dgt))

# TM-Net
I_r = tf.clip_by_value(I_b+tf.stop_gradient(I_dr), 0.0, 1.0)
I_o = unet(I_r, outputChn=1, name='test_TMO', reuse=False)
I_o = tf.nn.sigmoid(I_o)

Y_8bit = (tf.round(I_o*255)) / 255.0
Y_8bit_for_loss = tf.stop_gradient(Y_8bit - I_o) + I_o

# Structural similarity loss
Loss_ss = 0
Loss_ss = Loss_ss + 1e-0 * tf.reduce_mean(1.0 - tf.image.ssim(I, I_o, max_val=1.0))
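# A hedged sketch of driving the graph above; the combined objective, the
# optimizer and the random numpy batch are assumptions, not part of the snippet.
import numpy as np

Loss_total = Loss_dr + Loss_ss
train_op = tf.train.AdamOptimizer(learning_rate=LR).minimize(Loss_total)

dummy = np.random.rand(batch_size, Height, Width, 1).astype(np.float32)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, loss_val = sess.run([train_op, Loss_total],
                           feed_dict={I: dummy, I_d: dummy, I_b: dummy,
                                      I_dgt: dummy, LR: 1e-4})
    print('one optimization step, loss =', loss_val)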
Example #11
data_gen_args = dict(rotation_range=0.2,
                    width_shift_range=0.05,
                    height_shift_range=0.05,
                    shear_range=0.05,
                    zoom_range=0.05,
                    horizontal_flip=True,
                    fill_mode='nearest')

# change folder name to training set folder 
#myGene = trainGenerator(2,'data/membrane/train','image','label',data_gen_args,save_to_dir = None)
myGene = trainGenerator(2,'/home/lenovo/Documents/Major Project/Project material/Dataset/Retinal/AV - DRIVE/AV_DRIVE_groundtruth/training/','images','matlab_unet',data_gen_args,save_to_dir = None)
#print(*myGene, sep = '\n')   ##batch size changed from 32 to 2
print(myGene)

model = unet()

## Can't find this function
##model_checkpoint = ModelCheckpoint('unet_membrane.hdf5', monitor='loss',verbose=1, save_best_only=True)
#model.fit_generator(myGene,steps_per_epoch=300,epochs=1,callbacks=[model_checkpoint])
model.fit_generator(myGene,steps_per_epoch=30,epochs=1)  #steps per epoch changed from 300 to 30

# change folder name to test set folder
#testGene = testGenerator("data/membrane/test")
#testGene = testGenerator("AV_DRIVE_groundtruth/test/images")
test_datagen = ImageDataGenerator(rescale=1./255)

testGene = test_datagen.flow_from_directory(
        'AV_DRIVE_groundtruth/test/images',
        target_size=(256, 256),
        batch_size=32)
Example #12
def train(rotation_range, width_shift_range, height_shift_range, shear_range,
          zoom_range, fill_mode, datadir, batch_size, validation_split,
          learning_rate, decay, optimizer, loss, loss_weights):
    augmentation_args = {
        'rotation_range': rotation_range,
        'width_shift_range': width_shift_range,
        'height_shift_range': height_shift_range,
        'shear_range': shear_range,
        'zoom_range': zoom_range,
        'fill_mode': fill_mode,
    }
    train_generator, train_steps_per_epoch, val_generator, val_steps_per_epoch = create_generators(
        datadir,
        batch_size,
        validation_split=validation_split,
        mask=train_labels,
        shuffle_train_val=shuffle_train_val,
        shuffle=shuffle,
        seed=seed,
        normalize_images=normalize,
        augment_training=augment_training,
        augment_validation=augment_validation,
        augmentation_args=augmentation_args)
    # get image dimensions from first batch
    images, masks = next(train_generator)
    _, height, width, channels = images.shape
    _, _, _, classes = masks.shape
    # start building model
    model = unet(height=height,
                 width=width,
                 channels=channels,
                 classes=classes,
                 dropout=0.5)

    model.summary()

    optimizer_args = {'lr': learning_rate, 'decay': decay}
    for k in list(optimizer_args):
        if optimizer_args[k] is None:
            del optimizer_args[k]
    optimizer = use_optimizer(optimizer, optimizer_args)

    if loss == 'binary_crossentropy':

        def lossfunc(y_true, y_pred):
            return weighted_categorical_crossentropy(y_true, y_pred,
                                                     loss_weights)
    elif loss == 'dice':

        def lossfunc(y_true, y_pred):
            return sorensen_dice_loss(y_true, y_pred, loss_weights)
    elif loss == 'jaccard':

        def lossfunc(y_true, y_pred):
            return jaccard_loss(y_true, y_pred, loss_weights)
    else:
        raise Exception("Unknown loss")
    model.compile(optimizer=optimizer, loss=lossfunc)
    model.fit_generator(
        generator=train_generator,
        validation_data=val_generator,
        steps_per_epoch=train_steps_per_epoch,
        validation_steps=val_steps_per_epoch,
        use_multiprocessing=False,
    )
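# weighted_categorical_crossentropy, sorensen_dice_loss and jaccard_loss are
# referenced above but not shown in this listing; a plausible Keras-backend
# sketch of the weighted cross-entropy variant (an assumption about its shape):
from keras import backend as K


def weighted_categorical_crossentropy(y_true, y_pred, weights, epsilon=1e-7):
    # weights: one scalar per class, aligned with the last (class) axis
    w = K.constant(weights)
    y_pred = K.clip(y_pred, epsilon, 1.0 - epsilon)
    return -K.mean(K.sum(w * y_true * K.log(y_pred), axis=-1))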
Example #13
    return best_model_wts, best_acc


if __name__ == "__main__":
    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    train_batch_size = 1
    validation_batch_size = 1
    learning_rate = 0.001
    num_epochs = 70
    num_class = 12
    writer = SummaryWriter()

    model = unet(useBN=True)
    model.to(device)

    dice_loss = DICELoss(np.ones((num_class, 1)))
    dice_loss.to(device)

    # intialize optimizer and lr decay
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=10,
                                                gamma=0.1)

    train_both_aug = Compose([
        PadIfNeeded(min_height=256, min_width=256, border_mode=0, value=0,
                    p=1),
        RandomCrop(height=256, width=256, p=1),
    ])
Example #14
#x_tumor = 0.5*(x_tumor+1)
#y_tumor = 0.5*(y_tumor+1)
#print (y_tumor.shape)
#y_tumor = y_tumor[:,:,:,0]
#y_tumor = np.expand_dims(y_tumor,axis=3)

#x_tumor,y_tumor = OverSample(x_tumor,y_tumor,times=3)
#x_train = np.concatenate((x_train,x_tumor),axis=0)
#y_train = np.concatenate((y_train,y_tumor),axis=0)
#x_train,y_train = CrossMixup(x_train,y_train,x_tumor,y_tumor,num=80000)
#x_train,y_train = Data_shuffle(x_train,y_train)

########################### Model Construction ######################
model = unet(batch_size=batch_size,
             height=256,
             width=192,
             channel=3,
             classes=1)
#Model.summary()
Model = multi_gpu_model(model, gpus=2)
reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              mode='auto',
                              factor=0.2,
                              patience=5,
                              min_lr=0.00000001)
Adam = keras.optimizers.Adam(lr=0.0001, beta_1=0.9, beta_2=0.999)
Model.compile(optimizer=Adam, loss=custom_loss, metrics=['MAE'])
#model.load_weights('model_unet.h5')
#for num, layer in enumerate(model.layers[:-5]):
#    layer.trainable = False
#    print (num,layer.trainable)
Example #15
def main(args):
    dataset     = args.data
    gpu         = args.gpu
    batch_size  = args.batch_size
    model_path  = args.model_path
    image_save  = args.image_save
    model       = args.model
    idx         = args.idx
    global_hist = args.global_hist
    global_sat  = args.global_sat
    hist_ref_idx = args.hist_ref_idx
    sat_ref_idx  = args.sat_ref_idx
    islocal     = args.islocal
    nohint      = args.nohint

    make_folder(image_save, dataset)

    print("Running on gpu : ", gpu)
    cuda.set_device(gpu)

    _, _, test_loader, imsize = Color_Dataloader(dataset, batch_size)

    unet = UNet(imsize, islocal)

    unet.cuda()

    unet.eval()
    unet.load_state_dict(torch.load(os.path.join(model_path, dataset, model)))


    for i, (images, _) in enumerate(test_loader):

        batch = images.size(0)
        '''
        additional variables for later use.
        change the picture type from rgb to CIE Lab.
        def process_data, def process_global in util file
        '''
        if islocal:
            input, labels, _ = process_data(images, batch, imsize, islocal)
            local_ab, local_mask = process_local_sampling(batch_size, imsize, p=1)
            if nohint:
                local_ab = torch.zeros(batch_size, 2, imsize, imsize)
                local_mask = torch.zeros(batch_size, 1, imsize, imsize)

            side_input = torch.cat([local_ab, local_mask], 1)


        else:
            input, labels, ab_for_global = process_data(images, batch, imsize, islocal)

            print('global hint for histogram : ', global_hist)
            print('global hint for saturation : ', global_sat)

            side_input = process_global_sampling(batch, imsize, 0.03, 0.13,
                                                 global_hist, global_sat, hist_ref_idx, sat_ref_idx)

        # make them all variable + gpu available

        input = Variable(input).cuda()
        labels = Variable(labels).cuda()
        side_input = Variable(side_input).cuda()

        outputs = unet(input, side_input)

        criterion = torch.nn.SmoothL1Loss()
        loss = criterion(outputs, labels)
        print('loss for test data: %2.4f'%(loss.cpu().data[0]))


        colored_images = torch.cat([input,outputs],1).data # 100 x 3 x 32 x 32
        gray_images = torch.zeros(batch_size, 3, imsize, imsize)
        img_gray =np.zeros((imsize, imsize,3))

        colored_images_np = colored_images.cpu().numpy().transpose((0,2,3,1))

        j = 0
        # make sample images back to rgb
        for img in colored_images_np:

            img[:,:,0] = img[:,:,0]*100
            img[:, :, 1:3] = img[:, :, 1:3] * 200 - 100
            img = img.astype(np.float64)
            img_RGB = lab2rgb(img)
            img_gray[:,:,0] = img[:,:,0]
            img_gray_RGB = lab2rgb(img_gray)

            colored_images[j] = torch.from_numpy(img_RGB.transpose((2,0,1)))
            gray_images[j] = torch.from_numpy(img_gray_RGB.transpose((2,0,1)))
            j+=1

        #
        torchvision.utils.save_image(images,
                                 os.path.join(image_save, dataset, '{}_real_samples.png'.format(idx)))
        torchvision.utils.save_image(colored_images,
                                     os.path.join(image_save, dataset, '{}_colored_samples.png'.format(idx)))
        torchvision.utils.save_image(gray_images,
                                     os.path.join(image_save, dataset, '{}_input_samples.png'.format(idx)))


        print('-----------images sampled!------------')
        break
Example #16
  print ('Building model')
  if args.model: 
    model_json      = open(args.model, 'r')
    read_model_json = model_json.read()
    model_json.close()
    model = model_from_json(read_model_json)
  else: 
    if model_type == 'vgg': 
      model = vgg_unet(inputshape = (patch_w, patch_h, patch_depth))
    elif model_type == 'inception': 
      model = inception_unet(inputshape = (patch_w, patch_h, patch_depth), 
                             conv_depth=conv_depth, 
                             number_base_nodes = number_base_nodes, 
                             number_layers = number_layers)
    else: 
      model = unet(inputshape = (patch_w, patch_h, patch_depth))
  
  optimizer = Nadam()
  
  model.compile(optimizer = optimizer, loss = jaccard_distance,
                metrics = [efficiency, purity])
  model.summary()

  if args.weights: 
    print ('Loading initial weights')
    model.load_weights(args.weights)
  
  loss, losses         = 0., []
  val_loss, val_losses = 0., []
  epoch                = 0

Example #17
  
    return pixelacc


# In[6]:


def init_weights(m):
    if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
        torch.nn.init.xavier_uniform_(m.weight.data)
        torch.nn.init.constant_(m.bias, 0)


epochs = 100
criterion = nn.CrossEntropyLoss()
# fcn_model = FCN(n_class=n_class)
fcn_model = unet()
fcn_model.apply(init_weights)
#fcn_model = torch.load('best_model')
optimizer = optim.Adam(fcn_model.parameters(), lr=8e-2)

# In[ ]:

use_gpu = torch.cuda.is_available()
if use_gpu:
    fcn_model = fcn_model.cuda()


def train():
    val_loss = []
    x_axis = []
    count = 0
Example #18
        images = Variable(image)
        labels = Variable(label)

        #print(test*255)
        #test = Image.fromarray(np.uint8(test*255))
        #test.show()
        #print(to_np(labels))

        labels = rgb2onehot(labels)
        if (use_gpu):
            images = images.cuda()
            labels = labels.cuda()

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = unet(images)
        #print(outputs.size(), labels.size())
        #target = labels.view(-1, )
        #print(outputs)
        #print(labels)
        loss = criterion(outputs, labels)
        #loss = dice_loss(outputs, labels)
        loss.backward()
        optimizer.step()

        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Batch [%d/%d] Loss: %.6f' %
                  (epoch + 1, num_epochs, i + 1, len(train_loader),
                   loss.data[0]))

    #if(epoch == 0 or epoch % 20 != 0):
Example #19
def centerNet(c=1, input_size=(512, 512, 3)):
    #Decrease resolution and extract feature map
    inputs = tf.keras.Input(shape=input_size)
    x = conv_module(inputs, size=(7, 7), strides_=(2, 2))
    x = conv_module(x, size=(3, 3), strides_=(2, 2))
    x = unet(x)
    feature_map = unet(x)

    #Top left
    tl = tl_pooling(feature_map)
    tl = tf.keras.layers.Conv2D(256, (3, 3), padding="same")(tl)
    tl = tf.keras.layers.BatchNormalization()(tl)

    x = tf.keras.layers.Conv2D(256, (1, 1), padding="same")(feature_map)

    x = tl + x
    x = tf.keras.layers.ReLU()(x)
    x = conv_module(x)

    #Top left heatmap
    tl_hm = tf.keras.layers.Conv2D(256, (3, 3),
                                   activation="relu",
                                   padding="same")(x)
    tl_hm = tf.keras.layers.Conv2D(c, (1, 1),
                                   activation="sigmoid",
                                   padding="same",
                                   name="tl_hm")(tl_hm)

    #Top left embeddings
    tl_em = tf.keras.layers.Conv2D(256, (3, 3),
                                   activation="relu",
                                   padding="same")(x)
    tl_em = tf.keras.layers.Conv2D(1, (1, 1), padding="same")(tl_em)
    tl_em = tf.squeeze(tl_em, [-1], name="tl_em")

    #Bottom right
    br = br_pooling(feature_map)
    br = tf.keras.layers.Conv2D(256, (3, 3), padding="same")(br)
    br = tf.keras.layers.BatchNormalization()(br)

    x = tf.keras.layers.Conv2D(256, (1, 1), padding="same")(feature_map)

    x = br + x
    x = tf.keras.layers.ReLU()(x)
    x = conv_module(x)

    #Bottom right heatmap
    br_hm = tf.keras.layers.Conv2D(256, (3, 3),
                                   activation="relu",
                                   padding="same")(x)
    br_hm = tf.keras.layers.Conv2D(c, (1, 1),
                                   activation="sigmoid",
                                   padding="same",
                                   name="br_hm")(br_hm)

    #Bottom right embeddings
    br_em = tf.keras.layers.Conv2D(256, (3, 3),
                                   activation="relu",
                                   padding="same")(x)
    br_em = tf.keras.layers.Conv2D(1, (1, 1), padding="same")(br_em)
    br_em = tf.squeeze(br_em, [-1], name="br_em")

    #Create model
    model = tf.keras.Model(inputs=inputs, outputs=[tl_hm, tl_em, br_hm, br_em])
    return model
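# A hedged compile/usage sketch for the model above (assumes the helper layers
# conv_module, tl_pooling, br_pooling and unet are importable); the per-output
# losses and weights are stand-ins: CornerNet-style training normally uses
# focal and pull/push losses rather than these stock ones.
model = centerNet(c=1, input_size=(512, 512, 3))
model.compile(optimizer=tf.keras.optimizers.Adam(1e-4),
              # order matches outputs=[tl_hm, tl_em, br_hm, br_em]
              loss=['binary_crossentropy', 'mse', 'binary_crossentropy', 'mse'],
              loss_weights=[1.0, 0.1, 1.0, 0.1])
model.summary()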
Example #20
X_train = X[train_start:train_end]
Y_train = Y[train_start:train_end]

########################################################

# Define callbacks
callbacks = [
    EarlyStopping(patience=15, verbose=1),
    TensorBoard(log_dir='./logs'),
    ReduceLROnPlateau(factor=0.75, patience=15, min_lr=0.000005, verbose=1),
    ModelCheckpoint('./models/setD_model_ckpt.h5', verbose=1, save_best_only=True, save_weights_only=False, monitor='val_loss')
]

# Initialize model
NUM_GPUS = 2
model, parallel_model = unet(num_gpus=NUM_GPUS)

# Train the model. 90% train and 10% validation
#model.fit(X_train, Y_train, batch_size=16, epochs=75, verbose=1,validation_data=(X_valid, Y_valid), shuffle=True, callbacks=callbacks)


data_gen_args = dict(
    rotation_range=15,
    shear_range=0.05,
    zoom_range=0.2)
image_datagen = ImageDataGenerator(**data_gen_args)
mask_datagen = ImageDataGenerator(**data_gen_args)

# Provide the same seed and keyword arguments to the fit and flow methods
seed = 209
batch_size = 32
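# A hedged sketch of the seeded pairing the comment above refers to: flow the
# images and masks with the same seed so augmentations stay aligned, then feed
# the zipped stream to fit_generator (X_train / Y_train come from earlier in
# this script; the epoch count is an assumption echoing the commented-out fit).
image_generator = image_datagen.flow(X_train, batch_size=batch_size, seed=seed)
mask_generator = mask_datagen.flow(Y_train, batch_size=batch_size, seed=seed)
train_generator = zip(image_generator, mask_generator)

parallel_model.fit_generator(train_generator,
                             steps_per_epoch=len(X_train) // batch_size,
                             epochs=75,
                             callbacks=callbacks)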
Example #21
print('Building data generator')
test_gen = DataGenerator(dataset_type='data',
                         dirname='MichelEnergyImage',
                         batch_size=batch_size,
                         shuffle=False,
                         root_data=args.input,
                         patch_w=patch_w,
                         patch_h=patch_h,
                         patch_depth=n_channels)

sess = tf.InteractiveSession()
with sess.as_default():

    print('Loading model')
    model = unet(inputshape=(patch_w, patch_h, n_channels),
                 conv_depth=conv_depth)
    model.load_weights(args.weights)

    print('Loading charge info')
    if steps == 0:
        test_charge = np.zeros(
            (test_gen.__len__() * batch_size, patch_w, patch_h, 1))
        test_energy = np.zeros(
            (test_gen.__len__() * batch_size, patch_w, patch_h, 1))
        for i in range(test_gen.__len__()):
            wires = test_gen.getitembykey(i, 'wire')
            energies = test_gen.getitembykey(i, 'energy')
            for j in range(batch_size):
                test_charge[(i * batch_size) + j] = wires[j]
                test_energy[(i * batch_size) + j] = energies[j]
    else:
Example #22
def build_model():
    input_tensor = Input((256, 256, 1))
    model = unet(input_tensor, maxpool=False)
    return model
Example #23
    batch_ind = 0
    # X = tf.placeholder(tf.float32, shape = [batch_size,image_size_w,image_size_h,1])
    # Y = tf.placeholder(tf.float32, shape = [batch_size,image_size_w,image_size_h,1])

    while step < training_iters:
        print('In the {}'.format(step),
              'training iteration~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~')
        batch_ind = step * batch_size - batch_size

        batch_x = tf.slice(train_image, [batch_ind, 0, 0, 0],
                           [batch_size, image_size_w, image_size_h, 1])
        batch_y = tf.slice(train_label, [batch_ind, 0, 0, 0],
                           [batch_size, image_size_w, image_size_h, 1])

        feature_map = unet(batch_x,
                           weights=weights,
                           biases=biases,
                           keep_prob=dropout)
        loss = compute_loss(feature_map=feature_map, label=batch_y)

        # note: creating the AdamOptimizer and running global_variables_initializer
        # inside the while loop re-initializes every variable (including the model
        # weights) on each iteration, so no training progress carries across steps
        optimizer = tf.train.AdamOptimizer(
            learning_rate=learning_rate
        ).minimize(
            loss=loss
        )  # Since AdamOptimizer has its own variables, the initializer must be defined after the optimizer, not before.
        sess.run(tf.global_variables_initializer())

        cost = sess.run(loss)
        # print('In this iteration, the loss is = ', cost)
        opt = sess.run([optimizer])
        # print('the optimaizer had been trained!!')
Example #24
    source = params.test_dir
    if source[-1] != '/':
        source += '/'
    save_path = params.save_path
    if save_path[-1] != '/':
        save_path += '/'
    source += params.imge_name
    print(source)
    model_path = params.model
    unet = UNet()
    unet.load_state_dict(torch.load(model_path, map_location='cpu'))

    if params.add_text:
        imge = add_text(source, params.noise_param)
    else:
        imge = Image.open(source).convert('RGB')
    if params.pre_set != 1:
        w, h = imge.size
        imge = imge.resize((int(w / params.pre_set), int(h / params.pre_set)),
                           Image.ANTIALIAS)
        imge = imge.filter(ImageFilter.SHARPEN)
    imge = tvF.ToTensor()(imge)
    imge = tvF.Normalize(mean=(0.5, 0.5, 0.5), std=(1, 1, 1))(imge)
    imge = imge.unsqueeze(0)
    denoise = unet(imge).detach()
    denoise = denoise.squeeze(0)
    denoise = tvF.Normalize(mean=(-0.5, -0.5, -0.5), std=(1, 1, 1))(denoise)
    denoise = tvF.ToPILImage()(denoise)
    denoise.show()
    denoise.save(save_path + params.imge_name)