def test_colorization(self):
        imgs_l, imgs_true_ab, imgs_emb = self._tensors()

        # Build the network and the optimizer step
        col = Colorization(256)
        imgs_ab = col.build(imgs_l, imgs_emb)
        opt_operations = color_optimizer(imgs_ab, imgs_true_ab)
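        # color_optimizer is assumed to return the same dict of ops
        # ({'cost': ..., 'optimizer': ...}) that self._run expects,
        # mirroring the explicit construction in the next snippet.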

        self._run(imgs_l, imgs_ab, imgs_true_ab, opt_operations)
    def test_colorization(self):
        imgs_l, imgs_true_ab, imgs_emb = self._tensors()

        # Build the network and the optimizer step
        col = Colorization(256)
        imgs_ab = col.build(imgs_l, imgs_emb)
        cost = tf.reduce_mean(tf.squared_difference(imgs_ab, imgs_true_ab))
        optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)

        opt_operations = {'cost': cost, 'optimizer': optimizer}

        self._run(imgs_l, imgs_ab, imgs_true_ab, opt_operations)
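
# A minimal sketch of the helper both tests above rely on (hypothetical; the real
# self._tensors() is not shown in this snippet). The shapes and embedding size
# below are placeholders, not taken from the original code:
def _tensors(self):
    imgs_l = tf.placeholder(tf.float32, shape=[None, 224, 224, 1])        # L channel input
    imgs_true_ab = tf.placeholder(tf.float32, shape=[None, 224, 224, 2])  # ground-truth ab channels
    imgs_emb = tf.placeholder(tf.float32, shape=[None, 1001])             # image embedding vector
    return imgs_l, imgs_true_ab, imgs_emb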
Example #3
outdir = "/opt/data_out/test"  # directory for evaluation output
used_model = 67999  # iteration of the checkpoint to evaluate

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    davis_path = "/opt/data_davis"
    trainset = DavisDataset(base_path=davis_path)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=0)

    # Create the network
    model = Colorization()
    # Load the trained weights; model_path is assumed to follow a
    # "model{iteration}.pth" naming based on used_model, adjust it to
    # wherever the checkpoints were actually saved
    model_path = "model{}.pth".format(used_model)
    model.load_state_dict(torch.load(model_path, map_location=device))
    # model.to(device)
    # Set the model to eval mode (disables dropout / batch-norm updates)
    model.eval()

    # Iterate over the data loader
    for i, data in enumerate(trainloader, 0):
        # Unpack the batch: inputs are normalized 256x256 grayscale frames of
        # shape (2, 1, 256, 256); labels_dummy holds the annotation maps with
        # the same shape
        inputs, originals, labels_dummy, key = data
        inputs = [x.squeeze(0) for x in inputs]
        originals = [x.squeeze(0) for x in originals]
        labels_dummy = [x.type(torch.int64) for x in labels_dummy]
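
        # Hypothetical continuation sketch: run the prepared frames through the
        # network without gradient tracking. The exact forward signature of
        # Colorization is an assumption here.
        with torch.no_grad():
            preds = model(inputs)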
Example #4
epochs = 200  # default: 100
val_number_of_images = 10
total_train_images = 65000  # default: 130 * 500
batch_size = 38  # default: 100
learning_rate = 0.0001  # default: 0.001
batches = total_train_images // batch_size

# START
print_term('Starting session...', run_id)
sess = tf.Session()
K.set_session(sess)
print_term('Started session...', run_id)

# Build the network and the various operations
print_term('Building network...', run_id)
col = Colorization(256)
fwd_col = Feedforward_Colorization(256)
ref = Refinement()

opt_operations = training_pipeline(col, fwd_col, ref, learning_rate,
                                   batch_size)
evaluations_ops = evaluation_pipeline(col, fwd_col, ref, val_number_of_images)
train_col_writer, train_fwd_writer, train_ref_writer, val_col_writer, val_fwd_writer, val_ref_writer = metrics_system(
    run_id, sess)
saver, checkpoint_paths, latest_checkpoint = checkpointing_system(run_id)
print_term('Built network', run_id)

with sess.as_default():
    # tf.summary.merge_all()
    # writer = tf.summary.FileWriter('./graphs', sess.graph)
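
    # Hypothetical continuation sketch: a typical TF1 loop over the ops built
    # above. The exact contents of opt_operations and the meaning of
    # checkpoint_paths (assumed to be the save prefix) are assumptions here.
    sess.run(tf.global_variables_initializer())
    if latest_checkpoint is not None:
        saver.restore(sess, latest_checkpoint)  # resume from the last checkpoint
    for epoch in range(epochs):
        for batch in range(batches):
            sess.run(opt_operations)  # one optimization step per batch
        saver.save(sess, checkpoint_paths, global_step=epoch)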
Example #5
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import transforms
from labels_viewer import GenerateLabelImage
import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
# Colorization and KineticsClustered come from the project's own modules;
# their import lines are not included in this snippet.

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    kinetics_path = "/opt/data"
    trainset = KineticsClustered(base_path=kinetics_path)

    trainloader = torch.utils.data.DataLoader(trainset, batch_size=32,
                                              shuffle=False, num_workers=0)

    model = Colorization()
    # model.load_state_dict(torch.load("/opt/model/8/models/model{}.pth".format(65999)))
    # print(model)
    model.setdev(device)
    parallel_model = nn.DataParallel(model, device_ids=[0, 1])
    parallel_model.to(device)


    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    optimizer = optim.Adam(parallel_model.parameters(), lr=0.001)

    loss_values = []

    for epoch in range(10):  # loop over the dataset multiple times
        running_loss = 0.0
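
        # Hypothetical continuation sketch: a standard PyTorch optimization step.
        # How a KineticsClustered batch unpacks and what the model's forward
        # expects are assumptions here.
        for i, data in enumerate(trainloader, 0):
            inputs, labels = data                        # assumed batch structure
            optimizer.zero_grad()
            outputs = parallel_model(inputs.to(device))  # forward pass
            loss = criterion(outputs, labels.to(device))
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss_values.append(running_loss / len(trainloader))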