Example #1
args = parser.parse_args()
print(args)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

resize_num = 64
part_width = 64
part_mouth = 80
## Load test data
test_dataset = ImageDataset(txt_file='testing.txt',
                            root_dir='data/SmithCVPR2013_dataset_resized',
                            bg_indexs=set([0, 1, 10]),
                            transform=transforms.Compose([
                                Rescale((resize_num, resize_num)),
                                ToTensor(),
                            ]))
test_loader = DataLoader(test_dataset,
                         batch_size=args.batch_size,
                         shuffle=True,
                         num_workers=4)

unresized_dataset = ImageDataset(txt_file='testing.txt',
                                 root_dir='data/SmithCVPR2013_dataset_resized',
                                 transform=ToTensor(),
                                 calc_bg=False)

## Load models
with open('res/saved-model.pth', 'rb') as f:
    model = pickle.load(f)
Example #2
from preprocess import ToTensor, ImageDataset
from torch.utils.data import DataLoader
from torchvision import transforms, utils

test_dataset = ImageDataset(txt_file='testing.txt',
                            root_dir='data/SmithCVPR2013_dataset_resized',
                            bg_indexs=set([0]),
                            transform=transforms.Compose([
                                ToTensor(),
                            ]))
test_loader = DataLoader(test_dataset,
                         batch_size=4,
                         shuffle=True,
                         num_workers=4)

print("here")
for batch in test_loader:
    print(batch['rects'])
Example #3
                    type=bool,
                    help="Load saved-model")
args = parser.parse_args()
print(args)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

resize_num = 64

# Load data
train_dataset = ImageDataset(txt_file='exemplars.txt',
                             root_dir='data/SmithCVPR2013_dataset_resized_' +
                             str(resize_num),
                             bg_indexs=set([0, 1, 10]),
                             transform=transforms.Compose([ToTensor()]))
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)

valid_dataset = ImageDataset(txt_file='tuning.txt',
                             root_dir='data/SmithCVPR2013_dataset_resized_' +
                             str(resize_num),
                             bg_indexs=set([0, 1, 10]),
                             transform=transforms.Compose([ToTensor()]))

valid_loader = DataLoader(valid_dataset,
                          batch_size=args.batch_size,
Example #4
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=10, type=int, help="Batch size")
args = parser.parse_args()
print(args)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

resize_num = 64
warp_size = 128

test_dataset = ImageDataset(txt_file='testing.txt',
                            root_dir='data/SmithCVPR2013_dataset_resized_' +
                            str(resize_num),
                            bg_indexs=set([0, 1, 10]),
                            transform=transforms.Compose([ToTensor()]))
test_loader = DataLoader(test_dataset,
                         batch_size=args.batch_size,
                         shuffle=True,
                         num_workers=1)

unresized_dataset = ImageDataset(txt_file='testing.txt',
                                 root_dir='data/SmithCVPR2013_dataset_resized',
                                 bg_indexs=set([0, 1, 10]),
                                 transform=None)


def evaluate(model, loader, criterion):
    epoch_loss = 0
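

# The evaluate() body is truncated in this excerpt. Below is only a sketch of a
# typical PyTorch evaluation loop under the definitions above; the batch keys
# 'image' and 'labels' are assumptions, not taken from the original source.
def evaluate_sketch(model, loader, criterion):
    epoch_loss = 0
    model.eval()
    with torch.no_grad():
        for batch in loader:
            images = batch['image'].to(device)    # assumed key
            labels = batch['labels'].to(device)   # assumed key
            outputs = model(images)
            loss = criterion(outputs, labels)
            epoch_loss += loss.item()
    # Average loss over all batches in the loader
    return epoch_loss / len(loader)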
Example #5
                    type=int,
                    help="Number of epochs to train")
parser.add_argument("--load_model",
                    default=False,
                    type=bool,
                    help="Wether to continue training from last checkpoint")
args = parser.parse_args()
print(args)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

train_dataset = ImageDataset(txt_file='exemplars.txt',
                             root_dir='data/SmithCVPR2013_dataset_warped',
                             bg_indexs=set([0]),
                             transform=transforms.Compose([ToTensor()]))
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)

valid_dataset = ImageDataset(txt_file='tuning.txt',
                             root_dir='data/SmithCVPR2013_dataset_warped',
                             bg_indexs=set([0]),
                             transform=transforms.Compose([ToTensor()]))
valid_loader = DataLoader(valid_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)
Example #6
def main():
    time_now = datetime.datetime.now()
    timestamp = time_now.strftime("%m%d%y-%H%M%S")

    gpus = tf.config.list_physical_devices("GPU")
    if gpus:
        try:
            for gpu in gpus:
                tf.config.experimental.set_memory_growth(gpu, True)
        except RuntimeError as e:
            print(e)

    content_images = glob.glob(ARGS.content_dir + os.sep + "*.jpg")
    style_images = glob.glob(ARGS.style_dir + os.sep + "*.jpg")
    num_images = min(len(content_images), len(style_images))
    content_images = content_images[:num_images]
    style_images = style_images[:num_images]

    model = AdaIN_NST(ARGS.pretrained_vgg19)
    checkpoint_path = "./output/checkpoints" + os.sep + timestamp + os.sep
    logs_path = "./output/logs" + os.sep + timestamp + os.sep
    logs_path = os.path.abspath(logs_path)
    model.compile(optimizer=model.optimizer, loss=model.loss_fn)

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint).expect_partial()

    if not ARGS.evaluate and not os.path.exists(
            checkpoint_path) and not ARGS.no_save:
        os.makedirs(checkpoint_path)
        os.makedirs(logs_path)

    if ARGS.evaluate:
        content_image = get_image(ARGS.content_evaluate)
        style_image = get_image(ARGS.style_evaluate)
        if ARGS.result_evaluate:
            output_name = ARGS.result_evaluate
        else:
            output_name = (os.path.split(ARGS.content_evaluate)[1][:-4] + "_" +
                           os.path.split(ARGS.style_evaluate)[1])
        test(model, content_image, style_image, output_name)

    else:
        print("Total {} images will be used.".format(num_images))
        print("checkpoint saved in : {}".format(checkpoint_path))
        print("log saved in : {}".format(logs_path))
        num_batches = int(num_images // hp.batch_size)
        if num_batches == 0:
            raise Exception(
                "Number of images have to be larger than the batch size. \n \tCurrent images pair: {} \n \tbatch size: {}"
                .format(num_images, hp.batch_size))
        """
        datasets = ImageDataset(ARGS.content_dir, ARGS.style_dir)
        num_batches = min(len(datasets.content_data), len(datasets.style_data))
        for epoch in range(hp.num_epochs):
            for i in range(num_batches):
                content_data = next(datasets.content_data)
                style_data = next(datasets.style_data)
                loss = train(
                    model, content_data, style_data, logs_path, checkpoint_path
                )
        """
        for epoch in range(hp.num_epochs):
            np.random.shuffle(content_images)
            np.random.shuffle(style_images)

            for batch in range(num_batches):
                content_batch_paths = content_images[batch * hp.batch_size:(
                    batch * hp.batch_size + hp.batch_size)]
                style_batch_paths = style_images[batch * hp.batch_size:(
                    batch * hp.batch_size + hp.batch_size)]

                datasets = ImageDataset(content_batch_paths, style_batch_paths)
                content_data = datasets.content_data
                style_data = datasets.style_data
                loss = train(model, content_data, style_data, logs_path,
                             checkpoint_path)

                if batch % 10 == 0:
                    tf.print(
                        "Epoch {}\t Batch {}\t: Loss {}\t".format(
                            epoch, batch, loss),
                        # output_stream=sys.stdout,
                        output_stream="file://{}/loss.log".format(logs_path),
                    )
                if not ARGS.no_save and batch % 5000 == 0:
                    save_name = "epoch_{}_batch_{}".format(epoch, batch)
                    model.save_weights(
                        filepath=checkpoint_path + os.sep + save_name,
                        save_format="tf",
                    )
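

# Example #6 reads flags from an ARGS namespace and hyperparameters from an hp
# module, both defined outside this excerpt. The argparse sketch below is a
# hypothetical reconstruction covering only the attributes main() actually uses;
# the flag names, defaults, and help strings are assumptions, not the original parser.
import argparse

def parse_args_sketch():
    parser = argparse.ArgumentParser()
    parser.add_argument("--content_dir", help="Directory of content .jpg images")
    parser.add_argument("--style_dir", help="Directory of style .jpg images")
    parser.add_argument("--pretrained_vgg19", help="Path to pretrained VGG19 weights")
    parser.add_argument("--load_checkpoint", default=None,
                        help="Checkpoint to resume from")
    parser.add_argument("--evaluate", action="store_true",
                        help="Stylize a single content/style pair instead of training")
    parser.add_argument("--content_evaluate", help="Content image used with --evaluate")
    parser.add_argument("--style_evaluate", help="Style image used with --evaluate")
    parser.add_argument("--result_evaluate", default=None,
                        help="Output file name for the stylized image")
    parser.add_argument("--no_save", action="store_true",
                        help="Do not write checkpoints or logs")
    return parser.parse_args()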
Example #7
                    type=bool,
                    help="Wether to continue training from last checkpoint")
parser.add_argument("--test_only",
                    default=False,
                    type=bool,
                    help="Only testing")
args = parser.parse_args()
print(args)

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

train_dataset = ImageDataset(txt_file='exemplars.txt',
                             root_dir='data/SmithCVPR2013_dataset_warped',
                             bg_indexs=set([0, 1, 2, 3, 4, 5, 7, 8, 9, 10]),
                             transform=transforms.Compose([ToTensor()]))
train_loader = DataLoader(train_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)

valid_dataset = ImageDataset(txt_file='tuning.txt',
                             root_dir='data/SmithCVPR2013_dataset_warped',
                             bg_indexs=set([0, 1, 2, 3, 4, 5, 7, 8, 9, 10]),
                             transform=transforms.Compose([ToTensor()]))
valid_loader = DataLoader(valid_dataset,
                          batch_size=args.batch_size,
                          shuffle=True,
                          num_workers=4)
Example #8
def make_dataset(file, dir_name, trans, fg_indexs=None, bg_indexs=set([])):
    return ImageDataset(txt_file=file,
                        root_dir='data/facial_parts/' + dir_name,
                        fg_indexs=fg_indexs,
                        bg_indexs=bg_indexs,
                        transform=transforms.Compose(trans))
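

# A hypothetical call to the helper above, mirroring the loaders built in the
# earlier examples; the 'mouth' directory name, file list, and index set are
# illustrative assumptions rather than values from the source.
from torch.utils.data import DataLoader

mouth_dataset = make_dataset('testing.txt', 'mouth', [ToTensor()], bg_indexs=set([0]))
mouth_loader = DataLoader(mouth_dataset, batch_size=4, shuffle=True, num_workers=4)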