Example #1
0
def load_data(batch_size, training_set_feat, training_set_labels,
              validation_set_feat, validation_set_labels):
    """Wrap the training and validation feature/label arrays in
    ImgDataset objects and return one shuffled DataLoader for each.

    Returns:
        (train_loader, val_loader) tuple of torch DataLoader objects.
    """
    def _make_loader(features, labels):
        # Both splits are shuffled, matching the original behaviour.
        return torch.utils.data.DataLoader(ImgDataset(features, labels),
                                           batch_size=batch_size,
                                           shuffle=True)

    return (_make_loader(training_set_feat, training_set_labels),
            _make_loader(validation_set_feat, validation_set_labels))
Example #2
0
def load_dataset(args):
    """Build the datasets and loaders for image-caption training.

    Reads captions/ids/vocabulary via ``read_text_data(args)`` and
    returns loaders for: training pairs, validation/test images, and
    validation/test captions, plus the vocabulary table.
    """
    preprocess = transforms.Compose([
        transforms.Resize(256),
        transforms.RandomCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    (train_caption, validation_caption, test_caption, train_id,
     validation_id, val_img_id, test_img_id, img_idx,
     vocab_table) = read_text_data(args)

    # Datasets -----------------------------------------------------------
    train_ds = TrainDataset(args.img_dir, vocab_table, img_idx,
                            train_caption, train_id, preprocess)
    val_img_ds = ImgDataset(args.img_dir, val_img_id, is_val=True,
                            transform=preprocess, img_idx=img_idx)
    test_img_ds = ImgDataset(args.img_dir, test_img_id,
                             transform=preprocess)
    val_cap_ds = CaptionDataset(vocab_table, validation_caption,
                                is_val=True, label=validation_id)
    test_cap_ds = CaptionDataset(vocab_table, test_caption)

    # Loaders ------------------------------------------------------------
    train_loader = DataLoader(train_ds, batch_size=args.batch_size,
                              num_workers=4, shuffle=True,
                              collate_fn=train_collate_fn)
    val_img_loader = DataLoader(val_img_ds, batch_size=args.batch_size,
                                num_workers=4)
    test_img_loader = DataLoader(test_img_ds, batch_size=args.batch_size,
                                 num_workers=4)
    val_caption_loader = DataLoader(val_cap_ds, batch_size=args.batch_size,
                                    num_workers=4,
                                    collate_fn=test_collate_fn)
    test_caption_loader = DataLoader(test_cap_ds, batch_size=args.batch_size,
                                     num_workers=4,
                                     collate_fn=test_collate_fn)

    return (train_loader, val_img_loader, test_img_loader,
            val_caption_loader, test_caption_loader, vocab_table)
Example #3
0
def train_val_data(dir_path1, dir_path2):
    """Read labelled images from two directories and merge them into a
    single ImgDataset using the training transform."""
    features, labels = [], []
    for path in (dir_path1, dir_path2):
        xs, ys = read_imgs(path, True)
        features.append(xs)
        labels.append(ys)
    return ImgDataset(np.concatenate(features, axis=0),
                      np.concatenate(labels, axis=0),
                      train_transform)
Example #4
0
def main(args):
    """Evaluate a pre-trained segmentation Unet on a test set.

    Loads the Unet weights from ``args.model_path``, runs inference over
    the images in ``args.image_path``, writes colour-coded prediction
    and ground-truth images to ``args.out_dir`` and prints the mean IOU.

    Raises:
        Exception: if ``args.model_path`` is not given.
    """
    segmen_A = Unet(3, 34).to(args.device)

    if args.model_path is not None:
        segmen_path = os.path.join(args.model_path, 'semsg.pt')

        with open(segmen_path, 'rb') as f:
            state_dict = torch.load(f)
            segmen_A.load_state_dict(state_dict)

    else:
        raise Exception('please specify model path!')

    segmen_A = nn.DataParallel(segmen_A)

    transforms_ = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    testloader = DataLoader(ImgDataset(args.image_path,
                                       transforms_=transforms_,
                                       mode='eval_unet'),
                            batch_size=args.batchSize,
                            shuffle=False,
                            num_workers=0)

    segmen_A.eval()

    with torch.no_grad():
        total_iou = []
        for i, batch in enumerate(testloader):
            name, toTest, labels = batch
            # segmentation forward pass
            pred_label = segmen_A(toTest)
            # BUGFIX: iterate over the actual batch size, not
            # args.batchSize -- the final batch of a DataLoader is
            # usually smaller, which previously raised an IndexError.
            for idx in range(len(name)):
                pred = pred_label[idx].cpu().numpy()
                label = labels.cpu().numpy()[idx]
                img = np.zeros(
                    (label.shape[0], label.shape[1], 3)).astype('uint8')
                original_img = np.zeros(
                    (label.shape[0], label.shape[1], 3)).astype('uint8')
                prediction = np.zeros(
                    (label.shape[0], label.shape[1])).astype('uint8')
                # Hoisted out of the palette loop: the argmax over the
                # class axis is identical for every colour.
                class_map = np.argmax(pred, axis=0)
                for c in range(len(palette)):
                    indices = class_map == c
                    prediction[indices] = c
                    img[indices] = palette[c]
                    original_img[label == c] = palette[c]
                original_img = Image.fromarray(original_img.astype('uint8'))
                original_img.save(
                    os.path.join(args.out_dir, 'original_' +
                                 name[idx].replace('jpg', 'png')))
                img = Image.fromarray(img.astype('uint8'))
                img.save(
                    os.path.join(args.out_dir, name[idx].replace('jpg',
                                                                 'png')))
                total_iou.append(IOU(prediction, label, 34))
        print(sum(total_iou) / len(total_iou))
def main(args):
    """Run a trained Generator over the test set and save the translated
    images to ``args.out_dir``.

    ``args.direction`` ('AB' or 'BA') selects which checkpoint drives
    the forward generator; the opposite checkpoint is loaded into the
    reverse generator.

    Raises:
        Exception: if ``args.model_path`` is missing or the direction
            is neither 'AB' nor 'BA'.
    """
    G = Generator(args.in_channel, args.out_channel).to(args.device)
    G_reverse = Generator(args.in_channel, args.out_channel).to(args.device)

    if args.model_path is None:
        raise Exception('please specify model path!')

    AB_path = os.path.join(args.model_path, 'ab.pt')
    BA_path = os.path.join(args.model_path, 'ba.pt')

    if args.direction == 'AB':
        forward_path, reverse_path = AB_path, BA_path
    elif args.direction == 'BA':
        forward_path, reverse_path = BA_path, AB_path
    else:
        raise Exception('direction has to be BA OR AB!')

    with open(forward_path, 'rb') as f:
        G.load_state_dict(torch.load(f))
    with open(reverse_path, 'rb') as f:
        G_reverse.load_state_dict(torch.load(f))

    G = nn.DataParallel(G)

    transforms_ = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    testloader = DataLoader(ImgDataset(args.image_path,
                                       transforms_=transforms_,
                                       mode='test'),
                            batch_size=args.batchSize,
                            shuffle=False,
                            num_workers=0)

    G.eval()
    with torch.no_grad():
        for name, toTest in testloader:
            transformed_ = G(toTest)
            for idx in range(len(name)):
                utils.save_image(transformed_[idx],
                                 os.path.join(args.out_dir,
                                              name[idx].split('/')[-1]),
                                 normalize=True,
                                 range=(-1, 1))
Example #6
0
def load_dataset():
    """Build the training DataLoader for the 10-class food dataset.

    Uses the module-level ``args`` for the input size, dataset path and
    batch size. Images are resized, converted to tensors and normalised
    from (0, 1) to (-1, 1).
    """
    preprocessing = transforms.Compose([
        transforms.Resize((args.input_size, args.input_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])

    # Dish name -> class index. The keys must match the dataset's
    # on-disk naming, so they are kept verbatim.
    class_label_dct = {
        '冬瓜排骨汤': 0,
        '土豆丝': 1,
        '椒盐虾': 2,
        '番茄炒蛋': 3,
        '糖醋里脊': 4,
        '红烧肉': 5,
        '莴笋肉片': 6,
        '辣子鸡': 7,
        '香菇青菜': 8,
        '鱼香茄子': 9
    }

    training_data = ImgDataset(root=args.dataset_path,
                               type_='train',
                               transforms=preprocessing,
                               class_label_dct=class_label_dct,
                               num_per_class=200)

    return DataLoader(training_data,
                      batch_size=args.batch_size,
                      shuffle=True)
import torch
import matplotlib.pyplot as plt
from dataset import ImgDataset
from model import Model
"""Plot Saliency Map"""

cuda = False
img_indices = [83, 4218, 4707, 8598]  # indices of the images to visualise

# Load the trained model; fall back to CPU when CUDA is unavailable.
if cuda:
    model = torch.load('../hw3_CNN/model.pth')
else:
    model = torch.load('../hw3_CNN/model.pth', map_location='cpu')
# Fetch the selected images from the training set.
training_dataset = ImgDataset('../hw3_CNN/data/training', cuda)
images, labels = training_dataset.get_batch(img_indices)

# Compute the loss and backpropagate it to the inputs.
model.eval()
images.requires_grad_()  # track gradients of the loss w.r.t. the images
y_pred = model(images)
loss_func = torch.nn.CrossEntropyLoss()
loss = loss_func(y_pred, labels)
loss.backward()
# The saliency map is the absolute input gradient.
if cuda:
    saliencies = images.grad.abs().detach().cpu()
else:
    saliencies = images.grad.abs().detach()
saliencies = torch.stack([(item - item.min()) / (item.max() - item.min())
Example #8
0
    transforms.ToTensor(
    ),  #將圖片轉成 Tensor,並把數值normalize到[0,1](data normalization)    
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# No data augmentation is applied at test time.
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Parameters initialize
batch_size = 32
train_set = ImgDataset(train_X, train_Y, train_transform)
val_set = ImgDataset(val_X, val_Y, test_transform)
# NOTE(review): the test set uses train_transform and a shuffled loader;
# this looks unintentional (test_transform / shuffle=False expected) --
# confirm before relying on test-set ordering or augmentation-free eval.
test_set = ImgDataset(test_x, transform=train_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=True)

# Ensemble of teacher networks (presumably for knowledge distillation --
# confirm against the consumer of `teacher_net`).
teacher_net = []
teacher_net1 = VGG16().cuda()
teacher_net1.load_state_dict(torch.load('model/vgg16.model'))
teacher_net2 = VGG13().cuda()
teacher_net2.load_state_dict(torch.load('model/vgg13.model'))
# teacher_net3 = VGG19().cuda()
# teacher_net3.load_state_dict(torch.load('teacher_model/vgg19.model'))
teacher_net.append(teacher_net1)
teacher_net.append(teacher_net2)
Example #9
0
from dataset import ImgDataset
from gan import GAN
import os.path as op

# Location of the image dataset used to build the GAN's data source.
path = op.join('..', 'datasets', 'small')

data = ImgDataset(path)
gan = GAN(data)
Example #10
0

# reading testing set
print("Reading data")
test_x = readfile(os.path.join(workspace_dir, "testing"), False)
print("Size of Testing data = {}".format(len(test_x)))

# testing configuration
model_best = Classifier().cuda()
model_best.load_state_dict(torch.load(model_filename))
# NOTE(review): `loss` and `optimizer` are never used at inference --
# presumably leftovers copied from the training script.
loss = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model_best.parameters(), lr=0.001)

# testing dataset (no labels, deterministic order for the CSV output)
batch_size = 48
test_set = ImgDataset(test_x, transform=test_transform)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)

# predict
model_best.eval()
prediction = []
with torch.no_grad():
    for i, data in enumerate(test_loader):
        test_pred = model_best(data.cuda())
        # arg-max over the class logits gives the predicted label
        test_label = np.argmax(test_pred.cpu().data.numpy(), axis=1)
        for y in test_label:
            prediction.append(y)

# Write the result to csv file
with open(output_filename, 'w') as f:
    f.write('Id,Category\n')
Example #11
0
# reading training set, validation set
workspace_dir = sys.argv[1]  #'/home/shannon/Downloads/food-11'
print("Reading data")
train_x, train_y = readfile(os.path.join(workspace_dir, "training"), True)
print("Size of training data = {}".format(len(train_x)))
val_x, val_y = readfile(os.path.join(workspace_dir, "validation"), True)
print("Size of validation data = {}".format(len(val_x)))

# train in training & validating set (final step)
#train_x = np.concatenate((train_x, val_x), axis=0)
#train_y = np.concatenate((train_y, val_y), axis=0)

# create train and valid dataset; only training data is augmented
batch_size = 48
train_set = ImgDataset(train_x, train_y, train_transform)
val_set = ImgDataset(val_x, val_y, test_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

# Keep the loss and accuracy at every iteration for plotting
train_loss_list = []
dev_loss_list = []
train_acc_list = []
dev_acc_list = []

# training configuration
model = Classifier().cuda()
#model_filename = "./model/vgg16_lite_drop_bth48_lr0.002_ep200_deg60_img168_112/lr0001_train_n_val/model_0.9848396501457726_ep150"
#model.load_state_dict(torch.load(model_filename))
loss = nn.CrossEntropyLoss(
Example #12
0
    print("Reading data")
    train_x, train_y = readfile(os.path.join(workspace_dir, "Train"), True)
    print("Size of training data = {}".format(len(train_x)))
    print("label Size of training data = {}".format(len(train_y)))
    val_x, val_y = readfile(os.path.join(workspace_dir, "Dev"), True)
    print("Size of validation data = {}".format(len(val_x)))
    print(train_x.shape)

    # Fix all RNG seeds so every run sees the same shuffling/augmentation.
    torch.manual_seed(567)
    torch.cuda.manual_seed(567)
    np.random.seed(567)

    #data augmentation & load
    batch_size = 64
    train_set = ImgDataset(train_x, train_y, train_transform)
    val_set = ImgDataset(val_x, val_y, test_transform)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

    model = Classifier().cuda()
    loss = nn.CrossEntropyLoss(
    )  # classification task, so CrossEntropyLoss is used
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001)  # Adam optimizer
    #num_epoch = 60
    num_epoch = 1  # NOTE(review): looks like a debug value -- confirm
    for epoch in range(num_epoch):
        epoch_start_time = time.time()
        train_acc = 0.0
        train_loss = 0.0
Example #13
0
    print('[*] Loading pickles...')
    # Load the preprocessed tensors; `dataset_type` prefixes select the
    # dataset variant (empty string for the default).
    with open(f'preprocessed/{dataset_type}train_x.pkl', 'rb') as f:
        train_x = pickle.load(f)
    with open(f'preprocessed/{dataset_type}train_y.pkl', 'rb') as f:
        train_y = pickle.load(f)
    with open(f'preprocessed/valid_x.pkl', 'rb') as f:
        valid_x = pickle.load(f)
    with open(f'preprocessed/valid_y.pkl', 'rb') as f:
        valid_y = pickle.load(f)
    # "train_val" = training + validation combined (for final training).
    with open(f'preprocessed/{dataset_type}train_val_x.pkl', 'rb') as f:
        train_val_x = pickle.load(f)
    with open(f'preprocessed/{dataset_type}train_val_y.pkl', 'rb') as f:
        train_val_y = pickle.load(f)

    train_dataset = ImgDataset(train_x, train_y, transform=train_transform)
    valid_dataset = ImgDataset(valid_x, valid_y, transform=test_transform)
    train_val_dataset = ImgDataset(train_val_x,
                                   train_val_y,
                                   transform=train_transform)

    model = build_model()
    device = torch.device(
        f'cuda:{args.cuda}' if torch.cuda.is_available() else 'cpu')
    if args.resume:
        model.load_state_dict(torch.load(f'{arch}/model.ckpt'))
    model = model.to(device)
    # cudnn autotuner speeds up fixed-shape convolutions on GPU.
    if not device == 'cpu':
        cudnn.benchmark = True
    trainer = Trainer(arch, model, args.batch_size, args.lr, args.accum_steps,
Example #14
0
def val_data(dir_path):
    """Read labelled validation images from ``dir_path`` and wrap them
    in an ImgDataset with the test-time transform."""
    features, labels = read_imgs(dir_path, True)
    return ImgDataset(features, labels, test_transform)
Example #15
0
				total += 1
		with env.begin(write = True) as txn:
			txn.put('length'.encode('utf-8'), str(total).encode('utf-8'))
if __name__ == "__main__":
	# CLI entry point: build an lmdb dataset of resized images.
	parser = argparse.ArgumentParser(description = 'Preprocess images for model training')
	parser.add_argument('--out', type = str, help = 'filename of the result lmdb dataset')
	parser.add_argument('--size', type = str, default = '128,256,512,1024', \
		help = 'resolutions of images for the dataset')
	parser.add_argument('--n_worker', type = int, default = 8, \
		help = 'number of workers for preparing dataset')
	parser.add_argument('--resample', type = str, default = 'lanczos', \
		help = 'resampling methods for resizing images')
	parser.add_argument('path', type = str, help = 'path to the image dataset')
	args = parser.parse_args()
	# Parse the comma-separated resolution list, silently skipping
	# entries that are not valid integers.
	sizes = []
	for token in args.size.split(','):
		try:
			sizes += [int(token.strip())]
		except ValueError:
			# BUGFIX: was `except ValueError as s`, which shadowed the
			# loop variable and left it unbound after the handler.
			pass
	print('Make dataset of image sizes:' + ','.join('%d'%s for s in sizes))
	try:
		from torchvision import datasets
		imgset = datasets.ImageFolder(args.path)
	except Exception:
		# Fall back to the project's flat-directory dataset when
		# torchvision is missing or ImageFolder rejects the layout.
		# (Narrowed from a bare `except:` so Ctrl-C / SystemExit still
		# propagate.)
		from dataset import ImgDataset
		imgset = ImgDataset(args.path)
	with lmdb.open(args.out, map_size = 1024 ** 4, readahead = False) as env:
		prepare(env, imgset, args.n_worker, sizes = sizes, resample = args.resample)
def main(args):
    """Train a CycleGAN (G_AB, G_BA, D_A, D_B) jointly with a
    segmentation Unet (segmen_B) on domain B.

    Per-epoch checkpoints and TensorBoard logs are written under
    ``args.out_dir``. When ``args.model_path`` is set, all five networks
    resume from the checkpoints found there; otherwise they are
    initialised with ``weights_init_normal``.
    """
    writer = SummaryWriter(os.path.join(args.out_dir, 'logs'))
    current_time = datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
    os.makedirs(
        os.path.join(args.out_dir, 'models',
                     args.model_name + '_' + current_time))
    os.makedirs(
        os.path.join(args.out_dir, 'logs',
                     args.model_name + '_' + current_time))

    G_AB = Generator(args.in_channel, args.out_channel).to(args.device)
    G_BA = Generator(args.in_channel, args.out_channel).to(args.device)
    D_A = Discriminator(args.in_channel).to(args.device)
    D_B = Discriminator(args.out_channel).to(args.device)
    segmen_B = Unet(3, 34).to(args.device)

    if args.model_path is not None:
        # BUGFIX: these five paths were built with `os.join.path(...)`,
        # which raises AttributeError at runtime -- the function is
        # os.path.join.
        AB_path = os.path.join(args.model_path, 'ab.pt')
        BA_path = os.path.join(args.model_path, 'ba.pt')
        DA_path = os.path.join(args.model_path, 'da.pt')
        DB_path = os.path.join(args.model_path, 'db.pt')
        segmen_path = os.path.join(args.model_path, 'semsg.pt')

        with open(AB_path, 'rb') as f:
            state_dict = torch.load(f)
            G_AB.load_state_dict(state_dict)

        with open(BA_path, 'rb') as f:
            state_dict = torch.load(f)
            G_BA.load_state_dict(state_dict)

        with open(DA_path, 'rb') as f:
            state_dict = torch.load(f)
            D_A.load_state_dict(state_dict)

        with open(DB_path, 'rb') as f:
            state_dict = torch.load(f)
            D_B.load_state_dict(state_dict)

        with open(segmen_path, 'rb') as f:
            state_dict = torch.load(f)
            segmen_B.load_state_dict(state_dict)

    else:
        G_AB.apply(weights_init_normal)
        G_BA.apply(weights_init_normal)
        D_A.apply(weights_init_normal)
        D_B.apply(weights_init_normal)

    G_AB = nn.DataParallel(G_AB)
    G_BA = nn.DataParallel(G_BA)
    D_A = nn.DataParallel(D_A)
    D_B = nn.DataParallel(D_B)
    segmen_B = nn.DataParallel(segmen_B)

    criterion_GAN = torch.nn.MSELoss()
    criterion_cycle = torch.nn.L1Loss()
    criterion_identity = torch.nn.L1Loss()
    criterion_segmen = torch.nn.BCELoss()

    # Both generators share one optimizer; discriminators and the
    # segmentation net each get their own.
    optimizer_G = torch.optim.Adam(itertools.chain(G_AB.parameters(),
                                                   G_BA.parameters()),
                                   lr=args.lr,
                                   betas=(0.5, 0.999))
    optimizer_D_A = torch.optim.Adam(D_A.parameters(),
                                     lr=args.lr,
                                     betas=(0.5, 0.999))
    optimizer_D_B = torch.optim.Adam(D_B.parameters(),
                                     lr=args.lr,
                                     betas=(0.5, 0.999))

    optimizer_segmen_B = torch.optim.Adam(segmen_B.parameters(),
                                          lr=args.lr,
                                          betas=(0.5, 0.999))

    lr_scheduler_G = torch.optim.lr_scheduler.LambdaLR(
        optimizer_G,
        lr_lambda=LambdaLR(args.n_epochs, args.epoch, args.decay_epoch).step)
    lr_scheduler_D_A = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_A,
        lr_lambda=LambdaLR(args.n_epochs, args.epoch, args.decay_epoch).step)
    lr_scheduler_D_B = torch.optim.lr_scheduler.LambdaLR(
        optimizer_D_B,
        lr_lambda=LambdaLR(args.n_epochs, args.epoch, args.decay_epoch).step)

    fake_A_buffer = ReplayBuffer()
    fake_B_buffer = ReplayBuffer()

    transforms_ = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    dataloader = DataLoader(ImgDataset(args.dataset_path,
                                       transforms_=transforms_,
                                       unaligned=True,
                                       device=args.device),
                            batch_size=args.batchSize,
                            shuffle=True,
                            num_workers=0)
    logger = Logger(args.n_epochs, len(dataloader))
    # NOTE(review): these label tensors are sized args.batchSize; a
    # smaller final batch would make the GAN losses shape-mismatch
    # unless the loader drops the last batch -- confirm.
    target_real = Variable(torch.Tensor(args.batchSize,
                                        1).fill_(1.)).to(args.device).detach()
    target_fake = Variable(torch.Tensor(args.batchSize,
                                        1).fill_(0.)).to(args.device).detach()

    G_AB.train()
    G_BA.train()
    D_A.train()
    D_B.train()
    segmen_B.train()

    for epoch in range(args.epoch, args.n_epochs):
        for i, batch in enumerate(dataloader):
            real_A = batch['A'].clone()
            real_B = batch['B'].clone()
            B_label = batch['B_label'].clone()

            fake_b = G_AB(real_A)
            fake_a = G_BA(real_B)
            same_b = G_AB(real_B)
            same_a = G_BA(real_A)
            recovered_A = G_BA(fake_b)
            recovered_B = G_AB(fake_a)
            pred_Blabel = segmen_B(real_B)
            pred_fakeAlabel = segmen_B(fake_a)

            optimizer_segmen_B.zero_grad()
            #segmen loss, do we assume that it also learns how to segment images after doing domain transfer?
            loss_segmen_B = criterion_segmen(
                pred_Blabel, B_label) + criterion_segmen(
                    segmen_B(fake_a.detach()), B_label)
            loss_segmen_B.backward()
            optimizer_segmen_B.step()

            optimizer_G.zero_grad()
            #gan loss
            pred_fakeb = D_B(fake_b)
            loss_gan_AB = criterion_GAN(pred_fakeb, target_real)

            pred_fakea = D_A(fake_a)
            loss_gan_BA = criterion_GAN(pred_fakea, target_real)

            #identity loss
            loss_identity_B = criterion_identity(same_b, real_B) * 5
            loss_identity_A = criterion_identity(same_a, real_A) * 5

            #cycle consistency loss
            loss_cycle_ABA = criterion_cycle(recovered_A, real_A) * 10
            loss_cycle_BAB = criterion_cycle(recovered_B, real_B) * 10

            #cycle segmen diff loss
            loss_segmen_diff = criterion_segmen(segmen_B(recovered_B),
                                                pred_Blabel.detach())

            loss_G = loss_gan_AB + loss_gan_BA + loss_identity_B + loss_identity_A + loss_cycle_ABA + loss_cycle_BAB + loss_segmen_diff
            loss_G.backward()

            optimizer_G.step()

            ##discriminator a
            optimizer_D_A.zero_grad()

            pred_realA = D_A(real_A)
            loss_D_A_real = criterion_GAN(pred_realA, target_real)

            fake_A = fake_A_buffer.push_and_pop(fake_a)
            pred_fakeA = D_A(fake_A.detach())
            loss_D_A_fake = criterion_GAN(pred_fakeA, target_fake)

            loss_D_A = (loss_D_A_real + loss_D_A_fake) * 0.5
            loss_D_A.backward()

            optimizer_D_A.step()

            #discriminator b
            optimizer_D_B.zero_grad()

            pred_realB = D_B(real_B)
            loss_D_B_real = criterion_GAN(pred_realB, target_real)

            fake_B = fake_B_buffer.push_and_pop(fake_b)
            pred_fakeB = D_B(fake_B.detach())
            loss_D_B_fake = criterion_GAN(pred_fakeB, target_fake)

            loss_D_B = (loss_D_B_real + loss_D_B_fake) * 0.5
            loss_D_B.backward()

            optimizer_D_B.step()

            logger.log(
                {
                    'loss_segmen_B': loss_segmen_B,
                    'loss_G': loss_G,
                    'loss_G_identity': (loss_identity_A + loss_identity_B),
                    'loss_G_GAN': (loss_gan_AB + loss_gan_BA),
                    'loss_G_cycle': (loss_cycle_ABA + loss_cycle_BAB),
                    'loss_D': (loss_D_A + loss_D_B)
                },
                images={
                    'real_A': real_A,
                    'real_B': real_B,
                    'fake_A': fake_a,
                    'fake_B': fake_b,
                    'reconstructed_A': recovered_A,
                    'reconstructed_B': recovered_B
                },
                out_dir=os.path.join(
                    args.out_dir, 'logs',
                    args.model_name + '_' + current_time + '/' + str(epoch)),
                writer=writer)

        if (epoch + 1) % args.save_per_epochs == 0:
            os.makedirs(
                os.path.join(args.out_dir, 'models',
                             args.model_name + '_' + current_time, str(epoch)))
            torch.save(
                G_AB.module.state_dict(),
                os.path.join(args.out_dir,
                             'models', args.model_name + '_' + current_time,
                             str(epoch), 'ab.pt'))
            torch.save(
                G_BA.module.state_dict(),
                os.path.join(args.out_dir,
                             'models', args.model_name + '_' + current_time,
                             str(epoch), 'ba.pt'))
            torch.save(
                D_A.module.state_dict(),
                os.path.join(args.out_dir,
                             'models', args.model_name + '_' + current_time,
                             str(epoch), 'da.pt'))
            torch.save(
                D_B.module.state_dict(),
                os.path.join(args.out_dir,
                             'models', args.model_name + '_' + current_time,
                             str(epoch), 'db.pt'))
            torch.save(
                segmen_B.module.state_dict(),
                os.path.join(args.out_dir,
                             'models', args.model_name + '_' + current_time,
                             str(epoch), 'semsg.pt'))

        lr_scheduler_G.step()
        lr_scheduler_D_A.step()
        lr_scheduler_D_B.step()
Example #17
0
    # Grayscale: collapse the 3 input channels to 1.
    transforms.Grayscale(),
    # Resize: the source data is 32x32, so upscale the 28x28 target
    # data to 32x32.
    transforms.Resize((32, 32)),
    # Horizontal flip (augmentation).
    transforms.RandomHorizontalFlip(),
    # Rotate up to 15 degrees (augmentation); empty corners are filled
    # with 0.
    transforms.RandomRotation(15, fill=(0, )),
    # Finally convert to a Tensor for the model.
    transforms.ToTensor(),
])

#source_dataset = ImageFolder('real_or_drawing/train_data', transform=source_transform)
#target_dataset = ImageFolder('real_or_drawing/test_data', transform=target_transform)
source_dataset = ImgDataset(source_transform,
                            target_transform,
                            './real_or_drawing',
                            train=True)
print('dataset done')
target_dataset = ImgDataset(source_transform,
                            target_transform,
                            './real_or_drawing',
                            train=False)
print('dataset done 2')

# Source/target loaders for domain-adversarial training; the test
# loader iterates the target set without shuffling.
source_dataloader = DataLoader(source_dataset, batch_size=32, shuffle=True)
target_dataloader = DataLoader(target_dataset, batch_size=32, shuffle=True)
test_dataloader = DataLoader(target_dataset, batch_size=128, shuffle=False)

feature_extractor = FeatureExtractor().cuda()
label_predictor = LabelPredictor().cuda()
domain_classifier = DomainClassifier().cuda()
          y[i] = int(file.split("_")[0])
    # if label=true: train&valid
    if label:
      return x, y
    # if label=false: test
    else:
      return x

# reading testing set
print("Reading data")
val_x, val_y = readfile(os.path.join(workspace_dir, "validation"), True)
print("Size of Testing data = {}".format(len(val_x)))

# testing dataset (deterministic order so predictions line up with val_y)
batch_size = 48
val_set = ImgDataset(val_x, val_y, test_transform)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

# testing configuration
model_best = Classifier().cuda()
model_best.load_state_dict(torch.load(model_filename))

# predict
model_best.eval()
val_y_hat = []
with torch.no_grad():
    for i, data in enumerate(val_loader):
        # data is (images, labels); only the images go to the model.
        test_pred = model_best(data[0].cuda())
        # arg-max over the class logits gives the predicted label
        test_label = np.argmax(test_pred.cpu().data.numpy(), axis=1)
        for y in test_label:
            val_y_hat.append(y)
Example #19
0
def train(df, img_dir, pretrained_file=None):
    """Train a res34Unet on the image/mask data described by ``df``.

    Args:
        df: DataFrame with an "imageId" column and label_1..label_4
            mask columns.
        img_dir: directory containing the images.
        pretrained_file: optional checkpoint to warm-start from.

    The best model (lowest validation loss) is saved to
    ./model_data/model.bin; training stops early after PATIENCE epochs
    without improvement.  (Leftover ``pdb.set_trace()`` breakpoints
    were removed.)
    """
    # set up dataset for training: shuffled 80/20 train/valid split
    df = df.sample(frac=1., random_state=42)
    train_number = int(len(df) * 0.8)
    train_df = df.iloc[:train_number, :]
    valid_df = df.iloc[train_number:, :]
    del df

    cols = ["label_" + str(idx) for idx in range(1, 5)]
    train_dataset = ImgDataset(train_df["imageId"].values,
                               img_dir,
                               mask_list=train_df[cols])
    train_data_loader = DataLoader(
        train_dataset,
        batch_size=8,
        shuffle=True,
    )
    # BUGFIX: the validation dataset previously used train_df[cols] as
    # its masks (copy-paste error); it must use valid_df.
    valid_dataset = ImgDataset(valid_df["imageId"].values,
                               img_dir,
                               mask_list=valid_df[cols])
    valid_data_loader = DataLoader(
        valid_dataset,
        batch_size=4,
        shuffle=True,
    )

    # set up model parameters
    model = res34Unet(num_classes=4)
    print(model)
    if pretrained_file is not None:
        skip = ['block.5.weight', 'block.5.bias']
        # BUGFIX: `pretained_file` was a typo that raised NameError.
        load_pretained_weights(model,
                               pretrained_file,
                               skip=skip,
                               first_layer=["block.0.0.weight"])
    summary(model, torch.zeros(2, 1, 224, 224))

    LR = 3e-4
    optimizer = optim.Adam(model.parameters(), lr=LR)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     mode="min",
                                                     patience=3,
                                                     verbose=True)

    # set up train parameters
    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    SAVER = "./model_data/model.bin"
    EPOCHS = 50
    CNT = 0
    BEST_VALID_LOSS = float("inf")
    PATIENCE = 5
    train_meter = Meter(phase="train")
    valid_meter = Meter(phase="valid")

    model.to(DEVICE)
    for epoch in range(EPOCHS):
        st_time = time.time()
        train_loss = train_epoch(model,
                                 train_data_loader,
                                 optimizer,
                                 DEVICE,
                                 train_meter,
                                 schedule=None)
        current_time = time.time()
        train_meter.epoch_log(epoch, train_loss, current_time - st_time)
        valid_loss = valid_epoch(model, valid_data_loader, DEVICE, valid_meter)
        valid_meter.epoch_log(epoch, valid_loss, time.time() - current_time)
        scheduler.step(valid_loss)

        if valid_loss < BEST_VALID_LOSS:
            CNT = 0
            BEST_VALID_LOSS = valid_loss
            torch.save(model.state_dict(), SAVER)
        else:
            CNT += 1
            if CNT >= PATIENCE:
                print("Early stopping ... ")
                # BUGFIX: the break was dedented outside the patience
                # check, so training stopped after the first epoch that
                # failed to improve.
                break
Example #20
0
def main(args):
    """Train the segmentation network ``segmen_B`` end-to-end.

    Creates timestamped model/log directories under ``args.out_dir``,
    optionally restores weights from ``args.model_path``, trains with
    BCE loss + Adam on the ``unetTrain`` split of ``ImgDataset``, and
    checkpoints every ``args.save_per_epochs`` epochs.
    """
    current_time = datetime.now().strftime("%d-%m-%Y_%H-%M-%S")
    # One run name shared by the models/ and logs/ subtrees.
    run_name = args.model_name + '_' + current_time
    os.makedirs(os.path.join(args.out_dir, 'models', run_name))
    os.makedirs(os.path.join(args.out_dir, 'logs', run_name))

    segmen_B = Unet(3, 34).to(args.device)

    if args.model_path is not None:
        # BUG FIX: original called os.join.path, which raises AttributeError;
        # the correct stdlib function is os.path.join.
        segmen_path = os.path.join(args.model_path, 'semsg.pt')

        with open(segmen_path, 'rb') as f:
            state_dict = torch.load(f)
        segmen_B.load_state_dict(state_dict)

    segmen_B = nn.DataParallel(segmen_B)

    # NOTE(review): BCELoss expects predictions in [0, 1] — confirm the Unet
    # output ends with a sigmoid.
    criterion_segmen = torch.nn.BCELoss()

    optimizer_segmen_B = torch.optim.Adam(segmen_B.parameters(),
                                          lr=args.lr,
                                          betas=(0.5, 0.999))

    transforms_ = [
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]
    dataloader = DataLoader(ImgDataset(args.dataset_path,
                                       transforms_=transforms_,
                                       mode='unetTrain',
                                       unaligned=False,
                                       device=args.device),
                            batch_size=args.batchSize,
                            shuffle=True,
                            num_workers=0)
    logger = Logger(args.n_epochs, len(dataloader))
    segmen_B.train()

    for epoch in range(args.epoch, args.n_epochs):
        for i, batch in enumerate(dataloader):
            real_B = batch['B'].clone()
            B_label = batch['B_label'].clone()
            optimizer_segmen_B.zero_grad()
            # Segmentation loss on domain-B images.
            pred_Blabel = segmen_B(real_B)
            loss_segmen_B = criterion_segmen(pred_Blabel, B_label)
            loss_segmen_B.backward()
            optimizer_segmen_B.step()

            logger.log({'loss_segmen': loss_segmen_B},
                       out_dir=os.path.join(args.out_dir, 'logs', run_name,
                                            str(epoch)))

        if (epoch + 1) % args.save_per_epochs == 0:
            epoch_dir = os.path.join(args.out_dir, 'models', run_name,
                                     str(epoch))
            os.makedirs(epoch_dir)
            # Save the unwrapped module so the checkpoint can be loaded
            # without a DataParallel wrapper.
            torch.save(segmen_B.module.state_dict(),
                       os.path.join(epoch_dir, 'semsg.pt'))
Example #21
0
def train_data(dir_path):
    """Load labelled images from *dir_path* and wrap them as a training
    ImgDataset with the training-time transform applied."""
    features, labels = read_imgs(dir_path, True)
    dataset = ImgDataset(features, labels, train_transform)
    return dataset
Example #22
0
def test_data(dir_path):
    """Load unlabelled images from *dir_path* and wrap them as an
    ImgDataset (labels None) with the test-time transform applied."""
    features = read_imgs(dir_path, False)
    dataset = ImgDataset(features, None, test_transform)
    return dataset
Example #23
0
if __name__ == '__main__':
	# CLI entry point: parse options, open the image dataset, then pick a
	# landmark-detection backend and a skin-segmentation backend from flags.
	parser = argparse.ArgumentParser(description = 'Preprocess for Faces')
	parser.add_argument('path', type = str, help = 'path to image/images folder')
	parser.add_argument('--lmk', default = 'Exec', \
		help =	'Landmarks Detection Method')
	parser.add_argument('--bfm', default = '/data/BaselFaceModel.mat', \
		help =	'Morphable Face Model')
	parser.add_argument('--mask', default = '', \
		help =	'Skin Segmentation Model')
	parser.add_argument('--disp', action = 'store_true', \
		help =	'Show Landmarks')
	parser.add_argument('--output', default = '', \
		help =	'Output folder for processed images')
	args = parser.parse_args()
	from dataset import ImgDataset
	data = ImgDataset(args.path)
	# Select the landmark detector by substring match on --lmk.
	# NOTE(review): if none of these branches match, `detector`/`base_lmk`
	# stay undefined and later use would raise NameError — confirm intended.
	if 'exe' in args.lmk.lower():
		detector = LandmarksDetectorExec()
		base_lmk = [0]*68
	elif 'dlib' in args.lmk.lower():
		detector = LandmarksDetectorDlib()
		base_lmk = [0]*68
	elif 'torch' in args.lmk.lower():
		detector = LandmarksDetectorPytorch()
		base_lmk = [0]*68
	elif os.path.exists(args.lmk) and args.lmk[-4:].lower() == '.txt':
		detector = LandmarksReader(args.lmk)
		# NOTE(review): args.lmk is a string path here, so args.lmk.shape
		# will raise AttributeError — presumably the landmark array loaded
		# by LandmarksReader was intended; confirm and fix upstream.
		base_lmk = [0]*(args.lmk.shape[1]//2)
	if 'torch' in args.mask.lower():
		mask = SkinSegmentationPytorch()
	else:
		# (else body not visible in this chunk — truncated)
Example #24
0
    transforms.ToTensor(
    ),  #將圖片轉成 Tensor,並把數值normalize到[0,1](data normalization)    
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

#testing 時不需做 data augmentation
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Parameters initialize
batch_size = 32
train_set = ImgDataset(train_x, train_y, train_transform)
val_set = ImgDataset(val_x, val_y, test_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

model = EfficientNet.from_pretrained('efficientnet-b3',
                                     num_classes=42).to(device)
#model = models.resnet152(pretrained=True).to(device)
#model.fc = nn.Linear(2048, 42).to(device)
#model.classifier = nn.Linear(1664, 42).to(device)

loss = nn.CrossEntropyLoss(
)  # 因為是 classification task,所以 loss 使用 CrossEntropyLoss
optimizer = torch.optim.SGD(model.parameters(),
                            lr=0.0125,
                            momentum=0.9,
def main():
    """Train a LightCNN face-recognition model.

    Builds the model selected by ``args.model``, assigns per-parameter
    learning rates (fc2 and biases get larger multipliers, biases get no
    weight decay), optionally resumes from ``args.model_path``, then runs
    the train/validate loop with periodic checkpointing.
    """
    global args
    args = parser.parse_args()

    # create Light CNN for face recognition
    if args.model == 'LightCNN-29':
        model = LightCNN_29Layers(num_classes=args.num_classes)
    elif args.model == 'LightCNN-29v2':
        model = LightCNN_29Layers_v2(num_classes=args.num_classes)
    else:
        # BUG FIX: original only printed a warning and fell through, which
        # later crashed with NameError on `model`; fail fast instead.
        raise ValueError('Error model type: {}'.format(args.model))

    model = model.cuda()

    print(model)

    if not os.path.exists(args.save_path):
        os.makedirs(args.save_path)

    # Per-parameter LR groups: fc2 weights x10, fc2 biases x20, other
    # biases x2 (biases get no weight decay), everything else x1.
    params = []
    for name, value in model.named_parameters():
        if 'bias' in name:
            lr_scale = 20 if 'fc2' in name else 2
            params += [{
                'params': value,
                'lr': lr_scale * args.lr,
                'weight_decay': 0
            }]
        else:
            lr_scale = 10 if 'fc2' in name else 1
            params += [{'params': value, 'lr': lr_scale * args.lr}]

    optimizer = torch.optim.SGD(params,
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    # BUG FIX: load was unconditional, crashing when no checkpoint path is
    # given; guard it to match the "optionally" intent.
    if args.model_path:
        state_dict = torch.load(args.model_path)
        model.load_state_dict(state_dict)

    cudnn.benchmark = True
    # load image
    train_loader = torch.utils.data.DataLoader(ImgDataset(
        args.dataroot, False, args.crop, args.preload),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=2,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(ImgDataset(
        args.dataroot, True, args.crop, args.preload),
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=0,
                                             pin_memory=True)

    # define loss function and optimizer
    criterion = nn.CrossEntropyLoss()
    criterion.cuda()

    # Baseline evaluation before any training.
    validate(val_loader, model)

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        validate(val_loader, model)

        # Defensive re-check in case save_path was removed mid-run.
        if not os.path.exists(args.save_path):
            os.makedirs(args.save_path)

        # Numbered checkpoint every 5 epochs, plus a rolling "latest" copy.
        if epoch % 5 == 0:
            save_checkpoint(
                model.state_dict(),
                join(args.save_path,
                     'lightCNN_' + str(epoch + 1) + '_checkpoint.pth'))
        save_checkpoint(model.state_dict(),
                        join(args.save_path, 'lightCNN_latest_checkpoint.pth'))
Example #26
0
    transforms.ToTensor(
    ),  #將圖片轉成 Tensor,並把數值normalize到[0,1](data normalization)    
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

#testing 時不需做 data augmentation
test_transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Parameters initialize
batch_size = 32
train_set = ImgDataset(train_X, train_Y, train_transform)
val_set = ImgDataset(val_X, val_Y, test_transform)
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)

model = Classifier().to(device)
#import torchvision.models as models
#model = models.densenet169(pretrained=False).to(device)
#model = models.resnet152(pretrained=False).to(device)
'''
model.classifier = nn.Sequential(
    nn.Dropout(0.3),
    nn.Linear(1664, 256),
    nn.ReLU(inplace=True),
    nn.Linear(256, 11)).to(device)
'''
Example #27
0
    print("raw maximized layer_activations imgs:",x.shape,"select:",0,"in",x.shape[0])
    filter_visualization = x.detach().cpu().squeeze()[0]
    hook_handle.remove() # reminber to rm it, or it exsits forever in every time forwarding
    return filter_activations, filter_visualization

if __name__ == "__main__":
    workspace_dir = sys.argv[1] #'/home/shannon/Downloads/food-11'
    model_filename = sys.argv[2]
    output_dir = sys.argv[3]
    cnnids = [7,14,14,14,24]
    filterids = [0,0,1,2,0]

    print("Reading data")
    train_x, train_y = readfile(os.path.join(workspace_dir, "training"), True)
    print("Size of training data = {}".format(len(train_x)))
    train_set = ImgDataset(train_x, train_y, test_transform)

    print("Loading model")
    model = Classifier().cuda()
    model.load_state_dict(torch.load(model_filename))

    # showing filters from assigned indices image 
    img_indices = [800,1602,2001,3201,4001,4800,5600,7000,7400,8003,8801]
    images, labels = train_set.getbatch(img_indices)

    for i, (cnnid,filterid) in enumerate(zip(cnnids,filterids)):
        filter_activations, filter_visualization = filter_explaination(images, model, cnnid=cnnid, filterid=filterid, iteration=100, lr=0.1)
        print(images.shape)
        print(filter_activations.shape)
        print(filter_visualization.shape)
Example #28
0
    else:
      return x

if __name__ == '__main__':
    #model_dir = sys.argv[1]
    # CLI: <dataset dir> <output dir>; loads a fixed VGG checkpoint.
    workspace_dir = sys.argv[1]
    out_dir = sys.argv[2]
    model = Classifier().cuda()
    checkpoint = torch.load('VGG_150.pt')
    model.load_state_dict(checkpoint)

    train_paths, train_labels = readfile(workspace_dir, True)

    # The dataset is initialized with only the image paths and class labels;
    # images are loaded lazily in the dataset's __getitem__ when items are
    # fetched.
    train_set = ImgDataset(train_paths, train_labels, mode='eval')

    # Indices of the images to visualize together.
    img_indices = [83, 4218, 4707, 8598]
    img_indices2 = [993+200+709, 993+429+250+709]
    images, labels = train_set.getbatch(img_indices)
    # NOTE(review): getbatch2 is called with img_indices, not img_indices2 —
    # confirm this is intentional.
    images2, labels2 = train_set.getbatch2(img_indices)
    images3, labels3 = train_set.getbatch(img_indices2)

    saliencies = compute_saliency_maps(images, labels, model)
    
    # Plot with matplotlib: top row originals, bottom row saliency maps.
    fig, axs = plt.subplots(2, len(img_indices), figsize=(15, 8))
    for row, target in enumerate([images, saliencies]):
        for column, img in enumerate(target):
            # Tensors are CHW; convert to HWC and BGR->RGB for display.
            img = cv2.cvtColor(img.permute(1, 2, 0).numpy(), cv2.COLOR_BGR2RGB)
Example #29
0
def get_data(dir_path, ids):
    """Read the labelled images selected by *ids* from *dir_path* and
    return them as an ImgDataset with the test-time transform."""
    features, labels = read_imgs(dir_path, ids, True)
    dataset = ImgDataset(features, labels, test_transform)
    return dataset
Example #30
0
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True),
    ])

    dataset = MultiResolutionDataset(args.path, transform, args.size)
    loader = data.DataLoader(
        dataset,
        batch_size=args.batch,
        sampler=data_sampler(dataset,
                             shuffle=True,
                             distributed=args.distributed),
        drop_last=True,
    )

    content_sample_dataset = ImgDataset(args.content_sample_path, args.size,
                                        'sample')
    style_sample_dataset = ImgDataset(args.style_sample_path, args.size,
                                      'sample')
    content_loader_sample = data.DataLoader(
        content_sample_dataset,
        batch_size=args.n_sample,
        sampler=data_sampler(content_sample_dataset,
                             shuffle=True,
                             distributed=args.distributed),
        drop_last=True,
    )
    style_loader_sample = data.DataLoader(
        style_sample_dataset,
        batch_size=args.n_sample,
        sampler=data_sampler(style_sample_dataset,
                             shuffle=True,