Example #1
def eval(args):
    frames = args.frames
    caption_length = args.caption_length
    glove_file = args.glove_file

    if args.cuda:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()
    
    if args.load_pretrain:
        pretrain_model = vision.vgg16_bn(pretrained=True, ctx=ctx)
        transform = utils.Compose([utils.ToTensor(ctx),
                                   utils.normalize(ctx),
                                   utils.extractFeature(ctx, pretrain_model)])
    else:
        pretrain_model = None
        transform = utils.Compose([utils.ToTensor(ctx),
                                   utils.normalize(ctx)])

    target_transform = utils.targetCompose([utils.WordToTensor(ctx)])

    val_dataset = videoFolder(args.val_folder, args.val_dict, frames, glove_file,
                              caption_length, ctx, transform=transform,
                              target_transform=target_transform)

    val_loader = gluon.data.DataLoader(val_dataset, batch_size=args.batch_size,
                                       last_batch='discard', shuffle=True)
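
The gluon examples above take Compose, ToTensor(ctx) and the other transforms from a project-local utils module rather than from gluon.data.vision.transforms. As a hedged sketch (the class names mirror the calls above, but the bodies are assumptions, not the actual module), such context-aware transforms could look like this:

import mxnet as mx
import numpy as np


class Compose(object):
    """Chain callables, feeding each one the output of the previous."""
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, x):
        for t in self.transforms:
            x = t(x)
        return x


class ToTensor(object):
    """Convert an HWC uint8 image array to a CHW float32 NDArray on ctx."""
    def __init__(self, ctx):
        self.ctx = ctx

    def __call__(self, img):
        arr = np.asarray(img, dtype=np.float32) / 255.0   # scale to [0, 1]
        arr = arr.transpose((2, 0, 1))                     # HWC -> CHW
        return mx.nd.array(arr, ctx=self.ctx)

A normalize(ctx) callable would follow the same pattern, subtracting per-channel means on the same context before the data reaches the network.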
Example #2
    def loadData(self):       
        
        trainset = SimulationDataset("train", transforms=transforms.Compose([                 
                utils.RandomCoose(['center']),          
                utils.Preprocess(self.input_shape),
                # utils.RandomResizedCrop(self.input_shape),
                # utils.RandomNoise(),
                utils.RandomTranslate(10, 10),
                # utils.RandomBrightness(),
                # utils.RandomContrast(),
                # utils.RandomHue(),
                utils.RandomHorizontalFlip(),
                utils.ToTensor(),
                utils.Normalize([0.1, 0.4, 0.4], [0.9, 0.6, 0.5])
            ]))
        # weights = utils.get_weights(trainset)
        # sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights), replacement=False)
        # self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=0, pin_memory=True)
        self.trainloader = torch.utils.data.DataLoader(trainset, shuffle=True, batch_size=self.cfg.batch_size, num_workers=0, pin_memory=True)

        testset = SimulationDataset("test", transforms=transforms.Compose([
                utils.RandomCoose(['center']),
                utils.Preprocess(self.input_shape),
                utils.ToTensor(),
                utils.Normalize([0.1, 0.4, 0.4], [0.9, 0.6, 0.5])
            ]))
        self.testloader = torch.utils.data.DataLoader(testset, batch_size=self.cfg.batch_size, shuffle=False, num_workers=0, pin_memory=True)
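
The commented-out sampler path in loadData is mutually exclusive with shuffle=True: PyTorch's DataLoader rejects a sampler combined with shuffling, so enabling the weighted sampler means dropping the shuffle flag. A small self-contained illustration of the pattern (toy tensors, not the simulator data):

import torch
from torch.utils.data import DataLoader, TensorDataset, WeightedRandomSampler

# Toy example: oversample the rare positive class.
targets = torch.tensor([0, 0, 0, 1])
toy_set = TensorDataset(torch.randn(4, 3), targets)
class_weight = torch.tensor([0.25, 0.75])
sample_weights = class_weight[targets]
sampler = WeightedRandomSampler(sample_weights,
                                num_samples=len(sample_weights),
                                replacement=True)
loader = DataLoader(toy_set, batch_size=2, sampler=sampler)  # note: no shuffle=True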
Example #3
def main(args):
    utils.seedme(args.seed)
    cudnn.benchmark = True
    os.system('mkdir -p {}'.format(args.outf))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    images_train, images_test, masks_train, masks_test = utils.load_seismic_data(
        args.root_dir, test_size=.2, random_state=args.seed)
    images_train, masks_train = utils.concatenate_hflips(
        images_train, masks_train, shuffle=True, random_state=args.seed)
    images_test, masks_test = utils.concatenate_hflips(images_test,
                                                       masks_test,
                                                       shuffle=True,
                                                       random_state=args.seed)

    # transform = transforms.Compose([utils.augment(), utils.ToTensor()])
    transform = transforms.Compose([utils.ToTensor()])
    dataset_train = utils.SegmentationDataset(images_train,
                                              masks_train,
                                              transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset_train,
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             drop_last=True,
                                             num_workers=1)
    dataiter = utils.dataiterator(dataloader)

    netF = models.choiceF[args.archF](num_features=args.num_features_F,
                                      num_residuals=args.num_residuals,
                                      gated=args.gated,
                                      gate_param=args.gate_param).to(device)
    optimizerF = optim.Adam(netF.parameters(), lr=args.lr, amsgrad=True)
    loss_func = torch.nn.BCELoss()

    log = logger.LoggerBCE(args.outf,
                           netF,
                           torch.from_numpy(images_train),
                           torch.from_numpy(masks_train),
                           torch.from_numpy(images_test),
                           torch.from_numpy(masks_test),
                           bcefunc=loss_func,
                           device=device)

    for i in range(args.niter):
        optimizerF.zero_grad()
        images, masks = next(dataiter)
        images, masks = images.to(device), masks.to(device)
        masks_pred = netF(images)
        loss = loss_func(masks_pred, masks)
        loss.backward()
        optimizerF.step()

        if (i + 1) % args.nprint == 0:
            print "[{}/{}] | loss: {:.3f}".format(i + 1, args.niter,
                                                  loss.item())
            log.flush(i + 1)

            if (i + 1) > 5000:
                torch.save(netF.state_dict(),
                           '{}/netF_iter_{}.pth'.format(args.outf, i + 1))
Example #4
    def predict(self, image, preloaded=False):

        # set test mode
        self.net.eval()

        if not preloaded:
            self.loadModel()
            print('Loaded Model')

        print('Starting Prediction')

        composed = transforms.Compose([
            utils.Preprocess(self.input_shape),
            utils.ToTensor(),
            utils.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
        ])

        # Target gets discarded
        sample = {'image': image, 'target': 0}
        sample = composed(sample)
        inputs = sample['image']

        # Add single batch dimension
        inputs = inputs.unsqueeze(0)

        if self.cfg.cuda:
            inputs = Variable(inputs.cuda(non_blocking=True))
        else:
            inputs = Variable(inputs)

        if self.cfg.cuda:
            outputs = self.net(inputs).cuda(non_blocking=True)
        else:
            outputs = self.net(inputs)

        print('Finished Prediction')
        print('Control tensor: %.6f ' % (outputs.item()))

        # set train mode
        self.net.train()

        return outputs.item()
Example #5
    def loadData(self):

        trainset = SimulationDataset(
            "train",
            transforms=transforms.Compose([
                utils.RandomCoose(['centre', 'left', 'right']),
                utils.Preprocess(self.input_shape),
                utils.RandomTranslate(100, 10),
                utils.RandomBrightness(),
                utils.RandomContrast(),
                utils.RandomHue(),
                utils.RandomHorizontalFlip(),
                utils.ToTensor(),
                utils.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
            ]))

        weights = utils.get_weights(trainset)

        sampler = torch.utils.data.sampler.WeightedRandomSampler(
            weights, len(weights), replacement=True)

        # self.trainloader = torch.utils.data.DataLoader(trainset, batch_size=self.cfg.batch_size, sampler=sampler, num_workers=4)

        self.trainloader = torch.utils.data.DataLoader(
            trainset, batch_size=self.cfg.batch_size, num_workers=4)

        testset = SimulationDataset("test",
                                    transforms=transforms.Compose([
                                        utils.RandomCoose(['center']),
                                        utils.Preprocess(self.input_shape),
                                        utils.ToTensor(),
                                        utils.Normalize([0.485, 0.456, 0.406],
                                                        [0.229, 0.224, 0.225])
                                    ]))

        self.testloader = torch.utils.data.DataLoader(
            testset,
            batch_size=self.cfg.batch_size,
            shuffle=False,
            num_workers=4)
Example #6
    def create_operators(self):
        size = 224
        img_mean = [0.485, 0.456, 0.406]
        img_std = [0.229, 0.224, 0.225]
        img_scale = 1.0 / 255.0
        decode_op = utils.DecodeImage()
        resize_op = utils.ResizeImage(resize_short=256)
        crop_op = utils.CropImage(size=(size, size))
        normalize_op = utils.NormalizeImage(scale=img_scale,
                                            mean=img_mean,
                                            std=img_std)
        totensor_op = utils.ToTensor()
        return [decode_op, resize_op, crop_op, normalize_op, totensor_op]
Example #7
def create_operators(interpolation=1):
    size = 224
    img_mean = [0.485, 0.456, 0.406]
    img_std = [0.229, 0.224, 0.225]
    img_scale = 1.0 / 255.0

    resize_op = utils.ResizeImage(resize_short=256,
                                  interpolation=interpolation)
    crop_op = utils.CropImage(size=(size, size))
    normalize_op = utils.NormalizeImage(scale=img_scale,
                                        mean=img_mean,
                                        std=img_std)
    totensor_op = utils.ToTensor()

    return [resize_op, crop_op, normalize_op, totensor_op]
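
Both create_operators variants return a plain list of callables rather than a composed transform, so the caller is expected to fold an image through them in order. A hedged sketch of that driver loop (the helper name and the usage comment are illustrative, not part of the original code):

def apply_operators(data, ops):
    """Run data through each preprocessing operator in sequence."""
    for op in ops:
        data = op(data)
    return data

# Hypothetical usage with the decode-first pipeline from Example #6:
# with open("some_image.jpg", "rb") as f:
#     chw_tensor = apply_operators(f.read(), ops)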
Example #8
def test(args):
    if args.gpu:
        ctx = [mx.gpu(0)]
    else:
        ctx = [mx.cpu(0)]
    if args.dataset == "Sony":
        out_channels = 12
        scale = 2
    else:
        out_channels = 27
        scale = 3

    # load data
    test_transform = utils.Compose([utils.ToTensor()])
    test_dataset = data.MyDataset(args.dataset,
                                  "test",
                                  transform=test_transform)
    test_loader = gluon.data.DataLoader(test_dataset,
                                        batch_size=1,
                                        last_batch='discard')
    unet = net.UNet(out_channels, scale)
    unet.load_params(args.model, ctx=ctx)
    batches = 0
    avg_psnr = 0.
    for img, gt in test_loader:
        batches += 1
        imgs = gluon.utils.split_and_load(img[0], ctx)
        label = gluon.utils.split_and_load(gt[0], ctx)
        outputs = []
        for x in imgs:
            outputs.append(unet(x))
        metric.update(label, outputs)
        avg_psnr += 10 * math.log10(1 / metric.get()[1])
        metric.reset()
    avg_psnr /= batches
    print('Test avg psnr: {:.3f}'.format(avg_psnr))
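
The metric object used by test() here (and by train() in Example #14) is defined at module level in the original project; the PSNR line only makes sense if it is a mean-squared-error metric over images scaled to [0, 1], since PSNR = 10 * log10(1 / MSE). A hedged guess at that missing definition:

import mxnet as mx

# Assumed module-level metric; metric.get() returns ('mse', value).
metric = mx.metric.MSE()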
Example #9
def train(args):
    frames = args.frames
    caption_length = args.caption_length
    glove_file = args.glove_file

    #CPU_COUNT = multiprocessing.cpu_count()
    if args.cuda:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()

    if args.load_pretrain:
        pretrain_model = vision.vgg16_bn(pretrained=True, ctx=ctx)
        transform = utils.Compose([
            utils.ToTensor(ctx),
            utils.normalize(ctx),
            utils.extractFeature(ctx, pretrain_model)
        ])
    else:
        pretrain_model = None
        transform = utils.Compose([
            utils.ToTensor(ctx),
            utils.normalize(ctx),
        ])

    target_transform = utils.targetCompose([utils.WordToTensor(ctx)])

    train_dataset = videoFolder(args.train_folder,
                                args.train_dict,
                                frames,
                                glove_file,
                                caption_length,
                                ctx,
                                img_size=args.img_size,
                                transform=transform,
                                target_transform=target_transform)

    test_dataset = videoFolder(args.test_folder,
                               args.test_dict,
                               frames,
                               glove_file,
                               caption_length,
                               ctx,
                               img_size=args.img_size,
                               transform=transform,
                               target_transform=target_transform)

    train_loader = gluon.data.DataLoader(train_dataset,
                                         batch_size=args.batch_size,
                                         last_batch='discard',
                                         shuffle=True)

    test_loader = gluon.data.DataLoader(test_dataset,
                                        batch_size=args.batch_size,
                                        last_batch='discard',
                                        shuffle=False)

    loss = L2Loss()
    #net = lstm_net(caption_length,ctx,pretrained=args.load_pretrain)
    net = resnet18_v2(caption_length=caption_length, ctx=ctx)

    net.collect_params().initialize(init=mx.initializer.MSRAPrelu(), ctx=ctx)

    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': args.lr})

    train_loss = []
    test_loss = []
    train_loss_batch = []
    test_loss_batch = []

    smoothing_constant = 0.01

    for e in range(args.epochs):

        epoch_loss = 0.
        for batch_id, (x, _) in enumerate(train_loader):

            with autograd.record():
                pred = net(x)
                batch_loss = loss(pred, _)

            batch_loss.backward()
            trainer.step(x.shape[0], ignore_stale_grad=True)
            mx.nd.waitall()

            #print(batch_loss.shape)
            batch_loss = nd.mean(batch_loss).asscalar()
            #print(batch_id,batch_loss)

            if ((batch_id == 0) or (e == 0)):
                epoch_loss = batch_loss
            else:
                epoch_loss = (1 - smoothing_constant
                              ) * epoch_loss + smoothing_constant * batch_loss

            train_loss_batch.append(batch_loss)

            if (batch_id + 1) % 200 == 0:
                print("Train Batch:{}, batch_loss:{}".format(
                    batch_id + 1, batch_loss))

            if ((e + 1) * (batch_id + 1)) % (2 * args.log_interval) == 0:
                # save model
                save_model_filename = "Epoch_" + str(e) + "_iters_" + str(
                    batch_id + 1) + '_' + str(time.ctime()).replace(
                        ' ', '_') + "_" + ".params"

                save_model_path = os.path.join(args.model_path,
                                               save_model_filename)
                net.save_parameters(save_model_path)
                print("\nCheckpoint, trained model saved at", save_model_path)

                train_loss_filename = "Epoch_" + str(e) + "_iters_" + str(
                    batch_id + 1) + str(time.ctime()).replace(
                        ' ', '_') + "_train_loss" + ".txt"

                train_loss_path = os.path.join(args.log_path,
                                               train_loss_filename)
                np.savetxt(train_loss_path, np.array(train_loss_batch))

        epoch_loss_1 = 0.
        for batch_id, (x, _) in enumerate(test_loader):

            with autograd.predict_mode():
                predict = net(x)
                batch_loss_1 = loss(predict, _)

            #batch_loss_1 = F.mean(batch_loss_1.asscalar())
            batch_loss_1 = nd.mean(batch_loss_1).asscalar()

            if ((batch_id == 0) or (e == 0)):
                epoch_loss_1 = batch_loss_1
            else:
                epoch_loss_1 = (
                    1 - smoothing_constant
                ) * epoch_loss_1 + smoothing_constant * batch_loss_1

            test_loss_batch.append(batch_loss_1)

            if ((e + 1) * (batch_id + 1)) % (args.log_interval) == 0:

                test_loss_file_name = "Epoch_" + str(e) + "_iters_" + str(
                    batch_id + 1) + str(time.ctime()).replace(
                        ' ', '_') + "_test_loss" + ".txt"
                test_loss_path = os.path.join(args.log_path,
                                              test_loss_file_name)
                np.savetxt(test_loss_path, np.array(test_loss_batch))

        train_loss.append(epoch_loss)
        test_loss.append(epoch_loss_1)

        print("Epoch {}, train_loss:{}, test_loss:{}".format(
            e + 1, epoch_loss, epoch_loss_1))

    # save model
    save_model_filename = "Final_epoch_" + str(args.epochs) + "_" + str(
        time.ctime()).replace(' ', '_') + "_" + ".params"
    save_model_path = os.path.join(args.model_path, save_model_filename)
    net.save_parameters(save_model_path)
    print("\nDone, trained model saved at", save_model_path)

    train_epoch_loss_file_name = 'train_epoch_loss.txt'
    test_epoch_loss_file_name = 'test_epoch_loss.txt'
    train_epoch_loss_path = os.path.join(args.log_path,
                                         train_epoch_loss_file_name)
    test_epoch_loss_path = os.path.join(args.log_path,
                                        test_epoch_loss_file_name)
    np.savetxt(train_epoch_loss_path, train_loss)
    np.savetxt(test_epoch_loss_path, test_loss)
Example #10
import os
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from datasets import *
import utils
from model import Net

transform = transforms.Compose([utils.ToTensor()])
mnist_dataset = MNIST_Datasets('training', './data', transform)
test_dataset = MNIST_Datasets('testing', './data', transform)

dataloader = DataLoader(mnist_dataset,
                        batch_size=4,
                        shuffle=True,
                        num_workers=4)
testloader = DataLoader(test_dataset,
                        batch_size=4,
                        shuffle=False,
                        num_workers=4)

print('Dataset size:', len(mnist_dataset))
n_epoches = 5

model = Net()
# How to init weights in model
# grad = True, False
# Normalize
# GPU
# Save model
# Transfer learning
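
The trailing comments list what is still missing from this script. A hedged way to finish it, reusing the names defined above and assuming Net is an ordinary nn.Module that returns class logits for (image, label) batches:

import torch.nn as nn
import torch.optim as optim

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)                      # GPU if available
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

for epoch in range(n_epoches):
    model.train()
    for images, labels in dataloader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    print('Epoch {}: last batch loss {:.4f}'.format(epoch + 1, loss.item()))

torch.save(model.state_dict(), 'mnist_net.pth')   # checkpoint name is illustrative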
Example #11
def main(args):
    # log hyperparameter
    print(args)

    # select device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda: 0" if args.cuda else "cpu")

    # set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # data loader
    transform = transforms.Compose([utils.Normalize(), utils.ToTensor()])

    infer_dataset = InferTVDataset(root=args.root,
                                   sub_size=args.block_size,
                                   volume_list="volume_test_list.txt",
                                   max_k=args.infering_step,
                                   transform=transform)

    kwargs = {"num_workers": 4, "pin_memory": True} if args.cuda else {}
    infer_loader = DataLoader(infer_dataset,
                              batch_size=args.batch_size,
                              shuffle=False,
                              **kwargs)

    # model
    def generator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight,
                                    mode='fan_out',
                                    nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    g_model = Generator(args.upsample_mode, args.forward, args.backward,
                        args.gen_sn)
    g_model.apply(generator_weights_init)
    if args.data_parallel and torch.cuda.device_count() > 1:
        g_model = nn.DataParallel(g_model)
    g_model.to(device)

    mse_loss = nn.MSELoss()
    adversarial_loss = nn.MSELoss()
    train_losses, test_losses = [], []
    d_losses, g_losses = [], []

    # load checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            g_model.load_state_dict(checkpoint["g_model_state_dict"])
            # g_optimizer.load_state_dict(checkpoint["g_optimizer_state_dict"])
            if args.gan_loss != "none":
                # d_model.load_state_dict(checkpoint["d_model_state_dict"])
                # d_optimizer.load_state_dict(checkpoint["d_optimizer_state_dict"])
                d_losses = checkpoint["d_losses"]
                g_losses = checkpoint["g_losses"]
            train_losses = checkpoint["train_losses"]
            test_losses = checkpoint["test_losses"]
            print("=> load chekcpoint {} (epoch {})".format(
                args.resume, checkpoint["epoch"]))

    g_model.eval()
    inferRes = []
    zSize, ySize, xSize = 120, 720, 480
    for i in range(args.infering_step):
        inferRes.append(np.zeros((zSize, ySize, xSize)))
    inferScale = np.zeros((zSize, ySize, xSize))
    time_start = 0
    volume_type = ''

    with torch.no_grad():
        for i, sample in tqdm(enumerate(infer_loader)):
            v_f = sample["v_f"].to(device)
            v_b = sample["v_b"].to(device)
            fake_volumes = g_model(v_f, v_b, args.infering_step,
                                   args.wo_ori_volume, args.norm)
            volume_type, time_start, x_start, y_start, z_start = utils.Parse(
                sample["vf_name"][0])

            for j in range(fake_volumes.shape[1]):
                volume = fake_volumes[0, j, 0]
                min_value = -0.015  # -0.012058
                max_value = 1.01  # 1.009666
                mean = (min_value + max_value) / 2
                std = mean - min_value
                volume = volume.to("cpu").numpy() * std + mean

                inferRes[j][z_start:z_start + args.block_size,
                            y_start:y_start + args.block_size,
                            x_start:x_start + args.block_size] += volume
                if j == 0:
                    inferScale[z_start:z_start + args.block_size,
                               y_start:y_start + args.block_size,
                               x_start:x_start + args.block_size] += 1
                # pdb.set_trace()

    for j in range(args.infering_step):
        inferRes[j] = inferRes[j] / inferScale
        inferRes[j] = inferRes[j].astype(np.float32)

        volume_name = volume_type + '_' + ("%04d" %
                                           (time_start + j + 1)) + '.raw'
        inferRes[j].tofile(os.path.join(args.save_pred, volume_name))
Example #12
def train(args):
    frames = args.frames
    caption_length = args.caption_length
    glove_file = args.glove_file
    
    #CPU_COUNT = multiprocessing.cpu_count()
    if args.cuda:
        ctx = mx.gpu()
    else:
        ctx = mx.cpu()
    
    if args.load_pretrain:
        pretrain_model = vision.vgg16_bn(pretrained=True, ctx=ctx)
        transform = utils.Compose([utils.ToTensor(ctx),
                                   utils.normalize(ctx),
                                   utils.extractFeature(ctx, pretrain_model)])
    else:
        pretrain_model = None
        transform = utils.Compose([utils.ToTensor(ctx),
                                   utils.normalize(ctx)])
    
    target_transform = utils.targetCompose([utils.WordToTensor(ctx)])

    train_dataset = videoFolder(args.train_folder,args.train_dict, frames, glove_file, 
                    caption_length, ctx, transform=transform, target_transform=target_transform)

    test_dataset = videoFolder(args.test_folder,args.test_dict, frames, glove_file, 
                        caption_length, ctx, transform=transform, target_transform=target_transform)

    train_loader = gluon.data.DataLoader(train_dataset,batch_size=args.batch_size,
                                last_batch='discard',shuffle=True)

    test_loader = gluon.data.DataLoader(test_dataset,batch_size=args.batch_size,
                                    last_batch='discard',shuffle=False)

    #loss = L2Loss_cos()
    loss = L2Loss_2()
    net = lstm_net(frames,caption_length,ctx,pretrained=args.load_pretrain)
    #net = resnet18_v2(caption_length=caption_length,ctx=ctx)
    
    net.collect_params().initialize(init=mx.initializer.MSRAPrelu(), ctx=ctx)
    trainer = gluon.Trainer(net.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    
    smoothing_constant = 0.01
    
    for e in range(args.epochs):
        epoch_loss = 0
        
        for batch_id, (x,_) in enumerate(train_loader):
            with autograd.record():
                pred = net(x)
                batch_loss = loss(pred,_)
            
            batch_loss.backward()
            trainer.step(x.shape[0],ignore_stale_grad=True)
            mx.nd.waitall()
            
            batch_loss = F.mean(batch_loss).asscalar()
            
            if batch_id % 100 == 0:
                print("Train Batch:{}, batch_loss:{}".format(batch_id+1, batch_loss))
                  
            epoch_loss = (batch_loss if ((batch_id == 0) and (e == 0))
                          else (1 - smoothing_constant)*epoch_loss + smoothing_constant*batch_loss)
        
        epoch_loss_1 = 0
        for batch_id, (x,_) in enumerate(test_loader):
            with autograd.predict_mode():
                predict = net(x)
                batch_loss_1 = loss(predict,_)
            
            batch_loss_1 = F.mean(batch_loss_1).asscalar()
            
            if batch_id % 100 == 0:
                print("Test Batch:{}, batch_loss:{}".format(batch_id+1, batch_loss_1))
                
            epoch_loss_1 = (batch_loss_1 if ((batch_id == 0) and (e == 0))
                          else (1 - smoothing_constant)*epoch_loss_1 + smoothing_constant*batch_loss_1)
            
 
        
        print("Epoch {}, train_loss:{}, test_loss:{}".format(e+1, epoch_loss, epoch_loss_1))
    
    if args.save_model == True:
        file_name = "./saved_model/" + "lstm_pretrain.params"
        net.save_parameters(file_name)
Example #13

if __name__ == "__main__":
    # load config
    args = args().parse_args()
    syn_world(args)
    if world.verbose:
        print("loaded configs")
    if world.useSigmoid:
        world.filename = "checkpoint_sigmoid.pth.tar"
    args.tag = time.strftime("%m-%d-%H:%M") + args.opt + str(
        args.lr) + "-" + world.comment + "_"
    world.filename = args.tag + world.filename

    # load data
    tran = transforms.Compose([utils.Scale(), utils.ToTensor()])
    data = dataloader(transform=tran)
    if len(data) == 0:
        print("Didn't find dataset.")
        raise ValueError("empty dataset")
    data_train = DataLoader(data,
                            batch_size=world.batch_size,
                            shuffle=True,
                            num_workers=world.workers)
    world.n_batch = len(data_train)
    if args.eval == False:
        data_test = dataloader(mode="test", transform=tran)
        data_test = DataLoader(data_test,
                               batch_size=world.batch_size,
                               shuffle=True,
                               num_workers=world.workers)
Example #14
def train(args):
    np.random.seed(args.seed)
    if args.gpu:
        ctx = [mx.gpu(0)]
    else:
        ctx = [mx.cpu(0)]
    if args.dataset == "Sony":
        out_channels = 12
        scale = 2
    else:
        out_channels = 27
        scale = 3

    # load data
    train_transform = utils.Compose([
        utils.RandomCrop(args.patch_size, scale),
        utils.RandomFlipLeftRight(),
        utils.RandomFlipTopBottom(),
        utils.RandomTranspose(),
        utils.ToTensor(),
    ])
    train_dataset = data.MyDataset(args.dataset,
                                   "train",
                                   transform=train_transform)
    val_transform = utils.Compose([utils.ToTensor()])
    val_dataset = data.MyDataset(args.dataset, "val", transform=val_transform)
    train_loader = gluon.data.DataLoader(train_dataset,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         last_batch='rollover')
    val_loader = gluon.data.DataLoader(val_dataset,
                                       batch_size=1,
                                       last_batch='discard')
    unet = net.UNet(out_channels, scale)
    unet.initialize(init=initializer.Xavier(), ctx=ctx)

    # optimizer and loss
    trainer = gluon.Trainer(unet.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    l1_loss = gluon.loss.L1Loss()

    print "Start training now.."
    for i in range(args.epochs):
        total_loss = 0
        count = 0
        profiler.set_state('run')
        for batch_id, (img, gt) in enumerate(train_loader):
            batch_size = img.shape[0]
            count += batch_size
            img_list = gluon.utils.split_and_load(img[0], ctx)
            gt_list = gluon.utils.split_and_load(gt[0], ctx)
            with autograd.record():
                preds = [unet(x) for x in img_list]
                losses = []
                for ii in range(len(preds)):
                    loss = l1_loss(gt_list[ii], preds[ii])
                    losses.append(loss)
            for loss in losses:
                loss.backward()
            total_loss += sum([l.sum().asscalar() for l in losses])
            avg_loss = total_loss / count
            trainer.step(batch_size)
            metric.update(gt_list, preds)
            F.waitall()
            if batch_id == 0:
                profiler.set_state('stop')
                print(profiler.dumps())
            gt_save = gt_list[0]
            output_save = preds[0]

            if (batch_id + 1) % 100 == 0:
                message = "Epoch {}: [{}/{}]: l1_loss: {:.4f}".format(
                    i + 1, count, len(train_dataset), avg_loss)
                print(message)
        temp = F.concat(gt_save, output_save, dim=3)
        temp = temp.asnumpy().reshape(temp.shape[2], temp.shape[3], 3)
        scipy.misc.toimage(temp * 255,
                           high=255,
                           low=0,
                           cmin=0,
                           cmax=255,
                           mode='RGB').save(args.save_model_dir +
                                            '%04d_%05d_00_train.jpg' %
                                            (i + 1, count))

        # evaluate
        batches = 0
        avg_psnr = 0.
        for img, gt in val_loader:
            batches += 1
            imgs = gluon.utils.split_and_load(img[0], ctx)
            label = gluon.utils.split_and_load(gt[0], ctx)
            outputs = []
            for x in imgs:
                outputs.append(unet(x))
            metric.update(label, outputs)
            avg_psnr += 10 * math.log10(1 / metric.get()[1])
            metric.reset()
        avg_psnr /= batches
        print('Epoch {}: validation avg psnr: {:.3f}'.format(i + 1, avg_psnr))

        # save model
        if (i + 1) % args.save_freq == 0:
            save_model_filename = "Epoch_" + str(i + 1) + ".params"
            save_model_path = os.path.join(args.save_model_dir,
                                           save_model_filename)
            unet.save_params(save_model_path)
            print("\nCheckpoint, trained model saved at", save_model_path)

    # save model
    save_model_filename = "Final_Epoch_" + str(i + 1) + ".params"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    unet.save_params(save_model_path)
    print("\nCheckpoint, trained model saved at", save_model_path)
Example #15
def train(args):
    train_logger = None
    if args.log_dir is not None:
        train_logger = tb.SummaryWriter(
            os.path.join(args.log_dir, "train"), flush_secs=1
        )

    transform = utils.Compose(
        [
            utils.Rotation(),
            utils.Crop(),
            utils.Resize((32, 32)),
            utils.IntensityNormalize(),
            utils.ToTensor(),
        ]
    )
    train_loader = dataset.load_data(
        args.data_dir, batch_size=args.batch_size, transform=transform
    )
    print("Train length:", len(train_loader))

    model = FacialModel()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=5e-3)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    model.to(device)
    print_every = 10
    num_train = len(train_loader)
    for epoch in range(args.epochs):
        model.train()

        lr = adjust_learning_rate_poly(optimizer, 1e-3, epoch, args.epochs)

        running_print_loss = 0
        running_print_accuracy = 0
        running_accuracy = 0
        for i, data in enumerate(train_loader, 0):
            # get the inputs; data is a list of [inputs, labels]
            inputs, labels = data
            inputs = inputs.to(device)
            labels = labels.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            acc = accuracy(labels, outputs)
            running_print_accuracy += acc.item()
            running_accuracy += acc.item()
            running_print_loss += loss.item()
            running_loss = loss.item()
            if (i + 1) % print_every == 0:  # print every print_every mini-batches
                print(
                    "[%d, %5d] loss: %.3f accuracy: %.3f lr: %.8f"
                    % (
                        epoch + 1,
                        i + 1,
                        running_print_loss / print_every,
                        running_print_accuracy / print_every,
                        lr,
                    )
                )
                running_print_loss = 0
                running_print_accuracy = 0

            # write train loss summaries (only when a log_dir was given)
            if train_logger is not None:
                train_logger.add_scalar("loss", running_loss, epoch * num_train + i + 1)

        if train_logger is not None:
            train_logger.add_scalar("accuracy", running_accuracy / num_train, epoch + 1)
        # plt.imshow(F.to_pil_image(sample['image']))
        # plt.title(str(sample['target']))
        # plt.show()

        return sample['image'], sample['target']

    def __len__(self):
        return len(self.image_paths)


if __name__ == '__main__':

    input_shape = (utils.IMAGE_HEIGHT, utils.IMAGE_WIDTH)
    dataset = SimulationDataset("train",
                                transforms=transforms.Compose([
                                    utils.RandomCoose(['center']),
                                    utils.Preprocess(input_shape),
                                    utils.RandomHorizontalFlip(),
                                    utils.ToTensor(),
                                    utils.Normalize([0.485, 0.456, 0.406],
                                                    [0.229, 0.224, 0.225])
                                ]))
    print(dataset.__len__())
    print(dataset.__getitem__(0)[0].size())

    for c in range(3):
        for i in range(dataset.__len__()):
            print(dataset.__getitem__(i)[c].mean())
            print(dataset.__getitem__(i)[c].std())
    # print(dataset.__getitem__(0))
    # print(len(dataset.__get_annotations__()))
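
The loop above prints channel statistics one sample at a time; to get the per-channel mean and std that utils.Normalize takes, an aggregated pass is more useful. A sketch under the assumption that dataset[i][0] is a 3xHxW float tensor (averaging per-image statistics approximates, but does not equal, the exact pixel-level standard deviation):

import torch

def channel_stats(ds):
    """Average per-image channel means and stds over the whole dataset."""
    means = torch.zeros(3)
    stds = torch.zeros(3)
    for i in range(len(ds)):
        img = ds[i][0]                        # 3xHxW tensor
        means += img.reshape(3, -1).mean(dim=1)
        stds += img.reshape(3, -1).std(dim=1)
    return means / len(ds), stds / len(ds)

# mean, std = channel_stats(dataset)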
Example #17
def main(args):
    # log hyperparameter
    print(args)

    # select device
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda: 0" if args.cuda else "cpu")

    # set random seed
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    # data loader
    transform = transforms.Compose([
        utils.Normalize(),
        utils.ToTensor()
    ])
    train_dataset = TVDataset(
        root=args.root,
        sub_size=args.block_size,
        volume_list=args.volume_train_list,
        max_k=args.training_step,
        train=True,
        transform=transform
    )
    test_dataset = TVDataset(
        root=args.root,
        sub_size=args.block_size,
        volume_list=args.volume_test_list,
        max_k=args.training_step,
        train=False,
        transform=transform
    )

    kwargs = {"num_workers": 4, "pin_memory": True} if args.cuda else {}
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size,
                              shuffle=True, **kwargs)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size,
                             shuffle=False, **kwargs)

    # model
    def generator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    def discriminator_weights_init(m):
        if isinstance(m, nn.Conv3d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='leaky_relu')
            if m.bias is not None:
                nn.init.zeros_(m.bias)

    g_model = Generator(args.upsample_mode, args.forward, args.backward, args.gen_sn, args.residual)
    g_model.apply(generator_weights_init)
    if args.data_parallel and torch.cuda.device_count() > 1:
        g_model = nn.DataParallel(g_model)
    g_model.to(device)

    if args.gan_loss != "none":
        d_model = Discriminator(args.dis_sn)
        d_model.apply(discriminator_weights_init)
        # if args.dis_sn:
        #     d_model = add_sn(d_model)
        if args.data_parallel and torch.cuda.device_count() > 1:
            d_model = nn.DataParallel(d_model)
        d_model.to(device)

    mse_loss = nn.MSELoss()
    adversarial_loss = nn.MSELoss()
    train_losses, test_losses = [], []
    d_losses, g_losses = [], []

    # optimizer
    g_optimizer = optim.Adam(g_model.parameters(), lr=args.lr,
                             betas=(args.beta1, args.beta2))
    if args.gan_loss != "none":
        d_optimizer = optim.Adam(d_model.parameters(), lr=args.d_lr,
                                 betas=(args.beta1, args.beta2))

    Tensor = torch.cuda.FloatTensor if args.cuda else torch.FloatTensor

    # load checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint {}".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint["epoch"]
            g_model.load_state_dict(checkpoint["g_model_state_dict"])
            # g_optimizer.load_state_dict(checkpoint["g_optimizer_state_dict"])
            if args.gan_loss != "none":
                d_model.load_state_dict(checkpoint["d_model_state_dict"])
                # d_optimizer.load_state_dict(checkpoint["d_optimizer_state_dict"])
                d_losses = checkpoint["d_losses"]
                g_losses = checkpoint["g_losses"]
            train_losses = checkpoint["train_losses"]
            test_losses = checkpoint["test_losses"]
            print("=> load chekcpoint {} (epoch {})"
                  .format(args.resume, checkpoint["epoch"]))

    # main loop
    for epoch in tqdm(range(args.start_epoch, args.epochs)):
        # training..
        g_model.train()
        if args.gan_loss != "none":
            d_model.train()
        train_loss = 0.
        volume_loss_part = np.zeros(args.training_step)
        for i, sample in enumerate(train_loader):
            params = list(g_model.named_parameters())
            # pdb.set_trace()
            # params[0][1].register_hook(lambda g: print("{}.grad: {}".format(params[0][0], g)))
            # adversarial ground truths
            real_label = Variable(Tensor(sample["v_i"].shape[0], sample["v_i"].shape[1], 1, 1, 1, 1).fill_(1.0), requires_grad=False)
            fake_label = Variable(Tensor(sample["v_i"].shape[0], sample["v_i"].shape[1], 1, 1, 1, 1).fill_(0.0), requires_grad=False)

            v_f = sample["v_f"].to(device)
            v_b = sample["v_b"].to(device)
            v_i = sample["v_i"].to(device)
            g_optimizer.zero_grad()
            fake_volumes = g_model(v_f, v_b, args.training_step, args.wo_ori_volume, args.norm)

            # adversarial loss
            # update discriminator
            if args.gan_loss != "none":
                avg_d_loss = 0.
                avg_d_loss_real = 0.
                avg_d_loss_fake = 0.
                for k in range(args.n_d):
                    d_optimizer.zero_grad()
                    decisions = d_model(v_i)
                    d_loss_real = adversarial_loss(decisions, real_label)
                    fake_decisions = d_model(fake_volumes.detach())

                    d_loss_fake = adversarial_loss(fake_decisions, fake_label)
                    d_loss = d_loss_real + d_loss_fake
                    d_loss.backward()
                    avg_d_loss += d_loss.item() / args.n_d
                    avg_d_loss_real += d_loss_real / args.n_d
                    avg_d_loss_fake += d_loss_fake / args.n_d

                    d_optimizer.step()

            # update generator
            if args.gan_loss != "none":
                avg_g_loss = 0.
            avg_loss = 0.
            for k in range(args.n_g):
                loss = 0.
                g_optimizer.zero_grad()

                # adversarial loss
                if args.gan_loss != "none":
                    fake_decisions = d_model(fake_volumes)
                    g_loss = args.gan_loss_weight * adversarial_loss(fake_decisions, real_label)
                    loss += g_loss
                    avg_g_loss += g_loss.item() / args.n_g

                # volume loss
                if args.volume_loss:
                    volume_loss = args.volume_loss_weight * mse_loss(v_i, fake_volumes)
                    for j in range(v_i.shape[1]):
                        volume_loss_part[j] += mse_loss(v_i[:, j, :], fake_volumes[:, j, :]) / args.n_g / args.log_every
                    loss += volume_loss

                # feature loss
                if args.feature_loss:
                    feat_real = d_model.extract_features(v_i)
                    feat_fake = d_model.extract_features(fake_volumes)
                    for m in range(len(feat_real)):
                        loss += args.feature_loss_weight / len(feat_real) * mse_loss(feat_real[m], feat_fake[m])

                avg_loss += loss / args.n_g
                loss.backward()
                g_optimizer.step()

            train_loss += avg_loss

            # log training status
            subEpoch = (i + 1) // args.log_every
            if (i+1) % args.log_every == 0:
                print("Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}".format(
                    epoch, (i+1) * args.batch_size, len(train_loader.dataset), 100. * (i+1) / len(train_loader),
                    avg_loss
                ))
                print("Volume Loss: ")
                for j in range(volume_loss_part.shape[0]):
                    print("\tintermediate {}: {:.6f}".format(
                        j+1, volume_loss_part[j]
                    ))

                if args.gan_loss != "none":
                    print("DLossReal: {:.6f} DLossFake: {:.6f} DLoss: {:.6f}, GLoss: {:.6f}".format(
                        avg_d_loss_real, avg_d_loss_fake, avg_d_loss, avg_g_loss
                    ))
                    d_losses.append(avg_d_loss)
                    g_losses.append(avg_g_loss)
                # train_losses.append(avg_loss)
                train_losses.append(train_loss.item() / args.log_every)
                print("====> SubEpoch: {} Average loss: {:.6f} Time {}".format(
                    subEpoch, train_loss.item() / args.log_every, time.asctime(time.localtime(time.time()))
                ))
                train_loss = 0.
                volume_loss_part = np.zeros(args.training_step)

            # testing...
            if (i + 1) % args.test_every == 0:
                g_model.eval()
                if args.gan_loss != "none":
                    d_model.eval()
                test_loss = 0.
                with torch.no_grad():
                    for sample in test_loader:
                        v_f = sample["v_f"].to(device)
                        v_b = sample["v_b"].to(device)
                        v_i = sample["v_i"].to(device)
                        fake_volumes = g_model(v_f, v_b, args.training_step, args.wo_ori_volume, args.norm)
                        test_loss += args.volume_loss_weight * mse_loss(v_i, fake_volumes).item()

                test_losses.append(test_loss * args.batch_size / len(test_loader.dataset))
                print("====> SubEpoch: {} Test set loss {:4f} Time {}".format(
                    subEpoch, test_losses[-1], time.asctime(time.localtime(time.time()))
                ))

            # saving...
            if (i+1) % args.check_every == 0:
                print("=> saving checkpoint at epoch {}".format(epoch))
                if args.gan_loss != "none":
                    torch.save({"epoch": epoch + 1,
                                "g_model_state_dict": g_model.state_dict(),
                                "g_optimizer_state_dict":  g_optimizer.state_dict(),
                                "d_model_state_dict": d_model.state_dict(),
                                "d_optimizer_state_dict": d_optimizer.state_dict(),
                                "d_losses": d_losses,
                                "g_losses": g_losses,
                                "train_losses": train_losses,
                                "test_losses": test_losses},
                               os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + "_" + "pth.tar")
                               )
                else:
                    torch.save({"epoch": epoch + 1,
                                "g_model_state_dict": g_model.state_dict(),
                                "g_optimizer_state_dict": g_optimizer.state_dict(),
                                "train_losses": train_losses,
                                "test_losses": test_losses},
                               os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + "_" + "pth.tar")
                               )
                torch.save(g_model.state_dict(),
                           os.path.join(args.save_dir, "model_" + str(epoch) + "_" + str(subEpoch) + ".pth"))

        num_subEpoch = len(train_loader) // args.log_every
        print("====> Epoch: {} Average loss: {:.6f} Time {}".format(
            epoch, np.array(train_losses[-num_subEpoch:]).mean(), time.asctime(time.localtime(time.time()))
        ))
Example #18
def main(args):
    utils.seedme(args.seed)
    cudnn.benchmark = True
    os.system('mkdir -p {}'.format(args.outf))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    print "Using BCE loss: {}".format(not args.no_bce)

    images_train, images_test, masks_train, masks_test = utils.load_seismic_data(args.root_dir, test_size=.2, random_state=args.seed)
    images_train, masks_train = utils.concatenate_hflips(images_train, masks_train, shuffle=True, random_state=args.seed)
    images_test, masks_test = utils.concatenate_hflips(images_test, masks_test, shuffle=True, random_state=args.seed)

    # transform = transforms.Compose([utils.augment(), utils.ToTensor()])
    transform = transforms.Compose([utils.ToTensor()])
    dataset_train = utils.SegmentationDataset(images_train, masks_train, transform=transform)
    dataloader = torch.utils.data.DataLoader(dataset_train, batch_size=args.batch_size, shuffle=True, drop_last=True, num_workers=1)
    dataiter = utils.dataiterator(dataloader)

    netF = models.choiceF[args.archF](num_features=args.num_features_F, num_residuals=args.num_residuals, gated=args.gated, gate_param=args.gate_param).to(device)
    netD = models.choiceD[args.archD](num_features=args.num_features_D, nc=2, dropout=args.dropout).to(device)
    if args.netF:
        netF.load_state_dict(torch.load(args.netF))
    if args.netD:
        netD.load_state_dict(torch.load(args.netD))
    print(netF)
    print(netD)
    optimizerF = optim.Adam(netF.parameters(), betas=(0.5, 0.999), lr=args.lr, amsgrad=True)
    optimizerD = optim.Adam(netD.parameters(), betas=(0.5, 0.999), lr=args.lr, amsgrad=True)
    alpha = torch.tensor(args.alpha).to(device)
    loss_func = torch.nn.BCELoss()

    smooth_binary = utils.SmoothBinary(scale=args.smooth_noise)

    # images_test, masks_test = torch.from_numpy(images_test).to(device), torch.from_numpy(masks_test).to(device)
    log = logger.LoggerGAN(args.outf, netF, netD, torch.from_numpy(images_train), torch.from_numpy(masks_train), torch.from_numpy(images_test), torch.from_numpy(masks_test), bcefunc=loss_func, device=device)

    start_time = time.time()
    for i in range(args.niter):

        # --- train D
        for p in netD.parameters():
            p.requires_grad_(True)

        for _ in range(args.niterD):
            optimizerD.zero_grad()

            images_real, masks_real = next(dataiter)
            images_real, masks_real = images_real.to(device), masks_real.to(device)
            masks_fake = netF(images_real).detach()
            x_fake = torch.cat((images_real, masks_fake), dim=1)

            # images_real, masks_real = next(dataiter)
            # images_real, masks_real = images_real.to(device), masks_real.to(device)
            masks_real = smooth_binary(masks_real)
            x_real = torch.cat((images_real, masks_real), dim=1)

            x_real.requires_grad_()  # to compute gradD_real
            x_fake.requires_grad_()  # to compute gradD_fake

            y_real = netD(x_real)
            y_fake = netD(x_fake)
            lossE = y_real.mean() - y_fake.mean()

            # grad() does not broadcast so we compute for the sum, effect is the same
            gradD_real = torch.autograd.grad(y_real.sum(), x_real, create_graph=True)[0]
            gradD_fake = torch.autograd.grad(y_fake.sum(), x_fake, create_graph=True)[0]
            omega = 0.5*(gradD_real.view(gradD_real.size(0), -1).pow(2).sum(dim=1).mean() +
                         gradD_fake.view(gradD_fake.size(0), -1).pow(2).sum(dim=1).mean())

            loss = -lossE - alpha*(1.0 - omega) + 0.5*args.rho*(1.0 - omega).pow(2)
            loss.backward()
            optimizerD.step()
            alpha -= args.rho*(1.0 - omega.item())

        # --- train G
        for p in netD.parameters():
            p.requires_grad_(False)
        optimizerF.zero_grad()
        images_real, masks_real = next(dataiter)
        images_real, masks_real = images_real.to(device), masks_real.to(device)
        masks_fake = netF(images_real)
        x_fake = torch.cat((images_real, masks_fake), dim=1)
        y_fake = netD(x_fake)
        loss = -y_fake.mean()
        bceloss = loss_func(masks_fake, masks_real)
        if not args.no_bce:
            loss = loss + bceloss * args.bce_weight
        loss.backward()
        optimizerF.step()

        log.dump(i+1, lossE.item(), alpha.item(), omega.item())

        if (i+1) % args.nprint == 0:
            print('Time per loop: {} sec/loop'.format((time.time() - start_time)/args.nprint))

            print "[{}/{}] lossE: {:.3f}, bceloss: {:.3f}, alpha: {:.3f}, omega: {:.3f}".format((i+1), args.niter, lossE.item(), bceloss.item(), alpha.item(), omega.item())

            log.flush(i+1)

            # if (i+1) > 5000:
            torch.save(netF.state_dict(), '{}/netF_iter_{}.pth'.format(args.outf, i+1))
            torch.save(netD.state_dict(), '{}/netD_iter_{}.pth'.format(args.outf, i+1))

            start_time = time.time()
Example #19
def train(args):
    np.random.seed(args.seed)
    if args.cuda:
        ctx = mx.gpu(0)
    else:
        ctx = mx.cpu(0)
    # dataloader
    transform = utils.Compose([utils.Scale(args.image_size),
                               utils.CenterCrop(args.image_size),
                               utils.ToTensor(ctx),
                               ])
    train_dataset = data.ImageFolder(args.dataset, transform)
    train_loader = gluon.data.DataLoader(train_dataset, batch_size=args.batch_size, last_batch='discard')
    style_loader = utils.StyleLoader(args.style_folder, args.style_size, ctx=ctx)
    print('len(style_loader):',style_loader.size())
    # models
    vgg = net.Vgg16()
    utils.init_vgg_params(vgg, 'models', ctx=ctx)
    style_model = net.Net(ngf=args.ngf)
    style_model.initialize(init=mx.initializer.MSRAPrelu(), ctx=ctx)
    if args.resume is not None:
        print('Resuming, initializing using weight from {}.'.format(args.resume))
        style_model.collect_params().load(args.resume, ctx=ctx)
    print('style_model:',style_model)
    # optimizer and loss
    trainer = gluon.Trainer(style_model.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    mse_loss = gluon.loss.L2Loss()

    for e in range(args.epochs):
        agg_content_loss = 0.
        agg_style_loss = 0.
        count = 0
        for batch_id, (x, _) in enumerate(train_loader):
            n_batch = len(x)
            count += n_batch
            # prepare data
            style_image = style_loader.get(batch_id)
            style_v = utils.subtract_imagenet_mean_preprocess_batch(style_image.copy())
            style_image = utils.preprocess_batch(style_image)

            features_style = vgg(style_v)
            gram_style = [net.gram_matrix(y) for y in features_style]

            xc = utils.subtract_imagenet_mean_preprocess_batch(x.copy())
            f_xc_c = vgg(xc)[1]
            with autograd.record():
                style_model.setTarget(style_image)
                y = style_model(x)

                y = utils.subtract_imagenet_mean_batch(y)
                features_y = vgg(y)

                content_loss = 2 * args.content_weight * mse_loss(features_y[1], f_xc_c)

                style_loss = 0.
                for m in range(len(features_y)):
                    gram_y = net.gram_matrix(features_y[m])
                    _, C, _ = gram_style[m].shape
                    gram_s = F.expand_dims(gram_style[m], 0).broadcast_to((args.batch_size, 1, C, C))
                    style_loss = style_loss + 2 * args.style_weight * mse_loss(gram_y, gram_s[:n_batch, :, :])

                total_loss = content_loss + style_loss
                total_loss.backward()
                
            trainer.step(args.batch_size)
            mx.nd.waitall()

            agg_content_loss += content_loss[0]
            agg_style_loss += style_loss[0]

            if (batch_id + 1) % args.log_interval == 0:
                mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.3f}\tstyle: {:.3f}\ttotal: {:.3f}".format(
                    time.ctime(), e + 1, count, len(train_dataset),
                                agg_content_loss.asnumpy()[0] / (batch_id + 1),
                                agg_style_loss.asnumpy()[0] / (batch_id + 1),
                                (agg_content_loss + agg_style_loss).asnumpy()[0] / (batch_id + 1)
                )
                print(mesg)

            
            if (batch_id + 1) % (4 * args.log_interval) == 0:
                # save model
                save_model_filename = "Epoch_" + str(e) + "iters_" + str(count) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
                    args.content_weight) + "_" + str(args.style_weight) + ".params"
                save_model_path = os.path.join(args.save_model_dir, save_model_filename)
                style_model.collect_params().save(save_model_path)
                print("\nCheckpoint, trained model saved at", save_model_path)

    # save model
    save_model_filename = "Final_epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
        args.content_weight) + "_" + str(args.style_weight) + ".params"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    style_model.collect_params().save(save_model_path)
    print("\nDone, trained model saved at", save_model_path)
Example #20
def train():
    """
        Load data, build and train the model.
    """

    # Get command line arguments.
    parser = argparse.ArgumentParser(
        description='Behavioral Cloning Training Program')
    parser.add_argument('-d',
                        type=str,
                        help='data directory',
                        default='data',
                        dest='data_dir')
    parser.add_argument('-l',
                        type=float,
                        help='learning rate',
                        default=0.001,
                        dest='learning_rate')
    parser.add_argument('-b',
                        type=int,
                        help='batch size',
                        default=40,
                        dest='batch_size')
    parser.add_argument('-e',
                        type=int,
                        help='number of epochs',
                        default=10,
                        dest='epochs')

    args = parser.parse_args()

    # Load, pre-process and augment data.
    data_set = utils.DataSetGenerator(data_dir=args.data_dir,
                                      transform=transforms.Compose([
                                          utils.AugmentData(),
                                          utils.PreProcessData(),
                                          utils.ToTensor()
                                      ]))

    # Data loader for batch generation.
    data_loader = DataLoader(data_set,
                             batch_size=args.batch_size,
                             drop_last=True)

    # Build model.
    model = CNN(utils.INPUT_SHAPE, args.batch_size)

    # Loss and optimizer.
    criterion = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)

    # Train model.
    best_loss = float('inf')

    for epoch in range(args.epochs):
        for idx, sample in enumerate(data_loader):
            img = Variable(sample['img'])
            steering_angle = Variable(sample['steering_angle'])
            optimizer.zero_grad()
            steering_angle_out = model(img)
            loss = criterion(steering_angle_out, steering_angle)
            loss.backward()
            optimizer.step()

            # Save weights.
            if loss.item() < best_loss:
                best_loss = loss.item()
                torch.save(model.state_dict(), 'train.pth')

            if idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, idx * len(img), len(data_loader.dataset),
                    100. * idx / len(data_loader), loss.item()))
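
As a follow-up to the checkpointing above, a hedged sketch of reloading train.pth for inference with the same CNN class (batch size 1 and CPU loading are assumptions about serving, not part of the original script):

def load_for_inference(weights_path='train.pth'):
    """Rebuild the network and load the best weights saved during training."""
    model = CNN(utils.INPUT_SHAPE, 1)         # batch size 1 for single frames
    model.load_state_dict(torch.load(weights_path, map_location='cpu'))
    model.eval()                              # freeze dropout / batch-norm behaviour
    return model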