def combine_unets(f1: str, f2: str, fout: str) -> models.DoubleUNet:
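    # Load two trained UNets, attach them as the DoubleUNet's net_b and net_t sub-networks, and save the combined model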
    net1 = models.UNet()
    net2 = models.UNet()
    net1 = load_model(net1, f1)
    net2 = load_model(net2, f2)
    dnet = models.DoubleUNet()
    dnet.net_b = net1
    dnet.net_t = net2
    save_model(dnet, fout)
    return dnet
Example No. 2
    def __init__(self, image_paths, seed_index):
        self.image_paths = image_paths
        self.seed_index = seed_index
        self.samples = Loader.get_batch(self.image_paths, len(self.image_paths), 0, self.seed_index)
        self.samples_images = self.samples[:, 0]
        self.samples_masks = self.samples[:, 1]
        self.model = models.UNet()
def main():
    if not os.path.isfile(INPUT_FILE):
        print('Input image not found ', INPUT_FILE)
    else:
        if not os.path.isfile(MODEL_FILE):
            print('Model not found ', MODEL_FILE)

        else:
            print('Load model... ', MODEL_FILE)
            model = models.UNet(n_channels=1, n_classes=1)

            checkpoint = torch.load(pathlib.Path(MODEL_FILE))
            model.load_state_dict(checkpoint)
            model.to(device)
            model.eval()

            print('Load image... ', INPUT_FILE)
            img, h, w = image.load_image(INPUT_FILE)

            print('Prediction...')
            output_image = predict_image(model, img)

            print('Resize mask to original size...')
            mask_image = cv2.resize(output_image, (w, h))
            cv2.imwrite(OUTPUT_MASK, mask_image)

            print('Cut it out...')
            warped = image.extract_idcard(cv2.imread(INPUT_FILE), mask_image)
            cv2.imwrite(OUTPUT_FILE, warped)

            print('Done.')
Example No. 4
def convert_sparse(checkpoint, parameters, output_filename):
    print("Convert sparse unshaded super-resolution network")
    import models

    # create model
    externalFlow = parameters.get('externalFlow', False)
    if externalFlow:
        input_channels = 7
        output_channels = 6
    else:
        input_channels = 9
        output_channels = 8
    input_channels_with_previous = input_channels + output_channels
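    # the network also receives the previous output as extra input channels, hence input_channels + output_channels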
    model = models.UNet(input_channels_with_previous, output_channels,
                        parameters['depth'], parameters['filters'], parameters['padding'],
                        parameters['batchNorm'], parameters['residual'],
                        parameters['hardInput'], parameters['upMode'], True)

    # restore weights
    model.load_state_dict(checkpoint['model_params'], True)
    device = torch.device('cuda')
    model.to(device)
    model.train(False)
    print("Model:")
    print(model)

    print("Convert to script")
    try:
        def genInput(width, height):
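            # random dense input plus a binary sparsity mask, matching the model's (input, mask) calling convention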
            input = torch.rand(1, input_channels_with_previous, height, width, dtype=torch.float32, device=device)
            mask = (torch.rand(1, 1, height, width, dtype=torch.float32, device=device) > 0.5).to(torch.float32)
            return (input, mask)
        inputs = [
            genInput(128, 128),
            genInput(262, 913)]#,
            #genInput(841, 498),
            #genInput(713, 582)]
        print("Dry run:")
        for input in inputs:
            print("====== Check input of size", input[0].shape, "======")
            run1 = model(*input)
            assert input[0].shape[-2:] == run1[0].shape[-2:], "shapes don't match"
        print("Trace run:")
        scripted_module = torch.jit.trace(model, inputs[0])#, check_inputs=inputs)
        #scripted_module = torch.jit.script(model)
    except Exception as ex:
        print("Unable to convert:")
        print(traceback.format_exc())
        return

    settings = parameters
    settings_json = json.dumps(settings)

    print("Save to", output_filename)
    extra_files = torch._C.ExtraFilesMap()
    extra_files['settings.json'] = settings_json
    print(extra_files)
    torch.jit.save(scripted_module, output_filename, _extra_files=extra_files)
Example No. 5
def main():
    seed_torch()

    print('Create datasets...')
    train_dataset = SegmentationImageDataset(
        './dataset/train/train_frames/image',
        './dataset/train/train_masks/image')
    validation_dataset = SegmentationImageDataset(
        './dataset/train/val_frames/image', './dataset/train/val_masks/image')

    print('Create dataloader...')
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=BATCH_SIZE,
                                  shuffle=True,
                                  num_workers=0)
    validation_dataloader = DataLoader(validation_dataset,
                                       batch_size=BATCH_SIZE,
                                       shuffle=True,
                                       num_workers=0)

    dataloader = {"train": train_dataloader, "val": validation_dataloader}

    print('Initialize model...')
    model = models.UNet(n_channels=1, n_classes=1)
    model = model.to(device)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = Adam(model.parameters(), lr=1e-4)
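    # ReduceLROnPlateau with mode='max' lowers the LR once the monitored validation metric stops improving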
    scheduler = ReduceLROnPlateau(optimizer,
                                  mode='max',
                                  factor=0.2,
                                  patience=3,
                                  verbose=True)

    print(RESUME_TRAINING)
    if RESUME_TRAINING:
        print('Load Model to resume training...')
        checkpoint = torch.load(CHECKPOINT_PATH)
        model.load_state_dict(checkpoint['model_state_dict'])
        model.to(device)
        model.eval()

    print('Start training...')
    train(model,
          dataloader,
          criterion,
          optimizer,
          scheduler,
          num_epochs=NO_OF_EPOCHS)

    print('Save final model...')
    torch.save(model.state_dict(), FINAL_PATH)
Example No. 6
cpt_name = '/full_model_'

writer = SummaryWriter(log_name)

print("torch.cuda.is_available: ", torch.cuda.is_available())
print("torch.cuda.device_count: ", torch.cuda.device_count())

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
multiGPUs = [0, 1, 2, 3]

netT = models.ResNet()
sketExt = models.PWCExtractor()
imagExt = models.PWCExtractor()
flowEst = models.Network()
blenEst = models.blendNet()
flowRef = models.UNet(14, 8)
ImagRef = model_deform.DeformUNet(21, 15)

W = 576
H = 384
flowBackWarp = models.backWarp(W, H)
occlusiCheck = models.occlusionCheck(W, H)

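# wrap every sub-network in DataParallel so forward passes are split across the GPUs listed in multiGPUs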
if torch.cuda.device_count() >= 1:
    netT = nn.DataParallel(netT, device_ids=multiGPUs)
    sketExt = nn.DataParallel(sketExt, device_ids=multiGPUs)
    imagExt = nn.DataParallel(imagExt, device_ids=multiGPUs)
    flowEst = nn.DataParallel(flowEst, device_ids=multiGPUs)
    blenEst = nn.DataParallel(blenEst, device_ids=multiGPUs)
    flowRef = nn.DataParallel(flowRef, device_ids=multiGPUs)
    ImagRef = nn.DataParallel(ImagRef, device_ids=multiGPUs)
Example No. 7
                                                                                       split_ratio=(0.5, 0.5))
    training_folder_list, val_folder_list = utils.get_parent_folder_names(training_data_root, which_bag=which_bag)
    # Build training and validation dataset
    train_dataset = dataset.SfMDataset(image_file_names=train_filenames,
                                       folder_list=training_folder_list + val_folder_list,
                                       adjacent_range=adjacent_range, to_augment=True, transform=training_transforms,
                                       downsampling=downsampling,
                                       net_depth=teacher_depth, inlier_percentage=inlier_percentage,
                                       use_store_data=load_intermediate_data,
                                       store_data_root=precompute_root,
                                       use_view_indexes_per_point=use_view_indexes_per_point, visualize=visualize,
                                       phase="train", is_hsv=is_hsv)
    train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=shuffle,
                                               num_workers=num_workers)
    # Load trained teacher model
    depth_estimation_model_teacher = models.UNet(in_channels=3, out_channels=1, depth=teacher_depth, wf=filter_base,
                                                 padding=True, up_mode='upsample')
    # Initialize the depth estimation network with Kaiming He initialization
    utils.init_net(depth_estimation_model_teacher, type="kaiming", mode="fan_in", activation_mode="relu",
                   distribution="normal")
    # Multi-GPU running
    depth_estimation_model_teacher = torch.nn.DataParallel(depth_estimation_model_teacher)
    depth_estimation_model_teacher.train()

    # Define teacher network weight path
    # Load previous teacher model
    if best_teacher_model_path is None:
        best_teacher_model_path = str(model_root / "best_teacher_model.pt")

    # Load previous student model, lr scheduler, failure SfM sequences, and so on
    if use_previous_teacher_model:
        if Path(best_teacher_model_path).exists():
Example No. 8
    tensor2image = torchvision.transforms.ToPILImage()
    batch_size = 1
    PSNR_total = 0
    SSIM_total = 0

    File_No = 100
    Folder_name = "{}/BSD100".format(args.test_path)

    with torch.no_grad():
        init_net = nn.DataParallel(models.InitNet(args)).to(device).eval()
        init_net.load_state_dict(
            torch.load("./trained_models/init_net_ratio{}.pth".format(
                args.ratio),
                       map_location='cpu')["model"])

        deep_net = nn.DataParallel(models.UNet(args)).to(device).eval()
        deep_net.load_state_dict(
            torch.load("./trained_models/deep_net_ratio{}.pth".format(
                args.ratio),
                       map_location='cpu')["model"])

        for i in range(1, File_No + 1):
            name = "{}/({}).mat".format(Folder_name, i)
            x = scio.loadmat(name)['temp3']
            x = torch.from_numpy(np.array(x)).to(device)
            x = x.float()
            ori_x = x

            h = x.size()[0]
            h_lack = 0
            w = x.size()[1]
Example No. 9
def train():
    # Setup device
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Setup model
    model = models.UNet(n_channels=3, n_classes=5)
    # Initialize the parameters with a pretrained VGG16 network
    vgg16 = models.VGG16(pretrained=True)
    model.copy_params_from_vgg16(vgg16)

    # Setup dataloaders: training and validation data
    """data.picFulPath('/home/mlxuan/project/DeepLearning/data/benchmark/benchmark_RELEASE/dataset/train.txt',
               '/home/mlxuan/project/DeepLearning/data/benchmark/benchmark_RELEASE/dataset/img/',
               '/home/mlxuan/project/DeepLearning/data/benchmark/benchmark_RELEASE/dataset/cls/')
    train_dataset = data.SBDClassSeg('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/data/ImagAndLal.txt')
    trainloader = DataLoader(train_dataset, batch_size=4, shuffle=False, drop_last=True)

    data.picFulPath('/home/mlxuan/project/DeepLearning/data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt',
                    '/home/mlxuan/project/DeepLearning/data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/JPEGImages/',
                    '/home/mlxuan/project/DeepLearning/data/VOCtrainval_11-May-2012/VOCdevkit/VOC2012/SegmentationClass/',
                    destPath='/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/data/ValImagAndLal.txt',
                    ImgFix='.jpg',lblFix='.png')

    val_dataset = data.VOCClassSeg('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/data/ValImagAndLal.txt',train=False)
    valloader = DataLoader(val_dataset,batch_size=1,shuffle=False)"""

    train_dataset = data.RSDataClassSeg('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/Data/trainFullPath.txt')
    trainloader = DataLoader(train_dataset, batch_size=4, shuffle=False, drop_last=True)
    val_dataset = data.RSDataClassSeg('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/Data/validFullPath.txt',train=False)
    valloader = DataLoader(val_dataset, batch_size=1, shuffle=False)

    # Setup optimizer, lr_scheduler and loss function

    def cross_entropy2d(input, target, weight=None, size_average=True):
        # input: (n, c, h, w), target: (n, h, w)
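        # pixels with a negative label are excluded from the loss (ignore-index behaviour)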
        n, c, h, w = input.size()
        # log_p: (n, c, h, w)
        if LooseVersion(torch.__version__) < LooseVersion('0.3'):  # simple version comparison; the argument is torch.__version__, so this checks the installed torch version
            # ==0.2.X
            log_p = F.log_softmax(input)
        else:
            # >=0.3
            log_p = F.log_softmax(input, dim=1)
        # log_p: (n*h*w, c); log_p is the log_softmax of the input, i.e. the per-class log-probabilities. tensor.transpose swaps two dimensions of the tensor (e.g. rows become columns and columns become rows).
        log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous()
        log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
        log_p = log_p.view(-1, c)
        # target: (n*h*w,)
        mask = target >= 0
        target = target[mask]
        loss = F.nll_loss(log_p, target, weight=weight)
        if size_average:
            loss /= mask.data.sum()
        return loss


    lossFun = cross_entropy2d

    def get_parameters(model, bias=False):
        import torch.nn as nn
        modules_skipped = (
            nn.ReLU,
            nn.MaxPool2d,
            nn.Dropout2d,
            nn.Sequential,
            models.FCN32s,
        )
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                if bias:
                    yield m.bias
                else:
                    yield m.weight
            elif isinstance(m, nn.ConvTranspose2d):
                # weight is frozen because it is just a bilinear upsampling
                if bias:
                    assert m.bias is None
            elif isinstance(m, modules_skipped):
                continue
            else:
                raise ValueError('Unexpected module: %s' % str(m))


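    # bias parameters get twice the base learning rate and no weight decay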
    optim = torch.optim.SGD(
        [
            {'params': get_parameters(model, bias=False)},
            {'params': get_parameters(model, bias=True),
             'lr': 1.0e-5 * 2, 'weight_decay': 0},
        ],
        lr=1.0e-5,
        momentum=0.99,
        weight_decay=0.0005)
    # Define the learning-rate schedule
    scheduler = lr_scheduler.ReduceLROnPlateau(optim, mode='min', patience=0, min_lr=10e-10, eps=10e-8)  # 'min' lowers the LR when the monitored metric stops decreasing; patience is the number of steps tolerated before the reduction

    utils.ModelLoad('/home/mlxuan/project/DeepLearning/FCN/fcn_mlx/output/Model.path/20181227_220035.852449model_best.pth.tar',model,optim)

    trainer = models.Trainer(
        cuda=True,
        model=model,
        optimizer=optim,
        loss_fcn=lossFun,
        train_loader=trainloader,
        val_loader=valloader,
        out='./output/',
        max_iter=40000,
        scheduler=scheduler,
        interval_validate=2000
    )
    trainer.train()  # start training
Example No. 10
# -*- coding: utf-8 -*-

import models
from torchsummary import summary
from flopth import flopth
#import torch

model = models.UNet(10).cuda()

summary(model, input_size=(3, 256, 256))

flops = flopth(model, in_size=(3, 256, 256))
print(flops)
Example No. 11
                    default=224,
                    help='Size of the input')

parser.add_argument('--n_measures',
                    type=int,
                    default=1,
                    help='Number of time measurements')

args = parser.parse_args()

#------------------------------------------------------------------------------
#	Create model
#------------------------------------------------------------------------------
# UNet
model = models.UNet(
    backbone="mobilenetv2",
    num_classes=2,
)

# # DeepLabV3+
# model = DeepLabV3Plus(
#     backbone='resnet18',
#     output_stride=16,
#     num_classes=2,
#     pretrained_backbone=None,
# )

# # BiSeNet
# model = BiSeNet(
#     backbone='resnet18',
#     num_classes=2,
#     pretrained_backbone=None,
Example No. 12
    def __init__(self, args):
        self.args = args
        self.mode = args.mode
        self.epochs = args.epochs
        self.dataset = args.dataset
        self.data_path = args.data_path
        self.train_crop_size = args.train_crop_size
        self.eval_crop_size = args.eval_crop_size
        self.stride = args.stride
        self.batch_size = args.train_batch_size
        self.train_data = AerialDataset(crop_size=self.train_crop_size,
                                        dataset=self.dataset,
                                        data_path=self.data_path,
                                        mode='train')
        self.train_loader = DataLoader(self.train_data,
                                       batch_size=self.batch_size,
                                       shuffle=True,
                                       num_workers=2)
        self.eval_data = AerialDataset(dataset=self.dataset,
                                       data_path=self.data_path,
                                       mode='val')
        self.eval_loader = DataLoader(self.eval_data,
                                      batch_size=1,
                                      shuffle=False,
                                      num_workers=2)

        if self.dataset == 'Potsdam':
            self.num_of_class = 6
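            # get_test_times presumably estimates how many train_crop_size crops tile a 6000x6000 image, so each epoch is repeated that many times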
            self.epoch_repeat = get_test_times(6000, 6000,
                                               self.train_crop_size,
                                               self.train_crop_size)
        elif self.dataset == 'UDD5':
            self.num_of_class = 5
            self.epoch_repeat = get_test_times(4000, 3000,
                                               self.train_crop_size,
                                               self.train_crop_size)
        elif self.dataset == 'UDD6':
            self.num_of_class = 6
            self.epoch_repeat = get_test_times(4000, 3000,
                                               self.train_crop_size,
                                               self.train_crop_size)
        else:
            raise NotImplementedError

        if args.model == 'FCN':
            self.model = models.FCN8(num_classes=self.num_of_class)
        elif args.model == 'DeepLabV3+':
            self.model = models.DeepLab(num_classes=self.num_of_class,
                                        backbone='resnet')
        elif args.model == 'GCN':
            self.model = models.GCN(num_classes=self.num_of_class)
        elif args.model == 'UNet':
            self.model = models.UNet(num_classes=self.num_of_class)
        elif args.model == 'ENet':
            self.model = models.ENet(num_classes=self.num_of_class)
        elif args.model == 'D-LinkNet':
            self.model = models.DinkNet34(num_classes=self.num_of_class)
        else:
            raise NotImplementedError

        if args.loss == 'CE':
            self.criterion = CrossEntropyLoss2d()
        elif args.loss == 'LS':
            self.criterion = LovaszSoftmax()
        elif args.loss == 'F':
            self.criterion = FocalLoss()
        elif args.loss == 'CE+D':
            self.criterion = CE_DiceLoss()
        else:
            raise NotImplementedError

        self.schedule_mode = args.schedule_mode
        self.optimizer = opt.AdamW(self.model.parameters(), lr=args.lr)
        if self.schedule_mode == 'step':
            self.scheduler = opt.lr_scheduler.StepLR(self.optimizer,
                                                     step_size=30,
                                                     gamma=0.1)
        elif self.schedule_mode == 'miou' or self.schedule_mode == 'acc':
            self.scheduler = opt.lr_scheduler.ReduceLROnPlateau(self.optimizer,
                                                                mode='max',
                                                                patience=10,
                                                                factor=0.1)
        elif self.schedule_mode == 'poly':
            iters_per_epoch = len(self.train_loader)
            self.scheduler = Poly(self.optimizer,
                                  num_epochs=args.epochs,
                                  iters_per_epoch=iters_per_epoch)
        else:
            raise NotImplementedError

        self.evaluator = Evaluator(self.num_of_class)

        self.model = nn.DataParallel(self.model)

        self.cuda = args.cuda
        if self.cuda:
            self.model = self.model.cuda()

        self.resume = args.resume
        self.finetune = args.finetune
        assert not (self.resume is not None and self.finetune is not None)

        if self.resume is not None:
            print("Loading existing model...")
            if self.cuda:
                checkpoint = torch.load(args.resume)
            else:
                checkpoint = torch.load(args.resume, map_location='cpu')
            self.model.load_state_dict(checkpoint['parameters'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.scheduler.load_state_dict(checkpoint['scheduler'])
            self.start_epoch = checkpoint['epoch'] + 1
            # start from the next epoch
        elif self.finetune is not None:
            print("Loading existing model...")
            if self.cuda:
                checkpoint = torch.load(args.finetune)
            else:
                checkpoint = torch.load(args.finetune, map_location='cpu')
            self.model.load_state_dict(checkpoint['parameters'])
            self.start_epoch = checkpoint['epoch'] + 1
        else:
            self.start_epoch = 1
        if self.mode == 'train':
            # self.model is wrapped in DataParallel, so take the class name from the underlying module
            self.writer = SummaryWriter(comment='-' + self.dataset + '_' +
                                        self.model.module.__class__.__name__ +
                                        '_' + args.loss)
        self.init_eval = args.init_eval
Example No. 13
import argparse
import utils
import models
import torch
import transforms
from PIL import Image

parser = argparse.ArgumentParser()
parser.add_argument('--image-path', type=str, required=True)
parser.add_argument('--output-path', type=str, required=True)
parser.add_argument('--model-path', type=str, required=True)
args = parser.parse_args()

model = models.UNet()
#utils.load_model(model,args.model_path[:-5],args.model_path[-5:])

img_rgb = Image.open(args.image_path).convert('RGB')
img_tensor = transforms.transform_rgb2tensor(img_rgb)
img_tensor = torch.unsqueeze(img_tensor, 0)

with torch.no_grad():
    fake_img_tensor = model(img_tensor)
    fake_img_tensor = torch.squeeze(fake_img_tensor, 0)
    print(fake_img_tensor)

fake_img_rgb = transforms.transform_tensor2rgb(fake_img_tensor)
fake_img_rgb.save(args.output_path)
Example No. 14
def train(args):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    init_net = nn.DataParallel(models.InitNet(args)).to(device)
    deep_net = nn.DataParallel(models.UNet(args)).to(device)

    print("Data loading.")
    dataset = utils.loader(args)
    print("Data loaded.")

    criterion = nn.L1Loss().to(device)
    optimizer_init = optim.Adam(init_net.parameters())
    optimizer_deep = optim.Adam(deep_net.parameters())
    scheduler_init = optim.lr_scheduler.MultiStepLR(optimizer_init, milestones=[50, 80], gamma=0.1)
    scheduler_deep = optim.lr_scheduler.MultiStepLR(optimizer_deep, milestones=[50, 80], gamma=0.1)

    print("Train start.")
    time_start = time.time()

    if os.path.exists(args.init_state_dict) and os.path.exists(args.deep_state_dict):
        if torch.cuda.is_available():
            checkpoint_init = torch.load(args.init_state_dict)
            checkpoint_deep = torch.load(args.deep_state_dict)
        else:
            checkpoint_init = torch.load(args.init_state_dict, map_location="cpu")
            checkpoint_deep = torch.load(args.deep_state_dict, map_location="cpu")

        init_net.load_state_dict(checkpoint_init["model"])
        optimizer_init.load_state_dict(checkpoint_init["optimizer"])

        deep_net.load_state_dict(checkpoint_deep["model"])
        optimizer_deep.load_state_dict(checkpoint_deep["optimizer"])

        start_epoch = checkpoint_deep["epoch"]
        print("Success loading epoch {}".format(start_epoch))
    else:
        start_epoch = 0
        print("No saved model, start epoch = 0.")

    for epoch in range(start_epoch, args.epochs):
        for idx, item in enumerate(dataset):
            x, _ = item
            x = x.to(device)

            optimizer_init.zero_grad()
            optimizer_deep.zero_grad()

            init_x = init_net(x)
            init_x = utils.reshape(init_x, args)
            deep_x = deep_net(init_x)
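            # deep_net predicts a residual that is added to the initial reconstruction (see loss_deep below)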

            loss_init = criterion(x, init_x)
            loss_deep = criterion(x, init_x + deep_x)

            loss_init.backward(retain_graph=True)
            loss_deep.backward()

            optimizer_init.step()
            optimizer_deep.step()

            use_time = time.time() - time_start
            if (idx + 1) % 20 == 0:
                print("=> epoch: {}, batch: {}, Loss1: {:.4f}, Loss2: {:.4f}, lr1: {}, lr2: {}, used time: {:.4f}"
                      .format(epoch + 1, idx + 1, loss_init.item(), loss_deep.item(),
                              optimizer_init.param_groups[0]['lr'], optimizer_deep.param_groups[0]['lr'], use_time))

        scheduler_init.step()
        scheduler_deep.step()
        state_init = {"model": init_net.state_dict(), "optimizer": optimizer_init.state_dict()}
        state_deep = {"model": deep_net.state_dict(), "optimizer": optimizer_deep.state_dict(), "epoch": epoch + 1}
        torch.save(state_init, args.init_state_dict)
        torch.save(state_deep, args.deep_state_dict)
        print("Check point of epoch {} saved.".format(epoch + 1))

    print("Train end.")
    # torchsummary.summary prints its report rather than returning a string,
    # so capture stdout before writing the summaries to files
    import io
    from contextlib import redirect_stdout
    for net, path in [(init_net, "./trained_models/init_net.txt"),
                      (deep_net, "./trained_models/deep_net.txt")]:
        buf = io.StringIO()
        with redirect_stdout(buf):
            torchsummary.summary(net, (1, 32, 32))
        print(buf.getvalue())
        with open(path, "w") as f:
            f.write(buf.getvalue())
Example No. 15
        mask = torch.abs(sparse[:,:,8:9,:,:])
        if not opt.interpolateInput:
            sparse = sparse * mask
        return (sparse, mask)

    if opt.externalFlow:
        input_channels = 7
        output_channels = 6
    else:
        input_channels = 9
        output_channels = 8
    input_channels_with_previous = input_channels + output_channels

    # TODO: support DeepFovea network here
    model = models.UNet(input_channels_with_previous, output_channels,
                        opt.depth, opt.filters, opt.padding,
                        opt.batchNorm, opt.residual, opt.hardInput, opt.upMode,
                        True)
    model.to(device)
    if not no_summary:
        summary(model, 
                input_size=[
                    (input_channels_with_previous, crop_size, crop_size),
                    (1, crop_size, crop_size)], 
                device=device.type)

    #single_input = torch.rand(opt.testBatchSize, input_channels_with_previous, opt.testCropSize, opt.testCropSize,
    #                          dtype=torch.float32, device=device)
    #inputMask = torch.ones(opt.testBatchSize, 1, opt.testCropSize, opt.testCropSize,
    #                          dtype=torch.float32, device=device)
    #writer.add_graph(model, (single_input, inputMask), verbose=True)
    #writer.flush()
Example No. 16
import losses
import metrics
import utils

if __name__ == '__main__':
    cudnn.benchmark = True
    #load data
    train_data = datasets.maps_train
    val_data = datasets.maps_test
    train_data_loader = torch.utils.data.DataLoader(
        dataset=train_data, batch_size=config.BATCH_SIZE, shuffle=True)
    val_data_loader = torch.utils.data.DataLoader(dataset=val_data,
                                                  batch_size=config.BATCH_SIZE)

    #load model
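    # UNet generator paired with a patch-based discriminator (a pix2pix-style setup)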
    G = models.UNet().to(config.DEVICE)
    D = models.PatchDiscriminator().to(config.DEVICE)

    #set optimizer
    G_optim = optim.Adam(G.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=config.BETAS)
    D_optim = optim.Adam(D.parameters(),
                         lr=config.LEARNING_RATE,
                         betas=config.BETAS)

    #set criterion
    G_criterion = losses.GLoss()
    D_criterion = losses.DLoss()

    #set meter