Esempio n. 1
0
    def __init__(self, args):
        """Set up the PSMNet model, loss, optimizer and data loader.

        Args:
            args: parsed argument namespace; must provide device, model,
                maxdisp, use_multiple_gpu, mode, learning_rate, data_dir,
                augment_parameters, do_augmentation, batch_size,
                input_height, input_width, num_workers, output_directory,
                and model_path when mode != 'train'.

        Raises:
            ValueError: if ``args.model`` is not 'stackhourglass'.
        """
        self.args = args

        # Set up model
        self.device = args.device
        if args.model == 'stackhourglass':
            self.model = PSMNet(args.maxdisp)
        else:
            # Previously self.model stayed undefined here and the .to() call
            # below raised AttributeError; fail fast with a clear message.
            raise ValueError('Unsupported model: {}'.format(args.model))
        self.model = self.model.to(self.device)

        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)
            self.model.cuda()
        if args.mode == 'train':
            self.loss_function = MonodepthLoss(n=3,
                                               SSIM_w=0.8,
                                               disp_gradient_w=0.1,
                                               lr_w=1).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)
        else:
            # Inference: restore trained weights and disable augmentation.
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1
        self.n_img, self.loader = prepare_dataloader(
            args.data_dir, args.mode, args.augment_parameters,
            args.do_augmentation, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 2
0
    def __init__(self, batch_size=4, input_channels=3, use_multiple_gpu=False,
                 learning_rate=1e-4,
                 model_path='model', device='cuda:0', mode='train',
                 train_dataset_dir='data_scene_flow/training',
                 val_dataset_dir='data_scene_flow/testing', num_workers=4,
                 do_augmentation=True,
                 output_directory='outputs',
                 input_height=256, input_width=512,
                 augment_parameters=None):
        """Build the generator/discriminator pair and the scene-flow loaders.

        Args:
            batch_size: images per batch.
            input_channels: channels of the input images.
            use_multiple_gpu: wrap both networks in ``DataParallel``.
            learning_rate: Adam learning rate.
            model_path: checkpoint path prefix.
            device: torch device string, e.g. 'cuda:0'.
            mode: 'train' additionally builds losses, the optimizer and a
                validation loader; any other mode disables augmentation.
            train_dataset_dir / val_dataset_dir: dataset root directories.
            num_workers: dataloader worker processes.
            do_augmentation: enable training-time augmentation.
            output_directory: where outputs are written.
            input_height / input_width: network input size.
            augment_parameters: augmentation ranges; defaults to
                ``[0.8, 1.2, 0.5, 2.0, 0.8, 1.2]``.
        """
        # Fix: the default was a mutable list literal shared across all
        # instances; use a None sentinel with the same effective default.
        if augment_parameters is None:
            augment_parameters = [0.8, 1.2, 0.5, 2.0, 0.8, 1.2]

        self.batch_size = batch_size
        self.input_channels = input_channels
        self.model_path = model_path
        self.device = device
        self.use_multiple_gpu = use_multiple_gpu

        # g_LL / d_R: generator and discriminator networks (project naming).
        self.g_LL = Resnet50_md(self.input_channels).to(self.device)
        self.d_R = Discriminator(self.input_channels).to(self.device)

        if self.use_multiple_gpu:
            self.g_LL = torch.nn.DataParallel(self.g_LL)
            self.d_R = torch.nn.DataParallel(self.d_R)

        self.learning_rate = learning_rate
        self.mode = mode
        self.input_height = input_height
        self.input_width = input_width
        self.augment_parameters = augment_parameters
        self.train_dataset_dir = train_dataset_dir
        self.val_dataset_dir = val_dataset_dir
        self.g_best_val_loss = float('inf')
        self.num_workers = num_workers
        self.do_augmentation = do_augmentation

        if self.mode == 'train':
            self.criterion_GAN = HcGANLoss().to(self.device)
            self.criterion_mono = MonodepthLoss()

            # Only the generator's parameters are optimised here.
            self.optimizer = optim.Adam(
                chain(
                    self.g_LL.parameters()
                ),
                lr=self.learning_rate
            )
            self.val_n_img, self.val_loader = prepare_dataloader(
                self.val_dataset_dir, self.mode, self.augment_parameters,
                False, self.batch_size,
                (self.input_height, self.input_width),
                self.num_workers, shuffle=False)
        else:
            self.augment_parameters = None
            self.do_augmentation = False

        self.n_img, self.loader = prepare_dataloader(
            self.train_dataset_dir, self.mode,
            self.augment_parameters,
            self.do_augmentation, self.batch_size,
            (self.input_height, self.input_width),
            self.num_workers)
        self.output_directory = output_directory
        if 'cuda' in self.device:
            torch.cuda.synchronize()
    def __init__(self, args):
        """Configure the network, loss, optimizer and loaders from *args*.

        In 'train' mode a validation loader plus loss/optimizer are created;
        in any other mode trained weights are loaded from ``args.model_path``
        and augmentation is disabled with a batch size of one.
        """
        self.args = args

        # ---- model ----
        self.device = args.device
        net = get_model(args.model,
                        input_channels=args.input_channels,
                        pretrained=args.pretrained)
        net = net.to(self.device)
        if args.use_multiple_gpu:
            net = torch.nn.DataParallel(net)
        self.model = net

        if args.mode == "train":
            self.loss_function = MonodepthLoss(
                n=4, SSIM_w=0.85, disp_gradient_w=0.1, lr_w=1,
            ).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)
            # Validation: same mode but no augmentation.
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_data_dir,
                args.mode,
                args.augment_parameters,
                False,
                args.batch_size,
                (args.input_height, args.input_width),
                args.num_workers,
                labels=args.stereo_labels,
            )
        else:
            # Inference: restore weights, no augmentation, single-image batches.
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # ---- data ----
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        self.n_img, self.loader = prepare_dataloader(
            args.data_dir,
            args.mode,
            args.augment_parameters,
            args.do_augmentation,
            args.batch_size,
            (args.input_height, args.input_width),
            args.num_workers,
            labels=args.stereo_labels,
        )

        if "cuda" in self.device:
            torch.cuda.synchronize(self.device)
    def __init__(self, args):
        """Prepare the model plus train/val/test data pipelines from *args*."""
        self.args = args

        # Model
        self.device = args.device
        network = get_model(args.model, input_channels=args.input_channels, pretrained=args.pretrained)
        self.model = network.to(self.device)
        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)

        # Make sure the output directory exists before anything is written.
        if not os.path.exists(self.args.output_directory):
            os.makedirs(self.args.output_directory)

        if args.mode == 'train':
            self.loss_function = MonodepthLoss(
                n=4, SSIM_w=0.85, disp_gradient_w=0.1, lr_w=1).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)
            # Augmented training set, un-augmented validation set.
            size = (args.input_height, args.input_width)
            self.train_n_img, self.train_loader = prepare_dataloader(
                args.train_dir, args.mode, args.augment_parameters, True,
                args.batch_size, size, args.num_workers)
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_dir, args.mode, args.augment_parameters, False,
                args.batch_size, size, args.num_workers)
        else:
            # Guard clause: inference requires a checkpoint.
            if not args.model_path:
                raise Exception('Pretrained model path not provided!')
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # Test loader is always built (mode forced to 'test', no augmentation).
        self.test_n_img, self.test_loader = prepare_dataloader(
            args.test_dir, 'test', None, False, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 5
0
    def __init__(self, args):
        """Set up the model, loss and loaders; derive checkpoint naming."""
        self.args = args

        # Model
        self.device = args.device
        self.model = get_model(args.model, input_channels=args.input_channels, pretrained=args.pretrained)
        self.model = self.model.to(self.device)
        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)

        # Directory where model checkpoints are written; create if absent.
        self.model_output_directory = self.args.model_output_directory
        if not os.path.exists(self.model_output_directory):
            os.makedirs(self.model_output_directory)

        # Checkpoint base name = model file name without its extension.
        self.save_model_name = os.path.basename(self.args.model_path).split('.')[0]

        if args.mode == 'train':
            self.loss_function = MonodepthLoss(
                n=4,
                SSIM_w=0.85,
                disp_gradient_w=0.1, lr_w=1, l_type=self.args.l_type).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)
            # With do_mult set, the validation loader runs in 'val' mode.
            _mode = 'val' if args.do_mult else args.mode
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_data_dir, _mode, args.augment_parameters, False,
                args.batch_size, (args.input_height, args.input_width),
                args.num_workers)
        else:
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        self.n_img, self.loader = prepare_dataloader(
            args.data_dir, args.mode, args.augment_parameters,
            args.do_augmentation, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 6
0
    def __init__(self, args):
        """Build the Jnet model and data loaders described by *args*.

        Raises:
            ValueError: if ``args.model`` is not 'stackhourglass'.
        """
        self.args = args

        self.device = args.device
        if args.model == 'stackhourglass':
            self.model = Jnet(1)
        else:
            # Previously self.model stayed undefined here and the .cuda()
            # call below raised AttributeError; fail with a clear message.
            raise ValueError('Unsupported model: {}'.format(args.model))

        # NOTE(review): moves the model to CUDA unconditionally, ignoring
        # args.device — presumably training is always on GPU; confirm.
        self.model.cuda()

        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)
            self.model.cuda()
        if args.mode == 'train':
            self.loss_function = MonodepthLoss(
                n=3,
                SSIM_w=0.8,
                disp_gradient_w=0.1, lr_w=1).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)
            # Validation loader runs in 'test' mode with batch size 1.
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_data_dir, 'test', args.augment_parameters,
                False, 1, (args.input_height, args.input_width),
                args.num_workers)
        else:
            # Inference: restore trained weights and disable augmentation.
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1
        self.n_img, self.loader = prepare_dataloader(
            args.data_dir, args.mode, args.augment_parameters,
            args.do_augmentation, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 7
0
    def __init__(self, args):
        """Set up the model and the eigen_zhou training loader.

        Bug fix: ``args.mode == 'generate_label' or 'fine_tune'`` was always
        true because the bare string 'fine_tune' is truthy; it is now a
        proper membership test, so other modes skip the loss/optimizer/
        weight-loading setup.
        """
        self.args = args

        self.device = args.device
        self.model = get_model(args.model,
                               input_channels=args.input_channels,
                               pretrained=args.pretrained)
        self.model = self.model.to(self.device)
        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)

        if args.mode in ('generate_label', 'fine_tune'):
            self.loss_function = MonodepthLoss(n=4,
                                               SSIM_w=0.85,
                                               disp_gradient_w=0.1,
                                               lr_w=1).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)

            self.model.load_state_dict(torch.load(args.to_load_model_path))

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        train_split_loc = os.path.join('splits', 'eigen_zhou',
                                       'train_files.txt')
        with open(train_split_loc) as f:
            filenames = f.readlines()
        self.n_img, self.loader = prepare_dataloader(
            args.data_dir,
            args.mode,
            args.augment_parameters,
            do_augmentation=False,
            batch_size=self.args.batch_size,
            size=(args.input_height, args.input_width),
            # NOTE(review): num_workers is fed from batch_size — looks like a
            # copy-paste slip; confirm against args.num_workers before changing.
            num_workers=self.args.batch_size,
            filenames=filenames,
            load_gt=False,
            load_predition=True,  # spelling matches the dataloader's keyword
            args=self.args)

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 8
0
 def __init__(self, args):
     """Set up data, model, loss and optimizer for 'train' or 'test' mode.

     NOTE(review): any other args.mode leaves the instance only partially
     initialised — no model or loader attributes are created.
     """
     self.args = args
     if args.mode == 'train':
         # Load data
         # Each sub-directory of data_dir becomes one KittiLoader dataset;
         # they are concatenated into a single training dataset.
         data_dirs = os.listdir(args.data_dir)
         data_transform = image_transforms(
             mode=args.mode,
             tensor_type=args.tensor_type,
             augment_parameters=args.augment_parameters,
             do_augmentation=args.do_augmentation)
         train_datasets = [
             KittiLoader(os.path.join(args.data_dir, data_dir),
                         True,
                         transform=data_transform) for data_dir in data_dirs
         ]
         train_dataset = ConcatDataset(train_datasets)
         self.n_img = len(train_dataset)
         print('Use a dataset with', self.n_img, 'images')
         self.train_loader = DataLoader(train_dataset,
                                        batch_size=args.batch_size,
                                        shuffle=True)
         # Set up model
         # CUDA is used only when available AND explicitly requested via
         # the tensor_type argument.
         self.device = torch.device(
             ('cuda:0' if torch.cuda.is_available()
              and args.tensor_type == 'torch.cuda.FloatTensor' else 'cpu'))
         self.loss_function = MonodepthLoss(n=4,
                                            SSIM_w=0.85,
                                            disp_gradient_w=0.1,
                                            lr_w=1).to(self.device)
         # NOTE(review): an unrecognised args.model leaves self.model
         # undefined and the .to() call below raises AttributeError.
         if args.model == 'resnet50_md':
             self.model = models_resnet.resnet50_md(3)
         elif args.model == 'resnet18_md':
             self.model = models_resnet.resnet18_md(3)
         self.model = self.model.to(self.device)
         self.optimizer = optim.Adam(self.model.parameters(),
                                     lr=args.learning_rate)
         if args.tensor_type == 'torch.cuda.FloatTensor':
             torch.cuda.synchronize()
     elif args.mode == 'test':
         # Load data
         self.output_directory = args.output_directory
         self.input_height = args.input_height
         self.input_width = args.input_width
         data_transform = image_transforms(mode=args.mode,
                                           tensor_type=args.tensor_type)
         test_dataset = ImageLoader(args.data_dir,
                                    False,
                                    transform=data_transform)
         self.num_test_examples = len(test_dataset)
         # Test-time batches are single images, in dataset order.
         self.test_loader = DataLoader(test_dataset,
                                       batch_size=1,
                                       shuffle=False)
         # Set up model
         self.device = torch.device(
             ('cuda:0' if torch.cuda.is_available()
              and args.tensor_type == 'torch.cuda.FloatTensor' else 'cpu'))
         if args.model == 'resnet50_md':
             self.model = models_resnet.resnet50_md(3)
         elif args.model == 'resnet18_md':
             self.model = models_resnet.resnet18_md(3)
         # Weights are loaded before the model is moved to the device.
         self.model.load_state_dict(torch.load(args.model_path))
         self.model = self.model.to(self.device)
Esempio n. 9
0
    def __init__(self, args):
        """Instantiate the network and split-aware train/val data loaders."""
        self.args = args

        # Set up model
        # Two stacked input frames double the channel count.
        if self.args.use_two_inputs:
            self.args.input_channels = 6
        self.device = args.device
        self.model = get_model(args.model,
                               input_channels=args.input_channels,
                               pretrained=args.pretrained).to(self.device)
        if args.use_multiple_gpu:
            self.model = torch.nn.DataParallel(self.model)

        if args.mode == 'train':
            self.loss_function = MonodepthLoss(
                n=4, SSIM_w=0.85, disp_gradient_w=0.1, lr_w=1).to(self.device)
            self.optimizer = optim.Adam(self.model.parameters(),
                                        lr=args.learning_rate)

            if self.args.split == 'Nothing':
                # No split file: validate on a dedicated directory.
                self.val_n_img, self.val_loader = prepare_dataloader(
                    args.val_data_dir, args.mode, args.augment_parameters,
                    False, args.batch_size,
                    (args.input_height, args.input_width), args.num_workers)
            else:
                # Validate on every 4th file of the split's validation list.
                val_split_loc = os.path.join('splits', self.args.split,
                                             'val_files.txt')
                with open(val_split_loc) as f:
                    filenames = f.readlines()[0::4]
                self.val_n_img, self.val_loader = prepare_dataloader(
                    args.data_dir,
                    args.mode,
                    args.augment_parameters,
                    False,
                    args.batch_size, (args.input_height, args.input_width),
                    args.num_workers,
                    filenames=filenames)
        else:
            # Inference: restore weights and disable augmentation.
            self.model.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        if self.args.split == 'Nothing':
            self.n_img, self.loader = prepare_dataloader(
                args.data_dir, args.mode, args.augment_parameters,
                args.do_augmentation, args.batch_size,
                (args.input_height, args.input_width), args.num_workers)
        else:
            # NOTE(review): despite the variable name this reads the eigen
            # *test* file list with ground truth — confirm it is intentional.
            train_split_loc = os.path.join('splits', 'eigen', 'test_files.txt')
            with open(train_split_loc) as f:
                filenames = f.readlines()
            self.n_img, self.loader = prepare_dataloader(
                args.data_dir,
                args.mode,
                args.augment_parameters,
                args.do_augmentation,
                args.batch_size, (args.input_height, args.input_width),
                args.num_workers,
                filenames=filenames,
                load_gt=True)

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 10
0
    def __init__(self, args):
        """Set up the depth estimator plus a multi-level GAN stack.

        In 'train' mode this builds the Monodepth loss, an estimator
        optimizer, a validation loader, and per-level BCE criteria with
        Adam optimizers for the generator/discriminator pyramids.
        """
        self.args = args

        # Set up model
        self.device = args.device

        # Fixed noise batch, presumably for visualising generator progress
        # across epochs — TODO confirm against the training loop.
        self.fixed_noise = torch.randn(self.args.batch_size,
                                       self.args.nz,
                                       1,
                                       1,
                                       device=self.device)
        self.real_label = 1
        self.fake_label = 0

        # GAN hyper-parameters copied from args (latent size, feature widths).
        self.ngpu = int(self.args.ngpu)
        self.nz = int(self.args.nz)
        self.ngf = int(self.args.ngf)
        self.ndf = int(self.args.ndf)

        # self.model_generator = Generator(self.args).to(self.device)
        # self.model_generator.apply(weights_init)
        # if self.args.netG != '':
        #     self.model_generator.load_state_dict(torch.load(self.args.netG))
        # print(self.model_generator)

        self.model_estimator = get_model(args.model,
                                         input_channels=args.input_channels,
                                         pretrained=args.pretrained)
        self.model_estimator = self.model_estimator.to(self.device)

        # Per-level learning rates for the 4 discriminator/generator levels.
        self.dis_lrs = [0.0001, 0.0001, 0.0001, 0.0001]
        self.gen_lrs = [0.0003, 0.0005, 0.003, 0.0003]

        self.D_criterions = []
        self.G_criterions = []

        self.D_optimizers = []
        self.G_optimizers = []

        self.fake_disparity = []

        # +1/-1 scalars on the device; presumably for a gradient-direction
        # trick (WGAN-style backward targets) — TODO confirm usage.
        self.one = torch.FloatTensor([1])
        self.mone = self.one * -1
        self.one = self.one.to(self.device)
        self.mone = self.mone.to(self.device)

        self.generator = Generator()

        if args.mode == 'train':
            self.loss_function = MonodepthLoss(n=4,
                                               SSIM_w=0.85,
                                               disp_gradient_w=0.1,
                                               lr_w=1).to(self.device)
            self.optimizer_estimator = optim.Adam(
                self.model_estimator.parameters(), lr=args.learning_rate)

            # Un-augmented validation loader.
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_data_dir, args.mode, args.augment_parameters, False,
                args.batch_size, (args.input_height, args.input_width),
                args.num_workers)
            # One BCE criterion + Adam optimizer per pyramid level, for both
            # the discriminator and generator models.
            for l in range(4):
                self.D_criterions.append(nn.BCELoss())
                D_optim = optim.Adam(self.generator.Dis_models[l].parameters(),
                                     lr=self.dis_lrs[l],
                                     betas=(0.5, 0.999))
                # D_optim = optim.SGD(LapGan_model.Dis_models[l].parameters(), lr=dis_lrs[l], momentum=0.5)
                self.D_optimizers.append(D_optim)

                self.G_criterions.append(nn.BCELoss())
                G_optim = optim.Adam(self.generator.Gen_models[l].parameters(),
                                     lr=self.gen_lrs[l],
                                     betas=(0.5, 0.999))
                # G_optim = optim.SGD(LapGan_model.Gen_models[l].parameters(), lr=gen_lrs[l], momentum=0.5)
                self.G_optimizers.append(G_optim)

            # Switch every GAN sub-model into training mode.
            for G in self.generator.Gen_models:
                G.train()
            for D in self.generator.Dis_models:
                D.train()

        else:
            # Inference: restore estimator weights, disable augmentation.
            self.model_estimator.load_state_dict(torch.load(args.model_path))
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        self.n_img, self.loader = prepare_dataloader(
            args.data_dir, args.mode, args.augment_parameters,
            args.do_augmentation, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        if 'cuda' in self.device:
            torch.cuda.synchronize()
Esempio n. 11
0
    def __init__(self, args):
        """Wire up generator, discriminator and depth estimator from *args*."""
        self.args = args

        # Set up model
        self.device = args.device

        # Fixed latent noise batch kept on the target device.
        self.fixed_noise = torch.randn(self.args.batch_size, self.args.nz,
                                       1, 1, device=self.device)
        self.real_label = 1
        self.fake_label = 0

        # GAN hyper-parameters from args.
        self.ngpu = int(self.args.ngpu)
        self.nz = int(self.args.nz)
        self.ngf = int(self.args.ngf)
        self.ndf = int(self.args.ndf)

        self.model_generator = Generator(self.args).to(self.device)
        self.model_discriminator = Discriminator().to(self.device)

        # Optionally resume the generator from a checkpoint.
        if self.args.netG != '':
            self.model_generator.load_state_dict(torch.load(self.args.netG))

        estimator = get_model(args.model,
                              input_channels=args.input_channels,
                              pretrained=args.pretrained)
        self.model_estimator = estimator.to(self.device)

        # The estimator always starts from the weights at args.model_path.
        self.model_estimator.load_state_dict(torch.load(args.model_path))
        if args.mode == 'train':
            self.loss_function = MonodepthLoss(
                n=4, SSIM_w=0.85, disp_gradient_w=0.1,
                lr_w=1).to(self.device)
            # One Adam optimizer per network, all at the same learning rate.
            self.optimizer_estimator = optim.Adam(
                self.model_estimator.parameters(), lr=args.learning_rate)
            self.optimizer_generator = optim.Adam(
                self.model_generator.parameters(), lr=args.learning_rate)
            self.optimizer_discriminator = optim.Adam(
                self.model_discriminator.parameters(), lr=args.learning_rate)

            # Un-augmented validation loader.
            self.val_n_img, self.val_loader = prepare_dataloader(
                args.val_data_dir, args.mode, args.augment_parameters,
                False, args.batch_size,
                (args.input_height, args.input_width), args.num_workers)
        else:
            args.augment_parameters = None
            args.do_augmentation = False
            args.batch_size = 1

        # Load data
        self.output_directory = args.output_directory
        self.input_height = args.input_height
        self.input_width = args.input_width

        self.n_img, self.loader = prepare_dataloader(
            args.data_dir, args.mode, args.augment_parameters,
            args.do_augmentation, args.batch_size,
            (args.input_height, args.input_width), args.num_workers)

        if 'cuda' in self.device:
            torch.cuda.synchronize()