Example 1
    def __init__(self, args):
        self.opts = json.load(open(args.exp, 'r'))
        self.image_dir = args.image_dir
        self.data_loader = get_data_loaders(self.opts['dataset'],
                                            cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.model.reload(args.reload, strict=False)
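For context, the constructor above reads only three attributes of `args`. Below is a minimal sketch of command-line wiring that could produce such an `args` object; the flag names mirror the attributes used (`exp`, `image_dir`, `reload`), and the `Tester` class name is a hypothetical stand-in, not taken from the repo.

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--exp', required=True,
                        help='path to the experiment JSON file')
    parser.add_argument('--image_dir', required=True,
                        help='directory containing input images')
    parser.add_argument('--reload', required=True,
                        help='.pth checkpoint to load into the model')
    args = parser.parse_args()

    tester = Tester(args)  # hypothetical class name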
Example 2
    def __init__(self, args):
        self.global_step = 0
        self.epoch = 0
        self.opts = json.load(open(args.exp, 'r'))
        utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))

        # Copy experiment file
        os.system('cp %s %s' % (args.exp, self.opts['exp_dir']))
        # TensorBoard writers for training and train-val logging
        self.writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train'))
        self.val_writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))

        self.train_loader, self.val_loader = get_data_loaders(
            self.opts['dataset'], cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.grid_size = self.model.encoder.feat_size

        if 'encoder_reload' in self.opts:
            self.model.encoder.reload(self.opts['encoder_reload'])

        # OPTIMIZER
        no_wd = []
        wd = []
        print('Weight Decay applied to:')

        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                # No optimization for frozen params
                continue

            if 'bn' in name or 'conv_lstm' in name or 'bias' in name:
                no_wd.append(p)
            else:
                wd.append(p)
                print(name, end=' ')

        # Per-group options override the optimizer-wide defaults:
        # the no_wd group gets weight_decay=0.0, the wd group inherits
        # the global setting.
        self.optimizer = optim.Adam(
            [{'params': no_wd, 'weight_decay': 0.0},
             {'params': wd}],
            lr=self.opts['lr'],
            weight_decay=self.opts['weight_decay'],
            amsgrad=False)
        # TODO: Test how amsgrad works (On the convergence of Adam and Beyond)

        self.lr_decay = optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.opts['lr_decay'], gamma=0.1)

        if args.resume is not None:
            self.resume(args.resume)
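The parameter grouping above is a general PyTorch idiom: a per-group 'weight_decay' entry overrides the value passed to the optimizer constructor, so normalization and bias parameters can be exempted from decay. A self-contained sketch of the same technique, with a toy module standing in for PolyRNNpp:

    import torch.nn as nn
    import torch.optim as optim

    class Toy(nn.Module):
        def __init__(self):
            super().__init__()
            self.conv = nn.Conv2d(3, 8, 3)
            self.bn = nn.BatchNorm2d(8)

    model = Toy()
    no_wd, wd = [], []
    for name, p in model.named_parameters():
        if not p.requires_grad:
            continue  # frozen parameters are not optimized at all
        # Same name-based heuristic as above: no decay on norm/bias params.
        if 'bn' in name or 'bias' in name:
            no_wd.append(p)
        else:
            wd.append(p)

    # The first group's weight_decay=0.0 overrides the global 1e-5.
    optimizer = optim.Adam(
        [{'params': no_wd, 'weight_decay': 0.0}, {'params': wd}],
        lr=1e-4, weight_decay=1e-5)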
Example 3
    def __init__(self, args):
        # `args` comes from the calling script's argument parser,
        # not from this class itself.
        self.opts = json.load(open(args.exp, 'r'))
        self.image_dir = args.image_dir
        self.data_loader = get_data_loaders(self.opts['dataset'],
                                            cityscapes.DataProvider)
        # Network definition
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        # Load a .pth checkpoint from the saved-models folder
        self.model.reload(args.reload, strict=False)
Example 4
    def __init__(self, args):
        self.opts = json.load(open(args.exp, 'r'))
        self.output_dir = args.output_dir
        self.fp_beam_size = args.fp_beam_size
        self.lstm_beam_size = args.lstm_beam_size
        if self.output_dir is None:
            self.output_dir = os.path.join(self.opts['exp_dir'], 'preds')

        utils.create_folder(self.output_dir)
        self.opts = override_options(self.opts)
        self.val_loader = get_data_loaders(self.opts['dataset'],
                                           cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.model.reload(args.reload, strict=False)

        if self.opts['use_ggnn']:
            self.grid_size = self.model.ggnn.ggnn_grid_size
        else:
            self.grid_size = self.model.encoder.feat_size
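`model.reload(..., strict=False)` is a repo-specific helper, but the tolerant loading it suggests matches PyTorch's standard `load_state_dict` behaviour; a minimal sketch of that assumption:

    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    torch.save(model.state_dict(), 'checkpoint.pth')

    state = torch.load('checkpoint.pth', map_location='cpu')
    # strict=False tolerates missing or unexpected keys, e.g. when the
    # checkpoint was saved for a slightly different architecture.
    result = model.load_state_dict(state, strict=False)
    print(result.missing_keys, result.unexpected_keys)  # both empty here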
Example 5
    def __init__(self, args):
        self.global_step = 0
        self.epoch = 0
        self.opts = json.load(open(args.exp, 'r'))
        utils.create_folder(os.path.join(self.opts['exp_dir'], 'checkpoints'))

        # Copy experiment file
        os.system('cp %s %s' % (args.exp, self.opts['exp_dir']))

        self.writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train'))
        self.val_writer = SummaryWriter(
            os.path.join(self.opts['exp_dir'], 'logs', 'train_val'))

        self.train_loader, self.val_loader = get_data_loaders(
            self.opts['dataset'], cityscapes.DataProvider)
        self.model = polyrnnpp.PolyRNNpp(self.opts).to(device)
        self.grid_size = self.model.encoder.feat_size

        if 'xe_initializer' in self.opts:
            self.model.reload(self.opts['xe_initializer'])

        elif 'encoder_reload' in self.opts:
            self.model.encoder.reload(self.opts['encoder_reload'])

        self.model.encoder.eval()
        print('Setting encoder to eval')

        print('No weight decay in RL training')

        train_params = [p for p in self.model.parameters() if p.requires_grad]

        self.optimizer = optim.Adam(train_params,
                                    lr=self.opts['lr'],
                                    amsgrad=False)
        # TODO: Test how amsgrad works (On the convergence of Adam and Beyond)

        self.lr_decay = optim.lr_scheduler.StepLR(
            self.optimizer, step_size=self.opts['lr_decay'], gamma=0.1)

        if args.resume is not None:
            self.resume(args.resume)
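The `requires_grad` filter above pairs with freezing: parameters switched off beforehand never reach the optimizer. A sketch of how an encoder could be excluded from `train_params`, with toy submodules in place of the real network:

    import torch.nn as nn
    import torch.optim as optim

    class Net(nn.Module):
        def __init__(self):
            super().__init__()
            self.encoder = nn.Linear(8, 4)
            self.decoder = nn.Linear(4, 2)

    model = Net()
    for p in model.encoder.parameters():
        p.requires_grad = False  # drop encoder weights from training
    model.encoder.eval()  # also fixes batch-norm/dropout behaviour

    train_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = optim.Adam(train_params, lr=1e-4, amsgrad=False)
    assert len(train_params) == 2  # decoder weight and bias only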