Code Example #1
    def __init__(self, args):

        # Load the experiment configuration from the JSON file given on the
        # command line, and remember whether we are resuming from a checkpoint.
        with open(args.experiment, 'r') as f:
            self.opt = Bunch(json.load(f))
        self.opt.resume = args.resume

        self.global_step = 0
        self.epoch = 0
        self.best_score = 0.0
        self.start_time = time()

        # LOGGING
        self.result_path = os.path.join(self.opt.exp_dir,
                                        self.opt.result_folder,
                                        self.opt.run_name)
        util.create_folder(self.result_path)
        util.create_folder(os.path.join(self.result_path, 'checkpoints'))
        self.logger = TensorboardLogger(
            os.path.join(self.opt.exp_dir, self.opt.result_folder,
                         "tensorboard/" + self.opt.run_name))
        self.std_logger = util.get_std_logger(
            'results', os.path.join(self.result_path, 'stdout.log'))
        self.visualizer = QGenVisualizer(
            os.path.join(self.result_path,
                         "{}.html".format(self.opt.run_name)),
            os.path.join(self.opt.exp_dir, "Utils/css/question.css"))

        self.get_data_loaders()

        # MODEL
        self.model = QuestionGenerator(self.opt).to(device)
        self.model.apply(util.weights_init)

        # Pretrained auxiliary models. The VQA model is always kept in eval
        # mode; the captioner is switched to eval mode only if configured.
        self.vqa = load_model(self.opt.vqa_path, AttentionVQA)
        self.captioner = load_model(self.opt.cap_path, AttentionCaptioner)

        self.vqa.eval()
        if self.opt.cap_eval:
            self.captioner.eval()

        # OPTIMIZER
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                 self.model.parameters()),
                                          lr=self.opt.lr)

        if self.opt.resume is not None:
            self.resume(self.opt.resume)

        # Pre-compute a 0..batch_size-1 index vector on the target device
        # for later batched indexing.
        self.arang_vector = torch.arange(self.opt.batch_size,
                                         dtype=torch.long).to(device)
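These snippets lean on a few helpers that are not shown: the Bunch config wrapper, load_model, and a module-level device. Minimal sketches of plausible implementations follow; the names come from the snippets, but every detail of the bodies (including the assumed checkpoint keys) is a guess, not the repository's actual code.

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class Bunch(object):
    """Attribute-style view over a plain dict (assumed to be the usual
    one-liner wrapper)."""

    def __init__(self, adict):
        self.__dict__.update(adict)


def load_model(checkpoint_path, model_class):
    """Hypothetical checkpoint loader: rebuild the model from options saved
    next to its weights. The keys 'opt' and 'state_dict' are assumptions
    about the saved format, not the repository's actual layout."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    model = model_class(Bunch(checkpoint['opt'])).to(device)
    model.load_state_dict(checkpoint['state_dict'])
    return model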
Code Example #2
    def __init__(self, args):

        # Load the experiment configuration and remember the resume flag.
        with open(args.experiment, 'r') as f:
            self.opt = Bunch(json.load(f))
        self.opt.resume = args.resume

        self.set_seed(self.opt.seed)
        # Counters for the lifelong-learning loop: current data chunk, plus
        # epochs and steps for the data-collection and captioning phases.
        self.chunk = 0
        self.collection_epoch, self.cap_epoch = 0, 0
        self.collection_steps, self.cap_steps = 0, 0
        self.start_time = time()

        # LOGGING
        self.result_path = os.path.join(self.opt.exp_dir,
                                        self.opt.result_folder,
                                        self.opt.run_name)
        util.create_folder(self.result_path)

        self.logger = TensorboardLogger(
            os.path.join(self.opt.exp_dir, self.opt.result_folder,
                         "tensorboard/" + self.opt.run_name))
        self.std_logger = util.get_std_logger(
            'results', os.path.join(self.result_path, 'stdout.log'))
        self.Cvisualizer = CaptionVisualizer(
            os.path.join(self.result_path,
                         "{}_cap.html".format(self.opt.run_name)),
            os.path.join(self.opt.exp_dir, "Utils/css/caption.css"))

        self.init_files_dirs()

        # Auto-eval scorers
        self.scorers = get_scorers(self.opt.cached_tokens)

        self.get_data_loaders()

        self.captioner = load_model(self.opt.cap_path, AttentionCaptioner)

        # Adam over the captioner's trainable parameters; the extra
        # 'mod_name' key tags the parameter group so it can be identified
        # later (see the note after this example).
        self.c_optimizer = torch.optim.Adam([{
            'params': filter(lambda p: p.requires_grad,
                             self.captioner.parameters()),
            'lr': self.opt.c_lr,
            'mod_name': 'cap',
        }])

        if self.opt.resume is not None:
            self.resume(self.opt.resume)
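PyTorch optimizers preserve any extra keys supplied in a parameter-group dict, so a tag such as 'mod_name' above remains available in optimizer.param_groups. A hedged illustration of how that tag could be used later (the decay factor is purely illustrative, not from the source):

        # Extra keys in a param-group dict survive in optimizer.param_groups,
        # so the captioner's group can be found by its tag later on.
        for group in self.c_optimizer.param_groups:
            if group.get('mod_name') == 'cap':
                group['lr'] *= 0.5  # illustrative decay factor only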
Code Example #3
    def __init__(self, args):

        # Load the experiment configuration from the JSON file given on the
        # command line, and remember whether we are resuming from a checkpoint.
        with open(args.experiment, 'r') as f:
            self.opt = Bunch(json.load(f))
        self.opt.resume = args.resume

        self.global_step = 0
        self.epoch = 0
        self.best_score = 0.0
        self.start_time = time()

        # LOGGING
        self.result_path = os.path.join(self.opt.exp_dir,
                                        self.opt.result_folder,
                                        self.opt.run_name)
        util.create_folder(self.result_path)
        util.create_folder(os.path.join(self.result_path, 'checkpoints'))
        self.logger = TensorboardLogger(
            os.path.join(self.opt.exp_dir, self.opt.result_folder,
                         "tensorboard/" + self.opt.run_name))
        self.std_logger = util.get_std_logger(
            'results', os.path.join(self.result_path, 'stdout.log'))
        self.visualizer = CaptionVisualizer(
            os.path.join(self.result_path,
                         "{}.html".format(self.opt.run_name)),
            os.path.join(self.opt.exp_dir, "Utils/css/caption.css"))

        self.get_data_loaders()

        # MODEL
        self.model = AttentionCaptioner(self.opt).to(device)
        self.model.apply(util.weights_init)

        # OPTIMIZER
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                                 self.model.parameters()),
                                          lr=self.opt.lr)

        # Auto-eval scorers
        self.scorers = get_scorers(self.opt.cached_tokens)

        if self.opt.resume is not None:
            self.resume(self.opt.resume)
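The resume(path) method itself is not shown in these examples. A minimal sketch of what it presumably restores, with checkpoint keys that are assumptions rather than the repository's actual format:

    def resume(self, path):
        # Hypothetical sketch of checkpoint restoration (assumed keys).
        checkpoint = torch.load(path, map_location='cpu')
        self.model.load_state_dict(checkpoint['state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.epoch = checkpoint.get('epoch', 0)
        self.global_step = checkpoint.get('global_step', 0)
        self.best_score = checkpoint.get('best_score', 0.0)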
Code Example #4
    def init_files_dirs(self):
        base = self.result_path
        # One training file per lifelong-learning chunk.
        self.lifelong_data_files = self.opt.train_files.split(',')
        self.chunks = len(self.lifelong_data_files)

        # Sub-directories for captioner checkpoints, collected data and
        # decision-maker checkpoints.
        self.cap_checkpoint_path = os.path.join(base, "ccheckpoints")
        self.collection_path = os.path.join(base, "collected_data")
        self.dm_checkpoint_path = os.path.join(base, "mcheckpoints")

        for d in [
                self.cap_checkpoint_path, self.collection_path,
                self.dm_checkpoint_path
        ]:
            util.create_folder(d)

        # Per-round pickles of the data collected for each chunk.
        self.collected_data_paths = [
            os.path.join(self.collection_path,
                         'CollectedDataRound{}.p'.format(x + 1))
            for x in range(self.chunks)
        ]
        # Caption-training data: the warm-up file, followed by the data
        # accumulated after each round.
        self.caption_data_files = [self.opt.warmup_file]
        self.caption_data_files.extend([
            os.path.join(self.collection_path,
                         'AccumDataRound{}.p'.format(x + 1))
            for x in range(self.chunks)
        ])
        # Best captioner checkpoint per chunk.
        self.caption_model_files = [
            os.path.join(self.cap_checkpoint_path,
                         'chunk{}_best.pth'.format(x + 1))
            for x in range(self.chunks)
        ]
        # Best decision-maker checkpoint per chunk.
        self.decision_maker_model_files = [
            os.path.join(self.dm_checkpoint_path,
                         'chunk{}_best.pth'.format(x + 1))
            for x in range(self.chunks)
        ]
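The util.create_folder helper used throughout these examples is not shown either; a plausible sketch, assuming it is a thin wrapper around os.makedirs:

import os


def create_folder(path):
    # Create the directory and any missing parents; do nothing if it
    # already exists (assumed behaviour, not the repository's code).
    os.makedirs(path, exist_ok=True)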