Example #1
    def load_pretrained_model(self):
        base_dir = self.config.pretrain_model.exp_dir
        checkpoint_name = self.config.pretrain_model.checkpoint_name

        # Load the pretrained experiment's config and wrap it for dot access.
        config_path = os.path.join(base_dir, 'config.json')
        config_json = load_json(config_path)
        config = DotMap(config_json)

        # Instantiate the system class named in the config and restore its weights.
        SystemClass = globals()[config.system]
        system = SystemClass(config)
        checkpoint_file = os.path.join(base_dir, 'checkpoints', checkpoint_name)
        checkpoint = torch.load(checkpoint_file, map_location=self.device)
        system.load_state_dict(checkpoint['state_dict'])

        # Put encoder and viewmaker in eval mode and freeze their parameters.
        encoder = system.model.eval()
        viewmaker = system.view.eval()

        for param in encoder.parameters():
            param.requires_grad = False

        for param in viewmaker.parameters():
            param.requires_grad = False

        return encoder, viewmaker, system, system.config
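A minimal usage sketch for the frozen modules returned above: `extract_features` is a hypothetical helper, and the assumption that both the viewmaker and the encoder accept a plain image batch is not confirmed by the snippet.

import torch

def extract_features(encoder, viewmaker, images):
    # Hypothetical helper: run a frozen viewmaker + encoder pair on a batch.
    # Assumes both modules accept an (N, C, H, W) image tensor.
    with torch.no_grad():                # parameters are frozen; skip autograd
        views = viewmaker(images)        # learned views / augmentations
        features = encoder(views)        # embeddings of the augmented batch
    return features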
Example #2
def get_system(model_path, device):
    config_dir = '/'.join(
        model_path.split('/')[:-2])  # Remove "checkpoints/epoch=X.ckpt"
    config_json = load_json(config_dir + '/config.json')
    config = DotMap(config_json)
    system = TransferExpertSystem(config)
    checkpoint = torch.load(model_path, map_location=device)
    system.load_state_dict(checkpoint['state_dict'], strict=False)
    return system
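A short, assumed invocation of `get_system`: the checkpoint path is a placeholder that follows the `<exp_dir>/checkpoints/epoch=X.ckpt` layout implied by the path splitting above, and treating the returned system as a standard nn.Module is an assumption.

import torch

model_path = 'experiments/my_exp/checkpoints/epoch=9.ckpt'   # placeholder path
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
system = get_system(model_path, device)
system = system.to(device).eval()    # prepare the restored system for inference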
Example #3
    def load_pretrained_model(self):
        base_dir = self.config.pretrain_model.exp_dir
        checkpoint_name = self.config.pretrain_model.checkpoint_name

        config_path = os.path.join(base_dir, 'config.json')
        config_json = utils.load_json(config_path)
        config = DotMap(config_json)
        # overwrite GPU to load on same as current agent
        config.gpu_device = self.config.gpu_device

        SystemClass = globals()[config.system]
        system = SystemClass(config)
        checkpoint_file = os.path.join(base_dir, 'checkpoints', checkpoint_name)
        checkpoint = torch.load(checkpoint_file)
        system.load_state_dict(checkpoint['state_dict'])

        encoder = system.model.eval()
        return encoder
Example #4
    def load_pretrained_model(self):
        base_dir = self.config.pretrain_model.exp_dir
        checkpoint_name = self.config.pretrain_model.checkpoint_name

        config_path = os.path.join(base_dir, 'config.json')
        config_json = utils.load_json(config_path)
        config = DotMap(config_json)

        if self.config.model_params.resnet_small:
            config.model_params.resnet_small = self.config.model_params.resnet_small

        SystemClass = globals()[config.system]
        system = SystemClass(config)
        checkpoint_file = os.path.join(base_dir, 'checkpoints', checkpoint_name)
        checkpoint = torch.load(checkpoint_file, map_location=self.device)
        system.load_state_dict(checkpoint['state_dict'], strict=False)

        encoder = system.model.eval()
        return encoder, config
Example #5
def process_config(config_path, override_dotmap=None):
    config_json = load_json(config_path)
    return _process_config(config_json, override_dotmap=override_dotmap)
Example #6
        # this will load both the weights and memory bank
        agent.load_checkpoint('checkpoint.pth.tar',
                              pre_checkpoint_dir,
                              load_memory_bank=True,
                              load_model=True,
                              load_optim=True,
                              load_epoch=True)

    try:
        agent.run()
        agent.finalise()
    except KeyboardInterrupt:
        pass


if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('config', type=str, help='path to config file')
    args = parser.parse_args()

    config_json = load_json(args.config)

    pre_checkpoint_dir = None
    if config_json['pretrained_exp_dir'] is not None:
        print("NOTE: found pretrained model...continue training")
        pre_checkpoint_dir = os.path.join(config_json['pretrained_exp_dir'],
                                          'checkpoints')

    run(args.config, pre_checkpoint_dir)
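For reference, a hedged sketch of a config file that would satisfy this entry point: only 'pretrained_exp_dir' is read from the JSON in the snippet above, so the file name and any other fields are placeholders for whatever the agent class expects.

import json

# Hypothetical minimal config; 'pretrained_exp_dir' may point at a previous
# experiment directory, or be None to train from scratch.
config = {'pretrained_exp_dir': None}
with open('configs/experiment.json', 'w') as f:
    json.dump(config, f, indent=2)
# The script would then be launched with this file as its positional 'config' argument.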
Example #7
def load_samples(samples_file):
    return load_json(samples_file)
Example #8
    args = parser.parse_args()

    OUT_IMG_DIR = f"/mnt/fs5/wumike/reference/pretrain/{args.dataset}/vaegan_image"
    OUT_TXT_DIR = f"/mnt/fs5/wumike/reference/pretrain/{args.dataset}/vaegan_text"
   
    if not os.path.isdir(OUT_IMG_DIR):
        os.makedirs(OUT_IMG_DIR)

    if not os.path.isdir(OUT_TXT_DIR):
        os.makedirs(OUT_TXT_DIR)

    config_path = os.path.join(MODEL_DIR, 'config.json')
    checkpoint_dir = os.path.join(MODEL_DIR, 'checkpoints')
    assert os.path.isfile(os.path.join(checkpoint_dir, 'model_best.pth.tar'))

    config = load_json(config_path)
    config['gpu_device'] = args.gpu_device
    config = DotMap(config)

    AgentClass = globals()[config.agent]
    mvae = AgentClass(config)
    mvae.load_checkpoint('model_best.pth.tar')
    mvae._set_models_to_eval()
    gpu_device = mvae.config.gpu_device

    override_vocab = copy.deepcopy(mvae.train_dataset.vocab)

    image_transforms = transforms.Compose([
        transforms.Resize(32),
        transforms.CenterCrop(32),
        transforms.ToTensor(),
Example #9
def load_config(config_file):
    config = load_json(config_file)
    return config
Example #10
def process_config(config_path, override_dotmap=None, exp_name_suffix=None):
    config_json = load_json(config_path)
    return _process_config(config_json, override_dotmap=override_dotmap,
                           exp_name_suffix=exp_name_suffix)
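Finally, an assumed call to the `process_config` wrapper above: whether `override_dotmap` expects a DotMap instance is inferred from the parameter name, and the config path and suffix are placeholders.

from dotmap import DotMap

overrides = DotMap({'gpu_device': 0})            # assumed override structure
config = process_config('configs/pretrain.json',
                        override_dotmap=overrides,
                        exp_name_suffix='debug')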