# Overwrite required args
args.model = 'gve'
args.dataset = 'cub'
args.pretrained_model = 'vgg16'
args.num_epochs = 1
args.batch_size = 1
# set to train because we need gradients for Grad-CAM
args.train = True
args.eval_ckpt = 'data/vgg-ic-gve-best-ckpt.pth'
args.ic_ckpt = 'data/cub/image_classifier_ckpt.pth'

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Data preparation
print("Preparing Data ...")
split = get_split_str(args.train, bool(args.eval_ckpt), args.dataset)
split = 'test'  # override: always evaluate on the test split here
data_prep = DataPreparation(args.dataset, args.data_path)
dataset, data_loader = data_prep.get_dataset_and_loader(
    split,
    args.pretrained_model,
    batch_size=args.batch_size,
    num_workers=args.num_workers)

# Load GVE model
print("Loading Model ...")
ml = ModelLoader(args, dataset, device)
model = getattr(ml, args.model)()
print(model, '\n')
print("Loading Model Weights ...")
evaluation_state_dict = torch.load(args.eval_ckpt, map_location='cpu')
Example #2
def get_model():
    # Temporarily strip the command-line arguments so arg_parser returns its
    # defaults, then restore them afterwards
    old_args = sys.argv[:]
    sys.argv = old_args[:1]
    args = arg_parser.get_args()
    sys.argv = old_args

    args.model = "gve"
    args.dataset = "cub"
    args.pretrained_model = "vgg16"
    args.num_epochs = 1
    args.batch_size = 1
    # set to train because we need gradients for Grad-CAM
    args.train = True
    args.eval_ckpt = "data/vgg-ic-gve-best-ckpt.pth"
    args.ic_ckpt = "data/cub/image_classifier_ckpt.pth"

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Data preparation
    print("Preparing Data ...")
    split = get_split_str(args.train, bool(args.eval_ckpt), args.dataset)
    split = "test"  # override: always evaluate on the test split here
    data_prep = DataPreparation(args.dataset, args.data_path)
    dataset, data_loader = data_prep.get_dataset_and_loader(
        split,
        args.pretrained_model,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
    )

    # Load GVE model
    print("Loading Model ...")
    ml = ModelLoader(args, dataset, device)
    model = getattr(ml, args.model)()
    print(model, "\n")
    print("Loading Model Weights ...")
    evaluation_state_dict = torch.load(args.eval_ckpt, map_location="cpu")
    model_dict = model.state_dict(full_dict=True)
    model_dict.update(evaluation_state_dict)
    model.load_state_dict(model_dict)
    # Disable dropout and batch normalization
    model.eval()

    # Bypass the built-in vision model so image features can be computed
    # separately through the VGG layers kept below
    model.has_vision_model = False

    # Convolutional feature layers of the pretrained VGG-16 backbone
    vgg_feat_layers = (
        model.image_classifier.vision_model.pretrained_model.features)
    vgg_class_layers = None

    # 224x224 buffer for the visualization map
    visual = np.zeros((224, 224))

    trainer_creator = getattr(TrainerLoader, args.model)
    trainer = trainer_creator(args,
                              model,
                              dataset,
                              data_loader,
                              logger=None,
                              device=device)

    return model, trainer, dataset, vgg_feat_layers
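
A hypothetical usage sketch (not part of the original example): assuming only the objects returned by get_model() above and standard PyTorch hook APIs, it shows one way the retained vgg_feat_layers could be used to capture the activations and gradients that Grad-CAM needs.

# Hypothetical sketch: capture activations/gradients from the VGG feature
# layers for Grad-CAM. Only get_model() above and standard PyTorch hooks
# are assumed here.
model, trainer, dataset, vgg_feat_layers = get_model()

activations, gradients = {}, {}

def save_activation(module, inputs, output):
    # Store the forward activations of the hooked layer
    activations['value'] = output.detach()

def save_gradient(module, grad_input, grad_output):
    # Store the gradient flowing back through the hooked layer
    gradients['value'] = grad_output[0].detach()

# Hook the final layer of the VGG-16 feature extractor; for Grad-CAM one
# would typically pick the last convolutional layer instead.
target_layer = vgg_feat_layers[-1]
target_layer.register_forward_hook(save_activation)
target_layer.register_full_backward_hook(save_gradient)

# After a forward pass through the image classifier and a backward pass from
# the target class score, a Grad-CAM map could be formed roughly as:
#   weights = gradients['value'].mean(dim=(2, 3), keepdim=True)
#   cam = torch.relu((weights * activations['value']).sum(dim=1))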
# job_string = time.strftime("{}-{}-D%Y-%m-%d-T%H-%M-%S-G{}".format(
#     args.model, args.dataset, args.cuda_device))
# job_path = os.path.join(args.checkpoint_path, job_string)

# Create new checkpoint directory
# if not os.path.exists(job_path):
#     os.makedirs(job_path)

# Save job arguments
# with open(os.path.join(job_path, 'config.json'), 'w') as f:
#     json.dump(vars(args), f)

# Data preparation
print("Preparing Data ...")
split = get_split_str(args.train)
data_prep = DataPreparation(args.dataset, args.data_path)
dataset, data_loader = data_prep.get_dataset_and_loader(
    split,
    args.pretrained_model,
    batch_size=args.batch_size,
    num_workers=args.num_workers)
# Load Model
ml = ModelLoader(args, dataset)

trainer = lrcn_trainer.LRCNTrainer(args, ml.lrcn(), dataset, data_loader,
                                   None)
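
A hypothetical follow-up (not shown in the snippet above): driving the constructed trainer for the configured number of epochs; train_epoch() is an assumed method name used here only for illustration.

# Hypothetical driver loop: train_epoch() is an assumed method name, not
# confirmed by the snippet above; substitute the trainer's actual entry point.
for epoch in range(args.num_epochs):
    trainer.train_epoch()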