Example #1
def get_model():
    args = parser.parse_args()
    print(args.checkpoint)
    if not os.path.isfile(args.checkpoint):
        print('ERROR: Checkpoint file "%s" not found' % args.checkpoint)
        print('Maybe you forgot to download pretrained models? Try running:')
        print('bash scripts/download_models.sh')
        return

    if not os.path.isdir(args.output_dir):
        print('Output directory "%s" does not exist; creating it' % args.output_dir)
        os.makedirs(args.output_dir)

    if args.device == 'cpu':
        device = torch.device('cpu')
    elif args.device == 'gpu':
        device = torch.device('cuda:0')
        if not torch.cuda.is_available():
            print('WARNING: CUDA not available; falling back to CPU')
            device = torch.device('cpu')

    # Load the model, with a bit of care in case there are no GPUs
    map_location = 'cpu' if device == torch.device('cpu') else None
    checkpoint = torch.load(args.checkpoint, map_location=map_location)
    dirname = os.path.dirname(args.checkpoint)
    features_path = os.path.join(dirname, 'features_clustered_010.npy')
    if os.path.isfile(features_path):
        # The features file stores a pickled object, so newer numpy needs allow_pickle=True.
        features = np.load(features_path, allow_pickle=True).item()
    else:
        features = None
    model = Model(**checkpoint['model_kwargs'])
    model_state = checkpoint['model_state']
    if args.first_checkpoint is not None:
        # Transfer weights from a first-stage checkpoint into the current state dict,
        # stopping at 'repr_net.0.weight' (only the keys iterated before it are copied).
        print('Loading first model from', args.first_checkpoint)
        first_checkpoint = torch.load(args.first_checkpoint)
        all_new_keys = []
        for (k, v) in first_checkpoint['model_best_inception_state'].items():
            # NOTE: an earlier revision iterated over first_checkpoint['model_best_state'] instead.
            if k == 'repr_net.0.weight':
                break
            model_state[k] = v
            all_new_keys.append(k)
        # Remove 'mask' parameters that were not among the keys copied above.
        remove_old_keys = []
        for (k, v) in model_state.items():
            if 'mask' in k and k not in all_new_keys:
                remove_old_keys.append(k)
        for k in remove_old_keys:
            del model_state[k]
    model.load_state_dict(model_state)
    model.features = features
    model.colors = torch.randint(0, 256, [134, 3]).float()
    model.eval()
    model.to(device)
    return model
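The get_model() example above reads its settings from a module-level argparse parser. The following is a minimal sketch of that parser, not taken from the original source: the flag names mirror the attributes the function accesses, and the default values are assumptions.

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', default='checkpoint.pt')
parser.add_argument('--output_dir', default='output')
parser.add_argument('--device', default='gpu', choices=['gpu', 'cpu'])
parser.add_argument('--first_checkpoint', default=None)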
Example #2
    def init_generator(self, args, checkpoint):
        if args.restore_from_checkpoint:
            model_kwargs = checkpoint['model_kwargs']
        else:
            model_kwargs = {
                'vocab': self.vocab,
                'image_size': args.image_size,
                'embedding_dim': args.embedding_dim,
                'gconv_dim': args.gconv_dim,
                'gconv_hidden_dim': args.gconv_hidden_dim,
                'gconv_num_layers': args.gconv_num_layers,
                'mlp_normalization': args.mlp_normalization,
                'appearance_normalization': args.appearance_normalization,
                'activation': args.activation,
                'mask_size': args.mask_size,
                'n_downsample_global': args.n_downsample_global,
                'box_dim': args.box_dim,
                'use_attributes': args.use_attributes,
                'box_noise_dim': args.box_noise_dim,
                'mask_noise_dim': args.mask_noise_dim,
                'pool_size': args.pool_size,
                'rep_size': args.rep_size,
            }
            checkpoint['model_kwargs'] = model_kwargs
        self.model = model = Model(**model_kwargs).to('cuda')
        # model.type(torch.cuda.FloatTensor)

        self.criterionVGG = VGGLoss() if args.vgg_features_weight > 0 else None
        self.criterionFeat = torch.nn.L1Loss()
        self.criterionGAN = GANLoss(use_lsgan=not args.no_lsgan, tensor=torch.cuda.FloatTensor)
        self.optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate, betas=(args.beta1, 0.999))
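init_generator() expects checkpoint to be a dict that either already contains 'model_kwargs' (when restoring) or is filled in by the method itself. A rough sketch of a caller follows; Trainer, vocab, and args.checkpoint_path are hypothetical stand-ins for whatever the surrounding code actually uses.

if args.restore_from_checkpoint:
    checkpoint = torch.load(args.checkpoint_path)  # assumed flag name
else:
    checkpoint = {}  # 'model_kwargs' is added by init_generator
trainer = Trainer(vocab)  # hypothetical object that owns init_generator
trainer.init_generator(args, checkpoint)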
def main():
    if not os.path.isfile(CHECKPOINT):
        print('ERROR: Checkpoint file "%s" not found' % CHECKPOINT)
        return

    # Read config file of the model
    args = get_args()
    print(args)
    torch.manual_seed(1)
    random.seed(1)
    np.random.seed(1)

    # reset some arguments
    args.add_jitter_bbox = None
    args.add_jitter_layout = None
    args.add_jitter_feats = None
    args.batch_size = BATCH_SIZE
    args.test_h5 = SPLIT
    device = torch.device("cuda:0")  #torch.cuda.set_device(GPU)

    # Load the model, with a bit of care in case there are no GPUs
    map_location = 'cpu' if device == torch.device('cpu') else None
    checkpoint = torch.load(CHECKPOINT, map_location=map_location)

    if not PRECOMPUTED:
        # initialize model and load checkpoint
        kwargs = checkpoint['model_kwargs']

        model = Model(**kwargs)
        model.load_state_dict(checkpoint['model_state'])
        model.eval()
        model.to(device)

        # create data loaders
        _, train_loader, val_loader, test_loader = build_loaders(
            args, evaluating=True)

        # testing model
        print('Batch size: ', BATCH_SIZE)
        print('Evaluating on {} set'.format(SPLIT))
        eval_model(args,
                   model,
                   test_loader,
                   device,
                   use_gt=USE_GT,
                   use_feats=USE_FEATS,
                   filter_box=IGNORE_SMALL)
        # losses, samples, avg_iou = results
    else:
        # sample images and scores already computed while training (only one batch)
        samples = checkpoint['val_samples'][-1]  # get last iteration
        original_img = samples['gt_img'].cpu().numpy()
        predicted_img = samples['gt_box_pred_mask'].cpu().numpy()

    return
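main() above depends on several module-level constants. A sketch of plausible definitions follows; the names come from the code, while every value and explanatory comment is an assumption inferred from how the constant is used.

CHECKPOINT = 'output/model.pt'  # path to the trained checkpoint
BATCH_SIZE = 32                 # evaluation batch size
SPLIT = 'test.h5'               # dataset split assigned to args.test_h5
USE_GT = True                   # forwarded to eval_model(use_gt=...)
USE_FEATS = False               # forwarded to eval_model(use_feats=...)
IGNORE_SMALL = False            # forwarded to eval_model(filter_box=...)
PRECOMPUTED = False             # if True, reuse samples stored in the checkpoint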
Example #4
def get_model():
    args = parser.parse_args()
    print(args.checkpoint)
    if not os.path.isfile(args.checkpoint):
        print('ERROR: Checkpoint file "%s" not found' % args.checkpoint)
        print('Maybe you forgot to download pretrained models? Try running:')
        print('bash scripts/download_models.sh')
        return

    if not os.path.isdir(args.output_dir):
        print('Output directory "%s" does not exist; creating it' % args.output_dir)
        os.makedirs(args.output_dir)

    if args.device == 'cpu':
        device = torch.device('cpu')
    elif args.device == 'gpu':
        device = torch.device('cuda:0')
        if not torch.cuda.is_available():
            print('WARNING: CUDA not available; falling back to CPU')
            device = torch.device('cpu')

    # Load the model, with a bit of care in case there are no GPUs
    map_location = 'cpu' if device == torch.device('cpu') else None
    checkpoint = torch.load(args.checkpoint, map_location=map_location)
    dirname = os.path.dirname(args.checkpoint)
    features_path = os.path.join(dirname, 'features_clustered_010.npy')
    if os.path.isfile(features_path):
        # The features file stores a pickled object, so newer numpy needs allow_pickle=True.
        features = np.load(features_path, allow_pickle=True).item()
    else:
        features = None
    model = Model(**checkpoint['model_kwargs'])
    model_state = checkpoint['model_state']
    model.load_state_dict(model_state)
    model.features = features
    model.colors = torch.randint(0, 256, [134, 3]).float()
    model.eval()
    model.to(device)
    return model
def build_model(args, checkpoint):
    kwargs = checkpoint['model_kwargs']
    model = Model(**kwargs)
    model.load_state_dict(checkpoint['model_state'])
    if args.model_mode == 'eval':
        model.eval()
    elif args.model_mode == 'train':
        model.train()
    model.image_size = args.image_size
    model.cuda()
    return model
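A minimal usage sketch for build_model(), assuming the checkpoint was saved as a dict with 'model_kwargs' and 'model_state' keys (as every loader in this section expects); args.checkpoint is an assumed flag name.

checkpoint = torch.load(args.checkpoint, map_location='cpu')
model = build_model(args, checkpoint)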
def build_model(args, checkpoint):
    kwargs = checkpoint['model_kwargs']
    if args.aGCN:
        # Switch the graph-convolution pooling to a weighted average.
        kwargs['gconv_pooling'] = 'wAvg'
    model = Model(**kwargs)
    model.load_state_dict(checkpoint['model_state'])
    if args.model_mode == 'eval':
        model.eval()
    elif args.model_mode == 'train':
        model.train()
    model.image_size = args.image_size
    model.cuda()
    return model
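All of the loaders above expect a checkpoint dict containing at least 'model_kwargs' and 'model_state'. A sketch of the matching save step follows; the output filename is an assumption.

checkpoint = {
    'model_kwargs': model_kwargs,       # kwargs used to construct Model(...)
    'model_state': model.state_dict(),  # current model weights
}
torch.save(checkpoint, os.path.join(args.output_dir, 'model.pt'))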