Example #1
    def __init__(self, cf, num_classes=21, pretrained=False, net_name='inception'):
        super(Inception, self).__init__(cf)

        self.url = None
        self.pretrained = False
        self.net_name = net_name

        if pretrained:
            self.model = models.inception_v3(pretrained=True)
            self.model.AuxLogits.fc = nn.Linear(768, num_classes)
            self.model.fc = nn.Linear(2048, num_classes)
        else:
            self.model = models.inception_v3(pretrained=False, num_classes=num_classes)
Example #2
 def test_inception(self):
     x = Variable(
         torch.randn(BATCH_SIZE, 3, 299, 299), requires_grad=True)
     # state_dict = model_zoo.load_url(model_urls['inception_v3_google'], progress=False)
     state_dict = None
     self.run_model_test(inception_v3(), train=False, batch_size=BATCH_SIZE,
                         state_dict=state_dict, input=x)
Example #3
def load_inception_net(parallel=False):
    inception_model = inception_v3(pretrained=True, transform_input=False)
    inception_model = WrapInception(inception_model.eval()).cuda()
    if parallel:
        print('Parallelizing Inception module...')
        inception_model = nn.DataParallel(inception_model)
    return inception_model
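Examples 3, 4, and 9 rely on a `WrapInception` helper that is not defined in these snippets. Below is a minimal sketch of one plausible implementation, assuming the wrapper's job is to map [-1, 1] inputs to ImageNet statistics, resize to 299x299, and return the 2048-dim pooled features together with the class logits in a single pass (the submodule names follow torchvision's `inception_v3`, as listed in Example #11):

import torch
import torch.nn as nn
import torch.nn.functional as F

class WrapInception(nn.Module):
    def __init__(self, net):
        super().__init__()
        self.net = net
        # ImageNet normalization constants expected by the pretrained network
        self.register_buffer('mean', torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer('std', torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def forward(self, x):
        net = self.net
        # inputs assumed to be in [-1, 1]; shift to [0, 1], then normalize
        x = (x + 1.0) / 2.0
        x = (x - self.mean) / self.std
        if x.shape[-2:] != (299, 299):
            x = F.interpolate(x, size=(299, 299), mode='bilinear', align_corners=False)
        # stem
        x = net.Conv2d_1a_3x3(x)
        x = net.Conv2d_2a_3x3(x)
        x = net.Conv2d_2b_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        x = net.Conv2d_3b_1x1(x)
        x = net.Conv2d_4a_3x3(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # inception blocks
        for block in (net.Mixed_5b, net.Mixed_5c, net.Mixed_5d,
                      net.Mixed_6a, net.Mixed_6b, net.Mixed_6c, net.Mixed_6d, net.Mixed_6e,
                      net.Mixed_7a, net.Mixed_7b, net.Mixed_7c):
            x = block(x)
        pool = torch.flatten(F.adaptive_avg_pool2d(x, (1, 1)), 1)  # (N, 2048) features for FID
        logits = net.fc(pool)                                      # (N, 1000) logits for IS
        return pool, logits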
Example #4
 def __init__(self, parallel=False):
     # Expects inputs to be in range [-1, 1]
     inception_model = inception_v3(pretrained=True, transform_input=False)
     inception_model = WrapInception(inception_model.eval()).cuda()
     if parallel:
         inception_model = nn.DataParallel(inception_model)
     self.inception_model = inception_model
Example #5
 def test_inception(self):
     x = Variable(
         torch.randn(BATCH_SIZE, 3, 299, 299), requires_grad=True)
     # state_dict = model_zoo.load_url(model_urls['inception_v3_google'], progress=False)
     state_dict = None
     self.run_model_test(inception_v3(), train=False, batch_size=BATCH_SIZE,
                         state_dict=state_dict, input=x)
Example #6
def get_inception_model(opt):
    # Set up dtype
    if len(opt.gpu_ids) > 0:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print(
                "WARNING: You have a CUDA device, so you should probably set cuda=True"
            )
        dtype = torch.FloatTensor

    inception_model = None
    up = None
    if opt.which_model_IS == 'inception_v3':
        inception_model = inception_v3(pretrained=True,
                                       transform_input=False).type(dtype)
        up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)
    elif 'resnet' in opt.which_model_IS:
        inception_model = networks.ResNet(3, opt.num_classes,
                                          opt.which_model_IS).type(dtype)
        up = nn.Upsample(size=(224, 224), mode='bilinear').type(dtype)
        print('loading the model from %s...' % opt.pretrained_model_path_IS)
        state_dict = torch.load(opt.pretrained_model_path_IS)
        inception_model.load_state_dict(state_dict, strict=True)
    elif 'vgg' in opt.which_model_IS:
        inception_model = networks.VGG(3, opt.num_classes,
                                       opt.which_model_IS).type(dtype)
        up = nn.Upsample(size=(224, 224), mode='bilinear').type(dtype)
        print('loading the model from %s...' % opt.pretrained_model_path_IS)
        state_dict = torch.load(opt.pretrained_model_path_IS)
        inception_model.load_state_dict(state_dict, strict=True)
    inception_model.eval()
    return inception_model, up
Example #7
def inception_score(dataloader, batch_size, N, resize=True, splits=3):

    if torch.cuda.is_available():
        Tensor = torch.cuda.FloatTensor
    else:
        Tensor = torch.FloatTensor
    
    # Set up dataloader
    inception_model = inception_v3(pretrained=True, transform_input=False).type(Tensor)
    inception_model.eval();
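    # NOTE: get_pred is not defined in this snippet; a helper matching the call below
    # (run Inception v3 on the batch and softmax over the 1000 classes) might look like:
    def get_pred(inception_model, batch):
        with torch.no_grad():
            logits = inception_model(batch)
        return F.softmax(logits, dim=1).cpu().numpy()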


    # Get predictions
    preds = np.zeros((N, 1000))

    for i, (batch) in enumerate(dataloader):
        if resize:
            batch =  F.interpolate(batch,size=(299, 299), mode='bilinear', align_corners=True).type(Tensor)

        preds[i*batch_size:(i+1)*batch_size] = get_pred(inception_model,batch)
        del batch
    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits): (k+1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))
    return np.mean(split_scores), np.std(split_scores)
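Every one of these snippets estimates the same quantity: with $p(y\mid x)$ the Inception softmax for image $x$ and $p(y)$ the marginal over a split, `scipy.stats.entropy(pyx, py)` computes exactly $\mathrm{KL}(p(y\mid x)\,\|\,p(y))$, so the per-split score is

$$\mathrm{IS} = \exp\Big(\mathbb{E}_{x}\big[\mathrm{KL}\big(p(y\mid x)\,\|\,p(y)\big)\big]\Big),$$

and the functions report the mean and standard deviation of this value across splits.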
Example #8
def cal_IS(model, dim_z, bs=50, n_total=5000, splits=1, resize=True):
    model.eval()
    inception = incepnets.inception_v3(pretrained=True).cuda()
    inception.eval()

    def get_pred(x):
        if resize:
            x = F.interpolate(x,
                              size=(299, 299),
                              mode='bilinear',
                              align_corners=True)
        x = inception(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    preds = np.zeros((n_total, 1000))
    for i in range(n_total // bs):
        z = Variable(torch.randn(bs, dim_z)).cuda()
        gen = model(z.view(bs, dim_z)).detach()
        preds[i * bs:i * bs + bs] = get_pred(gen)

    split_scores = []
    for k in range(splits):
        part = preds[k * (n_total // splits):(k + 1) * (n_total // splits), :]
        py = part.mean(0)
        scores = []
        for j in range(part.shape[0]):
            pyx = part[j, :]
            # scores.append(np.sum(pyx * np.log(pyx / py), axis=0))
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #9
def FID_score(real, fake, batchSize=64):
    """Computes FID score
    """
    # Load inception model
    device = torch.device('cuda:0')
    inception_model = inception_v3(pretrained=True, transform_input=False)
    inception_pool = WrapInception(inception_model).to(device)
    inception_pool.eval()

    def get_pred(inputs):
        preds = []
        nbatches = inputs.size(0) // batchSize  # integer division so range() gets an int
        for i in range(nbatches):
            x = inputs[i * batchSize:(i + 1) * batchSize]
            x = F.interpolate(x, size=(299, 299), mode='nearest')
            with torch.no_grad():
                x = inception_pool(x)[0]
            preds.append(x)
        return torch.cat(preds, dim=0)

    # Get predictions
    pool_real = get_pred(real).cpu().numpy()
    pool_fake = get_pred(fake).cpu().numpy()

    def get_moments(pool):
        mu, sigma = np.mean(pool, axis=0), np.cov(pool, rowvar=False)
        return mu, sigma

    mu1, sigma1 = get_moments(pool_real)
    mu2, sigma2 = get_moments(pool_fake)
    fid = numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2)
    return fid
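The `numpy_calculate_frechet_distance` helper is not shown above. The standard Fréchet distance between the two Gaussians (mu1, sigma1) and (mu2, sigma2), as used in common FID implementations, can be sketched as follows (assuming SciPy is available):

import numpy as np
from scipy import linalg

def numpy_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """||mu1 - mu2||^2 + Tr(sigma1 + sigma2 - 2*sqrt(sigma1 @ sigma2))"""
    diff = mu1 - mu2
    # matrix square root of the product of the two covariance matrices
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if not np.isfinite(covmean).all():
        # numerical stabilization: add a small ridge to the diagonals and retry
        offset = np.eye(sigma1.shape[0]) * eps
        covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    return diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2.0 * np.trace(covmean)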
Example #10
def inception_score(imgs, cuda=False, batch_size=32, resize=False, splits=1):
    """Computes the inception score of the generated images imgs
    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    cuda -- whether or not to run on GPU
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits
    """
    N = len(imgs)

    assert batch_size > 0
    assert N > batch_size

    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
        print('Using CUDA')
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        print('Not using CUDA')
        dtype = torch.FloatTensor

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=False).type(dtype)
    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader, 0):

        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]
        print(str(i)+'/'+str(len(dataloader)))

        preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits): (k+1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #11
    def __init__(self):
        super(FIDModel, self).__init__()
        inception = inception_v3(pretrained=True, aux_logits=True)
        blocks = []
        block0 = [
            inception.Conv2d_1a_3x3, inception.Conv2d_2a_3x3,
            inception.Conv2d_2b_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        blocks.extend(block0)

        block1 = [
            inception.Conv2d_3b_1x1, inception.Conv2d_4a_3x3,
            nn.MaxPool2d(kernel_size=3, stride=2)
        ]
        blocks.extend(block1)

        block2 = [
            inception.Mixed_5b,
            inception.Mixed_5c,
            inception.Mixed_5d,
            inception.Mixed_6a,
            inception.Mixed_6b,
            inception.Mixed_6c,
            inception.Mixed_6d,
            inception.Mixed_6e,
        ]
        blocks.extend(block2)

        block3 = [
            inception.Mixed_7a, inception.Mixed_7b, inception.Mixed_7c,
            nn.AdaptiveAvgPool2d(output_size=(1, 1))
        ]
        blocks.extend(block3)
        self.blocks = nn.Sequential(*blocks)
Example #12
def inception_score_metric(images, n_split=10, eps=1E-16):
	inception_model = inception_v3(pretrained=True)
	inception_model.eval()
	# run the classifier and convert the logits to class probabilities p(y|x)
	with torch.no_grad():
		y_hat = F.softmax(inception_model(images), dim=1).cpu().numpy()

	# enumerate splits of images/predictions
	scores = list()
	n_part = int(images.shape[0] / n_split)
	for i in range(n_split):
		# retrieve p(y|x)
		ix_start, ix_end = i * n_part, i * n_part + n_part
		p_yx = y_hat[ix_start:ix_end]
		# calculate p(y)
		p_y = np.expand_dims(p_yx.mean(axis=0), 0)
		# calculate KL divergence using log probabilities
		kl_d = p_yx * (np.log(p_yx + eps) - np.log(p_y + eps))
		# sum over classes
		sum_kl_d = kl_d.sum(axis=1)
		# average over images
		avg_kl_d = np.mean(sum_kl_d)
		# undo the log
		is_score = np.exp(avg_kl_d)
		# store
		scores.append(is_score)
	# average across images
	is_avg, is_std = np.mean(scores), np.std(scores)
	return is_avg, is_std
Example #13
 def __init__(self, device):
     super().__init__()
     self.device = device
     self.model = inception_v3(pretrained=True, progress=True, transform_input=True).to(self.device)
     self.fc = self.model.fc
     self.model.fc = nn.Sequential()
     self.softmax = nn.Softmax(dim=1)
     self.model.eval()
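With the classifier head detached like this, one forward pass can serve both metrics: the truncated model returns the 2048-dim pooled features (useful for FID) and the saved `fc` plus softmax turn them into class probabilities (useful for IS). A hypothetical `forward` method following this pattern:

 def forward(self, x):
     # x: images already resized to 299x299 and normalized as Inception expects
     features = self.model(x)                 # (N, 2048) pooled features; fc is an empty nn.Sequential (identity)
     probs = self.softmax(self.fc(features))  # (N, 1000) class probabilities
     return features, probs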
Example #14
def inception_score_model(generator,
                          noise,
                          n_batch=1000,
                          device='cuda',
                          batch_size=32,
                          resize=True,
                          splits=1):
    N = n_batch * batch_size

    assert batch_size > 0
    assert N > batch_size

    # Set up dtype
    dtype = torch.cuda.FloatTensor
    generator.to('cuda')
    generator.eval()

    # Set up dataloader
    # dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True,
                                   transform_input=False).type(dtype)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)

    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))

    for i in range(n_batch):
        # print(i)
        batch = generator(noise.next_batch(batch_size, device=device))
        batch = batch.type(dtype)
        batch_size_i = batch.size()[0]
        # print(i, batch.size(), batch.type())

        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(
            batch.data)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #15
def inception_score(imgs, device=None, batch_size=32, resize=False, splits=1):
    """Computes the inception score of the generated images imgs

    Args:
        imgs: Torch dataset of (3xHxW) numpy images normalized in the
              range [-1, 1]
        device: torch device on which to run Inception v3
        batch_size: batch size for feeding into Inception v3
        splits: number of splits
    """
    N = len(imgs)

    assert batch_size > 0
    assert N > batch_size

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False)
    inception_model = inception_model.to(device)

    inception_model = nn.DataParallel(inception_model)

    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear').to(device)

    def get_pred(x):
        with torch.no_grad():
            if resize:
                x = up(x)
            x = inception_model(x)
            out = F.softmax(x, dim=-1)
        out = out.cpu().numpy()
        return out

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader, 0):
        batchv = batch.to(device)
        batch_size_i = batch.size()[0]

        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #16
 def __init__(self, layer=-1):
     super(Inception, self).__init__()
     model = inception_v3(pretrained=True,
                          transform_input=False,
                          aux_logits=False)
     modules1 = list(model.children())[:-2]
     self.model = nn.Sequential(*modules1)
     modules2 = list(model.children())[-2:]
     self.rest = nn.Sequential(*modules2)
Example #17
 def __prepare_inception_net(self, embedding_size):
     inception = inception_v3(pretrained=True, aux_logits=False)
     inception.fc = nn.Linear(inception.fc.in_features, embedding_size)
     for name, param in inception.named_parameters():
         if "fc.weight" in name or "fc.bias" in name:
             param.requires_grad = True
         else:
             param.requires_grad = False
     return inception
Example #18
def inception_score(imgs, batch_size=64, resize=True, splits=10):
    """Computes the inception score of the generated images imgs
    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    batch_size -- batch size for feeding into Inception v3
    resize -- if the image size is smaller than 299, resize it to 299
    splits -- number of splits; using a different number of splits can change the score even on the same data
    """
    # Set up device
    device = torch.device("cuda:0")  # you can change the index of cuda

    N = len(imgs)

    assert batch_size > 0
    assert N > batch_size

    # Set up dataloader
    print('Creating data loader')
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True,
                                   transform_input=False).to(device)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear',
                     align_corners=False).to(device)

    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions using pre-trained inception_v3 model
    print('Computing predictions using inception v3 model')
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader, 0):
        batch = batch[0].to(device)
        batch_size_i = batch.size()[0]

        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batch)

    # Now compute the mean KL Divergence
    print('Computing KL Divergence')
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) *
                     (N //
                      splits), :]  # split the whole data into several parts
        py = np.mean(part, axis=0)  # marginal probability
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]  # conditional probability
            scores.append(entropy(pyx, py))  # compute divergence
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #19
def inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=1):
    """Computes the inception score of the generated images imgs

    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    cuda -- whether or not to run on GPU
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits
    """
    N = len(imgs)

    assert batch_size > 0
    assert N > batch_size

    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
    inception_model.eval();
    up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)
    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]

        preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits): (k+1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #20
 def __init__(self, path='deep_dream_model'):
     self.model = inception_v3(pretrained=True, transform_input=True)
     self.model.aux_logits = False
     self.model.train(False)
     self.modulelist = list(self.model.children())
     print(
         '-----------------------------------init--------------------------------------------'
     )
Example #21
def inception_score(imgs, cuda=True, batch_size=32, resize=False, splits=1, return_preds=False):
    """Computes the inception score of the generated images imgs

    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    cuda -- whether or not to run on GPU
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits
    """
    N = len(imgs)
    # ipdb.set_trace()
    assert batch_size > 0
    assert N > batch_size

    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor

    # Set up dataloader
    if isinstance(imgs, torch.Tensor):
        imgs = IgnoreLabelDataset(torch.utils.data.TensorDataset(imgs))
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
    # ipdb.set_trace()
    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
    inception_model.eval();
    up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)
    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy(), x.data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))
    acts = np.zeros((N, 1000))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]

        preds[i*batch_size:i*batch_size + batch_size_i], acts[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)

    # ipdb.set_trace()

    opreds = copy.deepcopy(preds)
    np.random.shuffle(preds)

    split_scores = compute_is_from_preds(preds, splits)
    ret_val = [np.mean(split_scores), np.std(split_scores)]
    if return_preds:
        ret_val += [opreds, acts]
    return ret_val
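`compute_is_from_preds` is not defined in this snippet; given the split loop used in the other examples above, a matching helper would look roughly like this (it returns one exp(mean KL) value per split, which the caller then averages):

import numpy as np
from scipy.stats import entropy

def compute_is_from_preds(preds, splits):
    N = preds.shape[0]
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)  # marginal p(y) over the split
        scores = [entropy(part[i, :], py) for i in range(part.shape[0])]  # KL(p(y|x) || p(y))
        split_scores.append(np.exp(np.mean(scores)))
    return split_scores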
Example #22
def inception_score(imgs, model_checkpoint, cuda=True, batch_size=100, resize=True, splits=1):
    """Compute the inception score of the generated images imgs."""
    N = len(imgs)
    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(
        pretrained=False, transform_input=False).type(dtype)
    if model_checkpoint is None:
        model_checkpoint = "/workspace/code_paper/inception_v3_google-1a9a5a14.pth"
    if not os.path.isfile(model_checkpoint):
        raise "Pretrained model is not existed, model={}".format(
            model_checkpoint)
    checkpoint = torch.load(model_checkpoint)
    inception_model.load_state_dict(checkpoint)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)

    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]

        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits): (k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #23
def inception_score(images, batch_size=16, resize=False, splits=10):
	'''
	This function computes the inception score of generated images

	images: Torch tensor of shape (batch_size,channels,height,width)

	'''

	num_images = images.size(0)

	dtype = torch.FloatTensor


	#use a pre-trained inception model for evaluation
	inception_net = inception_v3(pretrained=True,transform_input=False).type(dtype)

	#use in evaluation mode
	inception_net.eval()

	#function to resize images
	up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)

	#helper function to evaluate inception scores
	def score(x):
		if resize:
			x = up(x)
		logits = inception_net(x)
		return F.softmax(logits,dim=-1).data.numpy()

	scores = np.zeros((num_images,1000))

	dataloader = torch.utils.data.DataLoader(images, batch_size=batch_size)

	for i, batch in enumerate(dataloader, 0):
		batch = batch.type(dtype)
		batch_size_i = batch.size(0)
		scores[i*batch_size:i*batch_size + batch_size_i] = score(batch)

	print "Scores calculated! \n Now calculating KL-Divergence"

	#KL-Divergence calculation
	means = np.mean(scores,axis=0)

	split_scores = []

	for k in range(splits):
		split_i = scores[k*(num_images//splits):(k+1)*(num_images//splits),:]
		ms = np.mean(split_i, axis=0)
		entr = []
		for i in range(split_i.shape[0]):
			example = split_i[i, :]
			entr.append(entropy(example, ms))
		split_scores.append(np.exp(np.mean(entr)))

	return np.mean(split_scores), np.std(split_scores)
Example #24
def inception_score2(generator, num_batch, batch_size=32, cuda=True, resize=False, splits=1):
    """Computes the inception score of the generated images imgs
    cuda -- whether or not to run on GPU

    splits -- number of splits
    
    """
    N_img = num_batch * batch_size
    hidden_size = generator.hidden_size
    
    # Set up dtype
    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        if torch.cuda.is_available():
            print("WARNING: You have a CUDA device, so you should probably set cuda=True")
        dtype = torch.FloatTensor

    if cuda:
        generator.cuda()

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).type(dtype)
    inception_model.eval();
    up = nn.Upsample(size=(299, 299), mode='bilinear').type(dtype)
    
    # Get predictions
    preds = np.zeros((N_img, 1000))
    
    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()
    
    for ep in range(num_batch) :
        z_ = torch.randn((batch_size, hidden_size)).view(-1, hidden_size, 1, 1)
        z_ = Variable(z_.type(dtype))
        G_result = generator(z_) #generate fake images (batch,3,64,64)
        
        preds[ep*batch_size:(ep+1)*batch_size] =  get_pred(G_result)
        
    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N_img // splits): (k+1) * (N_img // splits)]
        py = np.mean(part, axis=0) #p(y)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i,:] #p(y|x)
            scores.append(entropy(pyx, py)) #KL
        split_scores.append(np.exp(np.mean(scores))) #exp(E[KL])

    return np.mean(split_scores), np.std(split_scores)
Example #25
def inception_score(imgs, cuda=False, batch_size=32, resize=False, splits=1):

    N = len(imgs)

    assert batch_size > 0
    assert N >= batch_size

    if cuda:
        dtype = torch.cuda.FloatTensor
    else:
        dtype = torch.FloatTensor

    dataloader = torch.utils.data.DataLoader(imgs,
                                             batch_size=batch_size,
                                             collate_fn=collate)

    inception_model = inception_v3(pretrained=True,
                                   transform_input=False).type(dtype)
    inception_model.eval()

    up = nn.Upsample(size=(299, 299), mode='bilinear',
                     align_corners=False).type(dtype)

    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    preds = np.zeros((N, 1000))

    for i, batch in enumerate(tqdm(dataloader), 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]

        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)

    split_scores = []

    for k in tqdm(range(splits)):

        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []

        for i in range(part.shape[0]):
            pyx = part[i, :]
            #Since second param of entropy() is not None,
            #entropy() gives Kullback-Leibler divergence
            scores.append(entropy(pyx, py))

        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #26
def inception_score(imgs, model, device, batch_size=32, splits=1):
    """Computes the inception score of the generated images imgs

    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    model -- path to a fine-tuned Inception v3 checkpoint (here a 200-class CUB model)
    device -- torch device on which to run the model
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits
    """
    N = len(imgs)
    print("Data num", N)

    assert batch_size > 0
    assert N > batch_size

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs,
                                             batch_size=batch_size,
                                             shuffle=True)

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False)
    # for CUB dataset
    class_num = 200
    inception_model.AuxLogits.fc = nn.Linear(768, class_num)
    inception_model.fc = nn.Linear(2048, class_num)
    inception_model.load_state_dict(torch.load(model))
    for param in inception_model.parameters():
        param.requires_grad = False
    inception_model = inception_model.to(device)
    inception_model.eval()

    # Get predictions
    preds = np.zeros((N, class_num))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.to(device)
        batch_size_i = batch.size()[0]

        output = inception_model(batch)
        pred = F.softmax(output, dim=1).data.cpu().numpy()

        preds[i * batch_size:i * batch_size + batch_size_i] = pred

    # Now compute the mean kl-div
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #27
 def __init__(self, device='cuda'):
     super(PretrainedInception, self).__init__()
     self.device = device
     self.model = inception_v3(pretrained=True, transform_input=False).to(device)
     self.model.eval()
     self.last_features = 0
     # forward hook example: https://discuss.pytorch.org/t/how-can-l-load-my-best-model-as-a-feature-extractor-evaluator/17254/5
     # architecture: https://github.com/pytorch/vision/blob/master/torchvision/models/inception.py
     self.model.Mixed_7c.register_forward_hook(self.my_forward_hook)
     self.avg_pooling = self.model.avgpool
     self.upsample = nn.Upsample(size=(299, 299), mode='bilinear', align_corners=True)
Example #28
def inception_v3_planet(pretrained=True):
    net = inception_v3(num_classes=17, aux_logits=False)
    if pretrained:
        state_dict = net.state_dict()
        pretrained_dict = model_zoo.load_url(model_urls['inception_v3_google'])
        state_dict.update({
            key: pretrained_dict[key]
            for key in state_dict if 'fc' not in key
        })
        net.load_state_dict(state_dict)
    return net
Example #29
def inception_score(dataloader, device, resize=False, splits=1):
    """Computes the inception score of the generated images imgs
    dataloader -- Torch dataloader yielding (image, label) batches
    device -- torch device on which to run Inception v3
    resize -- whether to upsample inputs to 299x299
    splits -- number of splits
    """
    N = len(dataloader.dataset)
    batch_size = dataloader.batch_size
    
    assert batch_size > 0
    assert N >= batch_size

    # Set up dtype
    # if cuda:
    #     dtype = torch.cuda.FloatTensor
    # else:
    #     if torch.cuda.is_available():
    #         print("WARNING: You have a CUDA device, so you should probably set cuda=True")
    #     dtype = torch.FloatTensor

    # Load inception model
    inception_model = inception_v3(pretrained=True, transform_input=False).to(device)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear').to(device)
    def get_pred(x):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, (batch, _) in enumerate(dataloader, 0):
        batch = batch.to(device)
        batchv = Variable(batch).to(device)
        batch_size_i = batch.size()[0]

        preds[i*batch_size:i*batch_size + batch_size_i] = get_pred(batchv)

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits): (k+1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

    return np.mean(split_scores), np.std(split_scores)
Example #30
    def __init__(self):
        self.__image_size = (299,299,3)
        self.dtype = torch.cuda.FloatTensor
        self.__model = inception_v3(pretrained=True, transform_input=False).type(self.dtype)
        self.__model.eval()
        self.__fc = self.__model.fc
        self.__model.fc = nn.Sequential()

        # wrap with nn.DataParallel
        self.__model = nn.DataParallel(self.__model)
        self.__fc = nn.DataParallel(self.__fc)
Example #31
def inception_score(imgs, batch_size=32, resize=False, splits=1):
    """Computes the inception score of the generated images imgs
    imgs -- Torch dataset of (3xHxW) numpy images normalized in the range [-1, 1]
    resize -- whether to upsample inputs to 299x299
    batch_size -- batch size for feeding into Inception v3
    splits -- number of splits
    """
    N = len(imgs)

    assert batch_size > 0
    assert N > batch_size

    # Set up device
    device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")

    # Set up dataloader
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)

    # Load inception model
    inception_model = inception_v3(pretrained=True,
                                   transform_input=False).to(device)
    inception_model.eval()
    up = nn.Upsample(size=(299, 299), mode='bilinear').to(device)
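    # NOTE: get_pred is not defined in this snippet; a helper matching the call below
    # (optionally upsample, run Inception v3, softmax over the 1000 classes) might look like:
    def get_pred(x, resize, up, inception_model):
        if resize:
            x = up(x)
        x = inception_model(x)
        return F.softmax(x, dim=1).data.cpu().numpy()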

    # Get predictions
    preds = np.zeros((N, 1000))

    for i, batch in enumerate(dataloader, 0):
        batch_size_i = batch.size()[0]

        with torch.no_grad():
            preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(
                batch.to(device), resize, up, inception_model)

        print(i + 1, "/", len(dataloader), end="\r")

    print("\nComputing KL-Div Mean...")

    # Now compute the mean kl-div
    split_scores = []

    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))

        print(k + 1, "/", splits, end="\r")
    print()
    return np.mean(split_scores), np.std(split_scores)
Example #32
 def __init__(self, device, batch_size=32, resize=False):
     super(InceptionScore, self).__init__()
     assert batch_size > 0
     self.resize = resize
     self.batch_size = batch_size
     self.device = device
     # Load inception model
     self.inception_model = inception_v3(pretrained=True,
                                         transform_input=False).to(
                                             self.device)
     self.inception_model.eval()
     self.clean()
Example #33
def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
    if 'test_time_pool' in kwargs:
        test_time_pool = kwargs.pop('test_time_pool')
    else:
        test_time_pool = True
    if 'extra' in kwargs:
        extra = kwargs.pop('extra')
    else:
        extra = True
    if model_name == 'dpn68':
        model = dpn68(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn68b':
        model = dpn68b(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn92':
        model = dpn92(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool, extra=extra)
    elif model_name == 'dpn98':
        model = dpn98(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn131':
        model = dpn131(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'dpn107':
        model = dpn107(
            num_classes=num_classes, pretrained=pretrained, test_time_pool=test_time_pool)
    elif model_name == 'resnet18':
        model = resnet18(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet34':
        model = resnet34(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet50':
        model = resnet50(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet101':
        model = resnet101(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'resnet152':
        model = resnet152(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet121':
        model = densenet121(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet161':
        model = densenet161(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet169':
        model = densenet169(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'densenet201':
        model = densenet201(num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'inception_v3':
        model = inception_v3(
            num_classes=num_classes, pretrained=pretrained, transform_input=False, **kwargs)
    else:
        assert False, "Unknown model architecture (%s)" % model_name
    return model
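A hypothetical call to this factory (using any of the names routed above, e.g. the torchvision `inception_v3` branch):

# hypothetical usage of the factory above
model = create_model('inception_v3', num_classes=1000, pretrained=True)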
Example #34
 def test_inception(self):
     x = Variable(
         torch.randn(BATCH_SIZE, 3, 299, 299).fill_(1.0))
     self.exportTest(toC(inception_v3()), toC(x))