Code Example #1
File: autoencoder.py Project: ioneliabuzatu/rnnTorch
 def encoder(self, input):
     tt = torch.cuda if self.isCuda else torch
     # initial hidden/cell states (note: FloatTensor here is allocated uninitialized)
     h0 = torch.autograd.Variable(tt.FloatTensor(self.num_layers, input.size(0), self.hidden_size))
     c0 = torch.autograd.Variable(tt.FloatTensor(self.num_layers, input.size(0), self.hidden_size))
     encoded_input, hidden = self.lstm(input, (h0, c0))
     encoded_input = self.relu(encoded_input)
     return encoded_input
Code Example #2
File: autoencoder.py Project: ioneliabuzatu/rnnTorch
 def decoder(self, encoded_input):
     tt = torch.cuda if self.isCuda else torch
     # initial hidden/cell states (note: FloatTensor here is allocated uninitialized)
     h0 = torch.autograd.Variable(tt.FloatTensor(self.num_layers, encoded_input.size(0), self.output_size))
     c0 = torch.autograd.Variable(tt.FloatTensor(self.num_layers, encoded_input.size(0), self.output_size))
     decoded_output, hidden = self.lstm(encoded_input, (h0, c0))
     decoded_output = self.sigmoid(decoded_output)
     return decoded_output
Code Example #3
def predict():
	"""Predict unseen images"""
	"""Step 0: load data and trained model"""
	mnist = input_data.read_data_sets("./data/", one_hot=True)
	checkpoint_dir = sys.argv[1]

	"""Step 1: build the rnn model"""
	x = tf.placeholder("float", [None, n_steps, n_input])
	y = tf.placeholder("float", [None, n_classes])

	weights = tf.Variable(tf.random_normal([n_hidden, n_classes]), name='weights')
	biases = tf.Variable(tf.random_normal([n_classes]), name='biases')

	pred = rnn_model(x, weights, biases)
	correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
	accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

	"""Step 2: predict new images with the trained model"""
	with tf.Session() as sess:
		sess.run(tf.global_variables_initializer())
		"""Step 2.0: load the trained model"""
		checkpoint_file = tf.train.latest_checkpoint(checkpoint_dir + 'checkpoints')
		print('Loaded the trained model: {}'.format(checkpoint_file))

		saver = tf.train.Saver()
		saver.restore(sess, checkpoint_file)

		"""Step 2.1: predict new data"""
		test_len = 500
		test_data = mnist.test.images[:test_len].reshape((-1, n_steps, n_input))
		test_label = mnist.test.labels[:test_len]
		print("Testing Accuracy:", sess.run(accuracy, feed_dict={x: test_data, y: test_label}))
Code Example #4
File: train.py Project: ml-lab/dilated-densenet
def train(args, model):
    model.train()

    input_transform = output_transform = None
    dataset = RVSC(args.datadir, input_transform, output_transform)
    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    weight = torch.ones(2)
    if args.cuda:
        weight = weight.cuda()
    criterion = CrossEntropyLoss2d(weight)

    optimizer = Adam(model.parameters())

    for epoch in range(1, args.num_epochs+1):
        epoch_loss = []

        for step, (images,labels) in enumerate(loader):
            if args.cuda:
                images = images.cuda()
                labels = labels.cuda()

            x = torch.autograd.Variable(images)
            y_true = torch.autograd.Variable(labels)
            y_pred = model(x)

            optimizer.zero_grad()
            loss = criterion(y_pred, y_true)
            loss.backward()
            optimizer.step()

            epoch_loss.append(loss.item())
            print(loss.item())
Code Example #5
File: attend.py Project: quantapix/qnarre
 def __init__(self):
     self.ps = U.Params(params).init_comps()
     self.pre = None
     self.post = None
     # torch.constant / torch.Variable(initial_value=...) are TensorFlow-style
     # calls that do not exist in PyTorch; zeros + nn.Parameter is the equivalent.
     i = torch.zeros(4, 10)
     self.src_b = torch.nn.Parameter(i)
     i = torch.zeros(4, 10)
     self.mem_b = torch.nn.Parameter(i)
Code Example #6
    def forward(self, X):
        input = self.embedding(X)                                #input: [batch_size, len_seq, embedding_size]
        input = input.permute(1, 0, 2)                           #input: [len_seq, batch_size, embedding_size]

        hidden_state = torch.autograd.Variable(torch.zeros(1*2, len(X), n_hidden))  #[num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        cell_state = torch.autograd.Variable(torch.zeros(1*2, len(X), n_hidden))    #[num_layers(=1) * num_directions(=2), batch_size, n_hidden]

        # final_hidden_state, final_cell_state : [num_layers(=1) * num_directions(=2), batch_size, n_hidden]
        output, (final_hidden_state, final_cell_state) = self.lstm(input, (hidden_state, cell_state))
        output = output.permute(1, 0, 2)      # output : [batch_size, len_seq, n_hidden]
        attn_output,  attention = self.attention_net(output, final_hidden_state)
        return self.out(attn_output), attention      # model : [batch_size, num_classes], attention : [batch_size, n_step]
Code Example #7
 def regularization(self, reg_lambda):
     laplacian = torch.autograd.Variable(self.laplacian, requires_grad=False)
     if self.on_cuda:
         laplacian = laplacian.cuda()
     weight = self.my_logistic_layers[-1].weight
     reg = torch.abs(weight).mm(laplacian) * torch.abs(weight)
     return reg.sum() * reg_lambda
Code Example #8
    def forward(self, predict, score):
        dialogue, sel_a, sel_b, reward, partner_reward = predict
        response_scores, selection_score = score

        reward_transformed = self.transform_reward(reward)
        step_rewards = []
        discount = th.autograd.Variable(cu(th.FloatTensor([1.0])))
        for i in range(len(response_scores)):
            step_rewards.append(discount * reward_transformed)
            discount = discount * self.gamma

        loss = th.autograd.Variable(cu(th.FloatTensor([0.0])))
        for score, step_reward in zip(response_scores, step_rewards):
            loss -= score * step_reward

        return loss
Code Example #9
File: utils.py Project: xuezzee/social_ac
def categorical_sample(probs, use_cuda=False):
    int_acs = torch.multinomial(probs, 1)
    if use_cuda:
        tensor_type = torch.cuda.FloatTensor
    else:
        tensor_type = torch.FloatTensor
    acs = torch.autograd.Variable(tensor_type(*probs.shape).fill_(0)).scatter_(1, int_acs, 1)
    return int_acs, acs
Code Example #10
 def pi_maker(feature,beta,temperature=0.1):
     n_bins = len(beta)
     w = torch.reshape(torch.linspace(1,n_bins,n_bins),[-1,1]) #make constant or something later
     w = torch.autograd.Variable(w, requires_grad=False)  # wrap w as a fixed (non-trainable) constant
     beta, _ = torch.sort(beta)
     beta[0] = 0
     b = torch.cumsum(-beta,0)
     pi = torch.reshape(torch.softmax((w*feature+b)/temperature,0),[-1,1])
     return pi
Code Example #11
def getCombination(memory, option):
    ret = []
    if option == 1:
        idx = 0
        while (idx < len(memory)):
            temp = memory[idx]
            temp.append(memory[idx + 1])
            temp.append(memory[idx + 2])
            ret.append(temp)
            idx = idx + 3

    elif option == 2:
        idx = 0
        while (idx < len(memory)):
            temp = memory[idx].clone()  # clone so the in-place += below does not modify memory[idx]
            temp += memory[idx + 1]
            temp += memory[idx + 2]
            ret.append(temp / 3)
            idx = idx + 3

    elif option == 3:
        idx = 0
        while (idx < len(memory)):
            temp = memory[idx]
            temp2 = memory[idx + 1]
            temp3 = memory[idx + 2]
            a, b, c = getParams(nn.Module, [temp, temp2, temp3])
            idx = idx + 3
            ret.append(a * temp + b * temp2 + c * temp3)
    elif option == 4:
        idx = 0
        while (idx < len(memory)):
            temp = memory[idx]
            temp2 = memory[idx + 1]
            temp3 = memory[idx + 2]
            ret.append(torch.max(temp2, temp3) + temp)
            idx = idx + 3
    elif option == 5:
        idx = 0
        while (idx < len(memory)):
            temp = memory[idx]
            temp2 = memory[idx + 1]
            temp3 = memory[idx + 2]
            ret.append(torch.max(torch.max(temp2, temp3), temp))
            idx = idx + 3
    elif option == 6:
        ret = memory

    return ret
Code Example #12
File: model.py Project: Sdernal/TextNormalization
    def _forward_alg(self, feats):
        init_alphas = torch.full((1, self.output_size), -10000.)
        # TODO: sos initialistion
        # init_alphas[0][START_OF_SENTENCE] = 0.

        forward_var = torch.autograd.Variable(init_alphas)

        for feat in feats:
            alphas_t = []
            for next_tag in range(self.output_size):
                emit_score = feat[next_tag].view(1, -1).expand(
                    1, self.output_size)
                trans_score = self.transitions[next_tag].view(1, -1)
                next_tag_var = forward_var + trans_score + emit_score
                alphas_t.append(log_sum_exp(next_tag_var).view(1))
            forward_var = torch.cat(alphas_t).view(1, -1)

        terminal_var = forward_var  # TODO: + self.transitions[END_OF_SENTENCE]
        alpha = log_sum_exp(terminal_var)
        return alpha
Code Example #13
 def compute_gradient_penalty(self, D, real_samples, fake_samples):
     """Calculates the gradient penalty loss for WGAN GP"""
     # Random weight term for interpolation between real and fake samples
     alpha = torch.Tensor(np.random.random((real_samples.size(0), 1, 1, 1))).to(self.device)
     # Get random interpolation between real and fake samples
     interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)
     d_interpolates = D(interpolates)
     fake = torch.autograd.Variable(torch.Tensor(real_samples.shape[0], 1).fill_(1.0), requires_grad=False).to(self.device)
     # Get gradient w.r.t. interpolates
     gradients = autograd.grad(
         outputs=d_interpolates,
         inputs=interpolates,
         grad_outputs=fake,
         create_graph=True,
         retain_graph=True,
         only_inputs=True,
     )[0]
     
     gradients = gradients.view(gradients.size(0), -1)
     gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()
     
     return gradient_penalty
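
In a WGAN-GP training step, this penalty is typically scaled by a coefficient and added to the critic loss. A minimal usage sketch, assuming hypothetical names lambda_gp, real_imgs, and fake_imgs that are not part of the snippet above:

 # Hedged sketch of the critic objective; lambda_gp, real_imgs, fake_imgs are assumed names.
 gradient_penalty = self.compute_gradient_penalty(D, real_imgs.data, fake_imgs.data)
 d_loss = -torch.mean(D(real_imgs)) + torch.mean(D(fake_imgs)) + lambda_gp * gradient_penalty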
Code Example #14
def mi_gradient_ascent(input_sample=None, target_model=None, optimizer=None,
                       category=None, iterations=0, verbose=False):
    """ Implementation of gradient based model inversion attack

    Args:
        input_sample (torch.tensor): Initialized input sample, usually
            randomly generated. Size should match the model input.
        target_model (nn.Module): Pretrained model to attack.
        optimizer (torch.optim.Optimizer): Optimizer (initialized on image parameters) used
            in attack.
        category (int): Category to invert.
        iterations (int): Query iterations in the attack.
        verbose (bool): If True will print the loss at each step in attack.

    Returns:
        (list(float)): Returns a list of the losses at each iteration.
    Example:

    Todos:
        Write example
    """
    category = torch.autograd.Variable(torch.LongTensor([category])).to(device)
    losses = []

    for i_step in range(iterations):
        target_model.zero_grad()
        out = target_model(input_sample)
        loss = -out.take(category)
        loss.backward()
        #
        optimizer.step()
        input_sample.grad.zero_()
        losses.append(loss.item())
        #

    return losses
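
The docstring's Example section is still a TODO; a minimal usage sketch, assuming a hypothetical pretrained classifier bound to target_model (MNIST-shaped input here) and the module-level device the function already references:

import torch

# Hypothetical setup: the optimizer updates the input image itself, not model weights.
input_sample = torch.randn(1, 1, 28, 28, device=device, requires_grad=True)
optimizer = torch.optim.SGD([input_sample], lr=0.1)
losses = mi_gradient_ascent(input_sample=input_sample,
                            target_model=target_model,  # assumed pretrained nn.Module
                            optimizer=optimizer,
                            category=3,       # class to invert
                            iterations=100)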
Code Example #15
def to_var(x):
    if torch.cuda.is_available():
        x = x.cuda()
    return torch.autograd.Variable(x)
Code Example #16
import torch
import torch.nn as nn
from torch.autograd import Variable

# ================================================================== #
#                         Table of Contents                          #
# ================================================================== #

# 1. Distance function
# 2. Loss function

# ================================================================== #
#                       1. Distance function
# ================================================================== #

# Cosine Similarity
input1 = Variable(torch.randn(100, 128))
input2 = Variable(torch.randn(100, 128))
cos = nn.CosineSimilarity(dim=1, eps=1e-6)
output = cos(input1, input2)

# ================================================================== #
#                       2. Loss function
# ================================================================== #

# L1 Loss
loss = nn.L1Loss()
input = torch.autograd.Variable(torch.randn(3, 5), requires_grad=True)
target = torch.autograd.Variable(torch.randn(3, 5))
output = loss(input, target)
output.backward()
Code Example #17
File: models.py Project: M1F1/nn2019
def gaussian(ins, is_training, mean, stddev):
    if is_training:
        noise = torch.autograd.Variable(ins.data.new(ins.size()).normal_(mean, stddev))
        return ins + noise
    return ins
Code Example #18
 def init_hidden(self, batch_size):
     return torch.autograd.Variable(torch.zeros(self.num_recur_layer, batch_size, self.hidden_size))
Code Example #19
dataset = ListDataset(data_dir=data_dir,
                      listing=listing,
                      input_transform=input_transform,
                      target_depth_transform=None,
                      target_labels_transform=None,
                      co_transform=co_transform,
                      file_suffix="jpg")

data_loader = torch.utils.data.DataLoader(dataset=dataset,
                                          batch_size=1,
                                          shuffle=False,
                                          drop_last=False)

counter = 0
for x, y, z in data_loader:
    counter += 1
    x_var = torch.autograd.Variable(x.type(dtype), volatile=True)
    z_var = torch.autograd.Variable(z.type(dtype), volatile=True)

    pred_depth, _ = model(x_var, z_var)

    input_rgb_image = x_var[0].data.permute(1, 2,
                                            0).cpu().numpy().astype(np.uint8)
    plt.imsave('result_linput_rgb_counter_{}.png'.format(counter),
               input_rgb_image)

    input_gt_depth_image = z_var[0].data.permute(1, 2, 0).cpu().numpy().astype(
        np.uint8)
    plt.imsave('result_input_gt_depth_counter_{}.png'.format(counter),
               input_gt_depth_image)