Example #1
    def _compute_mean(self):
        meanstd_file = './data/300W_LP/mean.pth.tar'
        if os.path.isfile(meanstd_file):
            ms = torch.load(meanstd_file)
        else:
            print("\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...")
            mean = torch.zeros(3)
            std = torch.zeros(3)
            if self.is_train:
                for i in range(self.total):
                    a = self.anno[i]
                    img_path = os.path.join(self.img_folder, a.split('_')[0],
                                            a[:-8] + '.jpg')
                    img = load_image(img_path)
                    mean += img.view(img.size(0), -1).mean(1)
                    std += img.view(img.size(0), -1).std(1)

            mean /= self.total
            std /= self.total
            ms = {
                'mean': mean,
                'std': std,
            }
            torch.save(ms, meanstd_file)
        if self.is_train:
            print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
            print('\tStd:  %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
        return ms['mean'], ms['std']
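
For context, per-channel statistics like these usually feed a normalization step. A minimal sketch, assuming img is a float CxHxW image tensor and mean/std are the 3-element tensors returned above (normalize_image is an illustrative name, not part of the project):

def normalize_image(img, mean, std):
    # broadcast the per-channel statistics over H and W
    return (img - mean.view(-1, 1, 1)) / std.view(-1, 1, 1)
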
Example #2
    def encoder_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the encoder works as expected

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batch size of generated input
        '''
        word_dict = self.get_vocab()
        feature_dicts = []
        embeddings = make_embeddings(opt, word_dict, feature_dicts)
        enc = make_encoder(opt, embeddings)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)

        hidden_t, outputs = enc(test_src, test_length)

        # Initialize vectors to compare size with
        test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.rnn_size)
        test_out = torch.zeros(source_l, bsize, opt.rnn_size)

        # Ensure correct sizes and types
        self.assertEqual(test_hid.size(), hidden_t[0].size())
        self.assertEqual(test_hid.size(), hidden_t[1].size())
        self.assertEqual(test_out.size(), outputs.size())
        self.assertEqual(type(outputs), torch.autograd.Variable)
        self.assertEqual(type(outputs.data), torch.FloatTensor)
Example #3
def l2l_train(model, cluster_center, n_epoch=10000, trunc_step=10):
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    M_all = Variable(torch.zeros(model.n_class, model.n_dim))
    B_all = Variable(torch.zeros(model.n_class))
    for epoch in range(n_epoch):
        loss = 0
        M_step, B_step = [], []
        for step in range(trunc_step):
            data = generate_data(cluster_center)
            optimizer.zero_grad()
            x, y = Variable(torch.from_numpy(data[0])).float(), Variable(torch.from_numpy(data[1]))
            w, b = model(x)
            M = Variable(torch.zeros(model.n_class_n, model.n_dim))
            B = Variable(torch.zeros(model.n_class_n))
            for k in range(model.n_class_n):
                M[k] = torch.cat((w[:, 0][y == model.n_class_l + k].view(-1, 1),
                                  w[:, 1][y == model.n_class_l + k].view(-1, 1)), 1).mean(0)
                B[k] = b[y == model.n_class_l + k].mean()
            if step == 0:
                M_ = M
                B_ = B
            else:
                M_ = step / (step + 1) * M_step[-1] + 1 / (step + 1) * M
                B_ = step / (step + 1) * B_step[-1] + 1 / (step + 1) * B
            M_step.append(M_)
            B_step.append(B_)
            pred = torch.mm(x, M_.t()) + B_.view(1, -1).expand_as(torch.mm(x, M_.t()))
            loss += F.cross_entropy(pred, y)
        loss.backward()
        optimizer.step()
        print('Train Epoch: {}\tLoss: {:.6f}'.format(epoch, loss.data[0]))
    return M_all, B_all, cluster_center
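
Note that the M_/B_ update above is a running (cumulative) mean over the truncation steps; a quick numeric check of the recurrence:

import torch

vals = [torch.tensor(v) for v in (2.0, 4.0, 9.0)]
running = vals[0]
for step in range(1, len(vals)):
    running = step / (step + 1) * running + 1 / (step + 1) * vals[step]
print(running)  # tensor(5.), the mean of 2, 4, 9
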
Example #4
File: lstm.py Project: Suluo/Kaggle
 def init_hidden(self):
     # the first is the hidden h
     # the second is the cell  c
     return (
         Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim)),
         Variable(torch.zeros(self.num_layers, self.batch_size, self.hidden_dim))
     )
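
The zero-filled pair returned above is the (h_0, c_0) initial state that nn.LSTM expects. A minimal usage sketch with illustrative sizes:

import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16, num_layers=2, batch_first=True)
h0 = torch.zeros(2, 4, 16)  # (num_layers, batch_size, hidden_dim)
c0 = torch.zeros(2, 4, 16)
out, (hn, cn) = lstm(torch.randn(4, 5, 8), (h0, c0))  # batch of 4, seq len 5
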
Example #5
 def sample(self, mu, logvar, k):
     eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()) #[P,B,Z]
     z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
     logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)), 
                         Variable(torch.zeros(self.B, self.z_size)))  #[P,B]
     logqz = lognormal(z, mu, logvar)
     return z, logpz, logqz
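
This is the reparameterization trick, z = mu + eps * exp(0.5 * logvar). In current PyTorch the same sampling step can be sketched without Variable (shapes are illustrative):

import torch

mu, logvar = torch.zeros(4, 10), torch.zeros(4, 10)  # [B, Z]
eps = torch.randn(3, 4, 10)                          # [P, B, Z]
z = mu + eps * torch.exp(0.5 * logvar)               # broadcasts over the particle dim P
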
Example #6
    def forward(self, input_):

        #init hidden state with xavier
        vert_state = torch.zeros(input_[0].size(1), self.vert_state_dim).cuda()
        edge_state = torch.zeros(input_[1].size(1), self.edge_state_dim).cuda()

        '''if self.gpu_mode >= 0:
            vert_state = torch.tensor(vert_state.cuda())
            edge_state = torch.tensor(edge_state.cuda())'''

        batch_size = input_[0].size(0)
        vert_input = input_[0]
        edge_input = input_[1]
        #print('vert and edge input', vert_input.size(), edge_input.size())
        vert_state_list = []
        edge_state_list = []
        #todo: can this be parallelized?
        for i in range(batch_size):
            torch.nn.init.xavier_uniform(vert_state)
            torch.nn.init.xavier_uniform(edge_state)
            vert_state = self.vert_gru(vert_input[i], vert_state)
            edge_state = self.edge_gru(edge_input[i], edge_state)

            #todo: check whether this way is correct, TF code uses a separate global var to keep hidden state
            for _ in range(self.num_steps):  # avoid shadowing the batch index i
                edge_context = self.get_edge_context(edge_state, vert_state)
                vert_context = self.get_vert_context(vert_state, edge_state)

                edge_state = self.edge_gru(edge_context, edge_state)
                vert_state = self.vert_gru(vert_context, vert_state)

            vert_state_list.append(vert_state)
            edge_state_list.append(edge_state)

        return torch.stack(vert_state_list), torch.stack(edge_state_list)
Example #7
def l2l_validate(model, cluster_center, n_epoch=100):
    val_accuracy = []
    for epoch in range(n_epoch):
        data_l = generate_data_l(cluster_center)
        data_n = generate_data_n(cluster_center, model.n_class_n)
        x_l, y_l = Variable(torch.from_numpy(data_l[0])).float(), Variable(
            torch.from_numpy(data_l[1]))
        x_n, y_n = Variable(torch.from_numpy(data_n[0])).float(), Variable(
            torch.from_numpy(data_n[1]))
        pred_ll, pred_nl, w, b = model(x_l, x_n)
        M = Variable(torch.zeros(model.n_class_n, model.n_dim))
        B = Variable(torch.zeros(model.n_class_n))
        for k in range(model.n_class_n):
            M[k] = torch.cat((w[:, 0][y_n == model.n_class_l + k].view(-1, 1),
                              w[:, 1][y_n == model.n_class_l + k].view(-1, 1)), 1).mean(0)
            B[k] = b[y_n == model.n_class_l + k].mean()
        pred_ln = torch.mm(x_l, M.t()) + B.view(1, -1).expand_as(torch.mm(x_l, M.t()))
        pred_nn = torch.mm(x_n, M.t()) + B.view(1, -1).expand_as(torch.mm(x_n, M.t()))
        pred = torch.cat((torch.cat((pred_ll, pred_nl)), torch.cat((pred_ln, pred_nn))), 1)
        pred = pred.data.max(1)[1]
        y = torch.cat((y_l, y_n))
        accuracy = pred.eq(y.data).cpu().sum() * 1.0 / y.size()[0]
        # print('accuracy: %.2f' % accuracy)
        val_accuracy.append(accuracy)
        acc_l = pred.eq(y.data).cpu()[0:100].sum() * 1.0 / 100
        acc_n = pred.eq(y.data).cpu()[100:150].sum() * 1.0 / 50
        print('accuracy: %.2f, lifelong accuracy: %.2f, new accuracy: %.2f' % (accuracy, acc_l, acc_n))

    return numpy.mean(numpy.asarray(val_accuracy))
Example #8
def test(model):
    game_state = GameState()

    # initial action is do nothing
    action = torch.zeros([model.number_of_actions], dtype=torch.float32)
    action[0] = 1
    image_data, reward, terminal = game_state.frame_step(action)
    image_data = resize_and_bgr2gray(image_data)
    image_data = image_to_tensor(image_data)
    state = torch.cat((image_data, image_data, image_data, image_data)).unsqueeze(0)

    while True:
        # get output from the neural network
        output = model(state)[0]

        action = torch.zeros([model.number_of_actions], dtype=torch.float32)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action = action.cuda()

        # get action
        action_index = torch.argmax(output)
        if torch.cuda.is_available():  # put on GPU if CUDA is available
            action_index = action_index.cuda()
        action[action_index] = 1

        # get next state
        image_data_1, reward, terminal = game_state.frame_step(action)
        image_data_1 = resize_and_bgr2gray(image_data_1)
        image_data_1 = image_to_tensor(image_data_1)
        state_1 = torch.cat((state.squeeze(0)[1:, :, :], image_data_1)).unsqueeze(0)

        # set state to be state_1
        state = state_1
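
The state update keeps a sliding window over the four most recent frames. A shape-only sketch of the same pattern (the 84x84 frame size is an assumption):

import torch

frame = torch.zeros(1, 84, 84)
state = torch.cat((frame, frame, frame, frame)).unsqueeze(0)  # (1, 4, 84, 84)
new_frame = torch.ones(1, 84, 84)
state = torch.cat((state.squeeze(0)[1:, :, :], new_frame)).unsqueeze(0)  # drop the oldest frame
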
Example #9
    def test_augmented_lstm_computes_same_function_as_pytorch_lstm(self):
        augmented_lstm = AugmentedLstm(10, 11)
        pytorch_lstm = LSTM(10, 11, num_layers=1, batch_first=True)
        # Initialize all weights to be == 1.
        initializer = InitializerApplicator([(".*", lambda tensor: torch.nn.init.constant_(tensor, 1.))])
        initializer(augmented_lstm)
        initializer(pytorch_lstm)

        initial_state = torch.zeros([1, 5, 11])
        initial_memory = torch.zeros([1, 5, 11])

        # Use bigger numbers to avoid floating point instability.
        sorted_tensor, sorted_sequence, _, _ = sort_batch_by_length(self.random_tensor * 5., self.sequence_lengths)
        lstm_input = pack_padded_sequence(sorted_tensor, sorted_sequence.data.tolist(), batch_first=True)

        augmented_output, augmented_state = augmented_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output, pytorch_state = pytorch_lstm(lstm_input, (initial_state, initial_memory))
        pytorch_output_sequence, _ = pad_packed_sequence(pytorch_output, batch_first=True)
        augmented_output_sequence, _ = pad_packed_sequence(augmented_output, batch_first=True)

        numpy.testing.assert_array_almost_equal(pytorch_output_sequence.data.numpy(),
                                                augmented_output_sequence.data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[0].data.numpy(),
                                                augmented_state[0].data.numpy(), decimal=4)
        numpy.testing.assert_array_almost_equal(pytorch_state[1].data.numpy(),
                                                augmented_state[1].data.numpy(), decimal=4)
Example #10
    def __init__(self, input_dim=88, z_dim=100, emission_dim=100,
                 transition_dim=200, rnn_dim=600, rnn_dropout_rate=0.0,
                 num_iafs=0, iaf_dim=50, use_cuda=False):
        super(DMM, self).__init__()
        # instantiate PyTorch modules used in the model and guide below
        self.emitter = Emitter(input_dim, z_dim, emission_dim)
        self.trans = GatedTransition(z_dim, transition_dim)
        self.combiner = Combiner(z_dim, rnn_dim)
        self.rnn = nn.RNN(input_size=input_dim, hidden_size=rnn_dim, nonlinearity='relu',
                          batch_first=True, bidirectional=False, num_layers=1,
                          dropout=rnn_dropout_rate)

        # if we're using normalizing flows, instantiate those too
        iafs = [InverseAutoregressiveFlow(z_dim, iaf_dim) for _ in range(num_iafs)]
        self.iafs = nn.ModuleList(iafs)

        # define (trainable) parameters z_0 and z_q_0 that help define the probability
        # distributions p(z_1) and q(z_1)
        # (since for t = 1 there are no previous latents to condition on)
        self.z_0 = nn.Parameter(torch.zeros(z_dim))
        self.z_q_0 = nn.Parameter(torch.zeros(z_dim))
        # define a (trainable) parameter for the initial hidden state of the rnn
        self.h_0 = nn.Parameter(torch.zeros(1, 1, rnn_dim))

        self.use_cuda = use_cuda
        # if on gpu cuda-ize all PyTorch (sub)modules
        if use_cuda:
            self.cuda()
Example #11
 def init_hidden(self, num_layers, batch_size):
     # the first is the hidden h
     # the second is the cell  c
     # return (Variable(torch.zeros(1, batch_size, self.hidden_dim)),
     #          Variable(torch.zeros(1, batch_size, self.hidden_dim)))
     return (Variable(torch.zeros(1 * num_layers, batch_size, self.hidden_dim)),
             Variable(torch.zeros(1 * num_layers, batch_size, self.hidden_dim)))
Example #12
def evaluate(encoder, decoder, sentence, max_length=MAX_LENGTH):
    with torch.no_grad():
        input_tensor = tensorFromSentence(input_lang, sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initHidden()

        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)

        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]

        decoder_input = torch.tensor([[SOS_token]], device=device)  # SOS

        decoder_hidden = encoder_hidden

        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)

        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == EOS_token:
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(output_lang.index2word[topi.item()])

            decoder_input = topi.squeeze().detach()

        return decoded_words, decoder_attentions[:di + 1]
Example #13
def singleTagLoss(pred_tag, keypoints):
    """
    associative embedding loss for one image
    """
    eps = 1e-6
    tags = []
    pull = 0
    for i in keypoints:
        tmp = []
        for j in i:
            if j[1]>0:
                tmp.append(pred_tag[j[0]])
        if len(tmp) == 0:
            continue
        tmp = torch.stack(tmp)
        tags.append(torch.mean(tmp, dim=0))
        pull = pull +  torch.mean((tmp - tags[-1].expand_as(tmp))**2)

    if len(tags) == 0:
        return make_input(torch.zeros([1]).float()), make_input(torch.zeros([1]).float())

    tags = torch.stack(tags)[:,0]

    num = tags.size()[0]
    size = (num, num, tags.size()[1])
    A = tags.unsqueeze(dim=1).expand(*size)
    B = A.permute(1, 0, 2)

    diff = A - B
    diff = torch.pow(diff, 2).sum(dim=2)[:,:,0]
    push = torch.exp(-diff)
    push = (torch.sum(push) - num)
    return push/((num - 1) * num + eps) * 0.5, pull/(num + eps)
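
The returned pair is the push term (distinct people whose mean tags are close are penalized via exp(-||t_i - t_j||^2)) and the pull term (within-person tag variance). A tiny numeric illustration of the pairwise push computation, assuming 1-D tag embeddings:

import torch

tags = torch.tensor([[0.0], [1.0], [5.0]])    # one mean tag per person
diff = tags.unsqueeze(1) - tags.unsqueeze(0)  # pairwise differences
push = torch.exp(-(diff ** 2).sum(dim=2))     # entries near 1 mark confusable people
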
Example #14
def knn(Mxx, Mxy, Myy, k, sqrt):
    n0 = Mxx.size(0)
    n1 = Myy.size(0)
    label = torch.cat((torch.ones(n0),torch.zeros(n1)))
    M = torch.cat((torch.cat((Mxx,Mxy),1), torch.cat((Mxy.transpose(0,1),Myy), 1)), 0)
    if sqrt:
        M = M.abs().sqrt()
    INFINITY = float('inf')
    val, idx = (M+torch.diag(INFINITY*torch.ones(n0+n1))).topk(k, 0, False)

    count = torch.zeros(n0+n1)
    for i in range(0,k):
        count = count + label.index_select(0,idx[i])
    pred = torch.ge(count, (float(k)/2)*torch.ones(n0+n1)).float()

    s = Score_knn()
    s.tp = (pred*label).sum()
    s.fp = (pred*(1-label)).sum()
    s.fn = ((1-pred)*label).sum()
    s.tn = ((1-pred)*(1-label)).sum()
    s.precision = s.tp/(s.tp+s.fp)
    s.recall = s.tp/(s.tp+s.fn)
    s.acc_t = s.tp/(s.tp+s.fn)
    s.acc_f = s.tn/(s.tn+s.fp)
    s.acc = torch.eq(label, pred).float().mean()
    s.k = k 

    return s
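
A hedged usage sketch: Mxx, Mxy and Myy are pairwise distance matrices within and across a real and a generated sample set (Score_knn comes from the surrounding project), so they might be built like this:

import torch

x = torch.randn(100, 32)   # e.g. real features
y = torch.randn(80, 32)    # e.g. generated features
Mxx = torch.cdist(x, x) ** 2
Mxy = torch.cdist(x, y) ** 2
Myy = torch.cdist(y, y) ** 2
# s = knn(Mxx, Mxy, Myy, k=1, sqrt=True)  # leave-one-out 1-NN two-sample test
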
Example #15
    def __getitem__(self, idx):

        face_ind = 1
        if idx < self.n_MSR:
            vid = self.train_list[idx]
            text = self.text_features[vid]
            r = random.randint(0, len(text)-1)
            text = text[r]
            flow = self.flow_features[vid]
            audio = self.audio_features[vid]
            video = self.visual_features[vid]
            face = self.face_features[vid]

            if np.sum(face) == 0:
                face_ind = 0
        elif self.coco:
            video = self.coco_visual[idx-self.n_MSR]
            text = self.coco_text[idx-self.n_MSR]
            audio = th.zeros(1,128)
            flow = th.zeros(1024)
            face = th.zeros(128)
            face_ind = 0

        return {'video': video, 
                'flow': flow,
                'face': face,
                'text': text,
                'coco_ind': self.coco_ind[idx],
                'face_ind': face_ind,
                'audio': audio
                }
Example #16
 def init_hiddens(self, x, requires_grad=False):
     batch_size = x.size()[0]
     return [
         Variable(torch.zeros(batch_size, h_size), requires_grad=requires_grad).cuda() if x.is_cuda  # puts the tensor on gpu if our input is on gpu
         else Variable(torch.zeros(batch_size, h_size), requires_grad=requires_grad)
         for h_size in self.sizes[1:]
     ]
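
In current PyTorch the cuda() branch can be avoided by passing device= directly. A minimal device-agnostic sketch of the same helper:

 def init_hiddens(self, x, requires_grad=False):
     batch_size = x.size(0)
     return [torch.zeros(batch_size, h_size, device=x.device,
                         requires_grad=requires_grad)
             for h_size in self.sizes[1:]]
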
Example #17
    def sample(self, mu, logvar, k):

        # print (mu)
        # print (logvar)


        if torch.cuda.is_available():
            eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_()).cuda() #[P,B,Z]

            # print (mu.size())
            # print (logvar.size())
            # print (eps.size())

            z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
            logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size).cuda()), 
                                Variable(torch.zeros(self.B, self.z_size)).cuda())  #[P,B]



            # logqz = lognormal(z, mu, logvar)

            logqz = lognormal(z, Variable(mu.data), Variable(logvar.data))



        else:
            eps = Variable(torch.FloatTensor(k, self.B, self.z_size).normal_())#[P,B,Z]
            z = eps.mul(torch.exp(.5*logvar)) + mu  #[P,B,Z]
            logpz = lognormal(z, Variable(torch.zeros(self.B, self.z_size)), 
                                Variable(torch.zeros(self.B, self.z_size)))  #[P,B]
            logqz = lognormal(z, mu, logvar) 
        return z, logpz, logqz
Example #18
    def forward(self, X):
        """
        In this case we can predict the next 
        """
        self.sample_posterior()
        outputs = []
        h_t = torch.zeros(X.size(0), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)
        c_t = torch.zeros(X.size(0), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)
        h_t2 = torch.zeros(X.size(0), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)
        c_t2 = torch.zeros(X.size(0), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)

        for i, input_t in enumerate(X.chunk(X.size(1), dim=1)):
            h_t, c_t = self.lstm1(input_t, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
#            output = self.linear2(self.cf_a.activation_func(self.linear(h_t2)))
            outputs += [output]
            
        for i in range(self.future):# if we should predict the future
            h_t, c_t = self.lstm1(output, (h_t, c_t))
            h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
            output = self.linear(h_t2)
            outputs += [output]
        outputs = torch.stack(outputs, 1).squeeze(2)
        return outputs
Example #19
    def create_mask_montage(self, image, predictions):
        """
        Create a montage showing the probability heatmaps for each one of the
        detected objects

        Arguments:
            image (np.ndarray): an image as returned by OpenCV
            predictions (BoxList): the result of the computation by the model.
                It should contain the field `mask`.
        """
        masks = predictions.get_field("mask")
        masks_per_dim = self.masks_per_dim
        masks = torch.nn.functional.interpolate(
            masks.float(), scale_factor=1 / masks_per_dim
        ).byte()
        height, width = masks.shape[-2:]
        max_masks = masks_per_dim ** 2
        masks = masks[:max_masks]
        # handle the case where we have fewer detections than max_masks
        if len(masks) < max_masks:
            masks_padded = torch.zeros(max_masks, 1, height, width, dtype=torch.uint8)
            masks_padded[: len(masks)] = masks
            masks = masks_padded
        masks = masks.reshape(masks_per_dim, masks_per_dim, height, width)
        result = torch.zeros(
            (masks_per_dim * height, masks_per_dim * width), dtype=torch.uint8
        )
        for y in range(masks_per_dim):
            start_y = y * height
            end_y = (y + 1) * height
            for x in range(masks_per_dim):
                start_x = x * width
                end_x = (x + 1) * width
                result[start_y:end_y, start_x:end_x] = masks[y, x]
        return cv2.applyColorMap(result.numpy(), cv2.COLORMAP_JET)
Example #20
 def predict(self, x, attn_type = "hard"):
     #predict with greedy decoding
     emb = self.embedding(x)
     h = Variable(torch.zeros(1, x.size(0), self.hidden_dim))
     c = Variable(torch.zeros(1, x.size(0), self.hidden_dim))
     enc_h, _ = self.encoder(emb, (h, c))
     y = [Variable(torch.zeros(x.size(0)).long())]
     self.attn = []        
     for t in range(x.size(1)):
         emb_t = self.embedding(y[-1])
         dec_h, (h, c) = self.decoder(emb_t.unsqueeze(1), (h, c))
         scores = torch.bmm(enc_h, dec_h.transpose(1,2)).squeeze(2)
         attn_dist = F.softmax(scores, dim = 1)
         self.attn.append(attn_dist.data)
         if attn_type == "hard":
             _, argmax = attn_dist.max(1)
             one_hot = Variable(torch.zeros_like(attn_dist.data).scatter_(-1, argmax.data.unsqueeze(1), 1))
             context = torch.bmm(one_hot.unsqueeze(1), enc_h).squeeze(1)                    
         else:                
             context = torch.bmm(attn_dist.unsqueeze(1), enc_h).squeeze(1)
         pred = self.vocab_layer(torch.cat([dec_h.squeeze(1), context], 1))
         _, next_token = pred.max(1)
         y.append(next_token)
     self.attn = torch.stack(self.attn, 0).transpose(0, 1)
     return torch.stack(y, 0).transpose(0, 1)
Example #21
    def __init__(self, num_steps, num_processes, obs_shape, action_space):
        self.states = torch.zeros(num_steps + 1, num_processes, *obs_shape)
        self.rewards = torch.zeros(num_steps, num_processes, 1)

        self.returns = torch.zeros(num_steps + 1, num_processes, 1)


        # if action_space.__class__.__name__ == 'Discrete':
        action_shape = 1
        # else:
        #     action_shape = action_space.shape[0]
        self.actions = torch.zeros(num_steps, num_processes, action_shape)
        # if action_space.__class__.__name__ == 'Discrete':
        self.actions = self.actions.long()

        self.masks = torch.ones(num_steps + 1, num_processes, 1)


        # self.value_preds = torch.zeros(num_steps + 1, num_processes, 1)
        self.value_preds = []


        # self.action_log_probs = torch.zeros(num_steps, num_processes, 1)
        # self.dist_entropy = torch.zeros(num_steps, num_processes, 1)
        self.action_log_probs = []
        self.dist_entropy = []
Example #22
    def init_hidden(self):
        if torch.cuda.is_available():
            hidden = torch.zeros(self.n_layers, 1, self.hidden_size).cuda()
        else:
            hidden = torch.zeros(self.n_layers, 1, self.hidden_size)

        return Variable(hidden)
Example #23
    def forward(self, X_list_of_chains):
        
        """
        X is a list of tensors from which to evaluate the performance.
        Every element in X can have any length.
        The batch size is 1 in this case... we just run it a number of times.
        
        """
        self.sample_posterior()

#        print ("Total_sample_dim", X.shape)
        h_t = torch.zeros(X_list_of_chains[0].size(1), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)
        c_t = torch.zeros(X_list_of_chains[0].size(1), self.cf_a.HS, dtype=self.cf_a.dtype, device=self.cf_a.device)

        ## We generate the output for every vector in the chain
        outputs = []
        for X in X_list_of_chains:
            for i, input_t in enumerate(X.chunk(X.size(0), dim=0)):
                input_t = input_t[:,0,:]
#                print ("One_timestep_dim",input_t.shape)
                h_t, c_t = self.lstm1(input_t, (h_t, c_t))
        
            output = self.linear(h_t)
            outputs += [output]
        outputs = torch.cat(outputs, 0)
#        print ("prediction dim ", output.shape)
#        print ("predictions dim ", outputs.shape)
        return outputs
Example #24
    def _construct_previous(self, layer, direction, inputs, tree, idx):
        if direction == 'up':
            oidx = tree.children_idx(idx)
        else:
            oidx = tree.parents_idx(idx)

        if oidx:
            h_prev, c_prev = [], []

            for i in oidx:
                h_prev_i, c_prev_i = self._upward_downward(layer,
                                                           direction,
                                                           inputs,
                                                           tree, i)

                h_prev.append(h_prev_i)
                c_prev.append(c_prev_i)

            h_prev = torch.stack(h_prev, 1)
            c_prev = torch.stack(c_prev, 1)

        elif inputs.is_cuda:
            h_prev = torch.zeros(self.hidden_size, 1).cuda()
            c_prev = torch.zeros(self.hidden_size, 1).cuda()

        else:
            h_prev = torch.zeros(self.hidden_size, 1)
            c_prev = torch.zeros(self.hidden_size, 1)

        return oidx, (h_prev, c_prev)
Example #25
    def _construct_x_t(self, layer, inputs, idx, tree):
        if layer > 0 and self.bidirectional:
            x_t = torch.cat([self.hidden_state[layer - 1]['up'][idx],
                             self.hidden_state[layer - 1]['down'][idx]])
        elif layer > 0:
            x_t = self.hidden_state[layer - 1]['up'][idx]
        else:
            if idx in tree.terminal_indices:
                string_idx = tree.terminal_indices.index(idx)

                if self._has_batch_dimension:
                    x_t = inputs[string_idx, 0]
                else:
                    x_t = inputs[string_idx]
            else:
                if self._has_batch_dimension:
                    x_t_raw = torch.zeros(self.input_size, 1)
                else:
                    x_t_raw = torch.zeros(self.input_size)

                if inputs.is_cuda:
                    x_t = x_t_raw.cuda()

                else:
                    x_t = x_t_raw

        return x_t
Example #26
    def fit(self):
        args = self.args

        for epoch in range(args.max_epochs):
            self.G.train()
            self.D.train()
            for step, inputs in enumerate(self.train_loader):
                batch_size = inputs[0].size(0)

                images = inputs[0].to(self.device)
                labels = inputs[1].to(self.device)
                
                # create the labels used to distinguish real or fake
                real_labels = torch.ones(batch_size, dtype=torch.int64).to(self.device)
                fake_labels = torch.zeros(batch_size, dtype=torch.int64).to(self.device)
                
                # train the discriminator
                
                # discriminator <- real image
                D_real, D_real_cls = self.D(images)
                D_loss_real = self.loss_fn(D_real, real_labels)
                D_loss_real_cls = self.loss_fn(D_real_cls, labels)
                
                # noise vector
                z = torch.randn(batch_size, args.z_dim).to(self.device)

                # convert the labels to one-hot vectors
                y_onehot = torch.zeros((batch_size, 10)).to(self.device)
                y_onehot.scatter_(1, labels.unsqueeze(1), 1)
                y_onehot.requires_grad_(False)
                
                # discriminator <- fake image
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                D_loss_fake = self.loss_fn(D_fake, fake_labels)
                D_loss_fake_cls = self.loss_fn(D_fake_cls, labels)
                
                D_loss = D_loss_real + D_loss_fake + \
                         D_loss_real_cls + D_loss_fake_cls
                self.D.zero_grad()
                D_loss.backward()
                self.optim_D.step()
                
                # train the generator

                z = torch.randn(batch_size, args.z_dim).to(self.device)
                G_fake = self.G(y_onehot, z)
                D_fake, D_fake_cls = self.D(G_fake)
                
                G_loss = self.loss_fn(D_fake, real_labels) + \
                         self.loss_fn(D_fake_cls, labels)
                self.G.zero_grad()
                G_loss.backward()
                self.optim_G.step()

            if (epoch+1) % args.print_every == 0:
                print("Epoch [{}/{}] Loss_D: {:.3f}, Loss_G: {:.3f}".
                    format(epoch+1, args.max_epochs, D_loss.item(), G_loss.item()))
                self.save(args.ckpt_dir, epoch+1)
                self.sample(epoch+1)
Example #27
    def addition_feature(self, index):
        data = [self.context, self.question]
        add_features = [None, None]

        for k in range(len(data)):
            features = {}
            tmp_seq_len = data[k]['token'].shape[1]

            if self.config['use_pos']:
                features['pos'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2pos'])), dtype=torch.float)
                for i, ele in enumerate(data[k]['pos'][index]):
                    if ele == PreprocessData.padding_idx:
                        break
                    features['pos'][i, ele] = 1

            if self.config['use_ent']:
                features['ent'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2ent'])), dtype=torch.float)
                for i, ele in enumerate(data[k]['ent'][index]):
                    if ele == PreprocessData.padding_idx:
                        break
                    features['ent'][i, ele] = 1

            if self.config['use_em']:
                features['em'] = to_float_tensor(data[k]['em'][index]).unsqueeze(-1)
            if self.config['use_em_lemma']:
                features['em_lemma'] = to_float_tensor(data[k]['em_lemma'][index]).unsqueeze(-1)

            if len(features) > 0:
                add_features[k] = torch.cat(list(features.values()), dim=-1)

        return add_features
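
The element-wise one-hot fill in the loops above can also be written with scatter_; a small standalone sketch, assuming ids is a 1-D LongTensor of feature ids with no padding entries:

import torch

seq_len, n_pos = 6, 4
ids = torch.tensor([0, 3, 1, 2, 0, 3])
pos = torch.zeros(seq_len, n_pos)
pos.scatter_(1, ids.unsqueeze(1), 1.0)  # same result as the per-position loop
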
Example #28
    def test_make_scipy_bounds(self):
        X = torch.zeros(3, 1, 2)
        # both None
        self.assertIsNone(make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=None))
        # lower None
        upper_bounds = torch.ones(2)
        bounds = make_scipy_bounds(X=X, lower_bounds=None, upper_bounds=upper_bounds)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(
            np.all(np.equal(bounds.lb, np.full((3, 1, 2), float("-inf")).flatten()))
        )
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
        # upper None
        lower_bounds = torch.zeros(2)
        bounds = make_scipy_bounds(X=X, lower_bounds=lower_bounds, upper_bounds=None)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(
            np.all(np.equal(bounds.ub, np.full((3, 1, 2), float("inf")).flatten()))
        )
        # floats
        bounds = make_scipy_bounds(X=X, lower_bounds=0.0, upper_bounds=1.0)
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))

        # 1-d tensors
        bounds = make_scipy_bounds(
            X=X, lower_bounds=lower_bounds, upper_bounds=upper_bounds
        )
        self.assertIsInstance(bounds, Bounds)
        self.assertTrue(np.all(np.equal(bounds.lb, np.zeros((3, 1, 2)).flatten())))
        self.assertTrue(np.all(np.equal(bounds.ub, np.ones((3, 1, 2)).flatten())))
Example #29
    def encoder_forward(self, opt, source_l=3, bsize=1):
        '''
        Tests if the encoder works as expected

        args:
            opt: set of options
            source_l: Length of generated input sentence
            bsize: Batch size of generated input
        '''
        if opt.rnn_size > 0:
            opt.enc_rnn_size = opt.rnn_size
        word_field = self.get_field()
        embeddings = build_embeddings(opt, word_field)
        enc = build_encoder(opt, embeddings)

        test_src, test_tgt, test_length = self.get_batch(source_l=source_l,
                                                         bsize=bsize)

        hidden_t, outputs, test_length = enc(test_src, test_length)

        # Initialize vectors to compare size with
        test_hid = torch.zeros(self.opt.enc_layers, bsize, opt.enc_rnn_size)
        test_out = torch.zeros(source_l, bsize, opt.dec_rnn_size)

        # Ensure correct sizes and types
        self.assertEqual(test_hid.size(), hidden_t[0].size())
        self.assertEqual(test_hid.size(), hidden_t[1].size())
        self.assertEqual(test_out.size(), outputs.size())
        self.assertEqual(type(outputs), torch.Tensor)
Example #30
 def init_hidden(self):
     # Before we've done anything, we don't have any hidden state.
     # Refer to the PyTorch documentation to see exactly
     # why they have this dimensionality.
     # The axes semantics are (num_layers, minibatch_size, hidden_dim)
     return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim)),
             autograd.Variable(torch.zeros(1, 1, self.hidden_dim)))
Example #31
 def initHidden(self):
     return Variable(torch.zeros(1, self.hidden_size))
Example #32
    def build_targets(self, pred_boxes, ground_truth, height, width):
        batch_size = len(ground_truth)

        conf_mask = torch.ones(
            batch_size, self.num_anchors, height * width,
            requires_grad=False) * self.noobject_scale
        coord_mask = torch.zeros(batch_size,
                                 self.num_anchors,
                                 1,
                                 height * width,
                                 requires_grad=False)
        cls_mask = torch.zeros(batch_size,
                               self.num_anchors,
                               height * width,
                               requires_grad=False).byte()
        tcoord = torch.zeros(batch_size,
                             self.num_anchors,
                             4,
                             height * width,
                             requires_grad=False)
        tconf = torch.zeros(batch_size,
                            self.num_anchors,
                            height * width,
                            requires_grad=False)
        tcls = torch.zeros(batch_size,
                           self.num_anchors,
                           height * width,
                           requires_grad=False)

        for b in range(batch_size):
            if len(ground_truth[b]) == 0:
                continue

            # Build up tensors
            cur_pred_boxes = pred_boxes[b * (self.num_anchors * height *
                                             width):(b + 1) *
                                        (self.num_anchors * height * width)]
            if self.anchor_step == 4:
                anchors = self.anchors.clone()
                anchors[:, :2] = 0
            else:
                anchors = torch.cat(
                    [torch.zeros_like(self.anchors), self.anchors], 1)
            gt = torch.zeros(len(ground_truth[b]), 4)
            for i, anno in enumerate(ground_truth[b]):
                gt[i, 0] = (anno[0] + anno[2] / 2) / self.reduction
                gt[i, 1] = (anno[1] + anno[3] / 2) / self.reduction
                gt[i, 2] = anno[2] / self.reduction
                gt[i, 3] = anno[3] / self.reduction

            # Set confidence mask of matching detections to 0
            iou_gt_pred = bbox_ious(gt, cur_pred_boxes)
            mask = (iou_gt_pred > self.thresh).sum(0) >= 1
            conf_mask[b][mask.view_as(conf_mask[b])] = 0

            # Find best anchor for each ground truth
            gt_wh = gt.clone()
            gt_wh[:, :2] = 0
            iou_gt_anchors = bbox_ious(gt_wh, anchors)
            _, best_anchors = iou_gt_anchors.max(1)

            # Set masks and target values for each ground truth
            for i, anno in enumerate(ground_truth[b]):
                gi = min(width - 1, max(0, int(gt[i, 0])))
                gj = min(height - 1, max(0, int(gt[i, 1])))
                best_n = best_anchors[i]
                iou = iou_gt_pred[i][best_n * height * width + gj * width + gi]
                coord_mask[b][best_n][0][gj * width + gi] = 1
                cls_mask[b][best_n][gj * width + gi] = 1
                conf_mask[b][best_n][gj * width + gi] = self.object_scale
                tcoord[b][best_n][0][gj * width + gi] = gt[i, 0] - gi
                tcoord[b][best_n][1][gj * width + gi] = gt[i, 1] - gj
                tcoord[b][best_n][2][gj * width + gi] = math.log(
                    max(gt[i, 2], 1.0) / self.anchors[best_n, 0])
                tcoord[b][best_n][3][gj * width + gi] = math.log(
                    max(gt[i, 3], 1.0) / self.anchors[best_n, 1])
                tconf[b][best_n][gj * width + gi] = iou
                tcls[b][best_n][gj * width + gi] = int(anno[4])

        return coord_mask, conf_mask, cls_mask, tcoord, tconf, tcls
Example #33
        img_paths, mask_paths = get_path_pairs(folder, split_f)
    elif split == 'test':
        split_f = os.path.join(folder, 'test.txt')
        img_paths, mask_paths = get_path_pairs(folder, split_f)
    else:
        split_f = os.path.join(folder, 'all.txt')
        img_paths, mask_paths = get_path_pairs(folder, split_f)

    return img_paths, mask_paths


trainset = CocostuffSegmentation(split='train', mode='train')

print(len(trainset.images))
nclass = trainset.NUM_CLASS
tvect = torch.zeros(nclass)
for index in range(len(trainset.images)):
    print(index)
    img, mask = trainset[index]
    hist = torch.histc(torch.tensor(np.array(mask)).float(), bins=nclass, min=0, max=nclass - 1)
    tvect = tvect+hist

norm_tvect = tvect/torch.sum(tvect)
print(norm_tvect)

# nclass = trainset.NUM_CLASS
# tvect = torch.zeros(nclass)
# all = torch.zeros(1)
# norm_tvect = torch.zeros(nclass)
# for index in range(len(trainset.images)):
#     print(index)
Example #34
 def get_h0(self, batchsize: int) -> Dict[str, torch.Tensor]:
     shape = (self.num_lstm_layer, batchsize, self.hid_dim)
     hid = {"h0": torch.zeros(*shape), "c0": torch.zeros(*shape)}
     return hid
Example #35
def train(args,
          gen_net: nn.Module,
          dis_net: nn.Module,
          gen_optimizer,
          dis_optimizer,
          gen_avg_param,
          train_loader,
          epoch,
          writer_dict,
          schedulers=None):
    writer = writer_dict['writer']
    gen_step = 0

    # train mode
    gen_net = gen_net.train()
    dis_net = dis_net.train()

    dis_params_flatten = parameters_to_vector(dis_net.parameters())
    gen_params_flatten = parameters_to_vector(gen_net.parameters())
    bce_loss = nn.BCEWithLogitsLoss()

    if args.optimizer == 'sLead_Adam':
        # just to fill up the grad buffers
        imgs = next(iter(train_loader))[0]
        z = torch.cuda.FloatTensor(
            np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))
        fake_imgs = gen_net(z)
        fake_validity = dis_net(fake_imgs)
        d_loss = torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))
        g_loss = -torch.mean(fake_validity)
        (0.0 * d_loss).backward(create_graph=True)
        (0.0 * g_loss).backward(create_graph=True)

    for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):
        global_steps = writer_dict['train_global_steps']

        # Adversarial ground truths
        real_imgs = imgs.type(torch.cuda.FloatTensor)

        # Sample noise as generator input
        z = torch.cuda.FloatTensor(
            np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))

        # ---------------------
        #  Train Discriminator
        # ---------------------
        real_validity = dis_net(real_imgs)
        fake_imgs = gen_net(z)
        assert fake_imgs.size() == real_imgs.size()
        fake_validity = dis_net(fake_imgs)

        # cal loss
        if args.loss_type == 'hinge':
            d_loss = torch.mean(
                nn.ReLU(inplace=True)(1.0 - real_validity)) + torch.mean(
                    nn.ReLU(inplace=True)(1 + fake_validity))
        elif args.loss_type == 'bce':
            fake_labels = torch.zeros(imgs.shape[0]).cuda()
            real_labels = torch.ones(imgs.shape[0]).cuda()
            real_loss = bce_loss(real_validity.squeeze(), real_labels)
            fake_loss = bce_loss(fake_validity.squeeze(), fake_labels)
            d_loss = real_loss + fake_loss

        if args.optimizer == 'Adam':
            dis_optimizer.zero_grad()
            d_loss.backward()
            dis_optimizer.step()
        elif args.optimizer == 'sLead_Adam':
            # if global_steps % args.n_critic == 0:
            gradsD = torch.autograd.grad(outputs=d_loss,
                                         inputs=(dis_net.parameters()),
                                         create_graph=True)
            for p, g in zip(dis_net.parameters(), gradsD):
                p.grad = g
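            # "+ 0.0" below creates a fresh tensor, so the snapshot is not aliased to the live parameters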
            gen_params_flatten_prev = gen_params_flatten + 0.0
            gen_params_flatten = parameters_to_vector(
                gen_net.parameters()) + 0.0
            grad_gen_params_flatten = parameters_grad_to_vector(
                gen_net.parameters())
            delta_gen_params_flatten = gen_params_flatten - gen_params_flatten_prev
            vjp_dis = torch.autograd.grad(
                grad_gen_params_flatten,
                dis_net.parameters(),
                grad_outputs=delta_gen_params_flatten,
                retain_graph=True)
            dis_optimizer.step(vjps=vjp_dis)
        # else:
        #     # do regular adam
        #     dis_optimizer.zero_grad()
        #     d_loss.backward()
        #     dis_optimizer.step()

        writer.add_scalar('d_loss', d_loss.item(), global_steps)

        # -----------------
        #  Train Generator
        # -----------------
        if global_steps % args.n_critic == 0:
            # cal loss
            gen_z = torch.cuda.FloatTensor(
                np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))
            gen_imgs = gen_net(gen_z)
            fake_validity = dis_net(gen_imgs)
            if args.loss_type == 'hinge':
                g_loss = -torch.mean(fake_validity)
            elif args.loss_type == 'bce':
                real_labels = torch.ones(args.gen_batch_size).cuda()
                g_loss = bce_loss(fake_validity.squeeze(), real_labels)

            if args.optimizer == 'Adam':
                gen_optimizer.zero_grad()
                g_loss.backward()
                gen_optimizer.step()

            elif args.optimizer == 'sLead_Adam':
                gradsG = torch.autograd.grad(outputs=g_loss,
                                             inputs=(gen_net.parameters()),
                                             create_graph=True)
                for p, g in zip(gen_net.parameters(), gradsG):
                    p.grad = g

                dis_params_flatten_prev = dis_params_flatten + 0.0
                dis_params_flatten = parameters_to_vector(
                    dis_net.parameters()) + 0.0
                grad_dis_params_flatten = parameters_grad_to_vector(
                    dis_net.parameters())
                delta_dis_params_flatten = dis_params_flatten - dis_params_flatten_prev
                vjp_gen = torch.autograd.grad(
                    grad_dis_params_flatten,
                    gen_net.parameters(),
                    grad_outputs=delta_dis_params_flatten)

                gen_optimizer.step(vjps=vjp_gen)

            # adjust learning rate
            if schedulers:
                gen_scheduler, dis_scheduler = schedulers
                g_lr = gen_scheduler.step(global_steps)
                d_lr = dis_scheduler.step(global_steps)
                writer.add_scalar('LR/g_lr', g_lr, global_steps)
                writer.add_scalar('LR/d_lr', d_lr, global_steps)

            # moving average weight
            for p, avg_p in zip(gen_net.parameters(), gen_avg_param):
                avg_p.mul_(0.999).add_(0.001, p.data)

            writer.add_scalar('g_loss', g_loss.item(), global_steps)
            gen_step += 1

        # verbose
        if gen_step and iter_idx % args.print_freq == 0:
            tqdm.write(
                "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]" %
                (epoch, args.max_epoch, iter_idx % len(train_loader),
                 len(train_loader), d_loss.item(), g_loss.item()))

        writer_dict['train_global_steps'] = global_steps + 1
Example #36
def inputTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li in range(len(line)):
        letter = line[li]
        tensor[li][0][all_letters.find(letter)] = 1
    return tensor
Example #37
def categoryTensor(category):
    li = all_categories.index(category)
    tensor = torch.zeros(1, n_categories)
    tensor[0][li] = 1
    return tensor
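
A quick check of the two helpers above, assuming the usual char-RNN globals; the vocabularies here are stand-ins:

import string
import torch

all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
all_categories = ['English', 'French']
n_categories = len(all_categories)

print(inputTensor('abc').size())  # torch.Size([3, 1, 57])
print(categoryTensor('French'))   # tensor([[0., 1.]])
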
Example #38
def one_hot(dims, value, indx):
    vec = torch.zeros(dims)
    vec[indx] = value
    return vec
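
Usage is straightforward, for example:

vec = one_hot(dims=5, value=1.0, indx=2)
# vec is tensor([0., 0., 1., 0., 0.])
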
Example #39
db_pre_trained = sample.decision_boundary()


# Training GAN
# Optimizers
D_optimizer = torch.optim.SGD(D.parameters(), lr=learning_rate)
G_optimizer = torch.optim.SGD(G.parameters(), lr=learning_rate)

D_losses = []
G_losses = []
for epoch in range(num_epochs):
    # Generate samples
    x_ = data.sample(batch_size)
    x_ = Variable(torch.FloatTensor(np.reshape(x_, [batch_size, input_dim])))
    y_real_ = Variable(torch.ones([batch_size, output_dim]))
    y_fake_ = Variable(torch.zeros([batch_size, output_dim]))

    # Train discriminator with real data
    D_real_decision = D(x_)
    D_real_loss = criterion(D_real_decision, y_real_)

    # Train discriminator with fake data
    z_ = gen.sample(batch_size)
    z_ = Variable(torch.FloatTensor(np.reshape(z_, [batch_size, input_dim])))
    z_ = G(z_)

    D_fake_decision = D(z_)
    D_fake_loss = criterion(D_fake_decision, y_fake_)

    # Back propagation
    D_loss = D_real_loss + D_fake_loss
Example #40
# -*- coding: UTF-8 -*-

import torch
from torch.autograd import Variable
import torch.nn.functional as F    # activation functions live here
import matplotlib.pyplot as plt


# fake data
n_data = torch.ones(100, 2)         # base shape of the data
x0 = torch.normal(2*n_data, 1)      # class 0 x data (tensor), shape=(100, 2)
y0 = torch.zeros(100)               # class 0 y data (tensor), shape=(100,)
x1 = torch.normal(-2*n_data, 1)     # class 1 x data (tensor), shape=(100, 2)
y1 = torch.ones(100)                # class 1 y data (tensor), shape=(100,)
#print('n_data:\n', n_data)


# note: the x, y data must have exactly the form below (torch.cat concatenates the data)
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # FloatTensor = 32-bit floating
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # LongTensor = 64-bit integer

# print('x.data.numpy():\n', x.data.numpy())
# print('x.data.numpy()[:, 0]:\n', x.data.numpy()[:, 0])
# print('x.data.numpy()[:, 1]:\n', x.data.numpy()[:, 1])



# wrap these data tensors in Variable
# x, y = torch.autograd.Variable(x), Variable(y)
#x, y = Variable(x), Variable(y)
Example #41
    def _forward(self, batch, task, teacher=None, teacher_lm=None):
        # Encode input features
        if self.input_type == 'speech':
            if self.mtl_per_batch:
                eout_dict = self.encode(batch['xs'], task)
            else:
                eout_dict = self.encode(batch['xs'], 'all')
        else:
            eout_dict = self.encode(batch['ys_sub1'])

        observation = {}
        loss = torch.zeros((1,), dtype=torch.float32)
        if self.device_id >= 0:
            loss = loss.cuda(self.device_id)

        # for the forward decoder in the main task
        if (self.fwd_weight > 0 or (self.bwd_weight == 0 and self.ctc_weight > 0) or self.mbr_training) and task in ['all', 'ys', 'ys.ctc', 'ys.mbr']:
            teacher_logits = None
            if teacher is not None:
                teacher.eval()
                teacher_logits = teacher.generate_logits(batch)
                # TODO(hirofumi): label smoothing, scheduled sampling, dropout?
            elif teacher_lm is not None:
                teacher_lm.eval()
                teacher_logits = self.generate_lm_logits(batch['ys'], lm=teacher_lm)

            loss_fwd, obs_fwd = self.dec_fwd(eout_dict['ys']['xs'], eout_dict['ys']['xlens'],
                                             batch['ys'], task,
                                             teacher_logits, self.recog_params, self.idx2token)
            loss += loss_fwd
            if isinstance(self.dec_fwd, RNNTransducer):
                observation['loss.transducer'] = obs_fwd['loss_transducer']
            else:
                observation['acc.att'] = obs_fwd['acc_att']
                observation['ppl.att'] = obs_fwd['ppl_att']
                observation['loss.att'] = obs_fwd['loss_att']
                observation['loss.mbr'] = obs_fwd['loss_mbr']
                if 'loss_quantity' not in obs_fwd.keys():
                    obs_fwd['loss_quantity'] = None
                observation['loss.quantity'] = obs_fwd['loss_quantity']

                if 'loss_latency' not in obs_fwd.keys():
                    obs_fwd['loss_latency'] = None
                observation['loss.latency'] = obs_fwd['loss_latency']

            observation['loss.ctc'] = obs_fwd['loss_ctc']

        # for the backward decoder in the main task
        if self.bwd_weight > 0 and task in ['all', 'ys.bwd']:
            loss_bwd, obs_bwd = self.dec_bwd(eout_dict['ys']['xs'], eout_dict['ys']['xlens'], batch['ys'], task)
            loss += loss_bwd
            observation['loss.att-bwd'] = obs_bwd['loss_att']
            observation['acc.att-bwd'] = obs_bwd['acc_att']
            observation['ppl.att-bwd'] = obs_bwd['ppl_att']
            observation['loss.ctc-bwd'] = obs_bwd['loss_ctc']

        # only fwd for sub tasks
        for sub in ['sub1', 'sub2']:
            # for the forward decoder in the sub tasks
            if (getattr(self, 'fwd_weight_' + sub) > 0 or getattr(self, 'ctc_weight_' + sub) > 0) and task in ['all', 'ys_' + sub, 'ys_' + sub + '.ctc']:
                loss_sub, obs_fwd_sub = getattr(self, 'dec_fwd_' + sub)(
                    eout_dict['ys_' + sub]['xs'], eout_dict['ys_' + sub]['xlens'],
                    batch['ys_' + sub], task)
                loss += loss_sub
                if isinstance(getattr(self, 'dec_fwd_' + sub), RNNTransducer):
                    observation['loss.transducer-' + sub] = obs_fwd_sub['loss_transducer']
                else:
                    observation['loss.att-' + sub] = obs_fwd_sub['loss_att']
                    observation['acc.att-' + sub] = obs_fwd_sub['acc_att']
                    observation['ppl.att-' + sub] = obs_fwd_sub['ppl_att']
                observation['loss.ctc-' + sub] = obs_fwd_sub['loss_ctc']

        return loss, observation
Example #42
    def forward(self, S0, V0, rate, BS_vol, indices, z, z1, MC_samples):
        S_old = torch.repeat_interleave(S0, MC_samples, dim=0)
        # Uncomment when using the BS control variate:
        # BS_old = torch.repeat_interleave(S0, MC_samples, dim=0)
        V_old = torch.repeat_interleave(V0, MC_samples, dim=0)  
        K_call = self.strikes_call
        K_put = self.strikes_put
        zeros = torch.repeat_interleave(torch.zeros(1,1), MC_samples, dim=0)
        average_SS = torch.Tensor()
        average_SS1 = torch.Tensor()
        average_SS_OTM = torch.Tensor()
        average_SS1_ITM = torch.Tensor()
        # use fixed step size
        h = self.timegrid[1]-self.timegrid[0]
        n_steps = len(self.timegrid)-1
        # set maturity counter
        countmat=-1
        
        # Solve for S_t, V_t (Euler)   
        irand = [randrange(0,n_steps+1,1) for k in range(48)]
        for i in range(1, len(self.timegrid)):
            dW = (torch.sqrt(h) * z[:,i-1]).reshape(MC_samples,1)
            dW1 = (torch.sqrt(h) * z1[:,i-1]).reshape(MC_samples,1)
            current_time = torch.ones(1, 1)*self.timegrid[i-1]
            input_time  = torch.repeat_interleave(current_time, MC_samples,dim=0)
            inputNN = torch.cat([input_time.reshape(MC_samples,1),S_old, V_old],1)
            inputNNvol = torch.cat([input_time.reshape(MC_samples,1),V_old],1)
       
            if int(i) in irand:
                S_new = S_old + S_old*rate*h + self.diffusion(inputNN)*dW
                V_new = V_old + self.driftV(inputNNvol)*h + self.diffusionV(inputNNvol)*dW + self.diffusionV1(inputNNvol)*dW1
            else:
                S_new = S_old + S_old*rate*h + self.diffusion(inputNN).detach()*dW
                V_new = V_old + self.driftV(inputNNvol).detach()*h + self.diffusionV(inputNNvol).detach()*dW + self.diffusionV1(inputNNvol).detach()*dW1
            S_new = torch.cat([S_new, zeros], 1)
            S_new = torch.max(S_new, 1, keepdim=True)[0]
            S_old = S_new
            V_old = V_new
        
            # If particular timestep is a maturity for Vanilla option
            
            if int(i) in indices:
                countmat+=1
                Z_new=torch.Tensor()
                Z_newP_ITM = torch.Tensor()
                Z_newP_OTM = torch.Tensor()
                Z_new2=torch.Tensor()
                countstrikecall=-1
                
            # Evaluate put (OTM) and call (OTM) option prices 
                
                for strike in K_call:
                    countstrikecall+=1
                    strike = torch.ones(1,1)*strike
                    strike_put = torch.ones(1,1)*K_put[countstrikecall]
                    K_extended = torch.repeat_interleave(strike, MC_samples, dim=0).float()
                    K_extended_put = torch.repeat_interleave(strike_put, MC_samples, dim=0).float()

                    # Since we use the same number of maturities for vanilla calls and puts: 
                    
                    price = torch.cat([S_old-K_extended,zeros],1) #call OTM
                    price_OTM = torch.cat([K_extended_put-S_old,zeros],1) #put OTM
                    
                    # Discounting assumes we use 2-year time horizon 
                    
                    price = torch.max(price, 1, keepdim=True)[0]*torch.exp(-rate*1*i/n_steps)
                    price_OTM = torch.max(price_OTM, 1, keepdim=True)[0]*torch.exp(-rate*1*i/n_steps)             
                    Z_new= torch.cat([Z_new,price],1)  
                    Z_newP_OTM= torch.cat([Z_newP_OTM,price_OTM],1)
                    price2 = price_OTM+S0-strike_put*torch.exp(-rate*1*i/n_steps)  
                    price_ITM = price-S0+strike*torch.exp(-rate*1*i/n_steps)  
                    Z_new2= torch.cat([Z_new2,price2],1) 
                    Z_newP_ITM= torch.cat([Z_newP_ITM,price_ITM],1)    
                  
               # MC step:
            
                avg_S = Z_new.mean(dim=0, keepdim = True).T
                avg_SSP_OTM = Z_newP_OTM.mean(dim=0,keepdim=True).T  
                average_SS = torch.cat([average_SS,avg_S.T],0) #call OTM
                average_SS_OTM = torch.cat([average_SS_OTM,avg_SSP_OTM.T],0) #put OTM   
                avg_S2 = Z_new2.mean(dim=0, keepdim = True).T
                avg_SSP_ITM = torch.cat([p.mean().view(1,1) for p in Z_newP_ITM.T], 0)
                average_SS1_ITM = torch.cat([average_SS1_ITM,avg_SSP_ITM.T],0) 
                average_SS1 = torch.cat([average_SS1,avg_S2.T],0) 
                    
            # Return model vanilla option prices    
                
        return torch.cat([average_SS,average_SS_OTM,average_SS1,average_SS1_ITM  ],0)  
Example #43
 def _get_letter_tensor(letter):
     letter_tensor = torch.zeros(1, loader.n_letters)
     letter_tensor[0][loader.all_letters.find(letter)] = 1
     return letter_tensor.to(config.device)
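This helper builds a 1 x n_letters one-hot row by hand; torch.nn.functional.one_hot expresses the same thing directly (a sketch assuming the same loader and config globals):
import torch
import torch.nn.functional as F

def get_letter_tensor(letter):
    idx = torch.tensor([loader.all_letters.find(letter)])
    return F.one_hot(idx, num_classes=loader.n_letters).float().to(config.device)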
Example #44
if __name__ == '__main__':
    if args.train:
        # training loop
        for eps in range(max_episodes):
            if ENV == 'Reacher':
                state = env.reset(SCREEN_SHOT)
            else:
                state = env.reset()
            last_action = env.action_space.sample()
            episode_state = []
            episode_action = []
            episode_last_action = []
            episode_reward = []
            episode_next_state = []
            episode_done = []
            hidden_out = (torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda(), \
                torch.zeros([1, 1, hidden_dim], dtype=torch.float).cuda())  # initialize hidden state for lstm, (hidden, cell), each is (layer, batch, dim)

            for step in range(max_steps):
                hidden_in = hidden_out
                action, hidden_out = td3_trainer.policy_net.get_action(
                    state,
                    last_action,
                    hidden_in,
                    noise_scale=explore_noise_scale)
                if ENV == 'Reacher':
                    next_state, reward, done, _ = env.step(
                        action, SPARSE_REWARD, SCREEN_SHOT)
                else:
                    next_state, reward, done, _ = env.step(action)
                    # env.render()
Example #45
    def __init__(self, envs, acmodel, num_frames_per_proc, discount, lr, gae_lambda, entropy_coef,
                 value_loss_coef, max_grad_norm, recurrence, preprocess_obss, reshape_reward):
        """
        Initializes a `BaseAlgo` instance.

        Parameters
        ----------
        envs : list
            a list of environments that will be run in parallel
        acmodel : torch.nn.Module
            the model
        num_frames_per_proc : int
            the number of frames collected by every process for an update
        discount : float
            the discount for future rewards
        lr : float
            the learning rate for optimizers
        gae_lambda : float
            the lambda coefficient in the GAE formula
            ([Schulman et al., 2015](https://arxiv.org/abs/1506.02438))
        entropy_coef : float
            the weight of the entropy cost in the final objective
        value_loss_coef : float
            the weight of the value loss in the final objective
        max_grad_norm : float
            gradient will be clipped to be at most this value
        recurrence : int
            the number of steps the gradient is propagated back in time
        preprocess_obss : function
            a function that takes observations returned by the environment
            and converts them into the format that the model can handle
        reshape_reward : function
            a function that shapes the reward, takes an
            (observation, action, reward, done) tuple as an input
        """

        # Store parameters

        self.env = ParallelEnv(envs)
        self.acmodel = acmodel
        self.acmodel.train()
        self.num_frames_per_proc = num_frames_per_proc
        self.discount = discount
        self.lr = lr
        self.gae_lambda = gae_lambda
        self.entropy_coef = entropy_coef
        self.value_loss_coef = value_loss_coef
        self.max_grad_norm = max_grad_norm
        self.recurrence = recurrence
        self.preprocess_obss = preprocess_obss or default_preprocess_obss
        self.reshape_reward = reshape_reward

        # Store helper values

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.num_procs = len(envs)
        self.num_frames = self.num_frames_per_proc * self.num_procs

        # Control parameters

        assert self.num_frames_per_proc % self.recurrence == 0

        # Initialize experience values

        shape = (self.num_frames_per_proc, self.num_procs)

        self.obs = self.env.reset()
        self.obss = [None]*(shape[0])
        self.mask = torch.ones(shape[1], device=self.device)
        self.masks = torch.zeros(*shape, device=self.device)
        self.actions = torch.zeros(*shape, device=self.device, dtype=torch.int)
        self.values = torch.zeros(*shape, device=self.device)
        self.rewards = torch.zeros(*shape, device=self.device)
        self.advantages = torch.zeros(*shape, device=self.device)
        self.log_probs = torch.zeros(*shape, device=self.device)

        # Initialize log values

        self.log_episode_return = torch.zeros(self.num_procs, device=self.device)
        self.log_episode_reshaped_return = torch.zeros(self.num_procs, device=self.device)
        self.log_episode_num_frames = torch.zeros(self.num_procs, device=self.device)

        self.log_done_counter = 0
        self.log_return = [0] * self.num_procs
        self.log_reshaped_return = [0] * self.num_procs
        self.log_num_frames = [0] * self.num_procs
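The gae_lambda coefficient documented above enters the GAE recursion of Schulman et al. (2015). A minimal sketch of that computation, with illustrative tensor names rather than this class's fields (masks[t] is taken to be 0 where the episode ended at step t):
import torch

def gae_advantages(rewards, values, masks, last_value,
                   discount=0.99, gae_lambda=0.95):
    # rewards, values, masks: (num_frames_per_proc, num_procs)
    T = rewards.shape[0]
    advantages = torch.zeros_like(rewards)
    next_advantage = torch.zeros_like(last_value)
    next_value = last_value
    for t in reversed(range(T)):
        delta = rewards[t] + discount * next_value * masks[t] - values[t]
        next_advantage = delta + discount * gae_lambda * next_advantage * masks[t]
        advantages[t] = next_advantage
        next_value = values[t]
    return advantages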
Example #46
def _get_category_tensor(category):
    category_tensor = torch.zeros(1, loader.n_categories)
    category_tensor[0][loader.all_categories.index(category)] = 1
    return category_tensor.to(config.device)
Example #47
def inverse(self, inputs):
    return inputs[:, self.perm], torch.zeros(
        inputs.size(0), 1, device=inputs.device)
Example #48
    def forward(self, x, time_steps):

        batch_size = x.size(0)

        output_linear = self.input_layer(x)

        output_linear = self.dropout_layer(output_linear)
        out = output_linear
        for l in range(self.layer_num):
            output_linear = out
            h = torch.zeros(batch_size, 256).cuda()
            c = torch.zeros(batch_size, 256).cuda()

            h_steps = torch.zeros(batch_size, 256, time_steps).cuda()
            c_steps = torch.zeros(batch_size, 256, time_steps).cuda()
            out1 = torch.zeros(batch_size,
                               np.shape(output_linear)[1], 256).cuda()

            counter = 0
            for i in range(np.shape(output_linear)[1]):

                input_lstmcell = output_linear[:, i, :]

                h, c = self.lstm_cell(input_lstmcell, (h, c))

                h_steps[:, :, (i % time_steps)] = h
                c_steps[:, :, (i % time_steps)] = c
                counter += 1
                if counter == time_steps:

                    h = torch.squeeze(self.time_w_layer(h_steps))

                    c = torch.squeeze(self.time_w_layer(c_steps))
                    h_steps = torch.zeros(batch_size, 256, time_steps).cuda()
                    c_steps = torch.zeros(batch_size, 256, time_steps).cuda()

                    counter = 0

                out1[:, i, :] = h
            if self.biFlag:
                h = torch.zeros(batch_size, 256).cuda()
                c = torch.zeros(batch_size, 256).cuda()

                h_steps = torch.zeros(batch_size, 256, time_steps).cuda()
                c_steps = torch.zeros(batch_size, 256, time_steps).cuda()
                out2 = torch.zeros(batch_size,
                                   np.shape(output_linear)[1], 256).cuda()

                counter = 0
                for i in range(np.shape(output_linear)[1]):

                    input_lstmcell = output_linear[:, (
                        np.shape(output_linear)[1] - i - 1), :]

                    h, c = self.lstm_cell2(input_lstmcell, (h, c))

                    h_steps[:, :, (i % time_steps)] = h
                    c_steps[:, :, (i % time_steps)] = c
                    counter += 1
                    if counter == time_steps:

                        h = torch.squeeze(self.time_w_layer2(h_steps))

                        c = torch.squeeze(self.time_w_layer2(c_steps))
                        h_steps = torch.zeros(batch_size, 256,
                                              time_steps).cuda()
                        c_steps = torch.zeros(batch_size, 256,
                                              time_steps).cuda()

                        counter = 0

                    out2[:, i, :] = h
            if self.biFlag == 0:
                out = out1
            elif self.biFlag == 1:
                out = torch.cat((out1, out2), 2)

        out = self.dropout_layer(out)
        out = self.branch_layer(out)

        return out
Example #49
    def __load_next__(self):
        data = self.get_data()

        max_query_len, max_doc_len, max_cand_len, max_word_len = 0, 0, 0, 0
        ans = []
        clozes = []
        word_types = {}
        for i, instance in enumerate(data):
            doc, query, doc_char, query_char, cand, ans_, cloze_ = instance
            max_doc_len = max(max_doc_len, len(doc))
            max_query_len = max(max_query_len, len(query))
            max_cand_len = max(max_cand_len, len(cand))
            ans.append(ans_[0])
            clozes.append(cloze_[0])

            # Record every occurrence of each character-level word type;
            # (1, i, index) marks a document position, (0, i, index) a query
            # position. Appending must happen outside the membership check,
            # otherwise repeated words silently map to type index 0.
            for index, word in enumerate(doc_char):
                max_word_len = max(max_word_len, len(word))
                if tuple(word) not in word_types:
                    word_types[tuple(word)] = []
                word_types[tuple(word)].append((1, i, index))
            for index, word in enumerate(query_char):
                max_word_len = max(max_word_len, len(word))
                if tuple(word) not in word_types:
                    word_types[tuple(word)] = []
                word_types[tuple(word)].append((0, i, index))

        docs = torch.zeros(self.batch_size, max_doc_len, dtype=torch.long)
        queries = torch.zeros(self.batch_size, max_query_len, dtype=torch.long)
        cands = torch.zeros(self.batch_size, max_doc_len, max_cand_len, dtype=torch.long)
        docs_mask = torch.zeros(self.batch_size, max_doc_len, dtype=torch.long)
        queries_mask = torch.zeros(self.batch_size, max_query_len, dtype=torch.long)
        cand_mask = torch.zeros(self.batch_size, max_doc_len, dtype=torch.long)
        qe_comm = torch.zeros(self.batch_size, max_doc_len, dtype=torch.long)
        answers = torch.tensor(ans, dtype=torch.long)
        clozes = torch.tensor(clozes, dtype=torch.long)

        for i, instance in enumerate(data):
            doc, query, doc_char, query_char, cand, ans_, cloze_ = instance
            docs[i, :len(doc)] = torch.tensor(doc)
            queries[i, :len(query)] = torch.tensor(query)
            docs_mask[i, :len(doc)] = 1
            queries_mask[i, :len(query)] = 1

            for k, index in enumerate(doc):
                # Mark document positions that match a candidate answer
                for j, index_c in enumerate(cand):
                    if index == index_c:
                        cands[i][k][j] = 1
                        cand_mask[i][k] = 1

                # qe_comm: document tokens that also appear in the query
                for y in query:
                    if y == index:
                        qe_comm[i][k] = 1
                        break

            # Convert the answer token id into its index in the candidate list
            for x, cl in enumerate(cand):
                if cl == answers[i]:
                    answers[i] = x
                    break

        doc_char = torch.zeros(self.batch_size, max_doc_len, dtype=torch.long)
        query_char = torch.zeros(self.batch_size, max_query_len, dtype=torch.long)
        char_type = torch.zeros(len(word_types), max_word_len, dtype=torch.long)
        char_type_mask = torch.zeros(len(word_types), max_word_len, dtype=torch.long)

        index = 0
        for word, word_list in word_types.items():
            char_type[index, :len(word)] = torch.tensor(list(word))
            char_type_mask[index, :len(word)] = 1
            for (i, j, k) in word_list:
                if i == 1:
                    doc_char[j, k] = index
                else:
                    query_char[j, k] = index
            index += 1

        return docs, doc_char, docs_mask, queries, query_char, queries_mask, \
            char_type, char_type_mask, answers, clozes, cands, cand_mask, qe_comm
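The padding and mask loops above can also be written with pad_sequence; a toy equivalent for the docs tensors (assumes token id 0 is reserved for padding):
import torch
from torch.nn.utils.rnn import pad_sequence

docs_list = [torch.tensor([4, 9, 7]), torch.tensor([5, 2])]
docs = pad_sequence(docs_list, batch_first=True)   # (batch, max_doc_len), zero-padded
docs_mask = (docs != 0).long()                     # 1 on real tokens, 0 on padding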
Example #50
def run_tz(
        agent, optimizer, task, p, n_examples, supervised,
        fix_cond=None, fix_penalty=None, slience_recall_time=None,
        scramble=False, learning=True, get_cache=True, get_data=False,
        rm_mid_targ=False, noRL=False,
):
    # sample data
    X, Y = task.sample(n_examples, to_torch=True)
    # logger
    log_return, log_pi_ent = 0, 0
    log_loss_sup, log_loss_actor, log_loss_critic = 0, 0, 0
    log_cond = np.zeros(n_examples,)
    log_dist_a = [[] for _ in range(n_examples)]
    log_targ_a = [[] for _ in range(n_examples)]
    log_cache = [None] * n_examples

    for i in range(n_examples):
        # pick a condition
        cond_i = pick_condition(p, rm_only=supervised, fix_cond=fix_cond)
        # get the example for this trial
        X_i, Y_i = X[i], Y[i]
        if scramble:
            X_i, Y_i = time_scramble(X_i, Y_i, task)
        # get time info
        T_total = np.shape(X_i)[0]
        T_part, pad_len, event_ends, event_bonds = task.get_time_param(T_total)
        enc_times = get_enc_times(p.net.enc_size, task.n_param, pad_len)

        # attach cond flag
        cond_flag = torch.zeros(T_total, 1)
        cond_indicator = -1 if cond_i == 'NM' else 1
        # if attach_cond == 1 then normal, if -1 then reversed
        cond_flag[-T_part:] = cond_indicator * p.env.attach_cond
        if p.env.attach_cond != 0:
            X_i = torch.cat((X_i, cond_flag), 1)

        # prealloc
        loss_sup = 0
        probs, rewards, values, ents = [], [], [], []
        log_cache_i = [None] * T_total

        # init model wm and em
        penalty_val_p1, penalty_rep_p1 = sample_penalty(p, fix_penalty, True)
        penalty_val_p2, penalty_rep_p2 = sample_penalty(p, fix_penalty)

        hc_t = agent.get_init_states()
        agent.retrieval_off()
        agent.encoding_off()

        for t in range(T_total):
            t_relative = t % T_part
            in_2nd_part = t >= T_part

            if not in_2nd_part:
                penalty_val, penalty_rep = penalty_val_p1, penalty_rep_p1
            else:
                penalty_val, penalty_rep = penalty_val_p2, penalty_rep_p2
                if rm_mid_targ and t_relative == 0:
                    # save memories at the start of p2 and pop the midway target
                    em_copy = deepcopy(agent.em.vals)
                    mem_rmd = agent.em.remove_memory(-2)

            # testing condition
            if slience_recall_time is not None:
                slience_recall(t_relative, in_2nd_part,
                               slience_recall_time, agent)
            # whether to encode
            if not supervised:
                set_encoding_flag(t, enc_times, cond_i, agent)

            # forward
            x_it = append_info(X_i[t], [penalty_rep])
            pi_a_t, v_t, hc_t, cache_t = agent.forward(
                x_it.view(1, 1, -1), hc_t)
            # after delay period, compute loss
            a_t, p_a_t = agent.pick_action(pi_a_t)
            # get reward
            r_t = get_reward(a_t, Y_i[t], penalty_val)

            # cache the results for later RL loss computation
            rewards.append(r_t)
            values.append(v_t)
            probs.append(p_a_t)
            ents.append(entropy(pi_a_t))
            # compute supervised loss
            yhat_t = torch.squeeze(pi_a_t)[:-1]
            loss_sup += F.mse_loss(yhat_t, Y_i[t])

            if not supervised:
                # update WM/EM based on the condition
                hc_t = cond_manipulation(cond_i, t, event_ends[0], hc_t, agent)

            # cache results for later analysis
            if get_cache:
                log_cache_i[t] = cache_t
            # for behavioral stuff, only record prediction time steps
            if t % T_part >= pad_len:
                log_dist_a[i].append(to_sqnp(pi_a_t))
                log_targ_a[i].append(to_sqnp(Y_i[t]))
            # at the end, recover the removed midway memory
            if t == T_total - 1 and rm_mid_targ:
                agent.em.vals = em_copy

        # compute RL loss
        returns = compute_returns(rewards, normalize=p.env.normalize_return)
        loss_actor, loss_critic = compute_a2c_loss(probs, values, returns)
        pi_ent = torch.stack(ents).sum()
        # gradient update; pick the loss according to the training mode
        if learning:
            if noRL:
                loss = loss_sup
            else:
                if supervised:
                    loss = loss_sup
                else:
                    loss = loss_actor + loss_critic - pi_ent * p.net.eta
            optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm_(agent.parameters(), 1)
            optimizer.step()

        # after every event sequence, log stuff
        log_loss_sup += loss_sup.item() / n_examples  # .item() logs a float instead of retaining the graph
        log_pi_ent += pi_ent.item() / n_examples
        log_return += torch.stack(rewards).sum().item() / n_examples
        log_loss_actor += loss_actor.item() / n_examples
        log_loss_critic += loss_critic.item() / n_examples
        log_cond[i] = TZ_COND_DICT.inverse[cond_i]
        if get_cache:
            log_cache[i] = log_cache_i

    # return cache
    log_dist_a = np.array(log_dist_a)
    log_targ_a = np.array(log_targ_a)
    results = [log_dist_a, log_targ_a, log_cache, log_cond]
    metrics = [log_loss_sup, log_loss_actor, log_loss_critic,
               log_return, log_pi_ent]
    out = [results, metrics]
    if get_data:
        X_array_list = [to_sqnp(X[i]) for i in range(n_examples)]
        Y_array_list = [to_sqnp(Y[i]) for i in range(n_examples)]
        training_data = [X_array_list, Y_array_list]
        out.append(training_data)
    return out
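compute_returns and compute_a2c_loss are helpers defined elsewhere in this repo; a minimal sketch of what such helpers typically compute (an assumed, generic implementation, not the source's):
import torch

def compute_returns(rewards, gamma=1.0, normalize=False):
    # Discounted returns, computed backwards: R_t = r_t + gamma * R_{t+1}
    R, returns = 0.0, []
    for r in reversed(rewards):
        R = r + gamma * R
        returns.insert(0, R)
    returns = torch.stack([torch.as_tensor(v, dtype=torch.float) for v in returns])
    if normalize:
        returns = (returns - returns.mean()) / (returns.std() + 1e-8)
    return returns

def compute_a2c_loss(probs, values, returns):
    # Policy-gradient (actor) loss with a learned value baseline (critic)
    policy_losses, value_losses = [], []
    for p_a, v_t, R_t in zip(probs, values, returns):
        advantage = R_t - v_t.item()        # baseline detached from the graph
        policy_losses.append(-torch.log(p_a) * advantage)
        value_losses.append((R_t - v_t.squeeze()) ** 2)
    return torch.stack(policy_losses).sum(), torch.stack(value_losses).sum()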
Example #51
def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
    super().__init__()
    self.bias = nn.Parameter(torch.zeros(1, channel, 1, 1))
    self.negative_slope = negative_slope
    self.scale = scale
Example #52
def forward(self, inputs):
    return inputs[:, self.perm], torch.zeros(
        inputs.size(0), 1, device=inputs.device)
Example #53
def adj_bias_normalize(adj):
    # Edges (adj > 0) map to 0; non-edges map to a large negative value,
    # so adding the result to attention logits masks out non-neighbours.
    neg_zeros = -9e15 * torch.ones(adj.size()).type_as(adj)
    zeros = torch.zeros(adj.size()).type_as(adj)
    adj = torch.where(adj == 0, neg_zeros, adj)
    adj = torch.where(adj > 0, zeros, adj)
    return adj
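adj_bias_normalize turns an adjacency matrix into an additive attention mask: positions with an edge become 0, positions without one become a large negative number, so a following softmax assigns them near-zero weight. A small usage sketch (the scores are made up):
import torch

adj = torch.tensor([[1., 1., 0.],
                    [1., 1., 1.],
                    [0., 1., 1.]])
scores = torch.randn(3, 3)                    # raw attention logits
attn = torch.softmax(scores + adj_bias_normalize(adj), dim=-1)
# attn[0, 2] and attn[2, 0] are ~0: those node pairs share no edge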
Example #54
0
    def __init__(self):
        super().__init__()

        self.weight = nn.Parameter(torch.zeros(1))
Example #55
def generate_fake_test_from_train_labels(train_seen_label, attribute, seenclasses, unseenclasses, num, per_seen=0.10, \
                                        per_unseen=0.40, per_seen_unseen= 0.50):
    if train_seen_label.min() == 0:
        print("Training data already trimmed and converted")
    else:
        print("original training data received (-1,1)'s ")
        train_seen_label = torch.clamp(train_seen_label,0,1)

    # remove all zero-labeled images while training
    train_seen_label = train_seen_label[(train_seen_label.sum(1) != 0).nonzero().flatten()]
    seen_attributes = attribute[seenclasses]
    unseen_attributes = attribute[unseenclasses]
    seen_percent, unseen_percent, seen_unseen_percent = per_seen , per_unseen, per_seen_unseen

    print("seen={}, unseen={}, seen-unseen={}".format(seen_percent, unseen_percent, seen_unseen_percent))
    print("syn num={}".format(num))
    gzsl = []
    for i in range(0, num):
        new_gzsl_syn_list = []
        seen_unseen_label_pairs = {}
        nbrs = NearestNeighbors(n_neighbors=1, algorithm='auto').fit(unseen_attributes)
        for seen_idx, seen_att in zip(seenclasses,seen_attributes):
            _, indices = nbrs.kneighbors(seen_att[None,:])
            seen_unseen_label_pairs[seen_idx.tolist()] = unseenclasses[indices[0][0]].tolist()

        #ADDING ONLY SEEN LABELS
        idx = torch.randperm(len(train_seen_label))[0:int(len(train_seen_label)*seen_percent)]
        seen_labels = train_seen_label[idx]
        _new_gzsl_syn_list = torch.zeros(seen_labels.shape[0], attribute.shape[0])
        _new_gzsl_syn_list[:,:len(seenclasses)] = seen_labels
        new_gzsl_syn_list.append(_new_gzsl_syn_list)

        #ADDING ONLY UNSEEN LABELS
        idx = torch.randperm(len(train_seen_label))[0:int(len(train_seen_label)*unseen_percent)]
        temp_label = train_seen_label[idx]
        _new_gzsl_syn_list = torch.zeros(temp_label.shape[0], attribute.shape[0])
        for m,lab in enumerate(temp_label):
            new_lab = torch.zeros(attribute.shape[0])
            unseen_lab = lab.nonzero().flatten()
            u = []
            for lab_idx in unseen_lab:
                u.append(seen_unseen_label_pairs[lab_idx.tolist()])
            new_lab[u] = 1
            _new_gzsl_syn_list[m,:] = new_lab
        unseen_labels = _new_gzsl_syn_list
        new_gzsl_syn_list.append(unseen_labels)

        #ADDING BOTH SEEN AND UNSEEN LABELS 50% OF THE SELECTED SEEN LABELS IS MAPPED TO UNSEEN LABELS
        idx = torch.randperm(len(train_seen_label))[0:int(len(train_seen_label)*seen_unseen_percent)]
        temp_label = train_seen_label[idx]
        _new_gzsl_syn_list = torch.zeros(temp_label.shape[0], attribute.shape[0])
        for m,lab in enumerate(temp_label):
            u = []
            new_lab = torch.zeros(attribute.shape[0])
            seen_unseen_lab = lab.nonzero().flatten()
            temp_seen_label = np.random.choice(seen_unseen_lab,int(len(seen_unseen_lab)*0.50))
            u.extend(temp_seen_label)
            rem_seen_label =  np.setxor1d(temp_seen_label,seen_unseen_lab)
            for lab_idx in rem_seen_label:
                u.append(seen_unseen_label_pairs[lab_idx.tolist()])
            new_lab[u]=1
            _new_gzsl_syn_list[m,:] = new_lab
        seen_unseen_labels = _new_gzsl_syn_list
        new_gzsl_syn_list.append(seen_unseen_labels)

        new_gzsl_syn_list = torch.cat(new_gzsl_syn_list)
        gzsl.append(new_gzsl_syn_list)
    
    gzsl = torch.cat(gzsl)
    tmp_list = gzsl.sum(0)
    ## To make sure every unseen label gets covered
    empty_lab = torch.arange(tmp_list.numel())[tmp_list==0]
    min_uc = int(tmp_list[len(seenclasses):][tmp_list[len(seenclasses):]>0].min().item())
    for el in empty_lab:
        idx = torch.randperm(gzsl.size(0))[:min_uc]
        gzsl[idx,el] = 1
    gzsl = gzsl.long()
    print("GZSL TEST LABELS:",gzsl.shape)
    return gzsl
Example #56
decoder.eval()

with torch.no_grad():
    text_data = parse_text('hello, this is just a test').to(device)
    text_data = text_data.unsqueeze(0)
    text_emb = text_embedding(text_data)

    text_pos = (torch.arange(text_data.size(1)) + 1).unsqueeze(0).to(device)
    text_pos_emb = pos_embedding_(text_pos)
    text_mask = (text_pos == 0).unsqueeze(1)
    enc_out, att_heads_enc = encoder(text_emb, text_mask, text_pos_emb)

    mel_pos = torch.arange(1, 512).view(1, 511).to(device)
    mel_pos_emb_ = pos_embedding(mel_pos)
    mel_mask_ = torch.triu(torch.ones(511, 511, dtype=torch.bool), 1).unsqueeze(0).to(device)
    # [B, T, C], [B, T, C], [B, T, 1], [B, T, T_text]
    mel = torch.zeros(1, 511, 80).to(device)
    for pos_idx in tqdm(range(511)):
        mel_pos_emb = mel_pos_emb_[:, :pos_idx + 1]
        mel_mask = mel_mask_[:, :pos_idx + 1, :pos_idx + 1]
        mels_out, mels_out_post, gates_out, att_heads_dec, att_heads = decoder(mel[:, :pos_idx + 1], enc_out,
                                                                               mel_mask, text_mask, mel_pos_emb)

        mel[:, pos_idx] = mels_out_post[:, pos_idx]
        if gates_out[0, -1, 0] > .5:
            mel = mel[:, :pos_idx + 1]
            break
wav = mel_to_wav(mel[0])
torchaudio.save('test.wav', wav.to('cpu'), sample_rate, 32)
Example #57
# optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=args.lr, betas=(args.beta1, args.beta2))


# ----------
#  Training
# ----------
for epoch in range(args.n_epochs):
    for i, (imgs, _) in enumerate(dataloader):

        Batch_size = imgs.size(0)  # actual batch size; the last batch may be smaller than args.batch_size
        # Adversarial ground truths
        valid = Variable(torch.ones(Batch_size, 1).cuda(),requires_grad=False)
        fake = Variable(torch.zeros(Batch_size, 1).cuda(),requires_grad=False)

        # Configure input
        real_imgs = Variable(imgs.type(torch.FloatTensor).cuda())

        # -----------------
        #  Train Generator
        # -----------------

        optimizer_G.zero_grad()
        # Sample noise as generator input
        z = Variable(torch.FloatTensor(np.random.normal(0, 1, (Batch_size, args.latent_dim))).cuda())
        # Generate a batch of images
        gen_imgs = generator(z)
        # Loss measures generator's ability to fool the discriminator
        PRO_D_fake = discriminator(gen_imgs)
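The example is cut off here, right after the discriminator scores the fake batch. A typical non-saturating continuation, assumed rather than taken from the source (it presumes a sigmoid-output discriminator and a BCE criterion):
adversarial_loss = torch.nn.BCELoss()
g_loss = adversarial_loss(PRO_D_fake, valid)   # generator wants D(fake) -> 1
g_loss.backward()
optimizer_G.step()

# ---------------------
#  Train Discriminator
# ---------------------
optimizer_D.zero_grad()
real_loss = adversarial_loss(discriminator(real_imgs), valid)
fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
d_loss = (real_loss + fake_loss) / 2
d_loss.backward()
optimizer_D.step()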
Example #58
    def get_target(self, target, anchors, in_w, in_h, ignore_threshold):
        # Number of images in this batch
        bs = len(target)
        # Pick the anchors that belong to this feature-map scale
        anchor_index = [[0, 1, 2], [3, 4, 5],
                        [6, 7, 8]][self.feature_length.index(in_w)]
        subtract_index = [0, 3, 6][self.feature_length.index(in_w)]
        # Build all-zero / all-one target arrays
        mask = torch.zeros(bs,
                           int(self.num_anchors / 3),
                           in_h,
                           in_w,
                           requires_grad=False)
        noobj_mask = torch.ones(bs,
                                int(self.num_anchors / 3),
                                in_h,
                                in_w,
                                requires_grad=False)

        tx = torch.zeros(bs,
                         int(self.num_anchors / 3),
                         in_h,
                         in_w,
                         requires_grad=False)
        ty = torch.zeros(bs,
                         int(self.num_anchors / 3),
                         in_h,
                         in_w,
                         requires_grad=False)
        tw = torch.zeros(bs,
                         int(self.num_anchors / 3),
                         in_h,
                         in_w,
                         requires_grad=False)
        th = torch.zeros(bs,
                         int(self.num_anchors / 3),
                         in_h,
                         in_w,
                         requires_grad=False)
        t_box = torch.zeros(bs,
                            int(self.num_anchors / 3),
                            in_h,
                            in_w,
                            4,
                            requires_grad=False)
        tconf = torch.zeros(bs,
                            int(self.num_anchors / 3),
                            in_h,
                            in_w,
                            requires_grad=False)
        tcls = torch.zeros(bs,
                           int(self.num_anchors / 3),
                           in_h,
                           in_w,
                           self.num_classes,
                           requires_grad=False)

        box_loss_scale_x = torch.zeros(bs,
                                       int(self.num_anchors / 3),
                                       in_h,
                                       in_w,
                                       requires_grad=False)
        box_loss_scale_y = torch.zeros(bs,
                                       int(self.num_anchors / 3),
                                       in_h,
                                       in_w,
                                       requires_grad=False)
        for b in range(bs):
            if len(target[b]) == 0:
                continue
            # Ground-truth centre and size on the feature-map grid
            gxs = target[b][:, 0:1] * in_w
            gys = target[b][:, 1:2] * in_h

            gws = target[b][:, 2:3] * in_w
            ghs = target[b][:, 3:4] * in_h

            # Which grid cell each ground-truth box falls into
            gis = torch.floor(gxs)
            gjs = torch.floor(gys)

            # Ground-truth boxes (width/height only, centred at the origin)
            gt_box = torch.FloatTensor(
                torch.cat(
                    [torch.zeros_like(gws),
                     torch.zeros_like(ghs), gws, ghs], 1))

            # All anchor boxes (width/height only, centred at the origin)
            anchor_shapes = torch.FloatTensor(
                torch.cat((torch.zeros(
                    (self.num_anchors, 2)), torch.FloatTensor(anchors)), 1))
            # Overlap (IoU) between every ground-truth box and every anchor
            anch_ious = jaccard(gt_box, anchor_shapes)

            # Find the best matching anchor box
            best_ns = torch.argmax(anch_ious, dim=-1)
            for i, best_n in enumerate(best_ns):
                if best_n not in anchor_index:
                    continue
                # Masks
                gi = gis[i].long()
                gj = gjs[i].long()
                gx = gxs[i]
                gy = gys[i]
                gw = gws[i]
                gh = ghs[i]
                if (gj < in_h) and (gi < in_w):
                    best_n = best_n - subtract_index
                    # Mark which anchors actually contain an object
                    noobj_mask[b, best_n, gj, gi] = 0
                    mask[b, best_n, gj, gi] = 1
                    # Centre-offset targets for the anchor
                    tx[b, best_n, gj, gi] = gx
                    ty[b, best_n, gj, gi] = gy
                    # Width/height targets for the anchor
                    tw[b, best_n, gj, gi] = gw
                    th[b, best_n, gj, gi] = gh
                    # Scales used to weight the xywh loss terms
                    box_loss_scale_x[b, best_n, gj, gi] = target[b][i, 2]
                    box_loss_scale_y[b, best_n, gj, gi] = target[b][i, 3]
                    # Objectness confidence target
                    tconf[b, best_n, gj, gi] = 1
                    # Class target
                    tcls[b, best_n, gj, gi, target[b][i, 4].long()] = 1
                else:
                    print('Step {0} out of bound'.format(b))
                    print('gj: {0}, height: {1} | gi: {2}, width: {3}'.format(
                        gj, in_h, gi, in_w))
                    continue
        t_box[..., 0] = tx
        t_box[..., 1] = ty
        t_box[..., 2] = tw
        t_box[..., 3] = th
        return mask, noobj_mask, t_box, tconf, tcls, box_loss_scale_x, box_loss_scale_y
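Both gt_box and anchor_shapes above are zero-centred (their x and y are 0), so jaccard reduces to a pure width/height IoU. A minimal standalone version of that computation (jaccard itself is defined elsewhere in the repo; this sketch only mirrors what it does for such boxes):
import torch

def wh_iou(wh1, wh2):
    # IoU of boxes compared by width/height only; wh1: (N, 2), wh2: (M, 2)
    inter = torch.min(wh1[:, None, :], wh2[None, :, :]).prod(2)   # (N, M)
    union = wh1.prod(1)[:, None] + wh2.prod(1)[None, :] - inter
    return inter / union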
Example #59
def init_hidden(self):
    # Initialize hidden and cell states
    # (num_layers * num_directions, batch, hidden_size)
    return Variable(torch.zeros(num_layers, batch_size, hidden_size))

def init_hidden(self, x):
    h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
    c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim)
    return [t.cuda() for t in (h0, c0)]
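Both variants above pin the device (implicitly CPU in the first, .cuda() in the second). A device-agnostic version of the same pattern (an assumed refactor, not from the source):
import torch

def init_hidden(self, x):
    # Allocate on the same device and dtype as the incoming batch
    h0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim,
                     device=x.device, dtype=x.dtype)
    c0 = torch.zeros(self.layer_dim, x.size(0), self.hidden_dim,
                     device=x.device, dtype=x.dtype)
    return h0, c0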