Example #1
    def calc_metrics(self, g_list, t_list):
        mrrs, hit_1s, hit_3s, hit_10s, losses = [], [], [], [], []
        ranks = []
        for g, t in zip(g_list, t_list):

            ent_embed = self.get_per_graph_ent_embeds(t, g)
            all_embeds_g = self.get_all_embeds_Gt(t)
            index_sample = torch.stack(
                [g.edges()[0], g.edata['type_s'],
                 g.edges()[1]]).transpose(0, 1)
            label = torch.ones(index_sample.shape[0])
            if self.use_cuda:
                index_sample = cuda(index_sample)
                label = cuda(label)

            if index_sample.shape[0] == 0: continue

            rank = self.evaluater.calc_metrics_single_graph(
                ent_embed, self.rel_embeds, all_embeds_g, index_sample, g, t)
            loss = self.link_classification_loss(ent_embed, self.rel_embeds,
                                                 index_sample, label)
            ranks.append(rank)
            losses.append(loss.item())
        ranks = torch.cat(ranks)

        return ranks, np.mean(losses)
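All of these snippets call a project-level `cuda` helper rather than torch's API directly. Its definition is not shown on this page; a plausible minimal sketch that covers both calling conventions seen below, the one-argument form used in the graph examples and the tensor-plus-flag form used elsewhere:

import torch

def cuda(xs, is_cuda=True):
    # Hypothetical helper: move a tensor or module to the GPU when requested,
    # otherwise return it unchanged.
    return xs.cuda() if is_cuda and torch.cuda.is_available() else xs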
Example #2
    def calc_metrics(self, per_graph_ent_embeds, g_list, t_list, hist_embeddings, start_time_tensor, cur_t):
        mrrs, hit_1s, hit_3s, hit_10s, losses = [], [], [], [], []
        ranks = []
        i = 0
        for g, t, ent_embed in zip(g_list, t_list, per_graph_ent_embeds):
            time_diff_tensor = cur_t - start_time_tensor[i]
            all_embeds_g = self.get_all_embeds_Gt(ent_embed, g, t, hist_embeddings[i][0], hist_embeddings[i][1], time_diff_tensor)

            index_sample = torch.stack([g.edges()[0], g.edata['type_s'], g.edges()[1]]).transpose(0, 1)
            label = torch.ones(index_sample.shape[0])
            if self.use_cuda:
                index_sample = cuda(index_sample)
                label = cuda(label)
            if index_sample.shape[0] == 0: continue
            rank = self.evaluater.calc_metrics_single_graph(ent_embed, self.rel_embeds, all_embeds_g, index_sample, g, t)
            loss = self.link_classification_loss(ent_embed, self.rel_embeds, index_sample, label)
            ranks.append(rank)
            losses.append(loss.item())
            i += 1
        # torch.cat fails on an empty list of ranks; guard explicitly
        # instead of using a bare except.
        if ranks:
            ranks = torch.cat(ranks)
        else:
            ranks = cuda(torch.tensor([]).long()) if self.use_cuda else torch.tensor([]).long()

        return ranks, np.mean(losses)
Example #3
    def get_gradients_antoshka(self,
                               x,
                               y_true,
                               y_target=None,
                               eps=0.03,
                               alpha=2 / 255,
                               iteration=1):
        self.set_mode('eval')

        x = Variable(cuda(x, self.cuda), requires_grad=True)
        y_true = Variable(cuda(y_true, self.cuda), requires_grad=False)
        if y_target is not None:
            targeted = True
            y_target = Variable(cuda(y_target, self.cuda), requires_grad=False)
        else:
            targeted = False

        h = self.net(x)
        prediction = h.max(1)[1]
        accuracy = torch.eq(prediction, y_true).float().mean()
        cost = F.cross_entropy(h, y_true)

        x_grad = self.attack.get_gradients_anton(x, y_true, False)

        return x_grad
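`self.attack.get_gradients_anton` is defined outside this snippet; a hedged sketch of what such an input-gradient helper typically computes, the gradient of the cross-entropy loss with respect to the input (hypothetical helper, not the repository's code):

import torch
import torch.nn.functional as F

def input_gradient(net, x, y):
    # Raw loss gradient w.r.t. the input pixels; FGSM-style attacks
    # take the sign of this quantity.
    x = x.clone().detach().requires_grad_(True)
    F.cross_entropy(net(x), y).backward()
    return x.grad.detach()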
Example #4
    def ad_test(self, target, epsilon, alpha, iteration):
        self.set_mode('eval')
        correct = 0.
        cost = 0.
        total = 0.
        data_loader = self.data_loader['test']
        for batch_idx, (images, labels) in enumerate(data_loader):
            x_true = Variable(cuda(images, self.cuda))
            y_true = Variable(cuda(labels, self.cuda))

            if isinstance(target, int) and (target in range(self.y_dim)):
                y_target = torch.LongTensor(y_true.size()).fill_(target)
            else:
                y_target = None

            if self.attack_mode == 'FGSM':
                x, _, _ = self.FGSM(x_true, y_true, y_target, epsilon, alpha,
                                    iteration)
            elif self.attack_mode == 'ILLC':
                x, _, _ = self.ILLC(x_true, y_true, y_target, epsilon, alpha,
                                    iteration)
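            # NOTE: if self.attack_mode is neither 'FGSM' nor 'ILLC', x is
            # never assigned and the forward pass below will fail.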

            logit = self.net(x)
            prediction = logit.max(1)[1]

            correct += torch.eq(prediction, y_true).float().sum().item()
            cost += F.cross_entropy(logit, y_true,
                                    reduction='sum').item()

            total += x.size(0)
        accuracy = correct / total
        cost /= total
        print('ACC:{:.4f}'.format(accuracy))
        self.set_mode('train')
Example #5
 def sample_data(self):
     data_loader = self.data_loader['test']
     for batch_idx, (images, labels) in enumerate(data_loader):
         x_true = Variable(cuda(images, self.cuda))
         y_true = Variable(cuda(labels, self.cuda))
         break
     return x_true, y_true
Example #6
    def get_model_config(self, config):
        if config.model_type.split('_')[0] == 'pd':
            position_dependent = True
        else:
            position_dependent = False

        if (config.model_type in ['gumbel_blstm', 'pd_gumbel_blstm'
                                  ]) and (config.downsample_method == 'no-op'):
            self.audio_net = cuda(
                GumbelBLSTM(self.K,
                            n_layers=self.n_layers,
                            n_gumbel_units=self.n_clusters,
                            n_class=self.n_visual_class,
                            input_size=self.input_size,
                            ds_ratio=1,
                            bidirectional=True,
                            max_len=self.max_segment_num,
                            position_dependent=position_dependent), self.cuda)
        elif (config.model_type in [
                'gumbel_mlp', 'pd_gumbel_mlp'
        ]) and (config.downsample_method != 'no-op'):
            self.audio_net = cuda(
                GumbelMLP(self.K,
                          n_layers=self.n_layers,
                          n_gumbel_units=self.n_phone_class,
                          n_class=self.n_visual_class,
                          input_size=self.input_size,
                          max_len=self.max_segment_num,
                          position_dependent=position_dependent), self.cuda)
        else:
            raise ValueError(
                f'Invalid model type: {config.model_type} for downsample method {config.downsample_method}'
            )
Example #7
  def cluster(self,
              n_clusters=50,
              out_prefix='quantized_outputs'):
    self.set_mode('eval')
    testset = self.data_loader['test'].dataset
    temp = self.history['temp']

    us_ratio = int(self.hop_len_ms / 10) * self.audio_net.ds_ratio 
    with torch.no_grad():
      B = 0
      utt_ids = []
      X = []      
      for b_idx, (audios, phoneme_labels, word_labels,\
                  audio_masks, phone_masks, word_masks)\
                  in enumerate(self.data_loader['test']):
        if b_idx > 2 and self.debug:
          break
        if b_idx == 0:
          B = audios.size(0)

        
        audios = cuda(audios, self.cuda)
        audio_masks = cuda(audio_masks, self.cuda)
        if self.audio_feature == 'wav2vec2':
          x = self.audio_feature_net.feature_extractor(audios)
        else:
          x = audios
        
        audio_lens = audio_masks.sum(-1).long()
        outputs = self.audio_net(x, return_feat=True)
        embedding = outputs[-1]

        for idx in range(audios.size(0)): 
          global_idx = b_idx * B + idx
          utt_id = os.path.splitext(os.path.basename(testset.dataset[global_idx][0]))[0] 
          embed = embedding[idx, :audio_lens[idx]].cpu().detach().numpy()
          X.extend(embed.tolist())
          utt_ids.extend([utt_id]*embed.shape[0])

      X = np.asarray(X)
      begin_time = time.time()
      clusterer = KMeans(n_clusters=n_clusters).fit(X) 
      print(f'KMeans take {time.time()-begin_time} s to finish')
      np.save(self.ckpt_dir.joinpath('cluster_means.npy'), clusterer.cluster_centers_)
      
      ys = clusterer.predict(X)
      filename = self.ckpt_dir.joinpath(out_prefix+'.txt')
      out_f = open(filename, 'w')
      for utt_id, group in groupby(list(zip(utt_ids, ys)), lambda x:x[0]):
        y = ','.join([str(g[1]) for g in group for _ in range(us_ratio)])
        out_f.write(f'{utt_id} {y}\n') 
      out_f.close()
      gold_path = os.path.join(testset.data_path, testset.splits[0])
      token_f1, token_prec, token_recall = compute_token_f1(
                                             filename,
                                             gold_path,
                                             self.ckpt_dir.joinpath(f'confusion.png'),
                                           )
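For context, a condensed, self-contained version of the quantize-and-write step above, with random vectors standing in for the pooled frame embeddings (utterance ids, cluster count, and the upsampling ratio are illustrative):

import numpy as np
from itertools import groupby
from sklearn.cluster import KMeans

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 16))                    # stand-in frame embeddings
utt_ids = [f'utt_{i // 50}' for i in range(200)]  # 4 utterances, 50 frames each
us_ratio = 2                                      # repeat each id per frame hop

ys = KMeans(n_clusters=5, n_init=10).fit_predict(X)
with open('quantized_outputs.txt', 'w') as out_f:
    for utt_id, group in groupby(zip(utt_ids, ys), lambda p: p[0]):
        y = ','.join(str(g[1]) for g in group for _ in range(us_ratio))
        out_f.write(f'{utt_id} {y}\n')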
Example #8
    def universal(self, args):
        self.set_mode('eval')

        init = False

        correct = 0
        cost = 0
        total = 0

        data_loader = self.data_loader['test']
        for e in range(100000):
            for batch_idx, (images, labels) in enumerate(data_loader):

                x = Variable(cuda(images, self.cuda))
                y = Variable(cuda(labels, self.cuda))

                if not init:
                    sz = x.size()[1:]
                    r = torch.zeros(sz)
                    r = Variable(cuda(r, self.cuda), requires_grad=True)
                    init = True

                logit = self.net(x+r)
                p_ygx = F.softmax(logit, dim=1)
                H_ygx = (-p_ygx*torch.log(self.eps+p_ygx)).sum(1).mean(0)
                prediction_cost = H_ygx
                #prediction_cost = F.cross_entropy(logit,y)
                #perceptual_cost = -F.l1_loss(x+r,x)
                #perceptual_cost = -F.mse_loss(x+r,x)
                #perceptual_cost = -F.mse_loss(x+r,x) -r.norm()
                perceptual_cost = -F.mse_loss(x+r, x) -F.relu(r.norm()-5)
                #perceptual_cost = -F.relu(r.norm()-5.)
                #if perceptual_cost.data[0] < 10: perceptual_cost.data.fill_(0)
                cost = prediction_cost + perceptual_cost
                #cost = prediction_cost

                self.net.zero_grad()
                if r.grad is not None:
                    r.grad.fill_(0)
                cost.backward()

                #r = r + args.eps*r.grad.sign()
                r = r + r.grad*1e-1
                r = Variable(cuda(r.data, self.cuda), requires_grad=True)



                prediction = logit.max(1)[1]
                correct = torch.eq(prediction, y).float().mean().item()
                if batch_idx % 100 == 0:
                    if self.visdom:
                        self.vf.imshow_multi(x.add(r).data)
                        #self.vf.imshow_multi(r.unsqueeze(0).data,factor=4)
                    print(correct*100, prediction_cost.item(),
                          perceptual_cost.item(), r.norm().item())

        self.set_mode('train')
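The prediction term above rewards perturbations that make the classifier maximally uncertain. A stand-alone computation of that entropy term, assuming self.eps is a small constant such as 1e-12:

import torch
import torch.nn.functional as F

eps = 1e-12                   # assumed value of self.eps
logits = torch.randn(8, 10)   # stand-in for self.net(x + r)
p_ygx = F.softmax(logits, dim=1)
H_ygx = (-p_ygx * torch.log(eps + p_ygx)).sum(1).mean(0)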
Example #9
    def FGSM(self,
             x,
             y_true,
             y_target=None,
             eps=0.03,
             alpha=2 / 255,
             iteration=1):
        self.set_mode('eval')

        x = Variable(cuda(x, self.cuda), requires_grad=True)
        y_true = Variable(cuda(y_true, self.cuda), requires_grad=False)
        if y_target is not None:
            targeted = True
            y_target = Variable(cuda(y_target, self.cuda), requires_grad=False)
        else:
            targeted = False

        h = self.net(x)
        prediction = h.max(1)[1]
        accuracy = torch.eq(prediction, y_true).float().mean()
        cost = F.cross_entropy(h, y_true)

        if iteration == 1:
            if targeted:
                x_adv, h_adv, h = self.attack.fgsm(x, y_target, True, eps)
            else:
                x_adv, h_adv, h = self.attack.fgsm(x, y_true, False, eps)
        else:
            if targeted:
                x_adv, h_adv, h = self.attack.i_fgsm(x, y_target, True, eps,
                                                     alpha, iteration)
            else:
                x_adv, h_adv, h = self.attack.i_fgsm(x, y_true, False, eps,
                                                     alpha, iteration)

        prediction_adv = h_adv.max(1)[1]
        accuracy_adv = torch.eq(prediction_adv, y_true).float().mean()
        cost_adv = F.cross_entropy(h_adv, y_true)

        # make indication of perturbed images that changed predictions of the classifier
        if targeted:
            changed = torch.eq(y_target, prediction_adv)
        else:
            changed = torch.eq(prediction, prediction_adv)
            changed = torch.eq(changed, 0)
        changed = changed.float().view(-1, 1, 1, 1).repeat(1, 3, 28, 28)

        changed[:, 0, :, :] = where(changed[:, 0, :, :] == 1, 252, 91)
        changed[:, 1, :, :] = where(changed[:, 1, :, :] == 1, 39, 252)
        changed[:, 2, :, :] = where(changed[:, 2, :, :] == 1, 25, 25)
        changed = self.scale(changed / 255)
        changed[:, :, 3:-2, 3:-2] = x_adv.repeat(1, 3, 1, 1)[:, :, 3:-2, 3:-2]

        self.set_mode('train')

        return x_adv.data, changed.data,\
                (accuracy.item(), cost.item(), accuracy_adv.item(), cost_adv.item())
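`self.attack.fgsm` is not shown here; a minimal single-step FGSM consistent with how it is called above (a hypothetical sketch, not the repository's implementation):

import torch
import torch.nn.functional as F

def fgsm_step(net, x, y, targeted=False, eps=0.03):
    # Perturb x by eps along the sign of the loss gradient; a targeted
    # attack steps against the gradient to approach the target class.
    x_adv = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(net(x_adv), y)
    loss.backward()
    step = -eps if targeted else eps
    return (x_adv + step * x_adv.grad.sign()).clamp(0, 1).detach()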
Example #10
 def build_model(self):
     self.ent_encoder = SARGCN(self.args, self.hidden_size, self.embed_size,
                               self.num_rels, self.total_time)
     self.time_diff_test = torch.tensor(
         list(range(self.test_seq_len - 1, -1, -1))).float()
     self.time_diff_train = torch.tensor(
         list(range(self.train_seq_len - 1, -1, -1))).float()
     if self.use_cuda:
         self.time_diff_test = cuda(self.time_diff_test)
         self.time_diff_train = cuda(self.time_diff_train)
Example #11
    def train(self):
        self.set_mode('train')
        for e in range(self.epoch):
            self.global_epoch += 1

            correct = 0.
            cost = 0.
            total = 0.
            for batch_idx, (images,
                            labels) in enumerate(self.data_loader['train']):
                self.global_iter += 1

                x = Variable(cuda(images, self.cuda))
                y = Variable(cuda(labels, self.cuda))

                logit = self.net(x)
                prediction = logit.max(1)[1]

                correct = torch.eq(prediction,
                                   y).float().mean().data  #[0]  # Anton
                cost = F.cross_entropy(logit, y)

                self.optim.zero_grad()
                cost.backward()
                self.optim.step()

                if batch_idx % 100 == 0:
                    if self.print_:
                        print()
                        print(self.env_name)
                        print('[{:03d}:{:03d}]'.format(self.global_epoch,
                                                       batch_idx))
                        print('acc:{:.3f} loss:{:.3f}'.format(
                            correct, cost.data))  # [0]))  # Anton

                    if self.tensorboard:
                        self.tf.add_scalars(main_tag='performance/acc',
                                            tag_scalar_dict={'train': correct},
                                            global_step=self.global_iter)
                        self.tf.add_scalars(
                            main_tag='performance/error',
                            tag_scalar_dict={'train': 1 - correct},
                            global_step=self.global_iter)
                        self.tf.add_scalars(
                            main_tag='performance/cost',
                            tag_scalar_dict={'train': cost.item()},
                            global_step=self.global_iter)

            self.test()

        if self.tensorboard:
            self.tf.add_scalars(main_tag='performance/best/acc',
                                tag_scalar_dict={'test': self.history['acc']},
                                global_step=self.history['iter'])
        print(" [*] Training Finished!")
Example #12
 def build_model(self):
     super().build_model()
     self.time_diff_test = torch.tensor(
         list(range(self.test_seq_len - 1, 0, -1)) +
         list(range(self.test_seq_len - 1, 0, -1)) + [0.])
     self.time_diff_train = torch.tensor(
         list(range(self.train_seq_len - 1, 0, -1)) +
         list(range(self.train_seq_len - 1, 0, -1)) + [0.])
     if self.use_cuda:
         self.time_diff_test = cuda(self.time_diff_test)
         self.time_diff_train = cuda(self.time_diff_train)
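For a concrete sense of the tensor built above, with an assumed test_seq_len of 4 it is two backward countdowns followed by a zero for the current step:

import torch
t = torch.tensor(list(range(3, 0, -1)) + list(range(3, 0, -1)) + [0.])
print(t)  # tensor([3., 2., 1., 3., 2., 1., 0.])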
Example #13
    def test(self):
        self.set_mode('eval')

        correct = 0.
        cost = 0.
        total = 0.

        data_loader = self.data_loader['test']
        for batch_idx, (images, labels) in enumerate(data_loader):
            x = Variable(cuda(images, self.cuda))
            y = Variable(cuda(labels, self.cuda))

            logit = self.net(x)
            prediction = logit.max(1)[1]

            correct += torch.eq(prediction, y).float().sum().item()
            cost += F.cross_entropy(logit, y,
                                    reduction='sum').item()
            total += x.size(0)

        accuracy = correct / total
        cost /= total

        if self.print_:
            print()
            print('[{:03d}]\nTEST RESULT'.format(self.global_epoch))
            print('ACC:{:.4f}'.format(accuracy))
            print('*TOP* ACC:{:.4f} at e:{:03d}'.format(
                accuracy,
                self.global_epoch,
            ))
            print()

            if self.tensorboard:
                self.tf.add_scalars(main_tag='performance/acc',
                                    tag_scalar_dict={'test': accuracy},
                                    global_step=self.global_iter)

                self.tf.add_scalars(main_tag='performance/error',
                                    tag_scalar_dict={'test': (1 - accuracy)},
                                    global_step=self.global_iter)

                self.tf.add_scalars(main_tag='performance/cost',
                                    tag_scalar_dict={'test': cost},
                                    global_step=self.global_iter)

        if self.history['acc'] < accuracy:
            self.history['acc'] = accuracy
            self.history['epoch'] = self.global_epoch
            self.history['iter'] = self.global_iter
            self.save_checkpoint('best_acc.tar')

        self.set_mode('train')
Example #14
    def phone_level_cluster(self, out_prefix='predictions'):
        self.load_checkpoint()
        X_a = np.zeros((self.n_class, self.K))
        norm = np.zeros((self.n_class, 1))
        audio_files = []
        encodings = []
        # Find the centroid of each phone-level cluster
        B = self.data_loader['test'].batch_size
        testset = self.data_loader['test'].dataset
        for b_idx, (audios, _, _, audio_masks,
                    _) in enumerate(self.data_loader['test']):
            if b_idx > 2 and self.debug:
                break
            audios = cuda(audios, self.cuda)
            audio_masks = cuda(audio_masks, self.cuda)
            _, _, encoding, embedding = self.audio_net(audios,
                                                       mask=audio_masks,
                                                       return_feat=True)
            encoding = encoding.permute(0, 2, 1).cpu().detach().numpy()
            embedding = embedding.cpu().detach().numpy()
            X_a += encoding @ embedding
            norm += encoding.sum(axis=-1, keepdims=True)
            audio_files.extend([
                testset.dataset[b_idx * B + i][0]
                for i in range(audios.size(0))
            ])
            encodings.append(encoding.T)
        encodings = np.concatenate(encodings)

        X_a /= norm
        # The bottleneck parameters are torch tensors; broadcast the bias over
        # the embedding dimension and convert to numpy before concatenation.
        X_s = (self.audio_net.bottleneck.weight +
               self.audio_net.bottleneck.bias.unsqueeze(-1)).detach().cpu().numpy()
        X = np.concatenate([X_a, X_s], axis=1)

        kmeans = KMeans(n_clusters=50).fit(X)
        phoneme_labels = kmeans.labels_

        out_file = os.path.join(self.ckpt_dir,
                                f'{out_prefix}_phone_level_clustering.txt')
        out_f = open(out_file, 'w')
        pred_phones = encodings.argmax(-1)  # frame-level cluster indices
        for idx, (audio_file,
                  encoding) in enumerate(zip(audio_files, encodings)):
            audio_id = os.path.splitext(os.path.split(audio_file)[1])[0]
            pred_phonemes = ','.join(
                [str(phoneme_labels[phn]) for phn in pred_phones[idx]])
            out_f.write(f'{audio_id} {pred_phonemes}\n')
        out_f.close()

        gold_path = os.path.join(testset.data_path, 'test')
        compute_token_f1(
            out_file, gold_path,
            os.path.join(self.ckpt_dir, 'confusion_phone_level_cluster.png'))
Example #15
    def calc_metrics(self, per_graph_ent_embeds_loc, per_graph_ent_embeds_rec,
                     g_list, t_list, hist_embeddings_forward_loc,
                     hist_embeddings_forward_rec, start_time_tensor_forward,
                     hist_embeddings_backward_loc,
                     hist_embeddings_backward_rec, start_time_tensor_backward,
                     cur_t):
        mrrs, hit_1s, hit_3s, hit_10s, losses = [], [], [], [], []
        ranks = []
        i = 0
        for g, t, ent_embed_loc, ent_embed_rec in zip(
                g_list, t_list, per_graph_ent_embeds_loc,
                per_graph_ent_embeds_rec):
            time_diff_tensor_forward = cur_t - start_time_tensor_forward[i]
            time_diff_tensor_backward = cur_t - start_time_tensor_backward[i]
            all_embeds_g_loc, all_embeds_g_rec = self.get_all_embeds_Gt(
                ent_embed_loc, ent_embed_rec, g, t,
                hist_embeddings_forward_loc[i],
                hist_embeddings_forward_rec[i][0],
                hist_embeddings_forward_rec[i][1], time_diff_tensor_forward,
                hist_embeddings_backward_loc[i],
                hist_embeddings_backward_rec[i][0],
                hist_embeddings_backward_rec[i][1], time_diff_tensor_backward)

            index_sample = torch.stack(
                [g.edges()[0], g.edata['type_s'],
                 g.edges()[1]]).transpose(0, 1)
            # label = torch.ones(index_sample.shape[0])
            if self.use_cuda:
                index_sample = cuda(index_sample)
                # label = cuda(label)
            if index_sample.shape[0] == 0: continue
            weight_subject_query_subject_embed, weight_subject_query_object_embed, weight_object_query_subject_embed, weight_object_query_object_embed = self.calc_ensemble_ratio(
                index_sample, t, g)
            # pdb.set_trace()
            rank = self.evaluater.calc_metrics_single_graph(
                ent_embed_loc, ent_embed_rec, self.rel_embeds,
                all_embeds_g_loc, all_embeds_g_rec, index_sample,
                weight_subject_query_subject_embed,
                weight_subject_query_object_embed,
                weight_object_query_subject_embed,
                weight_object_query_object_embed, g, t)

            # loss = self.link_classification_loss(ent_embed_loc, ent_embed_rec, self.rel_embeds, index_sample, label)
            ranks.append(rank)
            # losses.append(loss.item())
            i += 1
        if ranks:
            ranks = torch.cat(ranks)
        else:
            ranks = cuda(torch.tensor(
                []).long()) if self.use_cuda else torch.tensor([]).long()

        # NOTE: the loss computation above is commented out, so `losses`
        # stays empty and np.mean(losses) evaluates to nan here.
        return ranks, np.mean(losses)
Example #16
    def train(self):
        self.set_mode('train')
        acc_train_plt = [0]
        loss_plt = []
        acc_test_plt = [0]
        for e in range(self.epoch):
            self.global_epoch += 1
            local_iter = 0
            correct = 0.
            cost = 0.
            total = 0.
            total_acc = 0.
            total_loss = 0.
            # train for each batch iteration
            for batch_idx, (images,
                            labels) in enumerate(self.data_loader['train']):
                self.global_iter += 1
                local_iter += 1
                #print("image size is ", np.shape(images))

                x = Variable(cuda(images, self.cuda))
                y = Variable(cuda(labels, self.cuda))

                logit = self.net(x)
                prediction = logit.max(1)[1]

                correct = torch.eq(prediction, y).float().mean().data.item()
                cost = F.cross_entropy(logit, y)
                total_acc += correct
                total_loss += cost.data.item()

                self.optim.zero_grad()
                cost.backward()  #back propagation
                self.optim.step()

                #for every 100th batch show accuracy and loss information of training result
                if batch_idx % 100 == 0:
                    if self.print_:
                        print()
                        print(self.env_name)
                        print('[{:03d}:{:03d}]'.format(self.global_epoch,
                                                       batch_idx))
                        print('acc:{:.3f} loss:{:.3f}'.format(
                            correct, cost.data.item()))
            total_acc = total_acc / local_iter
            total_loss = total_loss / local_iter
            acc_train_plt.append(total_acc)
            loss_plt.append(total_loss)
            acc_test_plt.append(self.test())  # run the test every epoch and record accuracy
        print(" [*] Training Finished!")
        self.plot_result(acc_train_plt, acc_test_plt, loss_plt,
                         self.history['acc'])
Example #17
    def single_graph_negative_sampling(self, t, g, num_ents):
        t = t.item()
        triples = torch.stack([g.edges()[0], g.edata['type_s'],
                               g.edges()[1]]).transpose(0, 1)
        sample, neg_tail_sample, neg_head_sample, label = self.negative_sampling(
            self.true_heads_train[t], self.true_tails_train[t], triples,
            num_ents, g)

        neg_tail_sample, neg_head_sample, label = torch.from_numpy(
            neg_tail_sample), torch.from_numpy(
                neg_head_sample), torch.from_numpy(label)
        if self.use_cuda:
            sample, neg_tail_sample, neg_head_sample, label = cuda(
                sample), cuda(neg_tail_sample), cuda(neg_head_sample), cuda(
                    label)
        return sample, neg_tail_sample, neg_head_sample, label
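`self.negative_sampling` is defined elsewhere; a hedged sketch of the usual corruption scheme in this family of models, drawing random entities as corrupted tails (illustrative names; real implementations also filter known true triples using structures like true_tails_train):

import numpy as np

def corrupt_tails(triples, num_ents, k, rng=np.random.default_rng(0)):
    # One row of k candidate corrupted tails per (s, r, o) triple; heads
    # are corrupted symmetrically.
    return rng.integers(0, num_ents, size=(len(triples), k))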
Example #18
    def model_init(self, args):
        # Network
        self.net = cuda(ToyNet(y_dim=self.y_dim), self.cuda)
        self.net.weight_init(_type='kaiming')

        # Optimizers
        self.optim = optim.Adam([{'params':self.net.parameters(), 'lr':self.lr}],
                                betas=(0.5, 0.999))
Example #19
 def get_model_config(self, config):      
   self.audio_net = cuda(InfoQuantizer(in_channels=self.input_size,
                                       channels=self.K,
                                       n_embeddings=self.n_clusters,
                                       z_dim=self.n_visual_class
                                       ), self.cuda)
   self.use_logsoftmax = config.get('use_logsoftmax', False)
   print(f'Use log softmax: {self.use_logsoftmax}')
Example #20
 def get_model_config(self, config):
     self.audio_net = cuda(
         GumbelBLSTM(self.K,
                     n_layers=self.n_layers,
                     n_gumbel_units=self.n_phone_class,
                     n_class=self.n_visual_class,
                     input_size=self.input_size,
                     ds_ratio=1,
                     bidirectional=True), self.cuda)
Example #21
    def get_model_config(self, config):
        if config.model_type == 'blstm':
            self.audio_net = cuda(
                BLSTM(self.K,
                      n_layers=self.n_layers,
                      n_class=self.n_visual_class,
                      input_size=80,
                      ds_ratio=1,
                      bidirectional=True), self.cuda)

        elif config.model_type == 'mlp':
            self.audio_net = cuda(
                MLP(self.K,
                    n_layers=self.n_layers,
                    n_class=self.n_visual_class,
                    input_size=self.input_size,
                    max_seq_len=self.max_segment_num), self.cuda)
        else:
            raise ValueError(f'Invalid model type {config.model_type}')
Example #22
    def calc_ensemble_ratio(self, triples, t, g):
        sub_feature_vecs = []
        obj_feature_vecs = []
        t = t.item()
        for s, r, o in triples:

            s = g.ids[s.item()]
            r = r.item()
            o = g.ids[o.item()]
            # triple_freq = self.drop_edge.triple_freq_per_time_step_agg[t][(s, r, o)]
            # ent_pair_freq = self.drop_edge.ent_pair_freq_per_time_step_agg[t][(s, o)]
            sub_freq = self.drop_edge.sub_freq_per_time_step_agg[t][s]
            obj_freq = self.drop_edge.obj_freq_per_time_step_agg[t][o]
            rel_freq = self.drop_edge.rel_freq_per_time_step_agg[t][r]
            sub_rel_freq = self.drop_edge.sub_rel_freq_per_time_step_agg[t][(
                s, r)]
            obj_rel_freq = self.drop_edge.obj_rel_freq_per_time_step_agg[t][(
                o, r)]
            # 0: no local, 1: no temporal

            sub_feature_vecs.append(
                torch.tensor([obj_freq, rel_freq, obj_rel_freq]))
            obj_feature_vecs.append(
                torch.tensor([sub_freq, rel_freq, sub_rel_freq]))
        # pdb.set_trace()

        if sub_feature_vecs:
            sub_features = torch.stack(sub_feature_vecs).float()
            obj_features = torch.stack(obj_feature_vecs).float()
            if self.use_cuda:
                sub_features = cuda(sub_features)
                obj_features = cuda(obj_features)
            weight_subject = torch.sigmoid(self.subject_linear(sub_features))
            weight_object = torch.sigmoid(self.object_linear(obj_features))
        else:
            weight_subject = cuda(torch.tensor(
                []).long()) if self.use_cuda else torch.tensor([]).long()
            weight_object = cuda(torch.tensor(
                []).long()) if self.use_cuda else torch.tensor([]).long()

        return weight_subject, weight_object
Example #23
 def forward_ema(self, g, prev_embeddings, time_batched_list_t, node_sizes,
                 alpha, train_seq_len):
     current_graph, time_embedding = self.forward(g, time_batched_list_t,
                                                  node_sizes)
     cur_embeddings = current_graph.ndata['h'] + time_embedding
      # pdb.set_trace()
     all_time_embeds = torch.cat(
         [prev_embeddings, cur_embeddings.unsqueeze(1)], dim=1)
     ema_vec = torch.pow(1 - alpha, cuda(torch.arange(train_seq_len)))
     ema_vec[:, :-1] *= alpha
     ema_vec = ema_vec.flip(-1).unsqueeze(0)
     averaged = torch.sum(all_time_embeds.transpose(1, 2) * ema_vec, -1)
     return averaged
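The weight vector built above is a standard exponential moving average: the newest step gets weight alpha, a step k back gets alpha * (1 - alpha)^k, and the oldest keeps the remainder (1 - alpha)^(n - 1), so the weights sum to 1. A quick scalar-alpha check (in forward_ema itself, alpha evidently carries a batch dimension, hence the [:, :-1] indexing):

import torch

alpha, n = 0.3, 5
w = torch.pow(1 - alpha, torch.arange(n, dtype=torch.float))
w[:-1] *= alpha   # every step except the oldest gets the alpha factor
w = w.flip(-1)    # order weights oldest -> newest
print(w)          # tensor([0.2401, 0.1029, 0.1470, 0.2100, 0.3000])
print(w.sum())    # tensor(1.)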
Example #24
 def model_init(self, args):
     # Network
     if args.dataset == 'MNIST':
         print("MNIST")
         self.net = cuda(ToyNet_MNIST(y_dim=self.y_dim), self.cuda)
     elif args.dataset == 'CIFAR10':
         print("Dataset used CIFAR10")
         if args.network_choice == 'ToyNet':
             self.net = cuda(ToyNet_CIFAR10(y_dim=self.y_dim), self.cuda)
         elif args.network_choice == 'ResNet18':
             self.net = cuda(ResNet18(), self.cuda)
         elif args.network_choice == 'ResNet34':
             self.net = cuda(ResNet34(), self.cuda)
         elif args.network_choice == 'ResNet50':
             self.net = cuda(ResNet50(), self.cuda)
     self.net.weight_init(_type='kaiming')
     # setup optimizer
     self.optim = optim.Adam([{
         'params': self.net.parameters(),
         'lr': self.lr
     }],
                             betas=(0.5, 0.999))
Example #25
 def forward_ema_isolated(self, node_repr, prev_embeddings, time, alpha,
                          train_seq_len):
     cur_embeddings, time_embedding = super().forward_isolated(
         node_repr, time)
     # pdb.set_trace()
     all_time_embeds = torch.cat(
         [prev_embeddings, (cur_embeddings + time_embedding).unsqueeze(1)],
         dim=1)
     ema_vec = torch.pow(1 - alpha, cuda(torch.arange(train_seq_len)))
     ema_vec[:, :-1] *= alpha
     ema_vec = ema_vec.flip(-1).unsqueeze(0)
     averaged = torch.sum(all_time_embeds.transpose(1, 2) * ema_vec, -1)
     return averaged
Example #26
    def calc_metrics(self, per_graph_ent_embeds_loc, per_graph_ent_embeds_rec, g_list, t_list, hist_embeddings, attn_mask):
        mrrs, hit_1s, hit_3s, hit_10s, losses = [], [], [], [], []
        ranks = []
        i = 0
        for g, t, ent_embed_loc, ent_embed_rec in zip(g_list, t_list, per_graph_ent_embeds_loc, per_graph_ent_embeds_rec):
            all_embeds_g_loc, all_embeds_g_rec = self.get_all_embeds_Gt(ent_embed_loc, ent_embed_rec, g, t, hist_embeddings[:, i, 0], hist_embeddings[:, i, 1], attn_mask[:, i], val=True)
            index_sample = torch.stack([g.edges()[0], g.edata['type_s'], g.edges()[1]]).transpose(0, 1)
            label = torch.ones(index_sample.shape[0])
            if self.use_cuda:
                index_sample = cuda(index_sample)
                label = cuda(label)
            if index_sample.shape[0] == 0: continue
            weight_subject_query_subject_embed, weight_subject_query_object_embed, weight_object_query_subject_embed, weight_object_query_object_embed = self.calc_ensemble_ratio(index_sample, t, g)
            rank = self.evaluater.calc_metrics_single_graph(ent_embed_loc, ent_embed_rec, self.rel_embeds, all_embeds_g_loc, all_embeds_g_rec, index_sample, weight_subject_query_subject_embed,
                                                            weight_subject_query_object_embed, weight_object_query_subject_embed, weight_object_query_object_embed, g, t)
            ranks.append(rank)
            i += 1
        if ranks:
            ranks = torch.cat(ranks)
        else:
            ranks = cuda(torch.tensor([]).long()) if self.use_cuda else torch.tensor([]).long()

        # NOTE: no loss is computed in this variant, so np.mean(losses)
        # evaluates to nan over the empty list.
        return ranks, np.mean(losses)
Example #27
    def test(self):
        self.set_mode('eval')
        correct = 0.
        cost = 0.
        total = 0.
        data_loader = self.data_loader['test']
        for batch_idx, (images, labels) in enumerate(data_loader):
            x = Variable(cuda(images, self.cuda))
            y = Variable(cuda(labels, self.cuda))

            logit = self.net(x)
            prediction = logit.max(1)[1]

            correct += torch.eq(prediction, y).float().sum().item()
            cost += F.cross_entropy(logit, y, reduction='sum').item()
            total += x.size(0)
        accuracy = correct / total
        cost /= total

        if self.history['acc'] < accuracy:
            self.history['acc'] = accuracy
            self.history['epoch'] = self.global_epoch
            self.history['iter'] = self.global_iter
            self.save_checkpoint('best_acc.tar')

        if self.print_:
            print()
            print('[{:03d}]\nTEST RESULT'.format(self.global_epoch))
            print('ACC:{:.4f}'.format(self.history['acc']))
            print('*TOP* ACC:{:.4f} at e:{:03d}'.format(
                self.history['acc'],
                self.global_epoch,
            ))
            print()

        self.set_mode('train')
        return accuracy
Example #28
    def reparametrize_n(self, mu, std, n=1):
        # reference :
        # http://pytorch.org/docs/0.3.1/_modules/torch/distributions.html#Distribution.sample_n
        def expand(v):
            if isinstance(v, Number):
                return torch.Tensor([v]).expand(n, 1)
            else:
                return v.expand(n, *v.size())

        if n != 1:
            mu = expand(mu)
            std = expand(std)

        eps = Variable(cuda(std.data.new(std.size()).normal_(), std.is_cuda))
        return mu + eps * std
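Usage note: the reparameterization trick above draws eps from a standard normal with the same shape as std, so the sample mu + eps * std stays differentiable with respect to mu and std. A minimal modern-PyTorch equivalent of one sample:

import torch

mu = torch.zeros(4, requires_grad=True)
std = torch.ones(4, requires_grad=True)
eps = torch.randn_like(std)  # eps ~ N(0, I); no gradient flows into sampling
z = mu + eps * std           # differentiable w.r.t. mu and std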
Example #29
    def train(self, X, Y):
        self.set_mode('train')
        for e in range(self.epoch):
            self.global_epoch += 1

            correct = 0.
            cost = 0.
            total = 0.
            for batch_idx, (images,
                            labels) in enumerate(self.data_loader['train']):
                self.global_iter += 1

                x = Variable(cuda(images, self.cuda))
                y = Variable(cuda(labels, self.cuda))

                logit = self.net(x)
                prediction = logit.max(1)[1]

                correct = torch.eq(prediction, y).float().mean().item()
                cost = F.cross_entropy(logit, y)

                self.optim.zero_grad()
                cost.backward()
                self.optim.step()

                if batch_idx % 100 == 0:
                    if self.print_:
                        print()
                        print(self.env_name)
                        print('[{:03d}:{:03d}]'.format(self.global_epoch,
                                                       batch_idx))
                        print('acc:{:.3f} loss:{:.3f}'.format(
                            correct, cost.item()))

            self.test()
        print(" [*] Training Finished!")
Example #30
 def get_model_config(self, config):
   self.audio_net = cuda(InfoQuantizer(in_channels=self.input_size,
                                       channels=self.K,
                                       n_embeddings=self.n_clusters,
                                       z_dim=self.n_visual_class
                                       ), self.cuda)
   self.use_logsoftmax = config.get('use_logsoftmax', True)
   print(f'Use log softmax: {self.use_logsoftmax}')
   
   self.clustering_method = config.clustering_method
   if self.clustering_method == 'kmeans':
     self.clusterer = KMeans(n_clusters=self.n_clusters) 
   elif self.clustering_method == 'dib':
     self.clusterer = DIB(n_clusters=self.n_clusters)
   else:
      raise ValueError(f'Unknown clustering method: {self.clustering_method}')