Code example #1
def make_viz_stations(model, data_loader, viz_stations, station2id):

    # flat indices of the two values stored per station in the model output
    inds = [station2id[k] for k in viz_stations]
    inds_slicer = [[ind * 2, ind * 2 + 1] for ind in inds]
    inds_slicer = torch.tensor(inds_slicer).flatten()
    totals_predict = []
    totals_actual = []
    for n_time, data in enumerate(data_loader):
        predict = model(data).y
        actual = data.y
        v_predict = torch.take(predict, inds_slicer)
        v_actual = torch.take(actual, inds_slicer)
        totals_predict.extend(list(v_predict))
        totals_actual.extend(list(v_actual))

    mind = pd.MultiIndex.from_product(
        [['arrivals', 'departures'],
         range(n_time + 1), viz_stations],
        names=['event_type', 'local_time_id', 'station_id'])
    df_1 = pd.DataFrame(totals_actual, index=mind, columns=['actual_count'])
    df_2 = pd.DataFrame(totals_predict,
                        index=mind,
                        columns=['predicted_count'])
    df = df_1.join(df_2)
    print(df)
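
torch.take always treats its input as if it were flattened to 1-D, which is why the snippet above pre-computes flat indices per station. A minimal sketch:

import torch

x = torch.tensor([[1, 2], [3, 4]])
torch.take(x, torch.tensor([0, 3]))  # tensor([1, 4]): indices address the flattened view
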
Code example #2
File: train_eval_NGCF.py Project: cleverer123/NGACF
def eval_rank(model, test_loader, lossfn, parallel, top_k):
    model.eval()
    HR, NDCG = [], []
    for batch_id, batch in enumerate(test_loader):
        u_idxs = batch[0].long().cuda()
        i_idxs = batch[1].long().cuda()
        predictions = model(u_idxs, i_idxs)

        if parallel and torch.cuda.device_count() > 1:
            i_idxs = i_idxs.view(torch.cuda.device_count(), -1)
            print(predictions)
            for device_idx, prediction in enumerate(predictions):
                device = torch.device('cuda:{}'.format(device_idx))
                i_idx = i_idxs[device_idx, :].to(device)
                _, indices = torch.topk(prediction, top_k)

                recommends = torch.take(i_idx, indices).cpu().numpy().tolist()
                gt_item = i_idx[0].item()
                HR.append(hit(gt_item, recommends))
                NDCG.append(ndcg(gt_item, recommends))
        else:
            _, indices = torch.topk(predictions, top_k)
            recommends = torch.take(i_idxs, indices).cpu().numpy().tolist()
            gt_item = i_idxs[0].item()
            HR.append(hit(gt_item, recommends))
            NDCG.append(ndcg(gt_item, recommends))
        
        
        if batch_id % 240 == 0:
            print("-----------The timeStamp of evaluating batch {:03d}/{}".format(
                batch_id, len(test_loader)) + " is: " +
                time.strftime("%H: %M: %S", time.gmtime(time.time())))

    return np.mean(HR), np.mean(NDCG)
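
The non-parallel branch shows a recurring pattern: torch.topk returns positions into the score vector, and torch.take maps those positions back to item ids. A self-contained sketch of just that step (toy values, names hypothetical):

import torch

item_ids = torch.tensor([101, 102, 103, 104])
scores = torch.tensor([0.1, 0.9, 0.4, 0.7])
_, idx = torch.topk(scores, 2)
recommends = torch.take(item_ids, idx).tolist()  # [102, 104]
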
Code example #3
    def advance(self, word_probs):
        """
        Given the probability over words for each current beam, advance the beam one step.

        Parameters:

        * `word_probs`- probs of advancing from the last step ([K x] slot num)

        Returns: True if beam search is complete.
        """
        num_words = word_probs.size(-1)
        cur_top_k = self.size if len(self.prev_ks) == 0 else self.top_k
        top_k, sort_key = word_probs.topk(cur_top_k, -1, True, True)
        # Sum the previous scores.
        if len(self.prev_ks) > 0:
            beam_scores = top_k + self.scores.unsqueeze(
                1)  # broadcast mechanism
        else:
            beam_scores = top_k if word_probs.dim() == 1 else top_k[0]
        rank_beam_scores = beam_scores + self.penalty[:cur_top_k]
        flat_beam_scores = rank_beam_scores.contiguous().view(-1)
        _, best_scores_id = flat_beam_scores.topk(self.size, 0, True, True)

        # best_scores_id is flattened beam x cur_top_k array, so calculate which
        # word and beam each score came from
        prev_k = best_scores_id // cur_top_k
        self.prev_ks.append(prev_k)
        if sort_key.dim() == 1:
            sort_key = sort_key.unsqueeze(0).repeat(self.size, 1)
        next_y = torch.take(sort_key.contiguous().view(-1), best_scores_id)
        self.next_ys.append(next_y)
        self.scores = torch.take(beam_scores.contiguous().view(-1),
                                 best_scores_id)
        return self.done()
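
The indexing in advance() relies on flattening the [beam x k] score matrix: integer division recovers the originating beam, and torch.take on the flattened sort keys recovers the chosen word. A toy sketch of that step (values hypothetical, not the class above):

import torch

beam_scores = torch.tensor([[0.1, 0.9],   # beam 0
                            [0.8, 0.2]])  # beam 1
k = beam_scores.size(1)
_, best_id = beam_scores.view(-1).topk(2)  # flat positions of the 2 best scores
prev_beam = best_id // k                   # beam each pick came from -> tensor([0, 1])
sort_key = torch.tensor([[7, 5], [3, 9]])  # candidate word ids per (beam, slot)
next_word = torch.take(sort_key.view(-1), best_id)  # tensor([5, 3])
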
Code example #4
File: fgsm.py Project: flodorner/Infocup_final
    def _train_on_label(self, images, labels, target_label):
        optimizer = optim.Adam(self.model.parameters(), lr=self.retrain_lr)
        label_num = len(labels[0])
        labels = labels[:, target_label]
        images = torch.from_numpy(images)
        labels = torch.from_numpy(labels).float()

        self.model.train()
        for m in self.model.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()

        images, labels = images.to(self.device), labels.to(self.device)
        optimizer.zero_grad()
        output = self.model(images)
        indexes = [i * label_num + target_label for i in range(len(labels))]
        loss = nn.MSELoss()(torch.take(output, torch.tensor(indexes).to(self.device)), labels)
        loss.backward()
        optimizer.step()

        self.model.eval()
        # re-run the forward pass in eval mode before measuring the final loss
        with torch.no_grad():
            output = self.model(images)
        loss = nn.MSELoss()(torch.take(output, torch.tensor(indexes).to(self.device)), labels)

        loss_number = loss.item()

        return loss_number
Code example #5
    def forward(self, input_data, target, adaptive_margin, size_average=False):
        """自定义损失函数,自适应边际损失函数"""
        # 自定义max函数 torch.max(), 自适应阈值为 1+P(p|s, o)-P(p'!s,o)
        # 网络输出为score, paper中的loss
        assert input_data.ndimension() == 2
        assert input_data.ndimension() == adaptive_margin.ndimension()
        assert adaptive_margin.ndimension() == input_data.ndimension()
        loss_data = 0
        for mini_index in range(input_data.size()[0]):
            mask = target[mini_index] > -1
            mini_target = torch.masked_select(target[mini_index], mask)
            assert mini_target.ndimension() == 1  # 1-D
            for i in range(mini_target.size()[0]):
                # index_self = input_data[mini_index] != input_data[mini_index, mini_target[i]]
                index = torch.arange(input_data.size()[1],
                                     dtype=torch.int64).cuda()
                index_mask = (index != mini_target[i])
                except_index = torch.masked_select(index, index_mask)
                except_self = torch.take(input_data[mini_index], except_index)
                need_margin = torch.take(adaptive_margin[mini_index],
                                         except_index)

                different = 1 + (
                    adaptive_margin[mini_index, mini_target[i]] - need_margin
                ) - (input_data[mini_index, mini_target[i]] - except_self)
                loss_data += torch.sum(F.relu(different))

        loss_data = loss_data / input_data.size()[1]
        if size_average:
            return loss_data / input_data.size()[0]
        else:
            return loss_data
Code example #6
File: pmf.py Project: ChingChingYa/HCAN
 def predict(self, data):
     index_data = torch.LongTensor([[int(ele[0]), int(ele[1])]
                                    for ele in data])
     # look up the latent factors of each (user, item) pair and score by dot product
     u_features = self.U[index_data[:, 0]]
     v_features = self.V[index_data[:, 1]]
     preds_value_array = torch.sum(u_features * v_features, 1)
     return preds_value_array
Code example #7
def compute_loss(model, x):
    mean, logvar = model.encode(x)
    z = model.reparameterize(mean, logvar)
    x_decoded = model.decode(z)

    pdist = nn.PairwiseDistance(p=2) # Euclidean distance

    # Slice off the channel dimension so pdist is easier to use
    x_pos = x[:, 0, 1:, :]
    x_int = x[:, 0, 0, :]

    x_pos = x_pos.view(batch_size, 2, 1, num_particles)

    # Same for the decoded tensor
    x_decoded_pos = x_decoded[:, 0, 1:, :]
    x_decoded_int = x_decoded[:, 0, 0, :]

    x_decoded_pos = x_decoded_pos.view(batch_size, 2, num_particles, 1)
    x_decoded_pos = torch.repeat_interleave(x_decoded_pos, num_particles, -1)

    dist = torch.pow(pdist(x_pos, x_decoded_pos),2)

    ieo = torch.min(dist, dim = 1)
    ieo_idx = ieo.indices.clone()

    oei = torch.min(dist, dim = 2)
    oei_idx = oei.indices.clone()

    aux_idx = num_particles * torch.arange(batch_size).cuda()
    aux_idx = aux_idx.view(batch_size, 1)
    aux_idx = torch.repeat_interleave(aux_idx, num_particles, dim=-1)

    ieo_idx = ieo_idx + aux_idx
    oei_idx = oei_idx + aux_idx

    get_x_int = torch.take(x_int, oei_idx)

    get_x_decoded_int = torch.take(x_decoded_int,ieo_idx)

    eucl = ieo.values + oei.values + beta * (torch.pow((x_decoded_int - get_x_int), 2) + torch.pow((x_int - get_x_decoded_int), 2))

    eucl = torch.sum(eucl) / batch_size

    reconstruction_loss = - eucl

    # compares the N(mean, exp(0.5 * logvar)) posteriors against standard gaussians
    KL_divergence = 0.5 * torch.sum(torch.pow(mean, 2) + torch.exp(logvar) - logvar - 1.0) / batch_size

    ELBO = reconstruction_loss - KL_divergence

    loss = - ELBO

    return loss, KL_divergence, eucl
Code example #8
def convert_semantics_to_rgb(semantics):
    r"""Converts semantic IDs to RGB images.
    """
    semantics = semantics.long() % 40
    mapping_rgb = torch.from_numpy(d3_40_colors_rgb).to(semantics.device)
    semantics_r = torch.take(mapping_rgb[:, 0], semantics)
    semantics_g = torch.take(mapping_rgb[:, 1], semantics)
    semantics_b = torch.take(mapping_rgb[:, 2], semantics)
    semantics_rgb = torch.stack([semantics_r, semantics_g, semantics_b], -1)

    return semantics_rgb
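
torch.take preserves the shape of its index tensor, so the per-channel lookups above return tensors shaped like semantics. A minimal colour-lookup sketch (toy palette, not d3_40_colors_rgb):

import torch

palette = torch.tensor([[255, 0, 0],
                        [0, 255, 0]], dtype=torch.uint8)  # 2 colours x RGB
ids = torch.tensor([[0, 1],
                    [1, 0]])
red = torch.take(palette[:, 0], ids)  # shape (2, 2): red channel per pixel
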
Code example #9
 def uvcount(self, data):
     tmp = torch.LongTensor([0, 1, 2, 3])
     index_data = torch.IntTensor([[int(ele[0]), int(ele[1])]
                                   for ele in data])
     u_index = torch.LongTensor(
         [torch.take(i, torch.LongTensor([0])) for i in index_data])
     v_index = torch.LongTensor(
         [torch.take(i, torch.LongTensor([1])) for i in index_data])
     u_features = [torch.gather(self.U[i.item()], 0, tmp) for i in u_index]
     v_features = [torch.gather(self.V[i.item()], 0, tmp) for i in v_index]
     a = [torch.mul(u, v) for u, v in zip(u_features, v_features)]
     self.uv = torch.stack(a)
Code example #10
 def log_prob(self, weights):
   new_weights = weights.view(-1)
   normal_density1 = dist.Normal(0,self.sigma1).log_prob(new_weights)
   exp_normal_density1 = torch.exp(normal_density1)
   exp_normal_density2 = torch.exp(
       dist.Normal(0.0, self.sigma2).log_prob(new_weights))
   nonzero = exp_normal_density2.nonzero()
   zero = (exp_normal_density2 == 0).nonzero()
   # where the second component underflows to zero, fall back to the first
   # component alone, computed in log-space: log(pi) + log N(w; 0, sigma1)
   sum_log_prob = torch.sum(torch.log(self.pi * torch.take(exp_normal_density1, nonzero) \
                 + (1 - self.pi) * torch.take(exp_normal_density2, nonzero))) \
                 + torch.sum(torch.take(normal_density1, zero) + np.log(self.pi))
   return sum_log_prob
Code example #11
 def predict(self, data):
     tmp = torch.LongTensor([0, 1, 2, 3])
     index_data = torch.IntTensor([[int(ele[0]), int(ele[1])]
                                   for ele in data])
     u_index = torch.LongTensor(
         [torch.take(i, torch.LongTensor([0])) for i in index_data])
     v_index = torch.LongTensor(
         [torch.take(i, torch.LongTensor([1])) for i in index_data])
     u_features = [torch.gather(self.U[i.item()], 0, tmp) for i in u_index]
     v_features = [torch.gather(self.V[i.item()], 0, tmp) for i in v_index]
     a = [torch.mul(u, v) for u, v in zip(u_features, v_features)]
     preds_value_array = torch.DoubleTensor([torch.sum(i, 0) for i in a])
     uv = [i.detach().numpy() for i in torch.stack(a)]
     return uv, preds_value_array
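
In the two methods above, the per-row torch.take calls only extract the two columns of index_data, and the gather with tmp only reads the first four entries of each row. Plain indexing expresses the same thing more directly; a sketch assuming U is a 2-D tensor whose rows have exactly 4 entries:

import torch

index_data = torch.tensor([[2, 0], [1, 3]])
U = torch.randn(4, 4)
u_index = index_data[:, 0].long()  # replaces the per-row torch.take(..., [0]) loop
u_features = U[u_index]            # replaces the per-row gather with tmp = [0, 1, 2, 3]
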
Code example #12
    def forward(self, inputs, targets):
        n = inputs.size(0)

        # pairwise distances
        dist = pdist(inputs)

        # find the hardest positive and negative
        mask_pos = targets.expand(n, n).eq(targets.expand(n, n).t())
        mask_neg = ~mask_pos
        mask_pos[torch.eye(n, dtype=torch.bool).cuda()] = 0

        if self.sample:
            # sample pos and negative to avoid outliers causing collapse
            posw = (dist + 1e-12) * mask_pos.float()
            posi = torch.multinomial(posw, 1)
            dist_p = dist.gather(0, posi.view(1, -1))
            negw = (1 / (dist + 1e-12)) * mask_neg.float()
            negi = torch.multinomial(negw, 1)
            dist_n = dist.gather(0, negi.view(1, -1))
        else:
            # hard negative
            ninf = torch.ones_like(dist) * float('-inf')
            dist_p = torch.max(dist * mask_pos.float(), dim=1)[0]
            nindex = torch.max(torch.where(mask_neg, -dist, ninf), dim=1)[1]
            dist_n = dist.gather(0, nindex.unsqueeze(0))

        # calc loss
        diff = dist_p - dist_n
        if isinstance(self.margin, str) and self.margin == 'soft':
            diff = F.softplus(diff)
        else:
            diff = torch.clamp(diff + self.margin, min=0.)
        loss = diff.mean()

        # calculate metrics, no impact on loss
        metrics = OrderedDict()
        with torch.no_grad():
            _, top_idx = torch.topk(dist, k=2, largest=False)
            top_idx = top_idx[:, 1:]
            flat_idx = top_idx.squeeze() + n * torch.arange(
                n, out=torch.LongTensor()).cuda()
            top1_is_same = torch.take(mask_pos, flat_idx)
            metrics['prec'] = top1_is_same.float().mean().item()
            metrics['dist_acc'] = (dist_n > dist_p).float().mean().item()
            metrics['dist_sm'] = (dist_n >
                                  dist_p + self.margin).float().mean().item()
            metrics['nonzero_count'] = torch.nonzero(diff).size(0)
            metrics['dist_p'] = dist_p.mean().item()
            metrics['dist_n'] = dist_n.mean().item()

        if self.debug:
            print('%.5f' % loss.item(), end=' ')
            for k, v in metrics.items():
                if k.startswith('non'):
                    print('%s: %4d' % (k, v), end=', ')
                else:
                    print('%s: %.5f' % (k, v), end=', ')
            print()

        return loss, metrics
Code example #13
    def best_nghb_vtx(self, log_probs: torch.Tensor) -> int:
        r"""
        Args:
            log_probs (torch.Tensor): vector of vertices' probabilities.

        Returns:
            the vertex id with the best probability
        """

        vtx = self.pos[0]

        # Neighbours' ids of the current vertex `vtx`
        scalar_ngbs = gp.build_neighbours(self.env_knl.ntw.graph)[vtx]

        # Neighbours' probabilities output by the model
        ngbs_log_probs = \
            torch.take(log_probs,
                       torch.LongTensor(scalar_ngbs).cuda() if self.gpu
                       else torch.LongTensor(scalar_ngbs))

        max_log_probs, indices = torch.max(ngbs_log_probs, 0)  # plural in case
        # several neighbours tie for the maximum probability
        i = indices.item()

        return scalar_ngbs[i]
Code example #14
File: train2.py Project: clink4/simple_shot
def metric_prediction(gallery, query, train_label, metric_type):
    gallery = gallery.view(gallery.shape[0], -1)
    query = query.view(query.shape[0], -1)
    #print("in metric_prediction; size of gallery is ", gallery.size())
    #print("in metric_prediction; size of query is ", query.size())
    distance = get_metric(metric_type)(gallery, query)
    print("in metric_prediction; size of distance is ", distance.shape)
    print("distance is", distance)

    # sanity check: recompute the cosine distance matrix by hand
    # (hard-coded for 75 query and 5 gallery items)
    dist2 = torch.zeros(75, 5)
    for gallery_index, gallery_item in enumerate(gallery):
        for query_index, query_item in enumerate(query):
            gallery_norm = torch.norm(gallery_item)
            query_norm = torch.norm(query_item)
            x = gallery_item / gallery_norm
            y = query_item / query_norm
            dist2[query_index][gallery_index] = 1. - torch.sum(torch.mul(x, y))

    print("dist2 is ", dist2)
    print("dist2 shape ", dist2.shape)

    #'cosine': lambda gallery, query: 1. - F.cosine_similarity(query[:, None, :], gallery[None, :, :], dim=2),
    predict = torch.argmin(distance, dim=1)
    predict = torch.take(train_label, predict)
    #print("in metric_prediction; prediction (label) is ", predict)
    return predict
Code example #15
    def test_step(self, batch, batch_idx):
        query_tokens, batch_qids, batch_qrels = batch
        queries = self.forward(query_tokens, token_type=1)
        scores, indices = torch.einsum("ae, be->ba", self.index_tensor,
                                       queries).topk(
                                           self.hparams.test_rank_len,
                                           sorted=True)
        rankings = [row.tolist() for row in torch.take(self.pid_map, indices)]

        if self.hparams.save_test_rankings:
            with open(self.hparams.save_test_rankings, "a") as w:
                for qid, ranking in zip(batch_qids, rankings):
                    for i, pid in enumerate(ranking):
                        w.write("{}\t{}\t{}\n".format(qid, pid, i + 1))

        metrics = calc_ranking_metrics(rankings,
                                       batch_qrels,
                                       count_rankings=False)
        metrics = {"test/" + k: v for k, v in metrics.items()}
        result = pl.EvalResult()
        result.log_dict(
            metrics,
            on_step=False,
            on_epoch=True,
            prog_bar=True,
        )
        return result
Code example #16
            def get_point_value(point):
                subs = [locs[cd][i] for i, cd in enumerate(point)]
                loc_list_p = [
                    s.long() * int(l) for s, l in zip(subs, d_size)
                ]
                idx_p = torch.sum(torch.stack(loc_list_p, dim=0), dim=0)

                vol_val_flat = torch.stack([
                    torch.stack([torch.take(vol_ij, idx_i) for vol_ij in vol_i],
                                dim=0)
                    for vol_i, idx_i in zip(vol, idx_p)
                ], dim=0)

                vol_val = torch.reshape(vol_val_flat, final_shape)
                # get the weight of this cube_pt based on the distance
                # if c[d] is 0 --> weight = 1 - (pt - floor(pt)) = diff_loc1
                # if c[d] is 1 --> weight = pt - floor(pt) = diff_loc0
                wts_lst = [weights_loc[cd][i] for i, cd in enumerate(point)]
                if self.linear_norm:
                    wt = sum(wts_lst) / norm_factor
                else:
                    wt = reduce(mul, wts_lst)
                wt = torch.reshape(wt, weights_shape)
                if self.interp_layer is not None:
                    return wt, vol_val
                else:
                    return wt * vol_val
Code example #17
 def encode_score(self, emit):
     batch_size = emit.size()[0]
     sentence_len = emit.size()[1] - 2
     label_num = emit.size()[2]
     mask = torch.tensor(self.make_s_row(batch_size, label_num))
     emit = torch.chunk(emit, batch_size, 0)
     emits = torch.squeeze(torch.cat(emit, 2), 0)
     one_t_row = self.transition[1][:].repeat(batch_size)
     if self.config.use_cuda:
         s_matrix = torch.zeros((sentence_len, label_num * batch_size),
                                dtype=torch.float).cuda()
         mask = mask.cuda()
     else:
         s_matrix = torch.zeros((sentence_len, label_num * batch_size),
                                dtype=torch.float)
     s_matrix[0][:] = emits[1][:] + one_t_row
     for idx in range(1, sentence_len):
         s_row = torch.take(s_matrix[idx - 1][:], mask)
         e_row = self.make_e_row(emits[idx + 1][:], batch_size, label_num)
         t_row = self.make_t_row(self.transition, batch_size, label_num)
         next_tag_var = s_row + e_row + t_row
         s_matrix[idx][:] = self.log_sum_exp(next_tag_var, batch_size,
                                             label_num)
     t_last_row = self.transition[:, 2].repeat((1, batch_size))
     last_tag_var = s_matrix[-1][:] + t_last_row
     s_end = self.log_sum_exp(last_tag_var, batch_size, label_num)
     s_end_sum = s_end.sum()
     return s_end_sum
Code example #18
 def __getitem__(self, index):
     # encode sentence
     s = self._get_line()
     label, sentence = s.strip().split(" ", 1)
     if self.truncate_to:
         sentence = sentence[:self.truncate_to]
     sentence = [self.charset[c] for c in sentence]
     # if sentence not long enough then pad with a dummy character
     if len(sentence) < self.sample_len:
         diff = self.sample_len - len(sentence)
         sentence += [self.charset_size] * diff
     # sentence to torch tensor
     sentence = torch.LongTensor(sentence)
     # calculate number of samples to take from sentence
     num_samples = min(self.max_samples,
                       math.ceil(sentence.size(0) / self.sample_len))
     # create matrix of indices to take from the sentence
     sample_idxes = torch.randint(sentence.size(0) - (self.sample_len - 1),
                                  (num_samples, 1),
                                  dtype=torch.long)
     sample_idxes = sample_idxes + self.sample_broadcaster
     # grab samples
     samples = torch.take(sentence,
                          sample_idxes)  # num_samples x sample_len
     # make labels for samples
     labels = [self.label_encoder[label]] * num_samples
     labels = torch.LongTensor(labels)
     return samples, labels, num_samples
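
The sample_broadcaster trick above builds a (num_samples, sample_len) index matrix by broadcasting per-window offsets against random start positions, and torch.take then extracts all windows at once. A minimal sketch:

import torch

sentence = torch.arange(10)
starts = torch.tensor([[0], [4]])           # (num_samples, 1) start indices
broadcaster = torch.arange(3).view(1, 3)    # (1, sample_len) window offsets
torch.take(sentence, starts + broadcaster)  # tensor([[0, 1, 2], [4, 5, 6]])
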
Code example #19
    def best_nghb_vtx(self, log_probs: torch.Tensor) -> int:
        r"""
        Args:
            log_probs (torch.Tensor): vector of vertices' probabilities.

        Returns:
            a neighbour vertex id sampled according to the model's probabilities
        """

        vtx = self.pos[0]

        # Neighbours' ids of the current vertex `vtx`
        scalar_ngbs = gp.build_neighbours(self.env_knl.ntw.graph)[vtx]

        # Tensor containing `scalar_ngbs`
        scalar_ngbs_t = self.cuda(torch.LongTensor(scalar_ngbs), self.gpu)

        # Neighbours' probabilities output by the model
        ngbs_probs = torch.exp(torch.take(log_probs, scalar_ngbs_t))
        ngbs_probs = ngbs_probs / torch.sum(ngbs_probs)

        next_vtx = np.random.choice(scalar_ngbs, p=ngbs_probs.detach().cpu(). \
                                    numpy())

        del scalar_ngbs_t, ngbs_probs

        return next_vtx
Code example #20
def gather_nd(params, indices):
    """
    Args:
        params: Tensor to index
        indices: (N, k) tensor of integer coordinates into ``params``.
    Returns:
        output: 1-dimensional tensor of elements of ``params``, where
            output[i] = params[indices[i][0]][indices[i][1]]...
            
            params   indices   output

            1 2       1 1       4
            3 4       2 0 ----> 5
            5 6       0 0       1
    """
    max_value = functools.reduce(operator.mul, list(params.size())) - 1
    indices = indices.t().long()
    ndim = indices.size(0)
    idx = torch.zeros_like(indices[0]).long()
    m = 1

    for i in reversed(range(ndim)):
        idx += indices[i]*m
        m *= params.size(i)

    # clamp out-of-range flat indices to 0 instead of erroring
    idx[idx < 0] = 0
    idx[idx > max_value] = 0
    return torch.take(params, idx)
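
A quick check of gather_nd against the table in its docstring:

import torch

params = torch.tensor([[1, 2], [3, 4], [5, 6]])
indices = torch.tensor([[1, 1], [2, 0], [0, 0]])
gather_nd(params, indices)  # tensor([4, 5, 1])
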
Code example #21
    def __getitem__(self, index):
        # highdata_raw二维,经过dataloader输出为三维,与conv层match
        raw0 = load_data(self.filenames[index])
        # raw = sequence_random_crop(raw0, 30000)

        highdata_raw = np.expand_dims(raw0, 0)
        highdata = torch.from_numpy(highdata_raw).type('torch.FloatTensor')

        # if highdata should be at 100 Hz, it first needs to be downsampled to 100
        # highdata = torch.take(highdata, torch.arange(0, highdata.size()[1], 10).long()).unsqueeze(0)

        highdata_log = torch.div(torch.log(torch.mul(highdata, 1000) + 1),
                                 math.log(100))

        # interpolate needs 3-D input: unsqueeze to 3-D first, then squeeze back down
        # lowdata = torch.nn.functional.interpolate(highdata.unsqueeze(0), scale_factor=1 / self.config.scale_factor).squeeze(0)

        # torch.take flattens its result to 1-D
        lowdata = torch.take(
            highdata,
            torch.arange(0,
                         highdata.size()[1],
                         self.config.scale_factor).long()).unsqueeze(0)
        lowdata_log = torch.div(torch.log(torch.mul(lowdata, 1000) + 1),
                                math.log(100))

        return lowdata_log, highdata_log, highdata  # returns txt, label, groundtruth
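
Because torch.take flattens its input, the strided torch.arange index above doubles as a cheap downsampler; the trailing unsqueeze(0) restores the channel dimension. A minimal sketch:

import torch

x = torch.arange(12.).view(1, 12)            # (channel, samples)
low = torch.take(x, torch.arange(0, 12, 4))  # tensor([0., 4., 8.]), now 1-D
low = low.unsqueeze(0)                       # back to (1, 3)
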
Code example #22
File: layers.py Project: marianocabezas/pytorch-code
            def get_point_value(point):
                subs = [locs[cd][i] for i, cd in enumerate(point)]
                loc_list_p = [s * l for s, l in zip(subs, d_size)]
                idx_p = torch.sum(torch.stack(loc_list_p, dim=0), dim=0)

                vol_val_flat = torch.stack([
                    torch.stack([torch.take(vol_ij, idx_i) for vol_ij in vol_i],
                                dim=0)
                    for vol_i, idx_i in zip(vol, idx_p)
                ], dim=0)

                vol_val = torch.reshape(vol_val_flat, final_shape)
                # get the weight of this cube_pt based on the distance
                # if c[d] is 0 --> want weight = 1 - (pt - floor[pt]) = diff_loc1
                # if c[d] is 1 --> want weight = pt - floor[pt] = diff_loc0
                wts_lst = [weights_loc[cd][i] for i, cd in enumerate(point)]
                if self.linear_norm:
                    wt = sum(wts_lst) / norm_factor
                else:
                    wt = reduce(mul, wts_lst)

                wt = torch.reshape(wt, weights_shape)
                return wt * vol_val
Code example #23
 def get_matrix_by_index(tensor, _x, _y):
     assert tensor.dim() == 4 and tensor.size(0) == _x.size(0) == _y.size(0)
     (_B, _H, _W, _), _P = tensor.size(), _x.size(1)
     # flat offsets of each (b, y, x) position into the (B, H, W, 2) tensor viewed 1-D
     indexes = torch.arange(0, _B, device=_x.device).unsqueeze(
         -1) * _H * _W * 2 + _y * _W * 2 + _x * 2
     indexes = torch.stack([indexes, indexes + 1], dim=-1)
     return torch.take(tensor, indexes)
Code example #24
    def select_top_values_and_indices(self, tensor, name=None, momentum=None):
        """Selecting top k gradient per layer
        :parameter
        tensor : 4D tensor, tensor of gradients at the certain layer
        name : string, name of the layer
        momentum : float, value of momentum correlation
        :return
        top_indices : tensor, indices with top values at current layer
        top_values : tensor, values at top_indices
        """
        current_layer = tensor + self.layers[name]
        current_layer = current_layer.flatten()
        kbig = int(len(current_layer) * self.percentage / 100)
        if kbig == 0:
            kbig = 10
        _, top_indices_unsorted = torch.topk(torch.abs(current_layer), kbig)
        top_values_unsorted = torch.take(current_layer, top_indices_unsorted)

        indices_sorted = torch.argsort(top_values_unsorted)
        top_values = top_values_unsorted[indices_sorted]
        top_indices = top_indices_unsorted[indices_sorted]

        small_values_tensor = tensor.clone()
        small_values_tensor = small_values_tensor.put_(top_indices, torch.zeros(len(top_indices)))

        self.layers[name] = self.layers[name].put_(top_indices, torch.zeros(len(top_indices)))
        self.layers[name] += small_values_tensor * momentum
        return top_indices, top_values
Code example #25
def multinomial_cross_entropy_loss(Z_hat, Z, w):
    """Calculates the weighted multinomial cross entropy loss."""
    q_star = torch.argmax(Z, dim=1, keepdim=True)
    v = torch.take(w, q_star)
    loss = torch.mean(-torch.sum(v * Z * torch.log(Z_hat + eps), dim=(1, 2, 3)))
    return loss
Code example #26
    def pushpull_loss(self, pred, gt):
        nGT = gt.shape[0]
        gt = gt.transpose(0, 1)
        tl = torch.take(pred[0], gt[0])
        br = torch.take(pred[1], gt[1])

        pred_mean = (tl + br) / 2
        pull = ((tl - pred_mean)**2 +
                (br - pred_mean)**2).sum() / (nGT + self.eps)

        dist = (1 -
                torch.abs(pred_mean[..., None] - pred_mean[None, ...])).clamp(
                    min=0)
        push = dist.triu(1).sum() / ((nGT - 1) * nGT + self.eps)

        return self.push_scale * push + self.pull_scale * pull
Code example #27
File: index.py Project: zshwuhan/neuralTPPs
def take_3_by_2(x: th.Tensor, index: th.LongTensor) -> th.Tensor:
    """Index into a rank 3 tensor with a rank 2 tensor. Specifically, replace
    each index I with the corresponding indexed D-dimensional vector, where I
    specifies the location in L, batch-wise.

    Args:
        x: [B,L,D] The D-dimensional vectors to be indexed.
        index: [B,I] The indexes.

    Returns:
        [B,I,D] The indexed tensor.

    """
    b, l, d = x.shape

    batch_idx_shift = th.arange(start=0, end=b, device=x.device) * l * d
    batch_idx_shift = batch_idx_shift.reshape([b, 1, 1])

    rep_idxs = th.arange(start=0, end=d, device=x.device).reshape([1, 1, d])

    idxs_shift = batch_idx_shift + rep_idxs                      # [B,1,D]

    idxs_shifted = index.unsqueeze(dim=-1) * d                   # [B,I,1]
    idxs_shifted = idxs_shifted + idxs_shift                     # [B,I,D]

    return th.take(x, index=idxs_shifted)                        # [B,I,D]
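
A small usage sketch of take_3_by_2, checking one entry by hand:

import torch as th

x = th.arange(2 * 3 * 4).reshape(2, 3, 4)  # [B=2, L=3, D=4]
index = th.tensor([[0, 2], [1, 1]])        # [B=2, I=2]
out = take_3_by_2(x, index)                # shape [2, 2, 4]
assert th.equal(out[0, 1], x[0, 2])        # batch 0 picked position 2
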
Code example #28
File: utils.py Project: yangminghuan/rec-sys-pytorch
def evaluate(model, test_data, top_k):
    """
    用于模型评估
    :param model: 评估的模型
    :param test_data: 评估数据集
    :param top_k: 前k个项目
    :return:
    """
    hr, ndcg = [], []
    user_list, pos_list, neg_list = torch.from_numpy(
        test_data[0]), torch.from_numpy(test_data[1]), torch.from_numpy(
            test_data[2])
    for user_id, pos_id, neg_id in zip(user_list, pos_list, neg_list):
        items = torch.cat([pos_id.unsqueeze(0), neg_id])
        pos_score, neg_score = model(user_id, pos_id, neg_id)
        score = torch.cat([pos_score.unsqueeze(0), neg_score])
        _, idx = torch.topk(score, top_k)
        recommends = torch.take(items, idx).numpy().tolist()
        if pos_id in recommends:
            hr.append(1)
            index = recommends.index(pos_id)
            ndcg.append(1 / np.log2(index + 2))
        else:
            hr.append(0)
            ndcg.append(0)
    return np.mean(hr), np.mean(ndcg)
Code example #29
File: retina_net.py Project: yf817/RegRCNN
def compute_focal_class_loss(anchor_matches, class_pred_logits, gamma=2.):
    """ Focal Loss FL = -(1-q)^g log(q) with q = pred class probability.

    :param anchor_matches: (n_anchors). [-1, 0, class] for negative, neutral, and positive matched anchors.
    :param class_pred_logits: (n_anchors, n_classes). logits from classifier sub-network.
    :param gamma: g in above formula, good results with g=2 in original paper.
    :return: focal loss
    """
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    pos_indices = torch.nonzero(anchor_matches > 0).squeeze(
        -1)  # dim=-1 instead of 1 or 0 to cover empty matches.
    neg_indices = torch.nonzero(anchor_matches == -1).squeeze(-1)
    target_classes = torch.cat(
        (anchor_matches[pos_indices].long(),
         torch.LongTensor([0] * neg_indices.shape[0]).cuda()))

    non_neutral_indices = torch.cat((pos_indices, neg_indices))
    q = F.softmax(class_pred_logits[non_neutral_indices],
                  dim=1)  # q shape: (n_non_neutral_anchors, n_classes)

    # one-hot encoded target classes: keep only the pred probs of the correct class, which the loss then pushes to be maximized.
    # log(q_i) where i = target class --> FL shape (n_anchors,)
    # need to transform to indices into flattened tensor to use torch.take
    target_locs_flat = q.shape[1] * torch.arange(
        q.shape[0]).cuda() + target_classes
    q = torch.take(q, target_locs_flat)

    FL = torch.log(q)  # element-wise log
    FL *= -(1 - q)**gamma

    # take mean over all considered anchors
    FL = FL.sum() / FL.shape[0]
    return FL
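
The flat-index trick above (row * n_classes + target) is a standard way to pick one probability per row with torch.take; torch.gather expresses the same selection without the index arithmetic. A side-by-side sketch:

import torch

q = torch.tensor([[0.1, 0.9], [0.7, 0.3]])
target = torch.tensor([1, 0])
flat = q.shape[1] * torch.arange(q.shape[0]) + target
torch.take(q, flat)                          # tensor([0.9000, 0.7000])
q.gather(1, target.unsqueeze(1)).squeeze(1)  # same values via torch.gather
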
Code example #30
    def _take_from_embedding(self, idx):
        # embeddings.squeeze(0) is viewed 1-D, so row j starts at j * num_categories;
        # adding the per-row offsets turns idx into flat indices
        offsets = torch.arange(0, self.latent_dim,
                               device=idx.device) * self.num_categories
        dist_idx_flat = idx + offsets.repeat(idx.shape[0], 1)
        latent_code = torch.take(self.embeddings.squeeze(0), dist_idx_flat)

        return latent_code