Example #1
 def forward(self, x):
     distances = torch.tensor([])
     for i in range(len(self.memory)):
         distance = torch.cosine_similarity(x, self.memory[i], dim=0)
         # print(distance)
         # print(distances)
         distances = torch.cat(
             (distances, torch.unsqueeze(distance, dim=0)), dim=0)
     argmax = torch.argmax(distances, dim=0)
     mask = torch.zeros(
         distances.shape[0], dtype=torch.double
     )  # could use "torch.sparse" here to maximize efficiency
     mask[argmax] = 1
     out = distances * mask
     return out
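Note: on recent PyTorch versions torch.cosine_similarity broadcasts, so the loop above can collapse into one call. A minimal self-contained sketch (not the original module; the memory size and dimension are made up):

import torch

memory = torch.randn(10, 64)  # illustrative stand-in for self.memory
x = torch.randn(64)           # query vector

distances = torch.cosine_similarity(x.unsqueeze(0), memory, dim=1)  # (10,)
out = torch.zeros_like(distances)
best = torch.argmax(distances)
out[best] = distances[best]   # keep only the most similar memory entry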
Example #2
def train(net, epoch_size, batch_size, optimizer, device, pos_neg_dict,
          query_dict, passage_dict, scale):
    criterion = nn.CrossEntropyLoss()
    train_loss = 0.0
    net.train()
    for mb_idx in range(epoch_size):
        # Read in a new mini-batch of data!
        queries, pos, neg, labels = mini_batch(batch_size, device,
                                               pos_neg_dict, query_dict,
                                               passage_dict)
        optimizer.zero_grad()
        q_embed = net(queries)
        pos_embed = net(pos)
        neg_embed = net(neg)
        out_pos = torch.cosine_similarity(q_embed, pos_embed).unsqueeze(0).T
        out_neg = torch.cosine_similarity(q_embed, neg_embed).unsqueeze(0).T
        out = torch.cat((out_pos, out_neg), -1) * torch.tensor(
            [scale], dtype=torch.float).to(device)
        loss = criterion(out, torch.tensor(labels).to(device))
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        # print(str(mb_idx) + " iteration: " + str(train_loss / (mb_idx + 1)))
    return train_loss / epoch_size
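The scoring pattern above in isolation: the positive and negative cosine similarities form two class logits, which are scaled and fed to nn.CrossEntropyLoss with label 0 marking the positive column. A hedged sketch; all shapes and the scale are assumptions:

import torch
import torch.nn as nn

q = torch.randn(8, 128)    # query embeddings
pos = torch.randn(8, 128)  # positive passage embeddings
neg = torch.randn(8, 128)  # negative passage embeddings
scale = 10.0               # sharpens the softmax over cosine scores

logits = torch.cat((torch.cosine_similarity(q, pos).unsqueeze(1),
                    torch.cosine_similarity(q, neg).unsqueeze(1)), dim=1) * scale
labels = torch.zeros(8, dtype=torch.long)  # the positive sits in column 0
loss = nn.CrossEntropyLoss()(logits, labels)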
Example #3
def global_KNN_cosine(feat_vec):  # to verify: can cosine_similarity operate across a dimension? Reply: yes, it reduces along `dim` and preserves the remaining dimensions
    b,t,_ = feat_vec.size()
    refine_feature = torch.zeros(b, t - 1, feat_vec.size(2))
    feat_vec_avg = torch.mean(feat_vec, 1)
    similar_matrix = torch.zeros(b, t)

    for i in range(t):
        similar_score = torch.cosine_similarity(feat_vec_avg, feat_vec[:, i, :])
        similar_matrix[:, i] = similar_score

    remove_id = torch.argmin(similar_matrix, 1)

    for i in range(b):
        refine_feature[i] = feat_vec[i, torch.arange(t) != remove_id[i], :]  # (b, t-1, 1024)

    cosine_sum_similar = 0
    for i in range(t - 1):
        for j in range(i + 1, t - 1):  # refine_feature keeps only t-1 tokens
            cosine_similar_score = torch.cosine_similarity(refine_feature[:, i, :], refine_feature[:, j, :])
            cosine_similar_score = torch.div(cosine_similar_score + 1, 2)  # rescale from (-1, 1] to (0, 1]
            cosine_similar_score = -torch.log(cosine_similar_score)
            cosine_sum_similar = cosine_sum_similar + cosine_similar_score

    return refine_feature, cosine_sum_similar
Example #4
 def get_topk_similar_tokens(self,
                             query_token,
                             index_to_token,
                             token_to_index,
                             device,
                             k=5):
     x = self.embed_v(
         torch.LongTensor([token_to_index[query_token]]).to(device))
     w = torch.Tensor(self.embed_v.weight.cpu()).to(device)
     # cos [num_embeddings]
     # cos = torch.squeeze(w @ x) / torch.sqrt(torch.sum(w * w, dim=1) + torch.sum(x * x) + 1e-9)
     cos = torch.cosine_similarity(x, w, dim=-1)
     values, indices = torch.topk(cos, k + 1)
     for i in range(1, k + 1):  # skip index 0: the query token itself
         print(f"{index_to_token[int(indices[i])]}: sim={values[i].item():.4f}")
Example #5
def analysis():
    model = CustomClassificationModel(cfg).to(cfg.device)
    model.load_state_dict(torch.load(cfg.model_load_path, map_location='cpu'))
    model.eval()
    embedding_weights = model.embeddings.weight.data
    U = model.attention.weight
    attention_scores = torch.cosine_similarity(embedding_weights, U, dim=-1)

    sorted_scores = sorted(enumerate(attention_scores),
                           key=lambda x: x[1],
                           reverse=True)
    max_scores = [cfg.index2word[str(pair[0])] for pair in sorted_scores[:15]]
    min_scores = [cfg.index2word[str(pair[0])] for pair in sorted_scores[-15:]]
    print('max:\n', max_scores)
    print('min:\n', min_scores)
Example #6
def global_Center_cosine(feat_vec):
    b, t, _ = feat_vec.size()
    refine_feature = torch.zeros(b, t - 1, feat_vec.size(2))
    similar_matrix = torch.zeros(b, t, t)

    for i in range(t):
        for j in range(t):
            similar_matrix[:, i, j] = torch.cosine_similarity(feat_vec[:, i, :], feat_vec[:, j, :])

    similar_score = torch.sum(similar_matrix, 2, keepdim=True)
    remove_id = torch.argmin(similar_score, 1)

    for i in range(b):
        refine_feature[i] = feat_vec[i, torch.arange(t) != remove_id[i], :]

    cosine_sum_similar = 0
    for i in range(t - 1):
        for j in range(i + 1, t - 1):
            cosine_similar_score = torch.cosine_similarity(refine_feature[:, i, :], refine_feature[:, j, :])
            cosine_similar_score = torch.div(cosine_similar_score + 1, 2)  # rescale from (-1, 1] to (0, 1]
            cosine_similar_score = -torch.log(cosine_similar_score)
            cosine_sum_similar = cosine_sum_similar + cosine_similar_score

    return refine_feature, cosine_sum_similar
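Examples #3 and #6 fill their similarity matrices with explicit loops; the same t x t matrix comes from one batched matmul over L2-normalized features. A sketch with assumed shapes:

import torch
import torch.nn.functional as F

feat_vec = torch.randn(2, 5, 1024)                # (b, t, dim), illustrative
normed = F.normalize(feat_vec, dim=2)             # unit norm along the feature dim
similar_matrix = normed @ normed.transpose(1, 2)  # (b, t, t); [:, i, j] = cos(feat i, feat j)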
Example #7
def find_similarity(query):
    weight_paths = list((Path("weights")/FLAGS.runname).glob("*.pt"))
    res = []
    for wp in weight_paths:
        data = torch.load(wp)
        img_idxs = data["img_idxs"]
        feas = data["features"].to(device)
        queries = query.repeat(feas.size(0), 1)
        with torch.no_grad():
            if FLAGS.distance == "cosine_similarity":
                dis = torch.cosine_similarity(queries, feas)
            else:
                raise ValueError("unsupported FLAGS.distance: {}".format(FLAGS.distance))
        for d, i in zip(dis, img_idxs):
            res.append((i, d.cpu()))
    res = sorted(res, key=lambda x: x[1], reverse=True)
    return res
Example #8
    def forward(self, sentence_features: Iterable[Dict[str, Tensor]],
                labels: Tensor):
        reps = [
            self.model(sentence_feature)['sentence_embedding']
            for sentence_feature in sentence_features
        ]
        rep_a, rep_b = reps

        output = torch.cosine_similarity(rep_a, rep_b)
        loss_fct = nn.MSELoss()
        if labels is not None:
            loss = loss_fct(output, labels.view(-1))
            return loss
        else:
            return reps, output
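The same regression objective reduced to its core, outside the class: predict a gold similarity score as the cosine of two sentence embeddings and penalize with MSE. Shapes and label values are illustrative:

import torch
import torch.nn as nn

rep_a = torch.randn(4, 256)
rep_b = torch.randn(4, 256)
labels = torch.tensor([0.9, 0.1, 0.5, 0.7])  # gold similarity scores

output = torch.cosine_similarity(rep_a, rep_b)  # (4,)
loss = nn.MSELoss()(output, labels)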
Example #9
    def validation_step(self, batch, batch_idx):
        query, positive, negative = batch
        forwarded_q = self.forward(query, token_type=1)
        forwarded_pos = self.forward(positive, token_type=0)
        forwarded_neg = self.forward(negative, token_type=0)

        loss = self._loss_fn(forwarded_q, forwarded_pos, forwarded_neg)

        pos_scores = torch.cosine_similarity(forwarded_q, forwarded_pos)
        neg_scores = torch.cosine_similarity(forwarded_q, forwarded_neg)
        accuracy = (pos_scores > neg_scores).to(dtype=torch.float32).mean()

        result = pl.EvalResult(checkpoint_on=loss)
        result.log("val/loss",
                   loss,
                   on_step=False,
                   on_epoch=True,
                   prog_bar=True)
        result.log("val/acc",
                   accuracy,
                   on_step=False,
                   on_epoch=True,
                   prog_bar=True)
        return result
Example #10
    def compute_preds(self, x, y, negatives):

        neg_is_pos = (y == negatives).all(-1)
        y = y.unsqueeze(0)
        targets = torch.cat([y, negatives], dim=0)

        logits = torch.cosine_similarity(x.float(), targets.float(),
                                         dim=-1).type_as(x)

        logits /= self.logit_temp

        if neg_is_pos.any():
            logits[1:][neg_is_pos] = float("-inf")

        return logits
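A self-contained sketch of the contrastive-logit pattern in Example #10 (and #18 below); the shapes and the temperature are assumptions. Cosine similarity scores a prediction against the true target plus N distractors, is scaled by a temperature, and distractors identical to the target are masked out:

import torch

T, C, N = 6, 32, 4                # timesteps, channels, negatives
x = torch.randn(T, C)             # predictions
y = torch.randn(T, C)             # positives
negatives = torch.randn(N, T, C)  # sampled distractors

neg_is_pos = (y == negatives).all(-1)                    # (N, T)
targets = torch.cat([y.unsqueeze(0), negatives], dim=0)  # (1+N, T, C)
logits = torch.cosine_similarity(x.unsqueeze(0), targets, dim=-1)  # (1+N, T)
logits /= 0.1                                            # logit temperature
if neg_is_pos.any():
    logits[1:][neg_is_pos] = float("-inf")               # drop duplicate negatives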
Example #11
    def evaluate_identification_similarity(self, val_dataloader):
        src_id_sim = 0
        tgt_id_sim = 0
        self.generator.eval()
        for batch in tqdm(val_dataloader):
            Xs, Xt, _ = self.adapt(batch)
            with torch.no_grad():
                src_embed = self.arcface(F.interpolate(Xs[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))
                Y_hat = self.generator(Xt, src_embed, return_attributes=False)

                src_embed = self.mobiface(F.interpolate(Xs[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))
                tgt_embed = self.mobiface(F.interpolate(Xt[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))
                fake_embed = self.mobiface(F.interpolate(Y_hat[:, :, 19:237, 19:237], [112, 112], mode='bilinear', align_corners=True))
            
            src_id_sim += (torch.cosine_similarity(src_embed, fake_embed, dim=1)).float().mean()
            tgt_id_sim += (torch.cosine_similarity(tgt_embed, fake_embed, dim=1)).float().mean()

        self.generator.train()

        metrics = {
            'src_similarity': 100 * (src_id_sim / len(val_dataloader)).item(),
            'tgt_similarity': 100 * (tgt_id_sim / len(val_dataloader)).item()
        }
        return metrics
Example #12
    def loss_fn(self, loss_, embeds, labels):

        lang_count = int(self.config_yml['BATCH_SIZE_DIARAIZATION'] /
                         self.config_yml['UTTR_COUNT'])
        embeds3d = embeds.view(lang_count, self.config_yml['UTTR_COUNT'], -1)
        dcl = self.direct_classification_loss(embeds, labels)

        centroids = torch.mean(embeds3d, dim=1)

        centroids_neg = (torch.sum(embeds3d, dim=1, keepdim=True) -
                         embeds3d) / (self.config_yml['UTTR_COUNT'] - 1)
        cosim_neg = torch.cosine_similarity(embeds,
                                            centroids_neg.view_as(embeds),
                                            dim=1).view(lang_count, -1)
        centroids = centroids.repeat(
            lang_count * self.config_yml['UTTR_COUNT'], 1)

        embeds2de = embeds.unsqueeze(1).repeat_interleave(lang_count, 1).view(
            -1, self.config_yml['EMBEDDING_SIZE'])
        cosim = torch.cosine_similarity(embeds2de, centroids)
        cosim_matrix = cosim.view(lang_count, self.config_yml['UTTR_COUNT'],
                                  -1)
        neg_ix = list(range(lang_count))

        cosim_matrix[neg_ix, :, neg_ix] = cosim_neg

        sim_matrix = (self.similarity_weight * cosim_matrix) + \
            self.similarity_bias

        sim_matrix = sim_matrix.view(self.config_yml['BATCH_SIZE_DIARAIZATION'], -1)
        targets = torch.arange(
            self.config_yml['ACC_COUNT']).repeat_interleave(
                self.config_yml['UTTR_COUNT']).long().to(device=self.device)
        ce_loss = loss_(sim_matrix, targets)

        return (ce_loss, dcl), sim_matrix
Example #13
    def _loss_fn(self, query, positive, negative):
        """Processes a triplet of a query, a posiitve passage and a negative one to determine
        the corresponding loss value. Inputs should be tensors of embeddings"""
        passages = torch.cat((positive, negative))
        scores = torch.cosine_similarity(query.unsqueeze(1),
                                         passages.unsqueeze(0),
                                         dim=2)
        scores = torch.sub(1, torch.acos(scores) / pi)
        positives = torch.eye(*scores.shape).to(device=self.device)

        loss_raw = (
            (scores - scores.diag().unsqueeze(1) + self.hparams.margin) *
            torch.sub(1, positives)).clamp_min(0)
        loss = loss_raw.sum()
        return loss
Example #14
    def forward(self, feature_out_1, feature_out_2, feature_low_level_1, feature_low_level_2, cam_1=None, cam_2=None):
        low_level_feature_1 = self.project(feature_low_level_1)
        low_level_feature_2 = self.project(feature_low_level_2)

        output_feature_1 = self.aspp(feature_out_1)
        output_feature_2 = self.aspp(feature_out_2)
        output_feature_large_1 = self._up_to_target(output_feature_1, low_level_feature_1)
        output_feature_large_2 = self._up_to_target(output_feature_2, low_level_feature_2)

        output_feature_large_fusion_1 = torch.cat([low_level_feature_1, output_feature_large_1], dim=1)
        output_feature_large_fusion_2 = torch.cat([low_level_feature_2, output_feature_large_2], dim=1)

        out_1 = self.classifier(output_feature_large_fusion_1)
        out_2 = self.classifier(output_feature_large_fusion_2)

        #############################################################################################################
        if cam_1 is not None and cam_2 is not None:
            cam_large_1 = self._up_to_target(cam_1, output_feature_large_fusion_1)
            cam_large_2 = self._up_to_target(cam_2, output_feature_large_fusion_2)

            out_mask_1 = torch.sum(torch.sum(
                cam_large_1 * output_feature_large_fusion_1, dim=2), dim=2) / (torch.sum(cam_large_1) + 1e-6)
            out_mask_2 = torch.sum(torch.sum(
                cam_large_2 * output_feature_large_fusion_2, dim=2), dim=2) / (torch.sum(cam_large_2) + 1e-6)
            out_mask_4d_1 = torch.unsqueeze(torch.unsqueeze(
                out_mask_1, dim=-1), dim=-1).expand_as(output_feature_large_fusion_2)
            out_mask_4d_2 = torch.unsqueeze(torch.unsqueeze(
                out_mask_2, dim=-1), dim=-1).expand_as(output_feature_large_fusion_1)
            out_mask_2_to_1 = torch.cosine_similarity(out_mask_4d_2, output_feature_large_fusion_1, dim=1)
            out_mask_1_to_2 = torch.cosine_similarity(out_mask_4d_1, output_feature_large_fusion_2, dim=1)

            result_our = {"cam_large_1": cam_large_1, "cam_large_2": cam_large_2,
                          "d5_mask_1_to_2": out_mask_1_to_2, "d5_mask_2_to_1": out_mask_2_to_1}
            return out_1, out_2, result_our
        #############################################################################################################
        return out_1, out_2, None
Example #15
    def validation_step(self, batch, batch_idx):
        out1 = self.model.forward({
            "input_ids": batch["input_ids1"],
            "attention_mask": batch["attention_mask1"]
        })["sentence_embedding"]
        out2 = self.model.forward({
            "input_ids": batch["input_ids2"],
            "attention_mask": batch["attention_mask2"]
        })["sentence_embedding"]
        scores = batch["scores"]

        sims = torch.cosine_similarity(out1, out2)
        loss = F.mse_loss(sims, scores.view(-1))
        self.log("val_loss", loss, prog_bar=True, on_epoch=True, logger=True)
        return loss
Example #16
    def gen_loss(self, real_samps, fake_samps, loss_type='cos'):
        # feature-matching loss on discriminator features (cosine or MSE)

        # Obtain predictions
        r_f, r_logit = self.dis(real_samps)
        f_f, f_logit = self.dis(fake_samps)

        if loss_type == 'cos':
            loss = 1 - torch.cosine_similarity(r_f, f_f)
        elif loss_type == 'mse':
            mse_loss = nn.MSELoss(reduction="mean")
            loss = mse_loss(r_f, f_f)
        else:
            raise ValueError(
                "loss_type must be 'cos' or 'mse', but {} was provided".format(loss_type))
        return loss
Example #17
def find_neighbors(term, embeddings, k=10):
    query = np.array([embeddings.loc[term]], dtype=np.float32)
    arr = np.array(embeddings.values, dtype=np.float32)
    xb = np.ascontiguousarray(arr)
    index = faiss.IndexFlatIP(arr.shape[1])  # exact inner-product index
    index.add(xb)  # add all embedding vectors to the index
    D, I = index.search(query, k)  # retrieve the k nearest neighbors
    items = [idx for idx in embeddings.iloc[np.squeeze(I)].index]
    distances = []
    for x in I[0]:
        a = torch.tensor(query)
        b = torch.tensor(embeddings.iloc[x].values, dtype=torch.float32).unsqueeze(0)
        distances.append(torch.cosine_similarity(a, b).item())
    return items, distances
Example #18
 def _calculate_similarity(self, logits, negatives, targets):
     neg_is_pos = (targets == negatives).all(-1)
     # NxT' - true where the negative is actually the positive
     targets = targets.unsqueeze(0)
     # 1xT'xC
     targets = torch.cat([targets, negatives], dim=0)
     # (1+N)xT'XC
     logits = torch.cosine_similarity(
         logits.float().unsqueeze(0).expand(targets.shape[0], -1, -1), targets.float(), dim=-1
     ).type_as(logits)
     # (1+N)xT'
     logits /= self.logit_temp
     if neg_is_pos.any():
         logits[1:][neg_is_pos] = float("-inf")
     return logits
Example #19
    def test_inference_integration(self):
        model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base")
        model.to(torch_device)
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
            "facebook/wav2vec2-base", return_attention_mask=True
        )
        input_speech = self._load_datasamples(2)

        inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True)

        features_shape = (
            inputs_dict["input_values"].shape[0],
            model._get_feat_extract_output_lengths(torch.tensor(inputs_dict["input_values"].shape[1])),
        )

        torch.manual_seed(0)
        mask_time_indices = _compute_mask_indices(
            features_shape,
            model.config.mask_time_prob,
            model.config.mask_time_length,
            device=inputs_dict["input_values"].device,
            min_masks=2,
        ).to(torch_device)

        with torch.no_grad():
            outputs = model(
                inputs_dict.input_values.to(torch_device),
                attention_mask=inputs_dict.attention_mask.to(torch_device),
                mask_time_indices=mask_time_indices,
            )

        # compute cosine similarity
        cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1)

        # retrieve cosine sim of masked features
        cosine_sim_masked = cosine_sim[mask_time_indices]

        # fmt: off
        expected_cosine_sim_masked = torch.tensor(
            [0.7458, 0.7188, 0.6418, 0.3729, 0.3741, 0.3694, 0.3110, 0.2257, 0.4403, 0.5415, 0.3950, 0.3701, 0.8831,
             0.8613, 0.5229, 0.6696, 0.7206, 0.7877, 0.6758, 0.8746, 0.6596, 0.6282, 0.6178, 0.5839, 0.5926, 0.6651,
             0.4635, 0.6332, 0.6572, 0.8776, 0.4999, 0.7001, 0.7257, 0.5098, 0.6229, 0.4566, 0.5261, 0.6363, 0.5371,
             0.6997],
            device=torch_device,
        )
        # fmt: on

        self.assertTrue(torch.allclose(cosine_sim_masked, expected_cosine_sim_masked, atol=1e-3))
Example #20
    def forward(self, x):
        # W_list = [(x @ self.H_list[..., i]).unsqueeze(2) for i in range(self.topic_e - self.topic_s + 1)]
        W_list = []
        x_nmf = x.detach().data.cpu().numpy()
        for i in range(self.topic_e - self.topic_s + 1):
            # print("training {}th NMF".format(i))
            W = np.zeros((x.shape[0], self.topic_e))
            W[:, :(i + self.topic_s)] = self.nmf_solvers[i].fit_transform(x_nmf)
            W = torch.from_numpy(W).unsqueeze(2).cuda()
            W_list.append(W)
            # H_list.append(self.nmf_solvers[i].components_)
            # W = torch.matmul(x, self.H_list[..., i])  # REMARK: here we have assume that H is orthogonal!
            # W = W.unsqueeze(2)
            # print('Ws',W)
            # W_list.append(W)
        # H_list = torch.from_numpy(H_list).permute(2, 1, 0)  # transpose
        W_list = torch.cat(W_list, 2)
        # print('W',W_list.shape)
        N, D = int(x.shape[0]), int(x.shape[1])
        # print('x shape',x.shape)
        # TODO similarity or attention?
        t1 = x.repeat(N, 1)
        t2 = x.repeat(1, N).reshape(-1, D)
        # FIXME change this
        t = torch.cat((t1, t2), 1)
        sim = torch.cosine_similarity(t1, t2)
        sim = sim.unsqueeze(0)
        t = sim.T * t
        t = t.reshape(N, N, -1)

        W_max = torch.argmax(W_list, dim=1).float()
        hyper = torch.matmul(W_max, W_max.T)
        hyper = hyper / (N * N * 2)
        # print(W_max, W_max.shape)
        W1 = W_max.repeat(N, 1).reshape(-1, N, self.topic_e - self.topic_s + 1)
        W2 = W_max.repeat(1, N).reshape(-1, N, self.topic_e - self.topic_s + 1)
        # print(W1)
        # print(W2)
        comp = torch.eq(W1, W2)
        topic_count = torch.sum(comp, dim=2, keepdim=True)
        topic_count = torch.div(topic_count.float(),
                                self.topic_e - self.topic_s + 1)

        new_n = topic_count * t
        new_nodes = torch.mean(new_n, dim=0)
        # print('nodes',new_nodes.shape)
        # new_nodes = self.linear_do(self.linear(new_nodes))
        return new_nodes, hyper
Example #21
    def forward(self, text, return_loss = True):
        width, num_cutouts = self.image_size, self.num_cutouts

        out = self.model()

        if not return_loss:
            return out

        pieces = []
        for ch in range(num_cutouts):
            size = int(width * torch.zeros(1,).normal_(mean=.8, std=.3).clip(.5, .95))
            offsetx = torch.randint(0, width - size, ())
            offsety = torch.randint(0, width - size, ())
            apper = out[:, :, offsetx:offsetx + size, offsety:offsety + size]
            if (self.experimental_resample):
                apper = resample(apper, (224, 224))
            else:
                apper = F.interpolate(apper, (224, 224), **self.interpolation_settings)
            pieces.append(apper)

        into = torch.cat(pieces)
        into = normalize_image(into)

        image_embed = perceptor.encode_image(into)
        text_embed = perceptor.encode_text(text)

        latents, soft_one_hot_classes = self.model.latents()
        num_latents = latents.shape[0]
        latent_thres = self.model.latents.thresh_lat

        lat_loss =  torch.abs(1 - torch.std(latents, dim=1)).mean() + \
                    torch.abs(torch.mean(latents)).mean() + \
                    4 * torch.max(torch.square(latents).mean(), latent_thres)

        for array in latents:
            mean = torch.mean(array)
            diffs = array - mean
            var = torch.mean(torch.pow(diffs, 2.0))
            std = torch.pow(var, 0.5)
            zscores = diffs / std
            skews = torch.mean(torch.pow(zscores, 3.0))
            kurtoses = torch.mean(torch.pow(zscores, 4.0)) - 3.0

        lat_loss = lat_loss + torch.abs(kurtoses) / num_latents + torch.abs(skews) / num_latents
        cls_loss = ((50 * torch.topk(soft_one_hot_classes, largest = False, dim = 1, k = 999)[0]) ** 2).mean()

        sim_loss = -self.loss_coef * torch.cosine_similarity(text_embed, image_embed, dim = -1).mean()
        return (lat_loss, cls_loss, sim_loss)
Example #22
def pk_sim(h, p, k):
    h: torch.Tensor  # (p * k, dim)

    X = h.repeat_interleave(p * k, dim=0)
    Y = h.repeat(p * k, 1)
    sim = torch.cosine_similarity(X, Y)
    sim = sim.view(p * k, p * k)

    class_ids = torch.arange(p).repeat_interleave(k)
    inds = torch.triu_indices(p * k, p * k, offset=1)
    sim = sim[inds[0], inds[1]]
    positive = (class_ids[inds[0]] == class_ids[inds[1]]).to(device=h.device)
    s_p = sim[positive]
    s_n = sim[~positive]

    return s_p, s_n
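The repeat_interleave/repeat tiling above can also be expressed with broadcasting: unsqueezing the two operands makes torch.cosine_similarity emit the full (p*k) x (p*k) matrix directly. A sketch with made-up p, k, dim:

import torch

p, k, dim = 3, 4, 16
h = torch.randn(p * k, dim)

sim = torch.cosine_similarity(h.unsqueeze(1), h.unsqueeze(0), dim=2)  # (p*k, p*k)
class_ids = torch.arange(p).repeat_interleave(k)
inds = torch.triu_indices(p * k, p * k, offset=1)
sim = sim[inds[0], inds[1]]
positive = class_ids[inds[0]] == class_ids[inds[1]]
s_p, s_n = sim[positive], sim[~positive]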
Example #23
 def forward(self, x):
     out = self.RoBert(torch.LongTensor(x[0]).cuda())[0]
     out1 = self.RoBert(torch.LongTensor(x[1]).cuda())[0]
     out2 = self.RoBert(torch.LongTensor(x[2]).cuda())[0]
     x = torch.masked_select(out, torch.BoolTensor(x[3]).cuda())
     x = x.reshape([-1, 768 * 3])
     tensor1 = torch.index_select(out1, 1, torch.LongTensor([0]).cuda())
     tensor1 = torch.reshape(tensor1, [64, 768])
     tensor2 = torch.index_select(out2, 1, torch.LongTensor([0]).cuda())
     tensor2 = torch.reshape(tensor2, [64, 768])  # fixed: was reshaping tensor1 again
     sim = torch.cosine_similarity(tensor1, tensor2).reshape([-1, 1])
     concat = torch.cat((x, sim), 1)
     x = F.relu(self.hidden(concat))
     x = F.relu(self.res(x))
     x = self.sigmoid(x)
     return x
Example #24
    def compute_objectives(self, predictions, batch, stage):
        """Computes the loss (CTC+NLL) given predictions and targets."""

        if stage == sb.Stage.TRAIN:
            # We don't have to compute anything, as the HF model directly
            # returns the contrastive loss.
            loss = predictions
        else:
            # We compute the accuracy between embeddings with cosine similarity.
            loss, out, mask_time_indices = predictions
            cosine_sim = torch.cosine_similarity(
                out.projected_states, out.projected_quantized_states, dim=-1)
            acc = cosine_sim[mask_time_indices].mean()
            self.acc_metric.append(acc)

        return loss
Example #25
 def forward(self, support_x, query_x):
     "Note that the size of support_x and query_x must be [n*b,n*s,64,19,19]"
     count = 0
     for i in range(self.width):
         for j in range(self.width):
             column = support_x[:,:,:,i,j].unsqueeze(-1)
             columns = column.unsqueeze(-1).repeat(1,1,1,self.width,self.width) # [n*b,n*s,64,19,19]
             similarity = self.global_max_pooling(torch.cosine_similarity(query_x,columns,dim=-3)).squeeze() # [n*b,n*s]
             similarity = similarity.unsqueeze(-1) # [n*b,n*s,1]
             if count==0:
                 out = similarity
             else:
                 out = torch.cat([out,similarity],dim=-1)
             count = count+1
     "out -> [n*b,n*s,19*19]"      
     return out
Example #26
def cosine_metric(a, b):
    # a: query feature
    # b: prototype
    n = a.shape[0]
    m = b.shape[0]
    tmp_a = a
    tmp_b = b
    tmp_a = tmp_a.unsqueeze(1).expand(n, m, -1)
    tmp_b = tmp_b.unsqueeze(0).expand(n, m, -1)
    logits = -((tmp_a - tmp_b)**2).sum(dim=2)  # negative squared euclidean distance (placeholder)
    # each row is then overwritten with the cosine similarity between query i and all prototypes
    for i in range(n):
        logits[i] = torch.cosine_similarity(torch.unsqueeze(a[i], dim=0), b)

    return logits
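The per-query loop in cosine_metric can likewise become a single broadcasted call; a minimal sketch with made-up sizes:

import torch

a = torch.randn(5, 64)  # query features
b = torch.randn(3, 64)  # prototypes
logits = torch.cosine_similarity(a.unsqueeze(1), b.unsqueeze(0), dim=2)  # (5, 3)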
Example #27
def norm_loss_with_scaling(y_pred, y, alpha=[1, 1], p=2, detach=False):
    if y_pred.size(0) > 1:
        normalization = torch.norm(y_pred.detach(),
                                   p=p) if detach else torch.norm(y_pred, p=p)
        y_pred = y_pred / (eps + normalization)  # scale to unit p-norm (eps is a module-level constant)
        y = y / (eps + torch.norm(y, p=p))
        loss0, loss1 = 0, 0
        if alpha[0] > 0:
            loss0 = F.mse_loss(y_pred, y) / 4
        if alpha[1] > 0:
            rho = torch.cosine_similarity(y_pred, y)
            loss1 = F.mse_loss(rho * y_pred, y) / 4
        return (alpha[0] * loss0 + alpha[1] * loss1) / (alpha[0] + alpha[1])
    else:
        return F.l1_loss(y_pred,
                         y_pred.detach())  # 0 for batch with single sample.
Example #28
 def forward(self, x):
     similarities = []
     similarity_values = []
     for idx, pattern in enumerate(self.patterns):
         similarity = torch.cosine_similarity(x.flatten(),
                                              pattern.flatten(), 0)
         similarities.append((similarity, idx))
         similarity_values.append(similarity)
     similarity_values = torch.stack(similarity_values)
     similarity_avg = torch.mean(similarity_values, 0)
     # print(similarity_avg)
     similarities.sort(key=lambda l: l[0], reverse=True)
     most_similar_idx = similarities[0][1]
     most_similar_pattern = self.patterns[most_similar_idx]
     st.write(f'CLASS SIMILARITY AVERAGE: {similarity_avg.detach()}')
     return most_similar_pattern
Example #29
 def forward(self, recon_x, x, targets):
     inputs = torch.squeeze(torch.cosine_similarity(recon_x, x, dim=1))
     targets = torch.squeeze(targets)
     dist_ap, dist_an = [], []
     a = inputs[targets == 1]
     b = inputs[targets == 0]
     for i in range(a.size(0)):
         for j in range(b.size(0)):
             dist_ap.append(a[i].unsqueeze(0))
             dist_an.append(b[j].unsqueeze(0))
     dist_ap = torch.cat(dist_ap)
     dist_an = torch.cat(dist_an)
     #print(dist_ap,dist_an)
     # Compute ranking hinge loss
     y = torch.ones_like(dist_an)
     return self.ranking_loss(dist_ap, dist_an, y)
Example #30
def compute_perceptual_loss(img_batch, ref_img):
    loss = 0

    img_visual_feats = img_batch
    ref_visual_feats = ref_img

    for name, module in vgg_layers._modules.items():
        img_visual_feats = module(img_visual_feats)
        ref_visual_feats = module(ref_visual_feats)

        if name in vgg_layer_name_mapping:
            loss += 10 * -torch.cosine_similarity(img_visual_feats,
                                                  ref_visual_feats).mean()

    loss /= len(vgg_layer_name_mapping)
    return loss
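Example #30 depends on module-level vgg_layers and vgg_layer_name_mapping. A runnable sketch of the same idea, assuming a recent torchvision (the weights=None keyword) and with illustrative layer names rather than the original mapping:

import torch
import torchvision

vgg_layers = torchvision.models.vgg16(weights=None).features.eval()
layer_names = {"3", "8"}  # illustrative ReLU layers to compare

def perceptual_cosine_loss(img, ref):
    loss, f_img, f_ref = 0.0, img, ref
    for name, module in vgg_layers._modules.items():
        f_img, f_ref = module(f_img), module(f_ref)
        if name in layer_names:
            loss += -torch.cosine_similarity(f_img, f_ref, dim=1).mean()
    return loss / len(layer_names)

with torch.no_grad():
    print(perceptual_cosine_loss(torch.randn(1, 3, 64, 64),
                                 torch.randn(1, 3, 64, 64)))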