Code Example #1
    def dim_reduce(self, adj_matrix, num_reduce, ortho_penalty,
                   variance_penalty, neg_penalty, kernel):
        kernel_p = torch.nn.functional.relu(kernel)
        # np.savetxt('kernel_p.txt', kernel_p.cpu().data.numpy())
        batch_size = int(adj_matrix.shape[0])
        AF = torch.tensordot(adj_matrix, kernel_p, [[-1], [0]])
        reduced_adj_matrix = torch.transpose(
            torch.tensordot(kernel_p, AF,
                            [[0], [1]]),  # num_reduce*batch*num_reduce
            1,
            0)  # batch*num_reduce*num_reduce
        kernel_p_tran = kernel_p.transpose(-1, -2)  # num_reduce * column_dim
        gram_matrix = torch.matmul(kernel_p_tran, kernel_p)
        diag_elements = gram_matrix.diag()

        if ortho_penalty != 0:
            ortho_loss_matrix = torch.square(gram_matrix -
                                             torch.diag(diag_elements))
            ortho_loss = torch.multiply(torch.tensor(ortho_penalty),
                                        torch.sum(ortho_loss_matrix))
            self.losses.append(ortho_loss)

        if variance_penalty != 0:
            variance = diag_elements.var()
            variance_loss = torch.multiply(torch.tensor(variance_penalty),
                                           variance)
            self.losses.append(variance_loss)

        if neg_penalty != 0:
            neg_loss = torch.multiply(
                torch.tensor(neg_penalty),
                torch.sum(
                    torch.nn.functional.relu(torch.tensor(1e-6) - kernel)))
            self.losses.append(neg_loss)
        self.losses.append(0.05 * torch.sum(torch.abs(kernel_p)))
        return reduced_adj_matrix
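
For reference, the shape bookkeeping in dim_reduce can be checked in isolation. A minimal sketch with hypothetical sizes (90 nodes reduced to 16):

import torch

batch, n, r = 8, 90, 16
adj = torch.rand(batch, n, n)
kernel_p = torch.relu(torch.randn(n, r))
AF = torch.tensordot(adj, kernel_p, [[-1], [0]])  # (batch, n, r)
reduced = torch.transpose(torch.tensordot(kernel_p, AF, [[0], [1]]), 1, 0)
print(reduced.shape)  # torch.Size([8, 16, 16])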
Code Example #2
def apply_filter_pytorch(arr, _filter, axes=(0, 1), cuda=False):
    """
    Applying Filter using pytorch for iterative method.
    """
    _filter = torch.from_numpy(_filter)
    arr = torch.from_numpy(arr)
    if cuda is True:
        _filter = _filter.cuda()
        arr = arr.cuda()
    result = torch.fft.ifft2(
        torch.multiply(torch.fft.ifftshift(1.0 - _filter),
                       torch.fft.fft2(arr, dim=axes)))
    if cuda is True:
        result = result.cpu()
    result = result.numpy().real
    return result
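
A hypothetical driver for apply_filter_pytorch, assuming a 2D image and a binary frequency mask (the function applies 1.0 - _filter, i.e. the mask's complement):

import numpy as np

arr = np.random.rand(64, 64).astype(np.float32)
yy, xx = np.mgrid[-32:32, -32:32]
_filter = (np.sqrt(xx**2 + yy**2) < 10).astype(np.float32)  # centered low-pass mask
out = apply_filter_pytorch(arr, _filter, axes=(0, 1))
print(out.shape, out.dtype)  # (64, 64) float32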
Code Example #3
    def forward(self, trend, fuse_input):
        trend = torch.unsqueeze(
            trend, 1)  # (num_samples, 1, num_nodes, output_length,)
        trend = self.trends_conv(trend).permute(
            0, 2, 3, 1)  # (num_samples, num_nodes, out_len,d_model,)

        num_samples, num_nodes, out_len, d_model = trend.shape
        # consistent position encoding
        trend = self.position_enc(trend.reshape(-1, out_len, d_model),
                                  in_decode=True).reshape(
                                      num_samples, num_nodes, out_len, d_model)

        fuse_input = torch.sigmoid(self.linear(fuse_input))
        fuse_input = -self.alpha * fuse_input

        return torch.multiply(trend, fuse_input)
Code Example #4
File: triplet_loss.py Project: ABD-01/Face-Unlock
def batch_all_triplet_loss(labels, embeddings, margin, squared=False):
    """Build the triplet loss over a batch of embeddings.
    We generate all the valid triplets and average the loss over the positive ones.
    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim)
        margin: margin for triplet loss
        squared: Boolean. If true, the pairwise distance matrix holds squared
                 Euclidean distances; if false, plain Euclidean distances.
    Returns:
        triplet_loss: scalar tensor containing the triplet loss
    """
    # Get the pairwise distance matrix
    pairwise_dist = _pairwise_distances(embeddings, squared=squared)

    # shape (batch_size, batch_size, 1)
    anchor_positive_dist = pairwise_dist.unsqueeze(2)
    assert anchor_positive_dist.shape[2] == 1, "{}".format(anchor_positive_dist.shape)
    # shape (batch_size, 1, batch_size)
    anchor_negative_dist = pairwise_dist.unsqueeze(1)
    assert anchor_negative_dist.shape[1] == 1, "{}".format(anchor_negative_dist.shape)

    # Compute a 3D tensor of size (batch_size, batch_size, batch_size)
    # triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
    # Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
    # and the 2nd (batch_size, 1, batch_size)
    triplet_loss = anchor_positive_dist - anchor_negative_dist + margin

    # Put to zero the invalid triplets
    # (where label(a) != label(p) or label(n) == label(a) or a == p)
    mask = _get_triplet_mask(labels).float()
    triplet_loss = torch.multiply(mask, triplet_loss)

    # Remove negative losses (i.e. the easy triplets)
    triplet_loss.relu_()

    # Count number of positive triplets (where triplet_loss > 0)
    valid_triplets = triplet_loss.greater(1e-16).float()
    num_positive_triplets = valid_triplets.sum()
    num_valid_triplets = mask.sum()
    fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)

    # Get final mean triplet loss over the positive valid triplets
    triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16)

    return triplet_loss #, fraction_positive_triplets
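
_get_triplet_mask is assumed by this snippet but not shown. A common implementation, consistent with the comment on valid triplets (a sketch, not necessarily this project's own helper):

import torch

def _get_triplet_mask(labels):
    # mask[i, j, k] is True iff i, j, k are distinct and
    # label(i) == label(j) while label(i) != label(k)
    indices_equal = torch.eye(labels.size(0), dtype=torch.bool, device=labels.device)
    indices_not_equal = ~indices_equal
    distinct = (indices_not_equal.unsqueeze(2) &
                indices_not_equal.unsqueeze(1) &
                indices_not_equal.unsqueeze(0))
    labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1)
    valid = labels_equal.unsqueeze(2) & ~labels_equal.unsqueeze(1)
    return distinct & valid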
Code Example #5
    def forward(self, input):
        x, a, g = input
        x = x.transpose(1, 0, 2, 3)
        g = g.transpose(3, 0, 1, 2)
        x = torch.from_numpy(x).to("cuda")
        a = torch.from_numpy(a).to("cuda")
        g = torch.from_numpy(g).to("cuda")
        concat = torch.cat([x, g])
        concat = concat.permute(1, 0, 2, 3)
        normalized = concat.float() / 255
        x = F.relu(self.conv1(normalized))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        x = F.relu(self.fc4(x.view(x.size(0), -1)))
        output = self.head(x)
        filtered_output = torch.multiply(output, a)
        return filtered_output
Code Example #6
    def forward(self, inputs):
        output = torch.sigmoid(torch.matmul(inputs, self.linear) + self.bias)

        if self.n > 1:
            output = self.n_norm(
                torch.mul(torch.unsqueeze(output, 1),
                          self.R_t.type_as(self.linear)))
        elif self.n > 0:
            output = torch.min(torch.mul(torch.unsqueeze(output, 1),
                                         self.R_t.type_as(self.linear)) - 1,
                               other=torch.tensor(1 - 1e-4).type_as(self.bias))
        else:
            output = torch.maximum(
                torch.multiply(torch.unsqueeze(output, 1),
                               self.R_t.type_as(self.linear)),
                torch.tensor(-1.).type_as(self.linear))  # element-wise floor at -1

        return output
Code Example #7
    def forward(self, logits, labels, seq_mask):
        logits = logits[:, :, 1]
        ones = torch.ones_like(logits)
        zero = torch.zeros_like(logits)
        y_pred = torch.where(logits < 0.5, zero, ones)
        y_pred = y_pred.view(size=(-1, )).float()

        seq_mask = seq_mask.view(size=(-1, )).float()
        # y_pred=torch.multiply(y_pred,seq_mask)

        y_true = labels.view(size=(-1, )).float()
        corr = torch.eq(y_pred, y_true)
        corr = torch.multiply(corr.float(), y_true)
        recall = torch.sum(corr) / (torch.sum(y_true) + 1e-8)
        precision = torch.sum(corr) / (torch.sum(y_pred) + 1e-8)
        f1 = 2 * recall * precision / (recall + precision + 1e-8)
        return recall, precision, f1
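
The corr product counts true positives, assuming the positive class is labeled 1. A quick numeric check with hypothetical values:

import torch

y_pred = torch.tensor([1., 0., 1., 1.])
y_true = torch.tensor([1., 1., 0., 1.])
corr = torch.eq(y_pred, y_true).float() * y_true  # keeps true positives only
print(corr.sum() / y_true.sum())  # recall = 2/3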
Code Example #8
File: triplet_loss.py Project: ABD-01/Face-Unlock
def batch_hard_triplet_loss(labels, embeddings, margin, squared=False):
    """Build the triplet loss over a batch of embeddings.
    For each anchor, we get the hardest positive and hardest negative to form a triplet.
    Args:
        labels: labels of the batch, of size (batch_size,)
        embeddings: tensor of shape (batch_size, embed_dim)
        margin: margin for triplet loss
        squared: Boolean. If true, the pairwise distance matrix holds squared
                 Euclidean distances; if false, plain Euclidean distances.
    Returns:
        triplet_loss: scalar tensor containing the triplet loss
    """
    # Get the pairwise distance matrix
    pairwise_dist = _pairwise_distances(embeddings, squared=squared)

    # For each anchor, get the hardest positive
    # First, we need to get a mask for every valid positive (they should have same label)
    mask_anchor_positive = _get_anchor_positive_triplet_mask(labels).float()

    # We put to 0 any element where (a, p) is not valid (valid if a != p and label(a) == label(p))
    anchor_positive_dist = torch.multiply(mask_anchor_positive, pairwise_dist)

    # shape (batch_size, 1)
    hardest_positive_dist = torch.max(anchor_positive_dist, dim=1, keepdim=True).values
    # print("hardest_positive_dist", hardest_positive_dist.mean())

    # For each anchor, get the hardest negative
    # First, we need to get a mask for every valid negative (they should have different labels)
    mask_anchor_negative = _get_anchor_negative_triplet_mask(labels).float()

    # We add the maximum value in each row to the invalid negatives (label(a) == label(n))
    max_anchor_negative_dist = torch.max(pairwise_dist, dim=1, keepdim=True).values
    anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative)

    # shape (batch_size,)
    hardest_negative_dist = torch.min(anchor_negative_dist, dim=1, keepdim=True).values
    # print("hardest_negative_dist", hardest_negative_dist.mean())

    # Combine biggest d(a, p) and smallest d(a, n) into final triplet loss
    triplet_loss = torch.relu(hardest_positive_dist - hardest_negative_dist + margin)

    # Get final mean triplet loss
    triplet_loss = torch.mean(triplet_loss)

    return triplet_loss
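
_pairwise_distances is likewise assumed. The usual sketch uses the expansion ||a - b||^2 = ||a||^2 - 2 a.b + ||b||^2 and guards the non-differentiable sqrt at zero:

import torch

def _pairwise_distances(embeddings, squared=False):
    dot = torch.matmul(embeddings, embeddings.t())
    sq_norms = torch.diagonal(dot)
    distances = sq_norms.unsqueeze(0) - 2.0 * dot + sq_norms.unsqueeze(1)
    distances = torch.clamp(distances, min=0.0)  # numerical noise can dip below 0
    if not squared:
        mask = (distances == 0.0).float()
        distances = torch.sqrt(distances + mask * 1e-16)  # avoid sqrt(0) gradient NaNs
        distances = distances * (1.0 - mask)
    return distances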
Code Example #9
    def _elbo(self, x_batch, p, iter, encoder_mu, encoder_log_var, decoder_mu, decoder_log_var, dof):  # Compute ELBO: Evidence Lower Bound
        p = torch.from_numpy(p)
        x_batch = torch.from_numpy(x_batch)
        weights = torch.clamp(torch.sum(p, 0), 0.01, 2.0)

        # Compute log likelihood
        lls_result = log_likelihood_student(x_batch, decoder_mu, decoder_log_var, dof)
        multiplication_res = torch.multiply(lls_result, weights)
        log_likelihood = torch.mean(multiplication_res)

        # Compute KL divergence; note this form treats `encoder_log_var` as the
        # variance itself: KL = 0.5 * sum(mu^2 + var - log(var) - 1)
        kl_divergence = torch.mean(0.5 * torch.sum(encoder_mu ** 2 +
                                                   encoder_log_var -
                                                   torch.log(encoder_log_var) - 1,
                                                   dim=1))
        kl_divergence *= np.max([0.1, self._input_dim / iter])

        elbo = log_likelihood - kl_divergence
        return elbo
Code Example #10
    def forward(self, inputs):

        inputs = inputs.permute(0, 2, 3, 4, 1).contiguous()
        input_shape = inputs.shape

        # Flatten input
        flat_inputs = inputs.view(-1, self._embedding_dim)

        dist = (
            torch.sum(flat_inputs.detach()**2, dim=1, keepdim=True) +
            torch.sum(self._embedding.weight**2, dim=1)) - 2 * torch.matmul(
                flat_inputs.detach(), self._embedding.weight.t())
        encoding_idx = torch.argmin(dist, dim=1, keepdim=True)
        encodings = torch.zeros(encoding_idx.shape[0],
                                self._num_embeddings,
                                device=inputs.device)
        encodings.scatter_(1, encoding_idx, 1)

        # This part is the same as VQ-VAE
        quantized = torch.matmul(encodings,
                                 self._embedding.weight).view(input_shape)

        commitment_loss = F.mse_loss(quantized, inputs)

        indicator = torch.zeros(dist.shape, device=inputs.device)
        n_neighbors = torch.zeros(indicator.shape[0],
                                  requires_grad=False,
                                  device=inputs.device)
        for i in range(indicator.shape[0]):
            curr_idx = encoding_idx[i].item()
            neighbor_idx = self.get_neighbors(curr_idx)
            n_neighbors[i] = len(neighbor_idx)
            indicator[i, neighbor_idx] = 1

        total_neighbors = n_neighbors.sum()
        newdist = torch.multiply(dist, indicator)
        somloss = newdist.sum().divide(total_neighbors)

        loss = self.alpha * commitment_loss + self.beta * somloss
        quantized = inputs + (quantized - inputs).detach()
        #avg_probs = torch.mean(encodings, dim = 0)
        #perplexity = torch.exp(-torch.sum(avg_probs * torch.log(avg_probs + 1e-10)))
        return (loss, quantized.permute(0, 4, 1, 2, 3).contiguous(), encodings)
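
get_neighbors is assumed above. For codebook vectors arranged on a SOM grid, a hypothetical 4-neighbourhood version (the grid shape is an assumption) might look like:

def get_neighbors(self, idx, grid_h=8, grid_w=8):
    # the grid index itself plus its von Neumann neighbours
    r, c = divmod(idx, grid_w)
    neighbors = [idx]
    if r > 0:
        neighbors.append(idx - grid_w)
    if r < grid_h - 1:
        neighbors.append(idx + grid_w)
    if c > 0:
        neighbors.append(idx - 1)
    if c < grid_w - 1:
        neighbors.append(idx + 1)
    return neighbors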
Code Example #11
def translate_crnn(img, model, max_seq_length=128, sos_token=1, eos_token=2):
    "data: BxCXHxW"
    model.eval()
    device = img.device

    with torch.no_grad():
        outputs = model(img, None, None)  # T B V
        outputs = outputs.permute(1, 0, 2)  # BxTxV
        # outputs = outputs.to('cpu')

        probs, preds = outputs.max(2)  # BxT

        char_probs = torch.multiply(probs,
                                    preds > 3)  # exclude <pad> <sos> <eos> <*>
        char_probs = torch.sum(char_probs, dim=-1) / (char_probs > 0).sum(-1)

        char_probs = char_probs.cpu().numpy()
        preds = preds.cpu().numpy()
    return preds, char_probs
Code Example #12
    def forward(self, logits, labels):
        logits = torch.nn.functional.softmax(logits, dim=-1)
        logits = torch.argmax(logits, dim=-1)
        ones = torch.ones_like(labels)
        zero = torch.zeros_like(labels)
        y_true_mask = torch.where(labels < 0.5, zero, ones)
        y_pred_mask = torch.where(logits < 0.5, zero, ones)

        y_true_mask = y_true_mask.view(size=(-1, ))
        y_pred_mask = y_pred_mask.view(size=(-1, ))

        y_pred = logits.view(size=(-1, )).float()
        y_true = labels.view(size=(-1, )).float()
        corr = torch.eq(y_pred, y_true)
        corr = torch.multiply(corr.float(), y_true_mask)
        recall = torch.sum(corr) / (torch.sum(y_true_mask) + 1e-8)
        precision = torch.sum(corr) / (torch.sum(y_pred_mask) + 1e-8)
        f1 = 2 * recall * precision / (recall + precision + 1e-8)
        return recall, precision, f1
Code Example #13
    def _tsne_repel(self, z_batch, p):
        nu = LATENT_DIMENSION

        sum_y = torch.sum(torch.square(z_batch), dim=1)
        matmul_result = torch.matmul(z_batch, torch.transpose(z_batch, 0, 1))
        num = -2.0 * matmul_result + torch.reshape(sum_y, [-1, 1]) + sum_y
        num = num / nu

        p_out = torch.from_numpy(p) + 0.1 / BATCH_SIZE
        p_out = p_out / torch.unsqueeze(torch.sum(p_out, dim=1), 1)

        num = torch.pow(1.0 + num, -(nu + 1.0) / 2.0)
        attraction = torch.multiply(p_out, torch.log(num))
        attraction = -torch.sum(attraction)

        den = torch.sum(num, dim=1) - 1
        repellant = torch.sum(torch.log(den))

        return (repellant + attraction) / BATCH_SIZE
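
num starts out as the squared pairwise distance matrix, built from the expansion -2 z z^T + ||z_i||^2 + ||z_j||^2; a quick sanity check against torch.cdist:

import torch

z = torch.randn(4, 3)
sum_y = torch.sum(torch.square(z), dim=1)
num = -2.0 * torch.matmul(z, z.t()) + torch.reshape(sum_y, [-1, 1]) + sum_y
assert torch.allclose(num, torch.cdist(z, z) ** 2, atol=1e-5)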
Code Example #14
File: submodules.py Project: senyosimpson/sres
    def forward(self, x, beta=0.2):
        input_layer = x
        x1 = self.leakyrelu(self.conv1(x))
        x1 = torch.cat((x1, x), dim=1)

        x2 = self.leakyrelu(self.conv2(x1))
        x2 = torch.cat((x2, x1), dim=1)

        x3 = self.leakyrelu(self.conv3(x2))
        x3 = torch.cat((x3, x2), dim=1)

        x4 = self.leakyrelu(self.conv4(x3))
        x4 = torch.cat((x4, x3), dim=1)
        x4 = torch.multiply(x4, beta)

        if self.shortcut:
            input_layer = self.shortcut(input_layer)
        out = torch.add(x4, input_layer)
        return out
Code Example #15
File: model_fgcn.py Project: llt1836/TE-HI-GCN
    def forward(self, x, adj):
        feature_dim = int(adj.shape[-1])
        eye = torch.eye(feature_dim).cuda()
        if x is None:
            AXW = torch.tensordot(adj, self.kernel, [[-1], [0]])  # batch_size * num_node * feature_dim
        else:
            XW = torch.tensordot(x, self.kernel, [[-1], [0]])  # batch * num_node * feature_dim
            AXW = torch.matmul(adj, XW)  # batch * num_node * feature_dim
        I_cAXW = eye + self.c * AXW
        y_relu = torch.nn.functional.relu(I_cAXW)
        temp = torch.mean(input=y_relu, dim=-2, keepdim=True) + 1e-6
        col_mean = temp.repeat([1, feature_dim, 1])
        y_norm = torch.divide(y_relu, col_mean)
        output = torch.nn.functional.softplus(y_norm)
        if self.neg_penalty != 0:
            neg_loss = torch.multiply(torch.tensor(self.neg_penalty),
                                      torch.sum(torch.nn.functional.relu(1e-6 - self.kernel)))
            self.losses.append(neg_loss)
        return output
Code Example #16
    def model(self, raw_expr, encoded_expr, read_depth):

        pyro.module("decoder", self.decoder)

        with pyro.plate("genes", self.num_genes):

            dispersion = pyro.sample(
                "dispersion",
                dist.Gamma(
                    torch.tensor(2.).to(self.device),
                    torch.tensor(0.5).to(self.device)))
            psi = pyro.sample(
                "dropout",
                dist.Beta(
                    torch.tensor(1.).to(self.device),
                    torch.tensor(10.).to(self.device)))

        #pyro.module("decoder", self.decoder)
        with pyro.plate("cells", encoded_expr.shape[0]):
            # Dirichlet prior  𝑝(𝜃|𝛼) is replaced by a log-normal distribution

            theta_loc = self.prior_mu * encoded_expr.new_ones(
                (encoded_expr.shape[0], self.num_topics))
            theta_scale = self.prior_std * encoded_expr.new_ones(
                (encoded_expr.shape[0], self.num_topics))
            theta = pyro.sample(
                "theta",
                dist.LogNormal(theta_loc, theta_scale).to_event(1))
            theta = theta / theta.sum(-1, keepdim=True)
            # conditional distribution of 𝑤𝑛 is defined as
            # 𝑤𝑛|𝛽,𝜃 ~ Categorical(𝜎(𝛽𝜃))
            expr_rate = pyro.deterministic("expr_rate", self.decoder(theta))

            mu = torch.multiply(read_depth, expr_rate)
            p = torch.minimum(mu / (mu + dispersion), self.max_prob)

            pyro.sample(
                'obs',
                dist.ZeroInflatedNegativeBinomial(total_count=dispersion,
                                                  probs=p,
                                                  gate=psi).to_event(1),
                obs=raw_expr)
Code Example #17
File: PHER.py Project: connorray/PHER
def optimize(model, target_model, batch, num_actions, criterion, optimizer):
    goals, observations, actions, rewards, next_observations, dones = batch
    next_state_action_values = predict(target_model, goals=goals, observations=next_observations,
                                       num_actions=num_actions)
    next_state_action_values[dones] = 0.0
    state_action_values = torch.from_numpy(rewards).to("cuda") + DISCOUNT_FACTOR_GAMMA * torch.max(
        next_state_action_values, dim=1).values
    one_hot_actions = np.array([one_hot_encode(action, num_actions) for action in actions])
    expected_state_action_values = state_action_values * (
                1 - torch.from_numpy(dones.astype(float)).to("cuda")) - torch.from_numpy(dones.astype(float)).to("cuda")
    state_action_values = model([observations, one_hot_actions, goals])
    state_action_values = torch.sum(torch.multiply(state_action_values, torch.from_numpy(one_hot_actions).to("cuda")),
                                    dim=1)
    loss = criterion(expected_state_action_values, state_action_values)
    optimizer.zero_grad()
    loss.backward()
    # clip norm = 1.0
    torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)  # grad clipping
    optimizer.step()
    return loss.item()
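
one_hot_encode and predict are assumed by this snippet. A minimal sketch of the former, consistent with the np.array usage above:

import numpy as np

def one_hot_encode(action, num_actions):
    vec = np.zeros(num_actions, dtype=np.float32)
    vec[action] = 1.0
    return vec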
Code Example #18
    def forward(self, logits, labels):

        y_pred = logits.view(size=(-1, )).float()
        y_true = labels.view(size=(-1, )).float()

        ones = torch.ones_like(y_true)
        zero = torch.zeros_like(y_true)
        y_true_one = torch.where(y_true < 1, zero, ones)
        y_true_one = y_true_one.view(size=(-1, )).float()

        ones = torch.ones_like(y_pred)
        zero = torch.zeros_like(y_pred)
        y_pred_one = torch.where(y_pred < 1, zero, ones)
        y_pred_one = y_pred_one.view(size=(-1, )).float()

        corr = torch.eq(y_pred, y_true)
        corr = torch.multiply(corr.float(), y_true_one)
        recall = torch.sum(corr) / (torch.sum(y_true_one) + 1e-8)
        precision = torch.sum(corr) / (torch.sum(y_pred_one) + 1e-8)
        f1 = 2 * recall * precision / (recall + precision + 1e-8)

        return recall, precision, f1
Code Example #19
    def forward(self, x):
        x = self.conv_layer1(x)
        x = self.conv_layer2(x)
        x = self.conv_layer3(x)
        x = self.resnet_layer(x)
        avg_pool = F.avg_pool2d(x, kernel_size=(x.shape[2], x.shape[3]))
        avg_pool = torch.reshape(avg_pool, (x.shape[0], 1, 1, x.shape[1]))
        avg_pool = self.dense1(avg_pool)
        avg_pool = self.dense2(avg_pool)
        max_pool = F.max_pool2d(x, kernel_size=(x.shape[2], x.shape[3]))
        max_pool = torch.reshape(max_pool, (x.shape[0], 1, 1, x.shape[1]))
        max_pool = self.dense1(max_pool)
        max_pool = self.dense2(max_pool)
        cbam_feature = torch.add(avg_pool, max_pool)
        cbam_feature = torch.sigmoid(cbam_feature)
        cbam_feature = torch.reshape(
            cbam_feature, (cbam_feature.shape[0], cbam_feature.shape[3], 1, 1))
        cbam_feature = torch.multiply(x, cbam_feature)
        output = F.avg_pool2d(cbam_feature,
                              kernel_size=(x.shape[2], x.shape[3]))
        # output = F.softmax(output, dim=1)
        return output.squeeze(3).squeeze(2)
Code Example #20
    def forward(self, labels, embeddings, squared=False):
        pairwise_dist = self._pairwise_distance(embeddings)
        mask_anchor_positive = self._get_anchor_positive_triplet_mask(labels)
        anchor_positive_dist = torch.multiply(mask_anchor_positive,
                                              pairwise_dist)
        hardest_positive_dist = torch.max(anchor_positive_dist,
                                          dim=1,
                                          keepdim=True)[0]

        mask_anchor_negative = self._get_anchor_negative_triplet_mask(labels)
        max_anchor_negative_dist = torch.max(pairwise_dist,
                                             dim=1,
                                             keepdim=True)
        anchor_negative_dist = pairwise_dist + max_anchor_negative_dist[0] * (
            1.0 - mask_anchor_negative)
        hardest_negative_dist = torch.min(anchor_negative_dist,
                                          dim=1,
                                          keepdim=True)[0]
        triplet_loss = torch.maximum(
            hardest_positive_dist - hardest_negative_dist + self.margin,
            torch.zeros_like(pairwise_dist))  # zeros on the same device/dtype
        return triplet_loss.mean()
Code Example #21
File: critic.py Project: NikZy/IT3105-AiProg
    def update_state_values(self, td_error: float, reinforcement: float,
                            state: list, next_state: list) -> None:
        state, next_state, discount_factor, reinforcement = self.__convert_to_tensors(
            state, next_state, self.discount_factor, reinforcement)

        target_value = torch.add(
            reinforcement,
            torch.multiply(discount_factor, self.model(next_state)))

        self.optimizer.zero_grad()
        prediction = self.model(state)

        loss = self.loss_function(prediction, target_value)
        loss.backward()

        # Modify gradients
        for index, weight in enumerate(self.model.parameters()):
            weight.grad *= 0.5 / td_error
            self.eligibilities[index] += weight.grad
            weight.grad = self.eligibilities[index] * td_error

        self.optimizer.step()
Code Example #22
    def model(self, raw_expr, encoded_expr, read_depth):

        pyro.module("decoder", self.decoder)

        dispersion = pyro.param("dispersion",
                                torch.tensor(5.).to(self.device) *
                                torch.ones(self.num_genes).to(self.device),
                                constraint=constraints.positive)

        with pyro.plate("cells", encoded_expr.shape[0]):

            # Dirichlet prior  𝑝(𝜃|𝛼) is replaced by a log-normal distribution
            theta_loc = self.prior_mu * encoded_expr.new_ones(
                (encoded_expr.shape[0], self.num_topics))
            theta_scale = self.prior_std * encoded_expr.new_ones(
                (encoded_expr.shape[0], self.num_topics))
            theta = pyro.sample(
                "theta",
                dist.LogNormal(theta_loc, theta_scale).to_event(1))
            theta = theta / theta.sum(-1, keepdim=True)

            read_scale = pyro.sample(
                'read_depth',
                dist.LogNormal(torch.log(read_depth), 1.).to_event(1))

            #read_scale = torch.minimum(read_scale, self.max_scale)
            # conditional distribution of 𝑤𝑛 is defined as
            # 𝑤𝑛|𝛽,𝜃 ~ Categorical(𝜎(𝛽𝜃))
            expr_rate, dropout = self.decoder(theta)

            mu = torch.multiply(read_scale, expr_rate)
            p = torch.minimum(mu / (mu + dispersion), self.max_prob)

            pyro.sample('obs',
                        dist.ZeroInflatedNegativeBinomial(
                            total_count=dispersion,
                            probs=p,
                            gate_logits=dropout).to_event(1),
                        obs=raw_expr)
Code Example #23
def hd_loss(seg_soft, gt, seg_dtm, gt_dtm, spatial_weight=0, weight_arr=None):
    """
    Compute Hausdorff distance loss for binary segmentation.
    input: seg_soft: softmax results,  shape=(b,2,x,y,z)
           gt: ground truth, shape=(b,x,y,z)
           seg_dtm: segmentation distance transform map; shape=(b,2,x,y,z)
           gt_dtm: ground truth distance transform map; shape=(b,2,x,y,z)
    output: boundary_loss; scalar
    """

    delta_s = (seg_soft[:, 1, ...] - gt.float())**2
    s_dtm = seg_dtm[:, 1, ...]**2
    g_dtm = gt_dtm[:, 1, ...]**2
    dtm = s_dtm + g_dtm
    multiplied = torch.einsum('bxyz, bxyz->bxyz', delta_s, dtm)
    """ Make spatial weight matrix that is just exponential decay from middle of image """
    if spatial_weight:
        weighted = torch.multiply(multiplied, weight_arr)
        multiplied = weighted

    hd_loss = multiplied.mean()

    return hd_loss
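
A hypothetical call matching the documented shapes (batch of 2, binary classes, an 8x8x8 volume):

import torch

seg_soft = torch.softmax(torch.randn(2, 2, 8, 8, 8), dim=1)
gt = torch.randint(0, 2, (2, 8, 8, 8))
seg_dtm = torch.rand(2, 2, 8, 8, 8)  # distance transforms are precomputed elsewhere
gt_dtm = torch.rand(2, 2, 8, 8, 8)
print(hd_loss(seg_soft, gt, seg_dtm, gt_dtm).item())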
Code Example #24
def stack_input_predictions_to_rgb(inputs, predictions):
    """

    :param inputs: (B, 1, H, W) z-normalized in [-1, 1] with mean=0.5 and std=0.5
    :type inputs: torch.Tensor
    :param predictions: (B, 2, H, W) in interval [-1, 1]
    :type predictions: torch.Tensor

    :returns torch.tensor: shape (B, H, W, 3) with RGB values in [0, 255]
    """
    inp_type = inputs.dtype
    unnormalized_inputs = torch.multiply(inputs,
                                         0.5) + 0.5  # interval now [0, 1]
    l = unnormalized_inputs * 100
    ab = predictions * 127.5 - 0.5

    # l in 0..100
    # ab in -128..127
    lab = torch.cat([l, ab], dim=1).permute(0, 2, 3, 1)  # (B, H, W, C)

    rgb = lab2rgb(lab.float()) * 255
    rgb = rgb.type(inp_type)
    return rgb
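
lab2rgb is assumed to be in scope. kornia ships an equivalent conversion, but it expects channels-first input, so a thin adapter would be needed (a sketch, assuming kornia is acceptable as the dependency):

import kornia

def lab2rgb(lab_bhwc):
    # kornia.color.lab_to_rgb expects (B, 3, H, W); adapt from (B, H, W, 3)
    rgb_bchw = kornia.color.lab_to_rgb(lab_bhwc.permute(0, 3, 1, 2))
    return rgb_bchw.permute(0, 2, 3, 1)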
Code Example #25
    def forward(self, logits, labels, seq_id):
        logits = logits[:, :, 1]
        batch_size, max_length = logits.shape
        y_pred = torch.Tensor(1, max_length).to(device)  # placeholder row, dropped below
        for i in range(batch_size):
            tmp = logits[i]
            SEP = tmp[seq_id[i][1]]
            tmp = tmp.view(1, -1)
            ones = torch.ones_like(tmp)
            zero = torch.zeros_like(tmp)
            tmp_y_pred = torch.where(tmp <= SEP, zero, ones)
            y_pred = torch.cat([y_pred, tmp_y_pred], dim=0)

        y_pred = y_pred[1:, :]
        y_pred = y_pred.view(size=(-1, )).float()
        y_true = labels.contiguous().view(size=(-1, )).float()
        corr = torch.eq(y_pred, y_true)
        corr = torch.multiply(corr.float(), y_true)
        recall = torch.sum(corr) / (torch.sum(y_true) + 1e-8)
        precision = torch.sum(corr) / (torch.sum(y_pred) + 1e-8)
        f1 = 2 * recall * precision / (recall + precision + 1e-8)

        return recall, precision, f1
Code Example #26
File: cnn.py Project: wangru8080/textMatch
    def forward(self, x1, x2):
        x1_embedd = self.embedding(x1)
        x1_embedd = self.spatial_dropout(x1_embedd)
        x2_embedd = self.embedding(x2)
        x2_embedd = self.spatial_dropout(x2_embedd)

        absSub_list = []
        mul_list = []
        cossim_list = []
        euclidean_list = []
        
        for cnn in self.cnn_list:
            x1_cnn_feature = cnn(x1_embedd.permute(0, 2, 1)) # [batch, filters, 1]
            x2_cnn_feature = cnn(x2_embedd.permute(0, 2, 1)) # [batch, filters, 1]

            sub = torch.abs(x1_cnn_feature - x2_cnn_feature).squeeze(-1) # [batch, filters]
            absSub_list.append(sub)

            mul = torch.multiply(x1_cnn_feature, x2_cnn_feature).squeeze(-1) # [batch, filters]
            mul_list.append(mul)

            cos_sim = self.cos_sim_layer(x1_cnn_feature.permute(0, 2, 1), x2_cnn_feature.permute(0, 2, 1)) # [batch, 1]
            cossim_list.append(cos_sim)

            euclidean = self.euclidean_layer(x1_cnn_feature.permute(0, 2, 1), x2_cnn_feature.permute(0, 2, 1)) # [batch, 1]
            euclidean_list.append(euclidean)
        
        absSub_list = torch.cat(absSub_list, dim=-1)
        mul_list = torch.cat(mul_list, dim=-1)
        cossim_list = torch.cat(cossim_list, dim=-1)
        euclidean_list = torch.cat(euclidean_list, dim=-1)

        out = torch.cat([absSub_list, mul_list, cossim_list, euclidean_list], dim=-1)

        logit = self.fc(out)
        prob = F.softmax(logit, dim=1)
        return logit, prob
Code Example #27
    def forward(self, user, item):
        inputs = torch.cat([
            torch.index_select(self.p, 0, user),
            torch.index_select(self.q, 0, item),
            torch.multiply(torch.index_select(self.u, 0, user),
                           torch.index_select(self.v, 0, item))
        ],
                           dim=1)

        x = self.layer1(inputs)
        x = torch.sigmoid(x)

        x = self.layer2(x)
        x = torch.sigmoid(x)

        x = self.layer3(x)
        x = torch.sigmoid(x)

        x = self.layer4(x)
        x = torch.sigmoid(x)

        x = self.layer5(x)

        return torch.flatten(x)
Code Example #28
    def walk_pair(model, label, z_mod1, z_mod2):
        walk_zs = []
        for i in range(DFLAGS.num_steps):
            im_noise = torch.randn_like(z_mod1).detach()

            im_noise.normal_()
            z_mod1 = z_mod1 + DFLAGS.step_noise * im_noise

            z_mod1.requires_grad_(requires_grad=True)

            energy_next_z1 = model.forward_top(z_mod1, label)
            gaussian_distance = torch.multiply(
                torch.tensor(-DFLAGS.step_valley_depth * model.depth_mod),
                torch.exp(
                    torch.divide(
                        torch.sum(torch.square(torch.subtract(z_mod1, z_mod2)),
                                  dim=model.embedded_dims),  # (1, 2, 3)
                        torch.tensor(-DFLAGS.step_valley_sigma *
                                     model.sigma_mod)).double()))

            if FLAGS.verbose > 1:
                print("step: ", i, energy_next_z1.mean(), "gd: ",
                      gaussian_distance.mean())

            im_grad1 = torch.autograd.grad(
                [torch.add(energy_next_z1.sum(), gaussian_distance.sum())],
                [z_mod1])[0]

            z_mod1 = z_mod1 - DFLAGS.step_lr * model.lr_mod * torch.clamp(
                im_grad1, -FLAGS.gradient_clip, FLAGS.gradient_clip)
            z_mod1 = z_mod1.detach()

            if FLAGS.plot_walks:
                walk_zs.append(z_mod1.cpu().numpy())

        return z_mod1, z_mod2, walk_zs
Code Example #29
File: gradcam.py Project: FrankBrongers/FactorVAE
    def generate(self, z):
        """
        Generates attention map and all individual maps per latent dimension z.
        """

        A = self.get_conv_outputs(self.outputs_forward, self.target_layer)

        b, n, w, h = A.shape
        A_flat = A.view(b * n, w * h)

        M_list = torch.zeros([z.shape[1], b, self.image_size,
                              self.image_size]).to(self.device)

        for i, z_i in enumerate(z[1]):  # one iteration per latent dimension
            one_hot = torch.zeros_like(z)
            one_hot[:, i] = 1
            self.model.zero_grad()
            z.backward(gradient=one_hot, retain_graph=True)

            self.grads = self.get_conv_outputs(self.outputs_backward,
                                               self.target_layer)

            gradients = self.grads[0].to(self.device)
            a_k = (torch.sum(gradients, dim=(2, 3)) /
                   (gradients.shape[2] * gradients.shape[3])).flatten()

            a_kA = torch.multiply(a_k, A_flat.T).T
            a_kA = (a_kA).view((b, n, w, h))
            M_i = F.relu(a_kA).sum(dim=1, keepdim=True)
            M_i = F.interpolate(M_i, (self.image_size, self.image_size),
                                mode="bilinear",
                                align_corners=True)
            M_i = M_i.squeeze(1)
            M_list[i, :, :, :] += M_i

        return M_list
Code Example #30
File: stocks_svm.py Project: benhoar/CS260_Project
def my_hinge_loss(output, target):
    loss = 1 - torch.multiply(output, target)
    loss = torch.clamp(loss, min=0.0)  # hinge: max(0, 1 - y * f(x))
    return torch.mean(loss)
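
For an SVM, target is expected in {-1, +1}. A quick numeric check:

import torch

scores = torch.tensor([0.7, -1.2, 2.5])
targets = torch.tensor([1.0, -1.0, -1.0])
print(my_hinge_loss(scores, targets))  # mean of [0.3, 0.0, 3.5] -> tensor(1.2667)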