def forward(self, c1_outputs):
        s2_outputs = []
        for c1_output in c1_outputs:
            conv_output = self.conv(c1_output)

            # Unstack the orientations
            conv_output_size = conv_output.shape[3]  # assumes square feature maps
            conv_output = conv_output.view(-1, self.num_orientations,
                                           self.num_patches, conv_output_size,
                                           conv_output_size)

            # Pool over orientations
            conv_output = conv_output.sum(dim=1)

            # Compute distance
            c1_sq = self.uniform(torch.sum(c1_output**2, dim=1, keepdim=True))
            dist = c1_sq - 2 * conv_output
            dist += self.patches_sum_sq[None, :, None, None]

            # Apply activation function
            if self.activation == 'gaussian':
                dist = torch.exp(-1 / (2 * self.sigma**2) * dist)
            elif self.activation == 'euclidean':
                dist[dist < 0] = 0  # Negative values should never occur
                torch.sqrt_(dist)
                dist = -dist
            else:
                raise ValueError("activation parameter should be either "
                                 "'gaussian' or 'euclidean'.")

            s2_outputs.append(dist)
        return s2_outputs
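The distance computation above relies on the expansion ||x - p||^2 = sum(x^2) - 2*(x . p) + sum(p^2), where the convolution supplies the cross term x . p at every window. A minimal, self-contained sketch of that identity (toy shapes, not the original class):

import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 8, 8)                  # input feature map
p = torch.randn(1, 4, 3, 3)                  # one patch, used as a conv kernel
cross = F.conv2d(x, p)                       # x . p at every 3x3 window
x_sq = F.conv2d(x**2, torch.ones_like(p))    # sum of x^2 over every window
dist = x_sq - 2 * cross + (p**2).sum()       # squared distance at every window

# explicit check against an unfold-based computation
windows = F.unfold(x, kernel_size=3).transpose(1, 2)     # (1, 36, 4*9)
explicit = ((windows - p.reshape(1, 1, -1))**2).sum(-1).reshape_as(dist)
assert torch.allclose(dist, explicit, atol=1e-4)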
Example #2
    def _update_direction_vars_and_norm(self, params):
        """
        Update the search or update direction. In SGD training mode it is used as the update direction.
        In measure line state the normalized direction is used as search direction.
        :param params: the network parameters
        """
        with torch.no_grad():
            norm = torch.tensor(0.0)
            for p in params:
                if p.grad is None:
                    continue
                param_state = self.state[p]
                if 'dir_buffer' not in param_state:
                    buf = param_state['dir_buffer'] = torch.zeros_like(
                        p.grad.data, device=p.device)
                else:
                    buf = param_state['dir_buffer']
                buf.mul_(self.momentum)  # trailing underscore denotes an in-place op
                buf.add_(p.grad.data)
                flat_buf = buf.view(-1)
                norm = norm + torch.dot(flat_buf, flat_buf)
            torch.sqrt_(norm)
            if norm == 0.0:
                norm = self.epsilon

            if torch.cuda.is_available() and isinstance(norm, torch.Tensor):
                self.direction_norm = norm.cuda()
            else:
                self.direction_norm = norm
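For reference, the accumulated dot products above amount to the L2 norm of all momentum buffers viewed as one flat vector. A standalone sketch with toy parameters (assumed shapes):

import torch

params = [torch.randn(3, 4, requires_grad=True), torch.randn(5, requires_grad=True)]
loss = sum((p**2).sum() for p in params)
loss.backward()

# on the first step the momentum buffer is just the gradient
buffers = [p.grad.clone() for p in params]
norm = torch.sqrt(sum(torch.dot(b.view(-1), b.view(-1)) for b in buffers))
assert torch.allclose(norm, torch.cat([b.view(-1) for b in buffers]).norm())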
    def aggregate(self, nodes, pre_hidden_embs, pre_neighs):
        unique_nodes_list, samp_neighs, unique_nodes = pre_neighs

        assert len(nodes) == len(samp_neighs)
        indicator = [(nodes[i] in samp_neighs[i]) for i in range(len(samp_neighs))]
        assert False not in indicator
        if not self.gat and not self.gcn:
            samp_neighs = [(samp_neighs[i]-set([nodes[i]])) for i in range(len(samp_neighs))]
        if len(pre_hidden_embs) == len(unique_nodes):
            embed_matrix = pre_hidden_embs
        else:
            embed_matrix = pre_hidden_embs[torch.LongTensor(unique_nodes_list)]
        # get row and column nonzero indices for the mask tensor
        row_indices = [i for i in range(len(samp_neighs)) for j in range(len(samp_neighs[i]))]
        column_indices = [unique_nodes[n] for samp_neigh in samp_neighs for n in samp_neigh]
        # get the edge counts for each edge
        edge_counts = self.adj_matrix[nodes][:, unique_nodes_list].toarray()
        edge_counts = torch.FloatTensor(edge_counts).to(embed_matrix.device)
        torch.sqrt_(edge_counts)
        if self.gat:
            indices = (torch.LongTensor(row_indices), torch.LongTensor(column_indices))
            nodes_indices = torch.LongTensor([unique_nodes[nodes[n]] for n in row_indices])
            row_embs = embed_matrix[nodes_indices]
            col_embs = embed_matrix[column_indices]
            atts = self.attention(row_embs, col_embs).squeeze()
            mask = torch.zeros(len(samp_neighs), len(unique_nodes)).to(embed_matrix.device)
            mask.index_put_(indices, atts)
            mask = mask * edge_counts
            # masked softmax over existing edges: exponentiate, zero non-edges, row-normalize
            mask = torch.exp(mask) * (mask != 0).float()
            mask = F.normalize(mask, p=1, dim=1)
        else:
            mask = torch.zeros(len(samp_neighs), len(unique_nodes)).to(embed_matrix.device)
            mask[row_indices, column_indices] = 1
            # weight each edge by its (square-rooted) count
            mask = mask * edge_counts
            mask = F.normalize(mask, p=1, dim=1)

        if self.agg_func == 'MEAN':
            aggregate_feats = mask.mm(embed_matrix)
        elif self.agg_func == 'MAX':
            nonzero_indices = [row.nonzero() for row in mask != 0]
            aggregate_feats = []
            for feat in [embed_matrix[idx.squeeze()] for idx in nonzero_indices]:
                if len(feat.size()) == 1:  # single neighbor: keep the row as-is
                    aggregate_feats.append(feat.view(1, -1))
                else:
                    aggregate_feats.append(torch.max(feat, 0)[0].view(1, -1))
            aggregate_feats = torch.cat(aggregate_feats, 0)

        return aggregate_feats
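The GAT branch's `torch.exp(mask) * (mask != 0)` line implements a masked softmax: only existing edges receive weight, and the subsequent L1 normalization makes each row sum to one. A small sketch with hypothetical attention scores:

import torch
import torch.nn.functional as F

scores = torch.tensor([[0.5, 0.0, -1.2],     # 0.0 marks "no edge"
                       [2.0, 0.3, 0.0]])
weights = torch.exp(scores) * (scores != 0).float()  # zero out non-edges
weights = F.normalize(weights, p=1, dim=1)           # each row sums to 1
print(weights)
# caveat: an attention score of exactly 0.0 on a real edge would also be masked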
    def forward(self, img):

        img_r = img[:, 0:1]
        img_g = img[:, 1:2]
        img_b = img[:, 2:3]
        blur_horizontal = self.gaussian_filter_horizontal(img_r)
        blurred_img_r = self.gaussian_filter_vertical(blur_horizontal)
        blur_horizontal = self.gaussian_filter_horizontal(img_g)
        blurred_img_g = self.gaussian_filter_vertical(blur_horizontal)
        blur_horizontal = self.gaussian_filter_horizontal(img_b)
        blurred_img_b = self.gaussian_filter_vertical(blur_horizontal)

        # (B, 3, H, W); equivalent to the original stack-then-squeeze for batch size 1
        blurred_img = torch.cat(
            [blurred_img_r, blurred_img_g, blurred_img_b], dim=1)

        grad_x_r = self.sobel_filter_horizontal(blurred_img_r)
        grad_y_r = self.sobel_filter_vertical(blurred_img_r)
        grad_x_g = self.sobel_filter_horizontal(blurred_img_g)
        grad_y_g = self.sobel_filter_vertical(blurred_img_g)
        grad_x_b = self.sobel_filter_horizontal(blurred_img_b)
        grad_y_b = self.sobel_filter_vertical(blurred_img_b)

        # COMPUTE THICK EDGES

        grad_mag_r = torch.sqrt_(grad_x_r**2 + grad_y_r**2)
        grad_mag_g = torch.sqrt_(grad_x_g**2 + grad_y_g**2)
        grad_mag_b = torch.sqrt_(grad_x_b**2 + grad_y_b**2)
        #grad_mag_r = torch.abs(grad_x_r) + torch.abs(grad_y_r)
        #grad_mag_g = torch.abs(grad_x_g) + torch.abs(grad_y_g)
        #grad_mag_b = torch.abs(grad_x_b) + torch.abs(grad_y_b)
        #grad_mag = torch.cat([grad_x_r,grad_y_r,grad_x_g,grad_y_g,grad_x_b,grad_y_b],dim=1)/10.0
        '''grad_orientation = []
        temp = (torch.atan2(grad_y_r,grad_x_r) * (180.0/3.14159)) + 180.0
        temp =  torch.round( temp / 45.0 ) * 45.0
        grad_orientation.append(temp)
        temp = (torch.atan2(grad_y_g,grad_x_g) * (180.0/3.14159)) + 180.0
        temp =  torch.round( temp / 45.0 ) * 45.0
        grad_orientation.append(temp)
        temp = (torch.atan2(grad_y_b,grad_x_b) * (180.0/3.14159)) + 180.0
        temp =  torch.round( temp / 45.0 ) * 45.0
        grad_orientation.append(temp)'''

        #grad_mag = (grad_mag_r+grad_mag_b+grad_mag_b)/3.0
        #grad_or = torch.cat([grad_mag_r,grad_mag_b,grad_mag_b],dim=1)/100.0
        #all_filtered = self.directional_filter(grad_mag)
        #print(all_filtered.shape)
        #return 0, 0 , 0 , 0 , all_filtered[:,:3,:,:] , 0
        return torch.cat(
            [grad_x_r, grad_y_r, grad_x_g, grad_y_g, grad_x_b, grad_y_b],
            dim=1)
        # grad_orientation = (torch.atan2(grad_y_r+grad_y_g+grad_y_b, grad_x_r+grad_x_g+grad_x_b) * (180.0/3.14159))
Example #5
 def forward(ctx, x):
     x = x.clamp(min=1.0 + 1e-15)
     ctx.save_for_backward(x)
     z = x.double()
     z = (z + torch.sqrt_(z.pow(2) - 1)).clamp_min_(1e-15).log_().to(
         x.dtype)
     return z
 def forward(self, x):
     lrt_mean = self.bias
     lrt_std = torch.sqrt_(1e-16 + F.linear(x * x, self.sigma * self.sigma))
     if self.training:
         eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
     else:
         eps = 0.0
     return lrt_mean + eps * lrt_std
Example #8
    def forward(self, ab_prediction: Tensor, masks: Tensor, ab_gt: Tensor = None) -> Tensor:
        inp_dtype = ab_prediction.dtype if self.target == 'prediction' else ab_gt.dtype
        if self.target == 'gt':
            assert ab_gt is not None
            assert ab_prediction.size() == ab_gt.size()
        # x: (B, C, H, W)
        # masks: (B, H, W) LongTensor
        masks.requires_grad = False
        # masks: (B, S, H, W)
        masks = F.one_hot(masks).bool().permute((0, 3, 1, 2))

        # expand x and the masks to make the loss calculation vectorized per image
        ab_pred_exp = torch.unsqueeze(ab_prediction, 2).expand(
            ab_prediction.size()[0:2] + (len(masks[0]),) + ab_prediction.size()[2:])

        if self.target == 'gt':
            ab_gt_exp = torch.unsqueeze(ab_gt, 2).expand(
                ab_gt.size()[0:2] + (len(masks[0]),) + ab_gt.size()[2:])

        batch_segment_masks_exp = torch.unsqueeze(masks, 1).expand(
            ab_prediction.size()[0:2] + (len(masks[0]),) + ab_prediction.size()[2:]).contiguous()
        batch_segment_masks_exp.requires_grad = False

        target = ab_pred_exp if self.target == 'prediction' else ab_gt_exp

        # parallel for all in batch dim
        # for each channel (a, b)
        masked = torch.mul(target, batch_segment_masks_exp)  # (B, C, S, H, W)
        masked = masked.detach()
        x_mean = masked.sum(-1).sum(-1) / (batch_segment_masks_exp.sum(-1).sum(-1) + 1e-8)  # (B, C, S)
        # (B, C, S, H, W) all values in a segment are the same
        x_mean = torch.unsqueeze(torch.unsqueeze(x_mean, -1), -1).expand_as(ab_pred_exp)
        x_mean = x_mean.detach()

        if self.mode == 'square':
            # inputs to mse: (B, C, len(segment))
            with autocast(enabled=False):
                loss = F.mse_loss(ab_pred_exp[batch_segment_masks_exp].float(),
                                  x_mean[batch_segment_masks_exp].float()).type(inp_dtype)  # scalar, can be nan
        elif self.mode == 'euclidean':
            # inputs to square: (B, C, len(segment))
            squared = torch.square(ab_pred_exp[batch_segment_masks_exp] -
                                   x_mean[batch_segment_masks_exp])  # (B, C, S, len(segment))
            summed = squared.reshape(ab_prediction.size()[0:2] + (-1,)).sum(1)
            # gradient of sqrt(0) is nan, eps is required
            loss = torch.sqrt_(summed + 1e-8).mean()
        elif self.mode == 'linear':
            # inputs to L1: (B, C, len(segment))
            with autocast(enabled=False):
                loss = F.smooth_l1_loss(ab_pred_exp[batch_segment_masks_exp].float(),
                                        x_mean[batch_segment_masks_exp].float()).type(inp_dtype)  # scalar, can be nan
        else:
            raise ValueError("mode should be 'square', 'euclidean', or 'linear'.")
        if torch.isnan(loss):
            return torch.tensor(0.0, device=ab_prediction.device, dtype=inp_dtype)

        return loss
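The `summed + 1e-8` guard in the 'euclidean' branch matters because the backward of sqrt at zero divides by zero. A quick demonstration:

import torch

x = torch.zeros(1, requires_grad=True)
torch.sqrt(x).sum().backward()
print(x.grad)                    # tensor([inf]): d/dx sqrt(x) = 1/(2*sqrt(x))

x = torch.zeros(1, requires_grad=True)
torch.sqrt(x + 1e-8).sum().backward()
print(x.grad)                    # finite (~5000) thanks to the epsilon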
def euclidean_distance_tensor(dataA, dataB, out=None, splits=1):
    v1 = torch.sum(dataA**2, 1).view(-1, 1)
    v2 = torch.sum(dataB**2, 1).view(1, -1)
    ABt = torch.mm(dataA, torch.t(dataB), out=out)
    rsq = torch.clamp(v1 + v2 - 2 * ABt, min=0, out=out)
    r = rsq
    assert rsq.shape[0] % splits == 0
    sz = rsq.shape[0] // splits
    for i in range(splits):
        r_part = r[i * sz:(i + 1) * sz, :]
        torch.sqrt_(r_part)

    return r
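A usage sketch: with random inputs the result matches torch.cdist up to floating-point noise (splits must divide the row count of dataA):

import torch

A = torch.randn(6, 16)
B = torch.randn(10, 16)
D = euclidean_distance_tensor(A, B, splits=2)
assert torch.allclose(D, torch.cdist(A, B), atol=1e-4)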
    """
def mahalanobis_metric_gpu(X, mean, var):
    torch.cuda.empty_cache()
    dis = torch.zeros([X.shape[0], mean.shape[0]], device=X.device)
    for k in range(mean.shape[0]):
        _m = mean[k]
        _inv = torch.inverse(var[k])
        # method 1
        delta = X - _m
        temp = torch.mm(delta, _inv)
        dis[:, k] = torch.sqrt_(torch.sum(torch.mul(delta, temp), dim=1))
    return dis
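A quick usage sketch with a random symmetric positive-definite covariance per component (toy shapes assumed):

import torch

X = torch.randn(5, 3)
mean = torch.randn(2, 3)
A = torch.randn(2, 3, 3)
var = A @ A.transpose(1, 2) + 0.1 * torch.eye(3)   # (2, 3, 3), SPD
dis = mahalanobis_metric_gpu(X, mean, var)
print(dis.shape)                                   # torch.Size([5, 2])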
def batched_3d_nms(locations,
                   dimensions,
                   scores,
                   rotys,
                   batch_ids,
                   iou_threshold=0.25):
    """
    Select best objects by the position constraint of 3d object
    """
    loc_earths = locations[:, 0::2]
    keeps = []
    for batch_id in torch.unique(batch_ids).cpu().tolist():
        index = (batch_ids == batch_id).nonzero(as_tuple=False).view(-1)
        keep = torch.ones_like(index)
        mask = 1 - torch.eye(len(index)).to(loc_earths.device)
        mask = mask.bool()
        loc = loc_earths[index]
        dim = dimensions[index]  # h, w, l
        score = scores[index]
        score = score.unsqueeze(-1) - score.unsqueeze(0)
        roty = rotys[index]
        det_loc = loc.view(-1, 1, 2) - loc.unsqueeze(dim=0)
        det_loc = det_loc.pow_(2.)
        det_loc = torch.sqrt_(det_loc[:, :, 0] + det_loc[:, :, 1])
        # det_roty = roty.view(-1, 1, 1) - roty.unsqueeze(dim=0)
        # r_idx = det_roty > np.pi
        # det_roty[r_idx] = det_roty[r_idx] - np.pi
        # r_idx = det_roty < -np.pi
        # det_roty[r_idx] = det_roty[r_idx] + np.pi
        dim1 = dim.view(-1, 1, 3)
        dim2 = dim.unsqueeze(dim=0)
        dim_cond1 = (dim1[:, :, 1] + dim2[:, :, 1]) / 2.
        dim_cond2 = (dim1[:, :, 1] + dim2[:, :, 2]) / 2.
        dim_cond3 = (dim1[:, :, 2] + dim2[:, :, 1]) / 2.
        dim_cond4 = (dim1[:, :, 2] + dim2[:, :, 2]) / 2.
        dim_cond = (dim_cond1 + dim_cond2 + dim_cond3 + dim_cond4) / 4.
        remove = (det_loc < dim_cond)
        remove = (remove & mask)
        remove_idx = (score[remove] >= 0)
        if remove_idx.sum() == 0:
            keeps.append(keep.bool())
            continue
        remove = remove.nonzero()
        remove = remove[remove_idx]
        remove = remove[:, 1].unique()
        keep[remove] = 0
        keeps.append(keep.bool())

    return torch.cat(keeps, dim=0) if len(keeps) != 0 else torch.tensor(
        [], dtype=torch.bool, device=loc_earths.device)
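A toy invocation (synthetic objects; shapes follow the slicing the function performs):

import torch

N = 4
locations = torch.randn(N, 3)          # (x, y, z); columns 0 and 2 are used
dimensions = torch.rand(N, 3) + 0.5    # h, w, l
scores = torch.rand(N)
rotys = torch.rand(N) * 3.14
batch_ids = torch.tensor([0, 0, 1, 1])
keep = batched_3d_nms(locations, dimensions, scores, rotys, batch_ids)
print(keep)                            # bool mask over the N objects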
Example #12
    def forward(self, input):

        if self.training:
            std = torch.exp(self.logvar)
            a = F.linear(input, self.weight, self.bias)
            eps = torch.randn_like(a)
            b = eps.mul_(torch.sqrt_(F.linear(input * input, std)))
            output = a + b

            kl = (-0.5 * (1 + self.logvar - self.weight.pow(2) -
                          self.logvar.exp())).sum(dim=-1).mean()  # / (10)
            return output, kl
        else:
            output = F.linear(input, self.weight, self.bias)
            return output, 0
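The `sqrt_(F.linear(input * input, std))` term is the closed-form standard deviation of a linear layer whose weights carry independent Gaussian noise: Var(y_j) = sum_i x_i^2 * var_ji. A Monte-Carlo check with toy sizes (note that `std` in the snippet actually holds variances, exp(logvar)):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
x = torch.randn(1, 8)
weight = torch.randn(4, 8)                 # weight means
var = torch.exp(torch.randn(4, 8) - 2.0)   # weight variances

samples = []
for _ in range(5000):
    w = weight + torch.randn_like(weight) * torch.sqrt(var)
    samples.append(F.linear(x, w))
empirical_std = torch.stack(samples).std(dim=0)
analytic_std = torch.sqrt(F.linear(x * x, var))
print(empirical_std)
print(analytic_std)                        # agrees to within a few percent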
Example #13
    def forward(self, neg, pos, fdb, context, emotion_context):
        # 1 LSTM  - negative prediction
        embed_neg = self.embedding(neg)
        outputs_neg, hidden_neg = self.lstm(embed_neg)
        last_hidden_neg = hidden_neg[1][-1]

        # 2 LSTM  - positive response
        embed_pos = self.embedding(pos)
        outputs_pos, hidden_pos = self.lstm(embed_pos)
        last_hidden_pos = hidden_pos[1][-1]

        # 3 LSTM  - next feedback
        embed_fdb = self.embedding(fdb)
        outputs_fdb, hidden_fdb = self.lstm(embed_fdb)
        last_hidden_fdb = hidden_fdb[1][-1]

        neg_sample = outputs_neg
        pos_sample = outputs_pos

        # 4 semantic classify
        neg_emotion_logits = self.EmotionClassify(neg_sample, last_hidden_fdb, context, emotion_context, batch_norm=True)  # (bsz, 1)
        pos_emotion_logits = self.EmotionClassify(pos_sample, last_hidden_fdb, context, emotion_context, batch_norm=True)  # (bsz, 1)

        # 5 discriminator_loss
        disc_emo_loss = torch.mean(neg_emotion_logits - pos_emotion_logits)
        gen_emo_loss = torch.mean(neg_emotion_logits)


        # 6 wgan
        alpha_empty = torch.empty(context.size(0), 1, 1)
        alpha = Variable(torch.nn.init.uniform_(tensor=alpha_empty, a=0., b=1.)).cuda()  # alpha~[0,1]
        interpolates = alpha * neg_sample + ((1 - alpha) * pos_sample)
        disc_interpolates = self.EmotionClassify(interpolates, last_hidden_fdb, context, emotion_context, batch_norm=True)  # (bsz, 1)

        interpolates.register_hook(extract)  # `extract` stores the hooked grad in the global `xg`
        disc_interpolates.backward(torch.ones_like(disc_interpolates), retain_graph=True)
        gradients = xg  # gradient of the critic output w.r.t. the interpolates

        # L2 norm of each per-sample gradient
        slopes = torch.sqrt_(torch.sum(torch.mul(gradients, gradients), 1))
        gradient_penalty = torch.mean((slopes-1) ** 2)
        gradient_penalty = config.gp_lambda * gradient_penalty
        assert torch.sum(torch.isnan(gradient_penalty)) == 0, "gradient penalty is NaN"
        disc_emo_loss += gradient_penalty  # add gradient norm

        return disc_emo_loss, gen_emo_loss
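The hook-based gradient capture above (register_hook plus the global `xg`) is fragile; the more common WGAN-GP pattern computes the penalty with torch.autograd.grad directly. A generic sketch, not tied to this model:

import torch

def gradient_penalty(critic, real, fake, gp_lambda=10.0):
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)), device=real.device)
    interp = (alpha * real + (1 - alpha) * fake).requires_grad_(True)
    scores = critic(interp)
    grads, = torch.autograd.grad(scores, interp,
                                 grad_outputs=torch.ones_like(scores),
                                 create_graph=True)
    slopes = grads.view(grads.size(0), -1).norm(2, dim=1)
    return gp_lambda * ((slopes - 1) ** 2).mean()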
    def forward(self, input):
        batch_size, _, height, width = input.shape

        # B x C x H x W; Difference from the mean at each location
        output = input - input.mean(dim=0, keepdim=True)

        # C x H x W; Standard deviation at each location across the batch
        output = torch.sqrt_(output.pow_(2.0).mean(dim=0, keepdim=False) + 10e-8)

        # 1 x 1 x 1 x 1; Mean standard deviation across entire featuremap
        output = output.mean().view(1, 1, 1, 1)

        # B x 1 x H x W; Copy the mean to all locations of a new channel
        output = output.repeat(batch_size, 1, height, width)

        # Append that channel to the original input
        output = torch.cat([input, output], 1)
        return output
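Usage note: the layer appends exactly one channel. A self-contained shape check, wrapping the forward above in a hypothetical nn.Module:

import torch
import torch.nn as nn

class MinibatchStdDev(nn.Module):    # hypothetical wrapper name
    def forward(self, input):
        batch_size, _, height, width = input.shape
        output = input - input.mean(dim=0, keepdim=True)
        output = torch.sqrt_(output.pow_(2.0).mean(dim=0, keepdim=False) + 10e-8)
        output = output.mean().view(1, 1, 1, 1)
        output = output.repeat(batch_size, 1, height, width)
        return torch.cat([input, output], 1)

x = torch.randn(8, 16, 4, 4)
print(MinibatchStdDev()(x).shape)    # torch.Size([8, 17, 4, 4])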
def nms3d(objs):
    """
        Select best objects by the position constraint of 3d object
        """
    locations = objs.get_field('location')
    dimensions = objs.get_field('dimension')
    scores = objs.get_field('score')
    rotys = objs.get_field('Ry')
    loc_earths = locations[:, 0::2]
    keep = torch.ones_like(scores)
    mask = 1 - torch.eye(len(scores)).to(loc_earths.device)
    mask = mask.bool()
    loc = loc_earths.clone()
    dim = dimensions.clone()  # h, w, l
    score = scores.clone()
    score = score.unsqueeze(-1) - score.unsqueeze(0)
    roty = rotys.clone()
    det_loc = loc.view(-1, 1, 2) - loc.unsqueeze(dim=0)
    det_loc = det_loc.pow_(2.)
    det_loc = torch.sqrt_(det_loc[:, :, 0] + det_loc[:, :, 1])
    # det_roty = roty.view(-1, 1, 1) - roty.unsqueeze(dim=0)
    # r_idx = det_roty > np.pi
    # det_roty[r_idx] = det_roty[r_idx] - np.pi
    # r_idx = det_roty < -np.pi
    # det_roty[r_idx] = det_roty[r_idx] + np.pi
    dim1 = dim.view(-1, 1, 3)
    dim2 = dim.unsqueeze(dim=0)
    dim_cond1 = (dim1[:, :, 1] + dim2[:, :, 1]) / 2.
    dim_cond2 = (dim1[:, :, 1] + dim2[:, :, 2]) / 2.
    dim_cond3 = (dim1[:, :, 2] + dim2[:, :, 1]) / 2.
    dim_cond4 = (dim1[:, :, 2] + dim2[:, :, 2]) / 2.
    dim_cond = (dim_cond1 + dim_cond2 + dim_cond3 + dim_cond4) / 4.
    remove = (det_loc < dim_cond)
    remove = (remove & mask)
    remove_idx = (score[remove] >= 0)
    if remove_idx.sum() != 0:
        remove = remove.nonzero()
        remove = remove[remove_idx]
        remove = remove[:, 1].unique()
        keep[remove] = 0
    keep = keep.bool()
    objs.add_field('mask', keep, to_tensor=True)
    objs.delete_by_mask()
    return objs
Example #16
 def forward(ctx, x):
     ctx.save_for_backward(x)
     z = x.double()
     return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)
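This forward computes arsinh(x) = log(x + sqrt(1 + x^2)). For completeness, a sketch of the full autograd.Function with the matching backward, d/dx arsinh(x) = 1/sqrt(1 + x^2) (class name assumed):

import torch

class Arsinh(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        z = x.double()
        return (z + torch.sqrt_(1 + z.pow(2))).clamp_min_(1e-15).log_().to(x.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        x, = ctx.saved_tensors
        return grad_output / torch.sqrt(1 + x.pow(2))

x = torch.randn(5, requires_grad=True)
Arsinh.apply(x).sum().backward()
assert torch.allclose(x.grad, 1 / torch.sqrt(1 + x.pow(2)))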
Example #17
 def forward(self, x):
     lrt_mean = self.bias
     lrt_std = torch.sqrt_(
         1e-16 + F.linear(x * x, self.sigma * self.sigma))  # eq. (4)
     eps = Variable(lrt_std.data.new(lrt_std.size()).normal_())
     return lrt_mean + eps * lrt_std
Example #18
    def forward(self, img):
        img = (img[:, 0:1] + img[:, 1:2] + img[:, 2:3]) / 3.0
        #blur_horizontal = self.gaussian_filter_horizontal(img)
        #blurred_img = self.gaussian_filter_vertical(blur_horizontal)

        grad_x = self.sobel_filter_horizontal(img)

        grad_y = self.sobel_filter_vertical(img)
        # COMPUTE THICK EDGES

        grad_mag = torch.sqrt_(grad_x**2 + grad_y**2)

        grad_orientation = (torch.atan2(grad_y, grad_x) *
                            (180.0 / 3.14159)) + 180.0
        grad_orientation = torch.round(grad_orientation / 45.0) * 45.0
        # THIN EDGES (NON-MAX SUPPRESSION)

        all_filtered = self.directional_filter(grad_mag)

        indices_positive = (grad_orientation / 45) % 8
        indices_negative = ((grad_orientation / 45) + 4) % 8
        pixel_count = self.size[0] * self.size[1]

        indices_positive = indices_positive.float()
        indices_negative = indices_negative.float()
        all_filtered = all_filtered.float()

        indices = (indices_positive.view(-1).data * pixel_count +
                   self.pixel_range).squeeze()
        channel_select_filtered_positive = all_filtered.view(-1)[
            indices.long()].view(1, self.size[0], self.size[1])

        indices = (indices_negative.view(-1).data * pixel_count +
                   self.pixel_range).squeeze()
        channel_select_filtered_negative = all_filtered.view(-1)[
            indices.long()].view(1, self.size[0], self.size[1])

        channel_select_filtered = torch.stack([
            channel_select_filtered_positive, channel_select_filtered_negative
        ])

        is_max = channel_select_filtered.min(dim=0)[0] > 0.0
        is_max = torch.unsqueeze(is_max, dim=0)
        thin_edges = grad_mag  # note: aliases grad_mag; modified in place below
        thin_edges[~is_max] = 0.0

        #thresholded = thin_edges.contiguous()
        #thresholded[thin_edges<0.5] = 0.0

        early_threshold = grad_mag  # also aliases grad_mag
        early_threshold[grad_mag < 0.2] = 0.0
        '''grad_mag[is_max==0] = 0.0
        
        # THRESHOLD
        #thresholded = thin_edges.contiguous()
        #thresholded[thin_edges<0.0*self.threshold] = 0.0
        print('6',time.time()-t)'''
        th = early_threshold
        th[th > 0.0] = 1
        th[th <= 0.0] = 0
        return th.detach()
Example #19
 def updateX(self, X, V, U, Y, beta, sigma):
     # return (V - U - beta + torch.sqrt_((V - U - beta)**2 + 4*beta*Y)) / 2
     return (V - beta*U - sigma*beta
             + torch.sqrt_((V - beta*U - sigma*beta)**2 + 4*beta*Y)) / 2
 def forward(ctx, x):
     ctx.save_for_backward(x)
     return (x + torch.sqrt_(1 + x.pow(2))).clamp_min_(1e-5).log_()
Example #21
 def updateX(self, V, Y, tao):
     return (V - tao + torch.sqrt_((V - tao)**2 + 4 * tao * Y)).div_(2)
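This is the positive root of x^2 + (tao - V)x - tao*Y = 0, i.e. the quadratic formula with discriminant (V - tao)^2 + 4*tao*Y. A numeric check:

import torch

V, Y, tao = torch.tensor(2.0), torch.tensor(3.0), torch.tensor(0.5)
x = (V - tao + torch.sqrt((V - tao)**2 + 4 * tao * Y)) / 2
print(x**2 + (tao - V) * x - tao * Y)   # ~0 up to float error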
Example #22
 def regularization2(self, model, Lambda):
     # L2 regularization: Lambda * ||w||_2 over all flattened parameters
     w = torch.cat([x.view(-1) for x in model.parameters()])
     err = Lambda * torch.sqrt_(torch.sum(w**2))
     return err
Example #23
def hyperboloid_graph(x_vals, y_vals, a, c):
    z_sq = c**2 * (x_vals**2 / a**2 + y_vals**2 / a**2 - 1)
    if isinstance(x_vals, torch.Tensor):
        return torch.sqrt_(z_sq)
    else:
        return np.sqrt(z_sq)
Example #24
import torch  # (import assumed; the top of this tutorial was truncated)

# matrix multiplication (`a` and `b` assumed defined earlier as compatible 2-D tensors)
a = torch.rand(2, 3)
b = torch.rand(3, 2)
print(a.matmul(b))
print(a.matmul(b).shape)

# exponentiation
a = torch.tensor([1, 2])
print(torch.pow(a, 3))
print(a.pow(3))
print(a**3)
print(a.pow_(3))  # in-place: a now holds [1, 8]

# exp (note: the in-place variants mutate `a`, so each print below sees the
# result of the previous in-place call)
a = torch.tensor([1, 2], dtype=torch.float32)
print(a.type())
print(torch.exp(a))
print(torch.exp_(a))
print(a.exp())
print(a.exp_())

# logarithm
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.log(a))
print(torch.log_(a))
print(a.log())
print(a.log_())

# sqrt
a = torch.tensor([10, 2], dtype=torch.float32)
print(torch.sqrt(a))
print(torch.sqrt_(a))
print(a.sqrt())
print(a.sqrt_())
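Note that the trailing-underscore variants mutate their operand, so the value of `a` changes between the print calls above. A minimal illustration:

a = torch.tensor([10., 2.])
print(torch.sqrt(a))    # out-of-place: a is unchanged
print(a)                # tensor([10.,  2.])
a.sqrt_()               # in-place: a now holds the square roots
print(a)                # tensor([3.1623, 1.4142])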