Example #1
def L2_penalty(model):
    loss = 0
    for parameter in model.parameters():
        # skip 1-d parameters (biases, norm scales) so only weight tensors are penalized
        if parameter.dim() == 1:
            continue
        # accumulate the sum of squared weights: zeros + parameter * parameter
        loss += torch.sum(torch.addcmul(torch.zeros_like(parameter), parameter, parameter))
    return loss
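A minimal usage sketch (not from the original project; the toy model, loss and the 1e-4 coefficient below are assumed for illustration) showing how such a penalty is typically folded into a training loss:

import torch
import torch.nn as nn

model = nn.Linear(10, 2)
criterion = nn.CrossEntropyLoss()
inputs, targets = torch.randn(8, 10), torch.randint(0, 2, (8,))

# total loss = task loss + weight-decay-style L2 term
loss = criterion(model(inputs), targets) + 1e-4 * L2_penalty(model)
loss.backward()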
Example #2
    def backward(self, grad_output):
        input_img, output = self.saved_tensors
        grad_input = None

        # Guided backpropagation: keep the gradient only where both the forward
        # input and the incoming gradient are positive.
        positive_mask_1 = (input_img > 0).type_as(grad_output)
        positive_mask_2 = (grad_output > 0).type_as(grad_output)
        grad_input = torch.addcmul(
            torch.zeros(input_img.size()).type_as(input_img),
            torch.addcmul(
                torch.zeros(input_img.size()).type_as(input_img),
                grad_output,
                positive_mask_1,
            ),
            positive_mask_2,
        )
        return grad_input
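The nested addcmul calls above implement the guided-backpropagation rule: the incoming gradient survives only where both the saved forward input and the gradient itself are positive. A small equivalence sketch (toy tensors assumed):

import torch

input_img = torch.randn(4, 4)
grad_output = torch.randn(4, 4)
m1 = (input_img > 0).type_as(grad_output)
m2 = (grad_output > 0).type_as(grad_output)
nested = torch.addcmul(torch.zeros_like(input_img),
                       torch.addcmul(torch.zeros_like(input_img), grad_output, m1),
                       m2)
assert torch.allclose(nested, grad_output * m1 * m2)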
Example #3
def train(model, criterion, epoch, optimizer, training_data_loader, Loss):
    lr = adjust_learning_rate(epoch - 1)

    for param_group in optimizer.param_groups:
        param_group["lr"] = lr

    print("Epoch = {}, lr = {}".format(epoch, optimizer.param_groups[0]["lr"]))

    epoch_loss = 0
    for iteration, batch in enumerate(training_data_loader, 1):
        input, target = Variable(batch[0]), Variable(batch[1],
                                                     requires_grad=False)
        if opt.cuda:
            input = input.cuda()
            target = target.cuda()

        optimizer.zero_grad()  # zero the gradients of all optimized Variables
        model_out = model(input)
        #prediction = torch.zeros(target.shape[0], target.shape[2], target.shape[3]).cuda()
        # prediction = model_out[:, 1] + model_out[:, 0] * input[:, 1]
        prediction = torch.addcmul(model_out[:, 1, :, :], 1,
                                   model_out[:, 0, :, :], input[:, 1, :, :])
        loss = criterion(prediction, target[:, 0, :, :])
        epoch_loss += loss.item()
        loss.backward()
        #nn.utils.clip_grad_norm_(model.parameters(), opt.clip/lr)
        optimizer.step()  # perform a single optimization step (parameter update)

        print("===> Epoch[{}]({}/{}): Loss: {:.4f}".format(
            epoch, iteration, len(training_data_loader), loss.item()))

    Loss.append(epoch_loss / len(training_data_loader))
    print("===> Epoch {} Complete: Avg. Loss: {:.4f}".format(
        epoch, epoch_loss / len(training_data_loader)))
Example #4
    def forward(self, invec):
        x, sumgrad = invec[0], invec[1]

        residualx = x

        out = self.bn1(x)
        out = self.relu(out)
        out = self.conv1(out)

        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv2(out)

        out = self.bn3(out)
        out = self.relu(out)
        out = self.conv3(out)

        if self.downsample is not None:
            residualx = self.downsample(x)
        if self.downsamplegrad is not None:
            sumgrad = self.downsamplegrad(torch.rsqrt(sumgrad))
            sumgrad = sumgrad * sumgrad

        sumgrad = torch.addcmul(sumgrad, value=1.0, tensor1=out, tensor2=out)
        out = out * torch.rsqrt(sumgrad + 1e-8)

        out = out + residualx
        return [out, sumgrad]
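The key line is the squared-activation accumulator: addcmul(sumgrad, value=1.0, tensor1=out, tensor2=out) adds out * out elementwise before the activation is rescaled by rsqrt of the running sum. A tiny equivalence check (toy tensors assumed):

import torch

sumgrad = torch.ones(2, 3)
out = torch.randn(2, 3)
assert torch.allclose(torch.addcmul(sumgrad, out, out), sumgrad + out * out)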
Example #5
    def forward(self, input):
        positive_mask = (input > 0).type_as(input)
        output = torch.addcmul(
            torch.zeros(input.size()).type_as(input), input, positive_mask)
        self.save_for_backward(input, output)

        return output
Example #6
    def update_usages(self, prev_write_distribution, prev_read_distributions, free_gates):
        # Read distributions shape: [batch, n_heads, cell count]
        # Free gates shape: [batch, n_heads]

        self._init_consts(prev_read_distributions.device)
        phi = torch.addcmul(self.one, -1, free_gates.unsqueeze(-1), prev_read_distributions).prod(-2)
        # Phi is the free tensor, sized [batch, cell count]

        # If the memory usage counter does not exist yet
        if self.usages is None:
            self._init_sequence(prev_read_distributions)
            # in the first timestep nothing has been written or read yet, so no further processing is needed
        else:
            self.usages = torch.addcmul(self.usages, 1, prev_write_distribution.detach(), (1 - self.usages)) * phi

        return phi
Example #7
    def forward(self, pred, true):
        t1 = torch.exp(torch.mul(pred, 2-self.rho, out=None), out=None)
        t = torch.mul(t1, 1/(2-self.rho), out=None)
        t2 = torch.exp(torch.mul(pred, 1-self.rho, out=None), out=None)
        loss = torch.mean(torch.addcmul(t, -1/(1 -self.rho), true, t2, out=None))

        return loss
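Reading the operations off, the per-sample term is exp((2 - rho) * pred) / (2 - rho) - true * exp((1 - rho) * pred) / (1 - rho), which has the form of a Tweedie-style deviance under a log link. A sketch of the same loss written without the legacy positional value argument (the rho default below is assumed):

import torch

def tweedie_like_loss(pred, true, rho=1.5):
    # equivalent to the addcmul formulation above for 1 < rho < 2
    return torch.mean(torch.exp((2 - rho) * pred) / (2 - rho)
                      - true * torch.exp((1 - rho) * pred) / (1 - rho))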
Example #8
    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        if self.squeeze is None:
            out += residual
        else:
            if self.fused_se:
                out = torch.addcmul(residual, out, self.squeeze(out), value=1)
            else:
                out = residual + out * self.squeeze(out)

        out = self.relu(out)

        return out
Example #9
    def logm(self, x, y):
        """Logarithmic map on the Lorentz manifold"""
        xy = th.clamp(self.ldot(x, y).unsqueeze(-1), max=-1)
        v = acosh(-xy, self.eps).div_(
            th.clamp(th.sqrt(xy * xy - 1), min=self._eps)) * th.addcmul(
                y, xy, x)
        return self.normalize_tan(x, v)
Example #10
    def learn(self):
        if len(self.replay_buffer) > self.batch_size:
            states, actions, rewards, new_states, dones = self.replay_buffer.sample(
                self.batch_size)

            with torch.no_grad():
                target_actions = self.target_actor.forward(new_states)
                critic_value_ = self.target_critic.forward(
                    torch.concat((new_states, target_actions), dim=-1))
            critic_value = self.critic.forward(
                torch.concat((states, actions), dim=-1))
            target = torch.addcmul(rewards, self.gamma, 1 - dones,
                                   critic_value_.squeeze()).view(
                                       self.batch_size, 1)
            self.critic.optimizer.zero_grad()
            critic_loss = torch.nn.functional.mse_loss(target, critic_value)
            critic_loss.backward()
            self.critic.optimizer.step()

            self.actor.optimizer.zero_grad()
            actions = self.actor.forward(states)
            actor_loss = -self.critic.forward(
                torch.concat((states, actions), dim=-1))
            actor_loss = torch.mean(actor_loss)
            actor_loss.backward()
            self.actor.optimizer.step()

            self.target_critic.converge_to(self.critic, tau=self.tau)
            self.target_actor.converge_to(self.actor, tau=self.tau)
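The target line builds the usual TD target: addcmul(rewards, gamma, 1 - dones, q_next) is rewards + gamma * (1 - dones) * q_next, so the bootstrap term is zeroed out on terminal transitions. A scalar sketch (toy values assumed):

import torch

rewards, dones, q_next = torch.tensor([1.0]), torch.tensor([0.0]), torch.tensor([2.0])
gamma = 0.99
target = rewards + gamma * (1 - dones) * q_next  # tensor([2.9800])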
Example #11
def style_mod(x, style):
    style = style.view(style.shape[0], 2, x.shape[1], 1,
                       1)  # [n,1024] -> [n,2,512,1,1]
    return torch.addcmul(style[:, 1],
                         value=1.0,
                         tensor1=x,
                         tensor2=style[:, 0] + 1)  # style1 + x*style2
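As the trailing comment notes, this is a StyleGAN-style modulation, bias + x * (scale + 1), with the style vector split into a scale half and a bias half. A shape sketch (dimensions assumed):

import torch

x = torch.randn(2, 512, 4, 4)
style = torch.randn(2, 1024)  # packs scale and bias for 512 channels
out = style_mod(x, style)
print(out.shape)              # torch.Size([2, 512, 4, 4])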
Example #12
    def forward(ctx, input, gamma, beta, eps=1e-5):
        """
    Compute the batch normalization
    
    Args:
      ctx: context object handling storing and retrieval of tensors and constants, and specifying
           whether tensors need gradients in the backward pass
      input: input tensor of shape (B, n_neurons)
      gamma: variance scaling tensor, applied per neuron, shape (n_neurons)
      beta: mean bias tensor, applied per neuron, shape (n_neurons)
      eps: small float added to the variance for stability
    Returns:
      out: batch-normalized tensor
    """

        ########################
        # PUT YOUR CODE HERE  #
        #######################
        requires_grad = False
        mean = input.mean(dim=0)
        var = input.var(dim=0, unbiased=False)
        denom = (var + eps).sqrt()
        x_hat = (input - mean) / denom
        out = torch.addcmul(beta, gamma, x_hat)
        ctx.save_for_backward(gamma, denom, x_hat)
        ########################
        # END OF YOUR CODE    #
        #######################

        return out
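A quick sanity check (a sketch with assumed shapes, not part of the assignment code): the affine step addcmul(beta, gamma, x_hat) is beta + gamma * x_hat, and the whole forward should agree with F.batch_norm in training mode:

import torch
import torch.nn.functional as F

x = torch.randn(16, 4)
gamma, beta = torch.rand(4) + 0.5, torch.randn(4)
mean, var = x.mean(0), x.var(0, unbiased=False)
x_hat = (x - mean) / (var + 1e-5).sqrt()
out_manual = torch.addcmul(beta, gamma, x_hat)
out_ref = F.batch_norm(x, None, None, gamma, beta, training=True, eps=1e-5)
assert torch.allclose(out_manual, out_ref, atol=1e-5)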
Example #13
    def test_remote_tensor_tertiary_methods(self):

        hook = TorchHook(verbose=False)
        local = hook.local_worker
        remote = VirtualWorker(hook, 1)
        local.add_worker(remote)

        x = torch.FloatTensor([1, 2, 3]).send(remote)
        y = torch.FloatTensor([1, 2, 3]).send(remote)
        z = torch.FloatTensor([1, 2, 3]).send(remote)
        assert (torch.equal(
            torch.addcmul(z, 2, x, y).get(), torch.FloatTensor([3., 10.,
                                                                21.])))

        x = torch.FloatTensor([1, 2, 3]).send(remote)
        y = torch.FloatTensor([1, 2, 3]).send(remote)
        z = torch.FloatTensor([1, 2, 3]).send(remote)
        z.addcmul_(2, x, y)
        assert (torch.equal(z.get(), torch.FloatTensor([3., 10., 21.])))

        x = torch.FloatTensor([[1, 2]]).send(remote)
        y = torch.FloatTensor([[1, 2, 3], [4, 5, 6]]).send(remote)
        z = torch.FloatTensor([1, 2, 3]).send(remote)
        assert (torch.equal(
            torch.addmm(z, x, y).get(), torch.FloatTensor([[10., 14., 18.]])))
Example #14
    def forward(self, input):
        """
    Compute the batch normalization
    
    Args:
      input: input tensor of shape (B, n_neurons)
    Returns:
      out: batch-normalized tensor
    """

        ########################
        # PUT YOUR CODE HERE  #
        #######################
        (_n_batch, n_neurons) = input.shape
        assert n_neurons == self.n_neurons
        mean = input.mean(dim=0)
        var = input.var(dim=0, unbiased=False)
        x_hat = (input - mean) / (var + self.eps).sqrt()
        p = self.params
        out = torch.addcmul(p['beta'], p['gamma'], x_hat)
        ########################
        # END OF YOUR CODE    #
        #######################

        return out
Example #15
def RecToPolar_3(RectData):
    '''
    Convert cartesian coordinates to polar (spherical) coordinates.
    input:
        the array of cartesian coordinates
    output:
        the polar coordinates
    '''
    #     RectData=yy
    #     print(RectData.size())
    SizeOfData = RectData.size()
    if (SizeOfData[2] == 3):
        # print(RectData[0:3,:])
        ListSmall = 1e-16  # small constant to guard against division by zero
        R = torch.norm(RectData, p=2, dim=2) + ListSmall
        #         print(R)
        Phi_Value = torch.addcdiv(torch.zeros_like(R), 1, RectData[:, :, 2], R)
        Phi = torch.acos(Phi_Value)  # use arccos to obtain the elevation angle
        r = torch.addcmul(torch.zeros_like(R), 1, R,
                          torch.sin(Phi)) + ListSmall
        Theta_Value = torch.addcdiv(torch.zeros_like(r), 1, RectData[:, :, 0],
                                    r)
        SignalOfNum = torch.lt(RectData[:, :, 1],
                               torch.zeros_like(Theta_Value)).double()
        Flag_Signal_Coe = (-2 * SignalOfNum + 1)
        Flag_Fixed_Tail = np.pi * 2 * SignalOfNum
        Theta = torch.acos(
            Theta_Value).double() * Flag_Signal_Coe + Flag_Fixed_Tail
        result = torch.cat(
            (torch.unsqueeze(R.double(), 2), torch.unsqueeze(
                Theta.double(), 2), torch.unsqueeze(Phi.double(), 2)),
            dim=2)
        return (result)
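The same conversion can be sketched without the legacy positional-value addcdiv/addcmul calls (an assumed modern-PyTorch equivalent; atan2 replaces the acos-plus-sign-correction used for the azimuth above):

import math
import torch

xyz = torch.randn(2, 5, 3, dtype=torch.float64)
R = xyz.norm(dim=2) + 1e-16
Phi = torch.acos(xyz[..., 2] / R)                              # elevation angle
Theta = torch.atan2(xyz[..., 1], xyz[..., 0]) % (2 * math.pi)  # azimuth in [0, 2*pi)
polar = torch.stack((R, Theta, Phi), dim=2)                    # (batch, points, 3) as above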
Example #16
def validate(model, criterion, validating_data_loader, PSNR, RMSE):
    avg_psnr = 0
    avg_rmse = 0
    for batch in validating_data_loader:
        input, target = Variable(batch[0]), Variable(batch[1])
        if opt.cuda:
            input = input.cuda()
            target = target.cuda()

        model_out = model(input)
        #prediction = torch.zeros(target.shape[0], target.shape[2], target.shape[3]).cuda()
        prediction = torch.addcmul(model_out[:, 1, :, :], 1,
                                   model_out[:, 0, :, :], input[:, 1, :, :])
        mse = criterion(prediction * 255.0, target[:, 0, :, :] * 255.0)
        rmse = sqrt(mse.item())
        psnr = 10 * log10(255.0**2 / mse.item())
        avg_rmse += rmse
        avg_psnr += psnr

    PSNR.append(avg_psnr / len(validating_data_loader))
    RMSE.append(avg_rmse / len(validating_data_loader))
    print("===> Avg. PSNR: {:.4f} dB".format(avg_psnr /
                                             len(validating_data_loader)))
    print("===> Avg. RMSE: {:.4f}".format(avg_rmse /
                                          len(validating_data_loader)))
Example #17
def point_map_to_seg(pt_map, seg):
    # pt_map: h x w x 2
    # seg: 2 x 2  => list, tuple, ...
    h, w, _ = pt_map.size()
    p1_map = torch.Tensor(seg[0]).expand(h, w, -1)
    p2_map = torch.Tensor(seg[1]).expand(h, w, -1)
    sub_p1_map = pt_map.sub(p1_map)
    # cross: (p-p1).dot(p2-p1)
    cross = sub_p1_map[:, :, 0].mul(seg[1][0] - seg[0][0]).add(
        sub_p1_map[:, :, 1].mul(seg[1][1] - seg[0][1]))
    mask_1 = cross.le(0).float()  # p1 side
    dist_map = mask_1.mul(sub_p1_map.pow(2).sum(2))
    # seg_d2: ||p2-p1||^2
    seg_d2 = (seg[1][0] - seg[0][0])**2 + (seg[1][1] - seg[0][1])**2
    mask_2 = cross.ge(seg_d2).float()  # p2 side
    dist_map.add_(mask_2.mul(pt_map.sub(p2_map).pow_(2).sum(2)))
    # between p1 and p2
    mask_3 = mask_1.max(mask_2).eq(0).float()
    seg_d2 = max(seg_d2, 1e-6)
    cross.div_(seg_d2)
    q_map = torch.addcmul(p1_map,
                          cross.view(h, w, 1).expand(h, w, 2),
                          p2_map.sub(p1_map))
    dist_map.add_(mask_3.mul(pt_map.sub(q_map).pow_(2).sum(2)))
    return dist_map
Example #18
    def forward(self, input_tensor):
        positive_mask = (input_tensor > 0).type_as(input_tensor)
        output = torch.addcmul(
            torch.zeros(input_tensor.size()).type_as(input_tensor),
            input_tensor, positive_mask)
        self.save_for_backward(input_tensor, output)
        return output
Example #19
def apply_mean_var(x, mean, var, eps):
    inv_stdev = torch.rsqrt(torch.max(var, eps))

    return torch.addcmul((-mean * inv_stdev).to(x.dtype),
                         inv_stdev.to(x.dtype),
                         x,
                         value=1.0)
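Here addcmul(-mean * inv_stdev, inv_stdev, x) expands to (x - mean) * inv_stdev, i.e. plain mean/variance normalization with the variance floored at eps. A small check sketch (shapes assumed):

import torch

x = torch.randn(8, 3)
mean, var = x.mean(0), x.var(0, unbiased=False)
eps = torch.tensor(1e-5)
out = apply_mean_var(x, mean, var, eps)
assert torch.allclose(out, (x - mean) * torch.rsqrt(torch.max(var, eps)))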
Example #20
    def forward(self, input):
        shape = input.size()

        # In order to force the cudnn path, everything needs to be
        # contiguous. Hence the check here and reallocation below.
        if not input.is_contiguous():
            input = input.contiguous()
        input = input.view(1, -1, shape[-1])

        # Expand w and b buffers if necessary.
        n = input.size(1)
        cur = self.dummy.numel() if self.dummy is not None else 0
        if cur == 0:
            self.dummy = input.data.new(n)
            self.w = input.data.new(n).fill_(1)
            self.b = input.data.new(n).zero_()
        elif n > cur:
            self.dummy.resize_(n)
            self.w.resize_(n)
            self.w[cur:n].fill_(1)
            self.b.resize_(n)
            self.b[cur:n].zero_()
        dummy = self.dummy[:n]
        w = self.w[:n]
        b = self.b[:n]
        output = F.batch_norm(input, dummy, dummy, w, b, True, 0., self.eps)
        return torch.addcmul(self.bias, 1, output.view(*shape), self.gain)
Example #21
    def forward(self, feat, label, easy_margin=False):
        eps = 1e-4
        batch_size = feat.shape[0]
        norms = torch.norm(feat, p=2, dim=-1, keepdim=True)
        feat_l2norm = torch.div(feat, norms)
        feat_l2norm = feat_l2norm.clamp(min=-1 + eps,
                                        max=1 - eps)  # for numerical stability
        feat_l2norm = feat_l2norm * self.s

        norms_w = torch.norm(self.weights, p=2, dim=-1, keepdim=True)
        weights_l2norm = torch.div(self.weights, norms_w)
        weights_l2norm = weights_l2norm.clamp(min=-1 + eps, max=1 -
                                              eps)  # for numerical stability

        fc7 = torch.matmul(feat_l2norm, torch.transpose(weights_l2norm, 0, 1))

        # zy = mx.sym.pick(fc7, gt_label, axis=1)
        label = label.cpu()
        fc7 = fc7.cpu()

        target_one_hot = torch.zeros(len(label), NUM_OF_CLASSES).scatter_(
            1, label.unsqueeze(1), 1.)
        zy = torch.addcmul(torch.zeros(fc7.size()), 1., fc7, target_one_hot)
        zy = zy.sum(-1)

        cos_t = zy / self.s
        cos_t = cos_t.clamp(min=-1 + eps,
                            max=1 - eps)  # for numerical stability

        t = torch.acos(cos_t)
        t = t + self.m

        body = torch.cos(t)
        new_zy = body * self.s

        diff = new_zy - zy
        # diff = mx.sym.expand_dims(diff, 1)
        diff = diff.unsqueeze(1)

        # gt_one_hot = mx.sym.one_hot(gt_label, depth = args.num_classes, on_value = 1.0, off_value = 0.0)
        # body = mx.sym.broadcast_mul(gt_one_hot, diff)
        body = torch.addcmul(torch.zeros(diff.size()), 1., diff,
                             target_one_hot)

        output = fc7 + body

        return output.to(self.device)
Example #22
def _jit_linear_cg_updates(result, alpha, residual_inner_prod, eps, beta,
                           residual, precond_residual, mul_storage,
                           curr_conjugate_vec):
    # Update result
    # result_{k} = result_{k-1} + alpha_{k} p_vec_{k-1}
    torch.addcmul(result, alpha, curr_conjugate_vec, out=result)

    # beta_{k} = (precon_residual{k}^T r_vec_{k}) / (precon_residual{k-1}^T r_vec_{k-1})
    residual_inner_prod.add_(eps)
    torch.reciprocal(residual_inner_prod, out=beta)
    torch.mul(residual, precond_residual, out=mul_storage)
    torch.sum(mul_storage, -2, keepdim=True, out=residual_inner_prod)
    beta.mul_(residual_inner_prod)

    # Update curr_conjugate_vec
    # curr_conjugate_vec_{k} = precon_residual{k} + beta_{k} curr_conjugate_vec_{k-1}
    curr_conjugate_vec.mul_(beta).add_(precond_residual)
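The out=result form is the in-place flavour of the update: torch.addcmul(result, alpha, p, out=result) performs result += alpha * p without allocating a new output tensor. A minimal sketch (shapes assumed):

import torch

result = torch.zeros(3, 2)
alpha = torch.tensor([[0.5, 2.0]])  # one step size per column
p = torch.ones(3, 2)
torch.addcmul(result, alpha, p, out=result)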
Example #23
def _jit_linear_cg_updates_no_precond(
    mvms,
    result,
    has_converged,
    alpha,
    residual_inner_prod,
    eps,
    beta,
    residual,
    precond_residual,
    mul_storage,
    is_zero,
    curr_conjugate_vec,
):
    torch.mul(curr_conjugate_vec, mvms, out=mul_storage)
    torch.sum(mul_storage, dim=-2, keepdim=True, out=alpha)

    # Do a safe division here
    torch.lt(alpha, eps, out=is_zero)
    alpha.masked_fill_(is_zero, 1)
    torch.div(residual_inner_prod, alpha, out=alpha)
    alpha.masked_fill_(is_zero, 0)

    # We'll cancel out any updates by setting alpha=0 for any vector that has already converged
    alpha.masked_fill_(has_converged, 0)

    # Update residual
    # residual_{k} = residual_{k-1} - alpha_{k} mat p_vec_{k-1}
    torch.addcmul(residual, -alpha, mvms, out=residual)

    # Update precond_residual
    # precon_residual{k} = M^-1 residual_{k}
    precond_residual = residual.clone()

    _jit_linear_cg_updates(
        result,
        alpha,
        residual_inner_prod,
        eps,
        beta,
        residual,
        precond_residual,
        mul_storage,
        is_zero,
        curr_conjugate_vec,
    )
Example #24
    def backward(self, y):

        grad = None
        x, output = self.saved_tensors
        grad = torch.addcmul(torch.zeros(y.size()), y, (y > 0).type_as(y))
        grad[x <= 0] = 0

        return grad
Example #25
File: net.py  Project: jhejna/ul_gen
    def forward(self, x, s1, s2, noise):
        if self.has_first_conv:
            if not self.fused_scale:
                x = upscale2d(x)
            x = self.conv_1(x)
            x = self.blur(x)

        if noise:
            if noise == 'batch_constant':
                x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_1,
                                  tensor2=torch.randn([1, 1, x.shape[2], x.shape[3]]))
            else:
                x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_1,
                                  tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]]))
        else: 
            s = math.pow(self.layer + 1, 0.5)
            x = x + s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8
        x = x + self.bias_1

        x = F.leaky_relu(x, 0.2)

        x = self.instance_norm_1(x)

        x = style_mod(x, self.style_1(s1))

        x = self.conv_2(x)

        if noise:
            if noise == 'batch_constant':
                x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_2,
                                  tensor2=torch.randn([1, 1, x.shape[2], x.shape[3]]))
            else:
                x = torch.addcmul(x, value=1.0, tensor1=self.noise_weight_2,
                                  tensor2=torch.randn([x.shape[0], 1, x.shape[2], x.shape[3]]))
        else:
            s = math.pow(self.layer + 1, 0.5)
            x = x +  s * torch.exp(-x * x / (2.0 * s * s)) / math.sqrt(2 * math.pi) * 0.8

        x = x + self.bias_2

        x = F.leaky_relu(x, 0.2)
        x = self.instance_norm_2(x)

        x = style_mod(x, self.style_2(s2))

        return x
Example #26
    def forward(self, input):
        self._check_input_dim(input)

        if self.momentum is None:
            exponential_average_factor = 0.0
        else:
            exponential_average_factor = self.momentum

        if self.training and self.track_running_stats:
            if self.num_batches_tracked is not None:
                self.num_batches_tracked += 1
                if self.momentum is None:
                    exponential_average_factor = 1.0 / float(self.num_batches_tracked)
                else:
                    exponential_average_factor = self.momentum

        out = F.batch_norm(
            input, self.running_mean, self.running_var, None, None,
            self.training or not self.track_running_stats,
            exponential_average_factor, self.eps)

        if self.affine :
            if self.single_eps or self.deterministic:
                if self.deterministic:
                    weight = self.weight_mu
                    bias = self.bias_mu
                else:
                    weight = self.weight_mu + torch.exp(self.weight_log_sigma) * \
                            torch.randn(self.num_features, device=input.device, dtype=input.dtype)
                    bias = self.bias_mu + torch.exp(self.bias_log_sigma) * \
                            torch.randn(self.num_features, device=input.device, dtype=input.dtype)
                weight = weight.unsqueeze(0)
                bias = bias.unsqueeze(0)
            else:
                weight = self.weight_mu + torch.exp(self.weight_log_sigma) * \
                        torch.randn(input.shape[0], self.num_features, device=input.device, dtype=input.dtype)
                bias = self.bias_mu + torch.exp(self.bias_log_sigma) * \
                        torch.randn(input.shape[0], self.num_features, device=input.device, dtype=input.dtype)
            if out.dim() == 4:
                out = torch.addcmul(bias[:, :, None, None], weight[:, :, None, None], out)
            elif out.dim() == 2:
                out = torch.addcmul(bias, weight, out)
            else:
                raise NotImplementedError
        return out
Example #27
def wh_delta2bbox(anchor_points,
                  shape_wh,
                  deltas,
                  means=[0, 0, 0, 0],
                  stds=[1.0, 1.0, 1.0, 1.0],
                  max_shape=None,
                  norm=None,
                  wh_ratio_clip=16 / 1000):

    means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)
    stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)
    denorm_deltas = deltas * stds + means
    dx = denorm_deltas[:, 0::4]
    dy = denorm_deltas[:, 1::4]
    dw = denorm_deltas[:, 2::4]
    dh = denorm_deltas[:, 3::4]
    max_ratio = np.abs(np.log(wh_ratio_clip))
    dw = dw.clamp(min=-max_ratio, max=max_ratio)
    dh = dh.clamp(min=-max_ratio, max=max_ratio)

    px = (anchor_points[:, 0]).unsqueeze(1).expand_as(dx)
    py = (anchor_points[:, 1]).unsqueeze(1).expand_as(dy)
    pw = (shape_wh[:, 0]).unsqueeze(1).expand_as(dw)
    ph = (shape_wh[:, 1]).unsqueeze(1).expand_as(dh)
    pw = pw.clamp(min=-max_ratio, max=max_ratio)
    ph = ph.clamp(min=-max_ratio, max=max_ratio)
    pw = norm * pw.exp()
    ph = norm * ph.exp()

    gw = pw * dw.exp()
    gh = ph * dh.exp()
    gx = torch.addcmul(px, 1, pw, dx)  # gx = px + pw * dx
    gy = torch.addcmul(py, 1, ph, dy)  # gy = py + ph * dy

    x1 = gx - gw * 0.5 + 0.5
    y1 = gy - gh * 0.5 + 0.5
    x2 = gx + gw * 0.5 - 0.5
    y2 = gy + gh * 0.5 - 0.5
    if max_shape is not None:
        x1 = x1.clamp(min=0, max=max_shape[1] - 1)
        y1 = y1.clamp(min=0, max=max_shape[0] - 1)
        x2 = x2.clamp(min=0, max=max_shape[1] - 1)
        y2 = y2.clamp(min=0, max=max_shape[0] - 1)
    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)
    return bboxes
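The two commented lines are the center-shift step of standard delta decoding: addcmul(px, 1, pw, dx) is px + pw * dx (and likewise for y). A one-line check with the current signature, where value defaults to 1 (toy values assumed):

import torch

px, pw, dx = torch.tensor([10.0]), torch.tensor([4.0]), torch.tensor([0.5])
assert torch.allclose(torch.addcmul(px, pw, dx), px + pw * dx)  # tensor([12.])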
Example #28
    def backward(self, grad_output):
        conv_output, fcn_output = self.saved_tensors
        grad_input = None

        positive_mask = (grad_output > 0).type_as(grad_output)
        grad_input = torch.addcmul(
            torch.zeros(conv_output.size()).type_as(conv_output), grad_output,
            positive_mask)
        return grad_input
Example #29
    def forward(self, outputs, labels):
        batch_size = outputs.size(0)
        dist_mat = euclidean_distances(outputs)
        ID_mat = compute_ID_mat(labels)

        pos_dist_mat = Variable(torch.zeros(batch_size, batch_size).to(device))
        pos_dist_mat = torch.addcmul(pos_dist_mat, 1, ID_mat, dist_mat)

        neg_dist_mat = Variable(torch.zeros(batch_size, batch_size).to(device))
        neg_dist_mat = torch.addcmul(neg_dist_mat, 1, 1 - ID_mat, dist_mat)
        mask_ = (neg_dist_mat == 0)
        neg_dist_mat.masked_fill_(mask_, 10000)

        hard_pos = torch.max(pos_dist_mat, dim=0)[0]
        hard_neg = torch.min(neg_dist_mat, dim=0)[0]

        triplet_losses = torch.clamp(hard_pos - hard_neg + self.margin, min=0)
        return torch.sum(triplet_losses) / triplet_losses.size(0)
Example #30
    def forward(self, input1, input2, similarity):
        one = torch.as_tensor(np.ones(1) + Global.eps).to(device)
        eucl = nn.PairwiseDistance(p=2, eps=1e-6)
        input1 = input1.view(1, -1).to(device)
        input2 = input2.view(1, -1).to(device)
        dist = eucl(input1, input2).double()
        sim = torch.div(one, torch.addcmul(one, 1, dist, dist)).float()
        similarity = similarity.to(device)
        return torch.add(input=F.kl_div(sim, similarity), alpha=self.param,
                         other=F.mse_loss(sim, similarity)).to(device)
Example #31
    def test_local_tensor_tertiary_methods(self):

        x = torch.FloatTensor([1, 2, 3])
        y = torch.FloatTensor([1, 2, 3])
        z = torch.FloatTensor([1, 2, 3])
        assert (torch.equal(torch.addcmul(z, 2, x, y), torch.FloatTensor([3.,  10.,  21.])))

        x = torch.FloatTensor([1, 2, 3])
        y = torch.FloatTensor([1, 2, 3])
        z = torch.FloatTensor([1, 2, 3])
        z.addcmul_(2, x, y)
        assert (torch.equal(z, torch.FloatTensor([3., 10., 21.])))

        x = torch.FloatTensor([[1, 2]])
        y = torch.FloatTensor([[1, 2, 3], [4, 5, 6]])
        z = torch.FloatTensor([1, 2, 3])
        assert(torch.equal(torch.addmm(z, x, y), torch.FloatTensor([[10., 14., 18.]])))