Code Example #1
    def forward(self, inputs):
        x = self.down_layer(inputs)

        gap = porch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap_weight = porch.Tensor(list(self.gap_fc.parameters())[0]).permute(1,0)
        gap = x * gap_weight.unsqueeze(2).unsqueeze(3)

        gmp = porch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp_weight = porch.Tensor(list(self.gmp_fc.parameters())[0]).permute(1,0)
        gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)

        cam_logit = porch.cat([gap_logit, gmp_logit], 1)
        x = porch.cat([gap, gmp], 1)
        x = self.relu(self.conv1x1(x))
        x = porch.Tensor(x)
        heatmap = porch.sum(x, dim=1, keepdim=True)
        if self.light:
            x_ = porch.nn.functional.adaptive_avg_pool2d(x, 1)
            x_ = self.fc(x_.view(x_.shape[0], -1))
        else:
            x_ = self.fc(x.view(x.shape[0], -1))

        gamma, beta = self.gamma(x_), self.beta(x_)

        for i in range(self.n_res):
            x = getattr(self, "ResNetAdaILNBlock_" + str(i + 1))(x, gamma, beta)
        out = self.up_layer(x)

        return out, cam_logit, heatmap
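The re-weighting above is the Class Activation Map (CAM) trick: the learned weights of the `gap_fc`/`gmp_fc` classifiers are reused as per-channel attention over the feature map. A small numpy sketch of the idea (hypothetical shapes, not part of the source):

import numpy as np

feat = np.random.randn(2, 64, 16, 16)      # N, C, H, W feature map
w = np.random.randn(64)                    # classifier weight over pooled channels
cam = feat * w.reshape(1, 64, 1, 1)        # per-channel re-weighting
heatmap = cam.sum(axis=1, keepdims=True)   # N, 1, H, W attention map
print(heatmap.shape)                       # (2, 1, 16, 16)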
Code Example #2
def torch_cov(m, rowvar=False):
    '''Estimate a covariance matrix given data.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, `X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element `C_{ij}` is the covariance of
    `x_i` and `x_j`. The element `C_{ii}` is the variance of `x_i`.

    Args:
        m: A 1-D or 2-D array containing multiple variables and observations.
            Each row of `m` represents a variable, and each column a single
            observation of all those variables.
        rowvar: If `rowvar` is True, then each row represents a
            variable, with observations in the columns. Otherwise, the
            relationship is transposed: each column represents a variable,
            while the rows contain observations.

    Returns:
        The covariance matrix of the variables.
    '''
    if m.dim() > 2:
        raise ValueError('m has more than 2 dimensions')
    if m.dim() < 2:
        m = m.view(1, -1)
    if not rowvar and m.size(0) != 1:
        m = m.t()
    # m = m.type(torch.double)  # uncomment this line if desired
    fact = 1.0 / (m.size(1) - 1)
    m -= torch.mean(m, dim=1, keepdim=True)
    mt = torch.Tensor(m).t()  # if complex: mt = m.t().conj()
    return fact * torch.Tensor(m).matmul(mt).squeeze()
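A quick sanity check for `torch_cov` (a minimal sketch, assuming the same `torch` shim import as the snippet above and an active dygraph context where paddorch requires one): the estimate should agree with `numpy.cov`.

import numpy as np

data = np.random.randn(100, 5).astype("float32")   # 100 observations of 5 variables
ref = np.cov(data, rowvar=False)                   # numpy reference
est = torch_cov(torch.Tensor(data), rowvar=False)
print(np.allclose(ref, est.numpy(), atol=1e-4))    # expect True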
Code Example #3
File: BigGAN.py  Project: zzz2010/Contrib
  def forward(self, z, y):
    # If hierarchical, concatenate zs and ys
    if self.hier:
      zs = torch.split(z, self.z_chunk_size, 1)
      z = zs[0]
      ys = [torch.cat([y, item], 1) for item in zs[1:]]
    else:
      ys = [y] * len(self.blocks)
      
    # First linear layer
    h = torch.Tensor(self.linear(z))

    # Reshape
    h = h.view(h.size(0), -1, self.bottom_width, self.bottom_width)
    
    # Loop over blocks
    for index, blocklist in enumerate(self.blocks):
      # Second inner loop in case block has multiple layers
      for block in blocklist:
        h = block(h, torch.Tensor(ys[index]))

    # Apply batchnorm-relu-conv-tanh at output
    return torch.tanh(self.output_layer(h))
Code Example #4
    def forward(self, x):
        in_mean, in_var = porch.mean(x, dim=(2, 3), keepdim=True), porch.var(x, dim=(2, 3), keepdim=True)
        out_in = (x - in_mean) / porch.sqrt(in_var + self.eps)
        ln_mean, ln_var = porch.mean(x, dim=(1, 2, 3), keepdim=True), porch.var(x, dim=(1, 2, 3), keepdim=True)
        out_ln = (x - ln_mean) / porch.sqrt(ln_var + self.eps)
        # ILN: blend instance-norm and layer-norm outputs with the learnable ratio rho
        rho = porch.Tensor(self.rho).expand(x.shape[0], -1, -1, -1)
        out = rho * out_in + (1 - rho) * out_ln
        out = out * porch.Tensor(self.gamma).expand(x.shape[0], -1, -1, -1) + porch.Tensor(self.beta).expand(x.shape[0], -1, -1, -1)

        return out
Code Example #5
    def forward(self, x, gamma, beta):
        in_mean, in_var = porch.mean(x, dim=[2, 3], keepdim=True), porch.var(x, dim=[2, 3], keepdim=True)
        out_in = (x - in_mean) / porch.sqrt(in_var + self.eps)
        ln_mean, ln_var = porch.mean(x, dim=[1, 2, 3], keepdim=True), porch.var(x, dim=[1, 2, 3], keepdim=True)
        out_ln = (x - ln_mean) / porch.sqrt(ln_var + self.eps)
        # AdaILN: same rho-blend as ILN, but gamma/beta come from the style input
        rho = porch.Tensor(self.rho).expand(x.shape[0], -1, -1, -1)
        out = rho * out_in + (1 - rho) * out_ln
        out = out * porch.Tensor(gamma).unsqueeze(2).unsqueeze(3) + porch.Tensor(beta).unsqueeze(2).unsqueeze(3)

        return out
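The rho-blend in both normalization layers can be verified with a small numpy sketch (hypothetical shapes, not from the source): at rho = 1 the output reduces to pure instance normalization, at rho = 0 to pure layer normalization.

import numpy as np

x = np.random.randn(2, 4, 8, 8)   # N, C, H, W
eps, rho = 1e-5, 1.0
out_in = (x - x.mean(axis=(2, 3), keepdims=True)) / np.sqrt(x.var(axis=(2, 3), keepdims=True) + eps)
out_ln = (x - x.mean(axis=(1, 2, 3), keepdims=True)) / np.sqrt(x.var(axis=(1, 2, 3), keepdims=True) + eps)
out = rho * out_in + (1 - rho) * out_ln
print(np.allclose(out, out_in))   # True when rho == 1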
Code Example #6
def avg_pool2d(input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None):
    # divisor_override is accepted for pytorch API compatibility but not supported by fluid.layers.pool2d
    if stride is None:
        stride = kernel_size

    return torch.Tensor(fluid.layers.pool2d(input,
                                             pool_size=kernel_size, pool_type="avg", pool_stride=stride,
                                             pool_padding=padding, global_pooling=False, use_cudnn=True,
                                             ceil_mode=ceil_mode, name=None, exclusive=not count_include_pad,
                                             data_format="NCHW"))
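Usage mirrors `torch.nn.functional.avg_pool2d`; a minimal sketch (assuming an active fluid dygraph guard, which the other functional shims in this set also expect):

import numpy as np

x = torch.Tensor(np.random.randn(1, 3, 8, 8).astype("float32"))
y = avg_pool2d(x, kernel_size=2)   # stride defaults to kernel_size, matching pytorch
print(y.shape)                     # expect [1, 3, 4, 4]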
Code Example #7
def sqrt_newton_schulz(A, numIters, dtype=None):
  with torch.no_grad():
    if dtype is None:
      dtype = A.dtype
    batchSize = A.shape[0]
    dim = A.shape[1]
    # normalize by the Frobenius norm so the Newton-Schulz iteration converges
    normA = A.mul(A).sum(dim=1).sum(dim=1).sqrt()
    Y = torch.Tensor(A/(normA.view(batchSize, 1, 1).expand(*A.shape)))
    I = torch.Tensor(torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).astype("float32"))
    Z = torch.Tensor(torch.eye(dim,dim).view(1, dim, dim).repeat(batchSize,1,1).astype("float32"))
    for i in range(numIters):
      T = torch.Tensor(0.5*(3.0*I - Z.bmm(Y)))
      Y = Y.bmm(T)
      Z = T.bmm(Z)

    # rescale: sqrt(A) = sqrt(||A||) * sqrt(A / ||A||)
    sA = Y*torch.sqrt(normA).view(batchSize, 1, 1).expand(*A.shape)
  return sA
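A quick check of the iteration (a sketch, assuming the same `torch` shim as above): for a symmetric positive semi-definite batch, squaring the result should approximately recover `A`.

import numpy as np

m = np.random.randn(4, 4).astype("float32")
A = torch.Tensor((m @ m.T)[None, ...])   # 1 x 4 x 4, symmetric PSD
sA = sqrt_newton_schulz(A, 50)
print(np.allclose(sA.bmm(sA).numpy(), A.numpy(), atol=1e-2))   # expect True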
Code Example #8
def batch_norm(x,  running_mean, running_var, weight=None, bias=None,
               training=False, momentum=0.1, eps=1e-5):
    # build a fresh BatchNorm layer per call and copy the provided statistics/affine params into it
    layer_object = fluid.dygraph.BatchNorm(x.shape[1], momentum=momentum, epsilon=eps, trainable_statistics=training)
    fluid.layers.assign(running_mean, layer_object._mean)
    fluid.layers.assign(running_var, layer_object._variance)
    if weight is not None:
        fluid.layers.assign(weight, layer_object.weight)
    if bias is not None:
        fluid.layers.assign(bias, layer_object.bias)
    return torch.Tensor(layer_object(x))
Code Example #9
 def forward(self, x, y):
     out = self.main(x)
     out = porch.Tensor(out)
     out = out.view(out.size(0), -1)  # (batch, num_domains)
     idx = porch.LongTensor(np.arange(y.shape[0]))
     # out = out[idx, y]  # (batch)
     s = porch.take(
         out, list(zip(range(y.shape[0]),
                       y.numpy().astype(int).tolist())))
     return s
Code Example #10
 def forward(self, x, y):
     # Calculate class-conditional gains and biases
     gain = torch.Tensor(1 + self.gain(y)).view(y.size(0), -1, 1, 1)
     bias = torch.Tensor(self.bias(y)).view(y.size(0), -1, 1, 1)
     # If using my batchnorm
     if self.mybn or self.cross_replica:
         return self.bn(x, gain=gain, bias=bias)
     else:
         if self.norm_style == 'bn':
             out = F.batch_norm(x, self.stored_mean, self.stored_var, None,
                                None, self.training, 0.1, self.eps)
         elif self.norm_style == 'in':
             out = F.instance_norm(x, self.stored_mean, self.stored_var,
                                   None, None, self.training, 0.1, self.eps)
         elif self.norm_style == 'gn':
             out = groupnorm(x, self.norm_style)
         elif self.norm_style == 'nonorm':
             out = x
         return out * gain + bias
Code Example #11
def interpolate(input, size=None, scale_factor=None, mode='nearest', align_corners=False, align_mode=1, data_format='NCHW'):
    if isinstance(size, int):
        size = [size, size]
    return torch.Tensor(fluid.layers.interpolate(input,
                out_shape=size,
                scale=scale_factor,
                name=None,
                resample=mode.upper(),
                actual_shape=None,
                align_corners=align_corners,
                align_mode=align_mode,
                data_format=data_format))
Code Example #12
    def forward(self, inputs):
        x = self.model(inputs)

        gap = porch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * porch.Tensor(gap_weight).unsqueeze(0).unsqueeze(3)

        gmp = porch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * porch.Tensor(gmp_weight).unsqueeze(0).unsqueeze(3)

        cam_logit = porch.cat([gap_logit, gmp_logit], 1)
        x = porch.cat([gap, gmp], 1)
        x = self.leaky_relu(self.conv1x1(x))

        heatmap = porch.sum(x, dim=1, keepdim=True)
        x = self.pad(x)
        out = self.conv(x)

        return out, cam_logit, heatmap
Code Example #13
    def train(self):
        G = nx.Graph()
        G.add_edges_from(self.data.edge_index.t().tolist())
        embeddings = self.model.train(G)

        # map each graph node to its embedding row
        features_matrix = np.zeros((self.num_nodes, self.hidden_size))
        for vid, node in enumerate(G.nodes()):
            features_matrix[node] = embeddings[vid]

        label_matrix = torch.Tensor(self.label_matrix)

        return self._evaluate(features_matrix, label_matrix, self.num_shuffle)
Code Example #14
 def W_(self):
     self.training = True
     if isinstance(self, SNLinear):
         W_mat = torch.Tensor(self.weight).t()  # paddle linear weight layout differs from pytorch, so transpose
     else:
         W_mat = torch.Tensor(self.weight).view(self.weight.shape[0], -1)
     if self.transpose:
         W_mat = W_mat.t()
     # Apply num_itrs power iterations
     for _ in range(self.num_itrs):
         svs, us, vs = power_iteration(W_mat,
                                       self.u,
                                       update=self.training,
                                       eps=self.eps)
     # Update the svs
     if self.training:
         with torch.no_grad():  # make sure to do this in a no_grad() context or you'll get memory leaks!
             for i, sv in enumerate(svs):
                 torch.copy(sv, self.sv[i])  # equivalent to self.sv[i][:] = sv
     return self.weight / svs[0]
Code Example #15
File: solver.py  Project: zzz2010/starganv2_paddle
def r1_reg(d_out, x_in):
    # NOTE: the early return below disables the penalty; the remainder of the function is dead code
    return 0.0
    from paddle import fluid
    # zero-centered gradient penalty for real images
    batch_size = x_in.shape[0]
    try:
        grad_dout = fluid.dygraph.grad(outputs=d_out.sum(),
                                       inputs=x_in,
                                       create_graph=False,
                                       retain_graph=True,
                                       only_inputs=True)[0]
        grad_dout2 = porch.Tensor(grad_dout).pow(2)
        assert (grad_dout2.shape == x_in.shape)
        reg = 0.5 * grad_dout2.view(batch_size, -1).sum(1).mean(0)
        return reg
    except:
        return 0.0
Code Example #16
def from_dlpack(dlpack):
    tensor_from_dlpack = fluid.core.from_dlpack(dlpack)
    place = tensor_from_dlpack._place()
    if True:  # originally `"win" in platform` (CPU env); hardcoded True, so the else branch below is dead
        if "int32" in str(tensor_from_dlpack):
            return paddorch.convertTensor(
                paddle.to_tensor(np.array(tensor_from_dlpack), dtype="int32"))
        else:
            return paddorch.Tensor(
                paddle.to_tensor(np.array(tensor_from_dlpack)))
    else:
        with paddle.fluid.dygraph.guard(place=place):
            tensor_from_dlpack.__class__ = paddle.fluid.LoDTensor
            ret = paddle.Tensor(tensor_from_dlpack)
            if "int32" in str(tensor_from_dlpack):
                ret = paddle.to_tensor(ret, dtype="int32")
            tensor_from_dlpack.__class__ = paddle.fluid.core_avx.Tensor
        return ret
Code Example #17
def power_iteration(W, u_, update=True, eps=1e-12):
    # Lists holding singular vectors and values
    Wt = torch.Tensor(W).t()
    us, vs, svs = [], [], []
    for i, u in enumerate(u_):
        # Run one step of the power iteration
        with torch.no_grad():
            v = torch.matmul(u, W)
            # if (W.shape[0]==u.shape[1])  :
            #   v = torch.matmul(u, W)
            # else:
            #   v = torch.matmul(u, Wt)
            # Run Gram-Schmidt to subtract components of all other singular vectors
            v = F.normalize(gram_schmidt(v, vs), eps=eps)
            # Add to the list
            vs += [v]
            # Update the other singular vector
            u = torch.matmul(v, Wt)
            # if (W.shape[0]!=v.shape[1]):
            #   u = torch.matmul(v, Wt  )
            # else:
            #   u = torch.matmul(v, W)
            # Run Gram-Schmidt to subtract components of all other singular vectors
            u = F.normalize(gram_schmidt(u, us), eps=eps)
            # Add to the list
            us += [u]
            if update:
                torch.copy(u, u_[i])
                # u_[i][:] = u
        # Compute this singular value and add it to the list
        svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt), u.t()))]
        # if (W.shape[0]!=v.shape[1]):
        #   svs += [torch.squeeze(torch.matmul(torch.matmul(v, Wt  ), u.t() ))]
        # else:
        #   svs += [torch.squeeze(torch.matmul(torch.matmul(v, W), u.t()))]
        #svs += [torch.sum(F.linear(u, W.transpose(0, 1)) * v)]
    return svs, us, vs
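A rough check (a sketch, assuming `gram_schmidt` and `F.normalize` from the same module are in scope, and that `u_` holds a single 1 x rows vector as in the SN layers above): repeated calls converge toward the leading singular value from numpy's SVD.

import numpy as np

W = torch.Tensor(np.random.randn(8, 4).astype("float32"))
u0 = torch.Tensor(np.random.randn(1, 8).astype("float32"))
for _ in range(50):   # update=True writes the refined u back into u0 each call
    svs, us, vs = power_iteration(W, [u0], update=True)
print(svs[0].numpy(), np.linalg.svd(W.numpy(), compute_uv=False)[0])   # should roughly agree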
Code Example #18
def torch_calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
    """Pytorch implementation of the Frechet Distance.
    Taken from https://github.com/bioinf-jku/TTUR
    The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
    and X_2 ~ N(mu_2, C_2) is
            d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
    Stable version by Dougal J. Sutherland.
    Params:
    -- mu1   : Numpy array containing the activations of a layer of the
               inception net (like returned by the function 'get_predictions')
               for generated samples.
    -- mu2   : The sample mean over activations, precalculated on a
               representative data set.
    -- sigma1: The covariance matrix over activations for generated samples.
    -- sigma2: The covariance matrix over activations, precalculated on a
               representative data set.
    Returns:
    --   : The Frechet Distance.
    """
    mu1 = torch.Tensor(mu1.astype("float32"))
    sigma1 = torch.Tensor(sigma1.astype("float32"))
    mu2 = torch.Tensor(mu2.astype("float32"))
    sigma2 = torch.Tensor(sigma2.astype("float32"))

    assert mu1.shape == mu2.shape, \
      'Training and test mean vectors have different lengths'
    assert sigma1.shape == sigma2.shape, \
      'Training and test covariances have different dimensions'

    diff = mu1 - mu2

    # Run 50 itrs of newton-schulz to get the matrix sqrt of sigma1 dot sigma2
    covmean = torch.Tensor(
        sqrt_newton_schulz(torch.Tensor(sigma1).mm(sigma2).unsqueeze(0),
                           50)).squeeze()
    out = (torch.dot(diff, diff) + torch.trace(sigma1) + torch.trace(sigma2) -
           2 * torch.trace(covmean))
    return out
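A smoke test (a minimal sketch, assuming `sqrt_newton_schulz` from the earlier example is in scope): identical statistics should give a distance near zero, up to Newton-Schulz error.

import numpy as np

mu = np.random.randn(64).astype("float32")
a = np.random.randn(64, 64).astype("float32")
sigma = a @ a.T   # positive semi-definite covariance
d = torch_calculate_frechet_distance(mu, sigma, mu.copy(), sigma.copy())
print(float(d))   # expect a value close to 0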
Code Example #19
def dropout(input, p=0.5, training=True, inplace=False):
    return torch.Tensor(fluid.layers.dropout(input,
                                             p,
                                             is_test=not training,
                                             dropout_implementation='upscale_in_train'))
Code Example #20
def leaky_relu(input, negative_slope=0.01, inplace=False):
    return torch.Tensor(fluid.layers.leaky_relu(input, alpha=negative_slope, name=None))
Code Example #21
def relu(input, inplace=False):
    return torch.Tensor(fluid.layers.relu(input))
Code Example #22

import torch

torch.manual_seed(0)
a = torch.randn(70839, 64)

b = torch.randn(64, 64, requires_grad=True)


print(torch.argmax(torch.matmul(a,b)))
import paddorch
import paddle
a2 = paddorch.Tensor(a.detach().cpu().numpy())
b2 = paddorch.Tensor(b.detach().cpu().numpy())

print(paddle.argmax(paddorch.matmul(a2, b2)))

Code Example #23
def adaptive_max_pool2d(input, output_size):
    return torch.Tensor(fluid.layers.adaptive_pool2d(input, pool_size=output_size, pool_type="max"))
Code Example #24
def sigmoid(x):
    return torch.Tensor(fluid.layers.sigmoid(x))
Code Example #25
            16, 64, 3)  # copy.deepcopy(mapping_network)
        out_model_fn = "../expr/checkpoints/afhq/100000_nets_ema.ckpt/mapping_network.pdparams"
        mapping_network_ema.load_state_dict(porch.load(out_model_fn))

    else:
        mapping_network_ema = core.model.MappingNetwork(
            16, 64, 2)  # copy.deepcopy(mapping_network)
        out_model_fn = "../expr/checkpoints/celeba_hq/100000_nets_ema.ckpt/mapping_network.pdparams"
        mapping_network_ema.load_state_dict(porch.load(out_model_fn))

    d_optimizer = fluid.optimizer.AdamOptimizer(
        learning_rate=lr, parameter_list=mapping_network_ema.parameters())
    from tqdm import tqdm

    mapping_network_ema.train()
    z_train_p = porch.Tensor(z_train)
    y_train_p = porch.LongTensor(y_train)
    m_out_train_p = porch.Tensor(m_out_train)
    best_loss = 100000000
    for ii in range(100000000000):
        st = np.random.randint(0, z_train_p.shape[0] - batch_size)
        out = mapping_network_ema(z_train_p[st:st + batch_size],
                                  y_train_p[st:st + batch_size])
        d_avg_cost = fluid.layers.mse_loss(
            out, m_out_train_p[st:st + batch_size]
        )  #+fluid.layers.mse_loss(out1,m_out_train_1p)+fluid.layers.mse_loss(out2,m_out_train_2p)

        d_avg_cost.backward()
        d_optimizer.minimize(d_avg_cost)
        mapping_network_ema.clear_gradients()
        if ii % 99 == 0:
Code Example #26
def softmax(input, dim=None, _stacklevel=3, dtype=None):
    return torch.Tensor(fluid.layers.softmax(input, axis=dim))
Code Example #27
File: utils.py  Project: vincenthesiyuan/paddle_torch
def make_grid(tensor,
              nrow=8,
              padding=2,
              normalize=False,
              range=None,
              scale_each=False,
              pad_value=0):
    """Make a grid of images.

    Args:
        tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
            or a list of images all of the same size.
        nrow (int, optional): Number of images displayed in each row of the grid.
            The final grid size is ``(B / nrow, nrow)``. Default: ``8``.
        padding (int, optional): amount of padding. Default: ``2``.
        normalize (bool, optional): If True, shift the image to the range (0, 1),
            by the min and max values specified by :attr:`range`. Default: ``False``.
        range (tuple, optional): tuple (min, max) where min and max are numbers,
            then these numbers are used to normalize the image. By default, min and max
            are computed from the tensor.
        scale_each (bool, optional): If ``True``, scale each image in the batch of
            images separately rather than the (min, max) over all images. Default: ``False``.
        pad_value (float, optional): Value for the padded pixels. Default: ``0``.

    Example:
        See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_

    """

    # note: converting up front means the list-of-tensors branch below can no longer trigger
    tensor = torch.Tensor(tensor)
    if not (torch.is_tensor(tensor) or
            (isinstance(tensor, list)
             and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError('tensor or list of tensors expected, got {}'.format(
            type(tensor)))

    # if list of tensors, convert to a 4D mini-batch Tensor
    if isinstance(tensor, list):
        tensor = torch.stack(tensor, dim=0)

    if tensor.dim() == 2:  # single image H x W
        tensor = tensor.unsqueeze(0)
    if tensor.dim() == 3:  # single image
        if tensor.size(0) == 1:  # if single-channel, convert to 3-channel
            tensor = torch.cat((tensor, tensor, tensor), 0)
        tensor = tensor.unsqueeze(0)

    if tensor.dim() == 4 and tensor.size(1) == 1:  # single-channel images
        tensor = torch.cat((tensor, tensor, tensor), 1)

    if normalize is True:
        tensor = tensor.clone()  # avoid modifying tensor in-place
        if range is not None:
            assert isinstance(range, tuple), \
                "range has to be a tuple (min, max) if specified. min and max are numbers"

        def norm_ip(img, min, max):
            img.clamp_(min=min, max=max)
            img.add_(-min).div_(max - min + 1e-5)

        def norm_range(t, range):
            if range is not None:
                norm_ip(t, range[0], range[1])
            else:
                norm_ip(t, float(t.min()), float(t.max()))

        if scale_each is True:
            for t in tensor:  # loop over mini-batch dimension
                norm_range(t, range)
        else:
            norm_range(tensor, range)

    if tensor.size(0) == 1:
        return tensor.squeeze(0)

    # make the mini-batch of images into a grid
    nmaps = tensor.size(0)
    xmaps = min(nrow, nmaps)
    ymaps = int(math.ceil(float(nmaps) / xmaps))
    height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
    num_channels = tensor.size(1)
    # grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value)
    grid = np.zeros((num_channels, height * ymaps + padding,
                     width * xmaps + padding)) + pad_value
    k = 0
    for y in irange(ymaps):
        for x in irange(xmaps):
            if k >= nmaps:
                break
            # sub_grid=grid[:, (y * height + padding):(y * height + padding+height - padding) ][:,:,(x * width + padding):(x * width + padding+width - padding)]
            # torch.copy(tensor[k],sub_grid)
            grid[:, (y * height + padding):(y * height + padding + height - padding),
                 (x * width + padding):(x * width + padding + width - padding)] = tensor[k].numpy()
            # torch.copy(tensor[k],torch.narrow(torch.narrow(grid,1, y * height + padding, height - padding)\
            #     ,2, x * width + padding, width - padding) )

            k = k + 1
    return torch.Tensor(grid)
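Typical usage (a sketch, assuming the module-level `math`, `np`, and `irange` names the function relies on are imported):

import numpy as np

batch = torch.Tensor(np.random.rand(16, 3, 32, 32).astype("float32"))   # 16 RGB images in [0, 1)
grid = make_grid(batch, nrow=4, padding=2, normalize=True)
print(grid.shape)   # expect [3, 138, 138]: 4x4 tiles of 32 px images with 2 px padding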
Code Example #28
def tanh(x):
    return torch.Tensor(fluid.layers.tanh(x))
Code Example #29


    return FAN(fname_pretrained="./wing.ckpt")



if __name__ == '__main__':
    from paddorch.convert_pretrain_model import load_pytorch_pretrain_model
    import torch as pytorch
    import torchvision

    # place = fluid.CPUPlace()
    place = fluid.CUDAPlace(0)
    np.random.seed(0)
    x = np.random.randn(1, 3, 256, 256).astype("float32")
    with fluid.dygraph.guard(place=place):
        model = FAN()
        model.eval()
        pytorch_model = eval_pytorch_model()
        pytorch_model.eval()
        torch_output = pytorch_model(pytorch.FloatTensor(x))[1][0]
        pytorch_state_dict = pytorch_model.state_dict()
        load_pytorch_pretrain_model(model, pytorch_state_dict)
        torch.save(model.state_dict(), "wing")
        paddle_output = model(torch.Tensor(x))[1][0]

        print("torch mean", torch_output.mean())
        print("paddle mean", torch.mean(paddle_output).numpy())
Code Example #30
 def __call__(self, module):
     # clip the learnable blend ratio `rho` to [clip_min, clip_max] after each optimizer step
     if hasattr(module, "rho"):
         w = porch.Tensor(module.rho)
         w = w.clamp_(self.clip_min, self.clip_max)
         module.rho = w