Example #1
def cosine_similarity(x1, x2, dim=1, eps=1e-8):
    r"""Returns cosine similarity between x1 and x2, computed along dim.

    .. math ::
        \text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}

    Args:
        x1 (Variable): First input.
        x2 (Variable): Second input (of size matching x1).
        dim (int, optional): Dimension of vectors. Default: 1
        eps (float, optional): Small value to avoid division by zero.
            Default: 1e-8

    Shape:
        - Input: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`.
        - Output: :math:`(\ast_1, \ast_2)` where 1 is at position `dim`.

    Example::

        >>> input1 = autograd.Variable(torch.randn(100, 128))
        >>> input2 = autograd.Variable(torch.randn(100, 128))
        >>> output = F.cosine_similarity(input1, input2)
        >>> print(output)
    """
    w12 = torch.sum(x1 * x2, dim)
    w1 = torch.norm(x1, 2, dim)
    w2 = torch.norm(x2, 2, dim)
    return (w12 / (w1 * w2).clamp(min=eps)).squeeze()
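
A quick sanity check of the formula above, as a hedged sketch for current PyTorch (Variable is no longer needed; shapes are hypothetical):

import torch
import torch.nn.functional as F

x1 = torch.randn(100, 128)
x2 = torch.randn(100, 128)
out = F.cosine_similarity(x1, x2, dim=1)
# the same quantity computed manually, following the docstring formula
manual = (x1 * x2).sum(dim=1) / (x1.norm(dim=1) * x2.norm(dim=1)).clamp(min=1e-8)
print(torch.allclose(out, manual, atol=1e-6))  # True
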
Example #2
    def updateOutput(self, input):
        assert input.dim() == 2

        inputSize = self.weight.size(1)
        outputSize = self.weight.size(0)

        if self._weightNorm is None:
            self._weightNorm = self.weight.new()
        if self._inputNorm is None:
            self._inputNorm = self.weight.new()

        # y_j = (w_j * x) / ( || w_j || * || x || )

        torch.norm(self.weight, 2, 1, out=self._weightNorm, keepdim=True).add_(1e-12)

        batchSize = input.size(0)
        nelement = self.output.nelement()
        self.output.resize_(batchSize, outputSize)
        if self.output.nelement() != nelement:
            self.output.zero_()

        self.output.addmm_(0., 1., input, self.weight.t())

        torch.norm(input, 2, 1, out=self._inputNorm, keepdim=True).add_(1e-12)
        self.output.div_(self._weightNorm.view(1, outputSize).expand_as(self.output))
        self.output.div_(self._inputNorm.expand_as(self.output))
        return self.output
def extract_feature(model,dataloaders):
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        img, label = data
        n, c, h, w = img.size()
        count += n
        print(count)
        ff = torch.FloatTensor(n, 2048).zero_()
        for i in range(2):
            if(i==1):
                img = fliplr(img)
            input_img = Variable(img.cuda())
            outputs,f = model(input_img) 
            f = f.data.cpu()
            ff = ff+f
        # norm feature
        if opt.PCB:
            # feature size (n,2048,6)
            # 1. To treat every part equally, I calculate the norm for every 2048-dim part feature.
            # 2. To keep the cosine score in [-1, 1], the norm is multiplied by sqrt(6) so the whole (2048*6) feature has unit length.
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6) 
            ff = ff.div(fnorm.expand_as(ff))
            ff = ff.view(ff.size(0), -1)
        else:
            fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
            ff = ff.div(fnorm.expand_as(ff))

        features = torch.cat((features,ff), 0)
    return features
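
A hedged sketch of only the PCB normalization described in the comments, on a hypothetical random feature tensor: each 2048-dim part is divided by its own norm times sqrt(6), so the flattened (2048*6) vector ends up with unit L2 norm.

import numpy as np
import torch

ff = torch.randn(4, 2048, 6)                                   # (n, 2048, 6) part features
fnorm = torch.norm(ff, p=2, dim=1, keepdim=True) * np.sqrt(6)  # per-part norm, scaled by sqrt(6)
ff = ff.div(fnorm.expand_as(ff)).view(ff.size(0), -1)          # flatten to (n, 2048*6)
print(torch.norm(ff, dim=1))                                   # every row has norm ~1
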
Example #4
    def angle_length_loss(y_pred, y_true, weights):
        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)
        weights = weights.permute(0, 2, 3, 1)

        # Single threshold

        # score_per_bundle = {}
        # bundles = ExpUtils.get_bundle_names(HP.CLASSES)[1:]

        nr_of_classes = int(y_true.shape[-1] / 3.)
        scores = torch.zeros(nr_of_classes)

        for idx in range(nr_of_classes):
            y_pred_bund = y_pred[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()
            y_true_bund = y_true[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()  # [x,y,z,3]
            weights_bund = weights[:, :, :, (idx * 3)].contiguous()  # [x,y,z]

            angles = PytorchUtils.angle_last_dim(y_pred_bund, y_true_bund)
            angles_weighted = angles / weights_bund
            #norm lengths to 0-1 to be more equal to angles?? -> peaks are already around 1 -> ok
            lengths = (torch.norm(y_pred_bund, 2., -1) - torch.norm(y_true_bund, 2, -1)) ** 2
            lengths_weighted = lengths * weights_bund

            # Divide by weights.max(), otherwise the length term would dominate
            # Flip angles to make it a minimization problem
            combined = -angles_weighted + lengths_weighted / weights_bund.max()

            scores[idx] = torch.mean(combined)

        return torch.mean(scores)
 def nn(self, word, k):
     embedding = self.mu.weight.data.cpu() # [dict, embed_size]
     vector = embedding[self.dset.stoi[word], :].view(-1, 1) # [embed_size, 1]
     distance = torch.mm(embedding, vector).squeeze() / torch.norm(embedding, 2, 1)
     distance = distance / torch.norm(vector, 2, 0)[0]
     distance = distance.numpy()
     index = np.argsort(distance)[-k:][::-1]  # argsort is ascending, so take the last k (most similar) in descending order
     return [self.dset.itos[x] for x in index]
Example #6
def torch_pearsonr(x, y):  # https://github.com/pytorch/pytorch/issues/1254
    mean_x = torch.mean(x)
    mean_y = torch.mean(y)
    xm = x.sub(mean_x)
    ym = y.sub(mean_y)
    r_num = xm.dot(ym)
    r_den = torch.norm(xm, 2) * torch.norm(ym, 2)
    r_val = r_num / r_den
    return r_val
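
A small check of this Pearson correlation against numpy on hypothetical data (assumes only torch and numpy):

import numpy as np
import torch

x = torch.randn(1000)
y = 0.5 * x + torch.randn(1000)
print(float(torch_pearsonr(x, y)), np.corrcoef(x.numpy(), y.numpy())[0, 1])  # the two values should agree closely
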
def train_multilabel(features, targets, classes, train_split, test_split, C=1.0, ignore_hard_examples=True, after_ReLU=False, normalize_L2=False):
    print('\nHyperparameters:\n - C: {}\n - after_ReLU: {}\n - normL2: {}'.format(C, after_ReLU, normalize_L2))
    train_APs = []
    test_APs = []
    for class_id in range(len(classes)):
        
        classifier = SVC(C=C, kernel='linear') # http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
        
        if ignore_hard_examples:
            train_masks = (targets[train_split][:,class_id] != 0).view(-1, 1)
            train_features = torch.masked_select(features[train_split], train_masks.expand_as(features[train_split])).view(-1,features[train_split].size(1))
            train_targets = torch.masked_select(targets[train_split], train_masks.expand_as(targets[train_split])).view(-1,targets[train_split].size(1))
            test_masks = (targets[test_split][:,class_id] != 0).view(-1, 1)
            test_features = torch.masked_select(features[test_split], test_masks.expand_as(features[test_split])).view(-1,features[test_split].size(1))
            test_targets = torch.masked_select(targets[test_split], test_masks.expand_as(targets[test_split])).view(-1,targets[test_split].size(1))
        else:
            train_features = features[train_split]
            train_targets = targets[train_split]
            test_features = features[test_split]
            test_targets = targets[test_split]

        if after_ReLU:
            train_features[train_features < 0] = 0
            test_features[test_features < 0] = 0

        if normalize_L2:
            train_norm = torch.norm(train_features, p=2, dim=1).unsqueeze(1)
            train_features = train_features.div(train_norm.expand_as(train_features))
            test_norm = torch.norm(test_features, p=2, dim=1).unsqueeze(1)
            test_features = test_features.div(test_norm.expand_as(test_features))

        train_X = train_features.numpy()
        train_y = (train_targets[:,class_id] != -1).numpy() # uses hard examples if not ignored

        test_X = test_features.numpy()
        test_y = (test_targets[:,class_id] != -1).numpy()

        classifier.fit(train_X, train_y) # train parameters of the classifier

        train_preds = classifier.predict(train_X)
        train_acc = accuracy_score(train_y, train_preds) * 100
        train_AP = average_precision_score(train_y, train_preds) * 100
        train_APs.append(train_AP)

        test_preds = classifier.predict(test_X)
        test_acc = accuracy_score(test_y, test_preds) * 100
        test_AP = average_precision_score(test_y, test_preds) * 100
        test_APs.append(test_AP)

        print('class "{}" ({}/{}):'.format(classes[class_id], test_y.sum(), test_y.shape[0]))
        print('  - {:8}: acc {:.2f}, AP {:.2f}'.format(train_split, train_acc, train_AP))
        print('  - {:8}: acc {:.2f}, AP {:.2f}'.format(test_split, test_acc, test_AP))

    print('all classes:')
    print('  - {:8}: mAP {:.4f}'.format(train_split, sum(train_APs)/len(classes)))
    print('  - {:8}: mAP {:.4f}'.format(test_split, sum(test_APs)/len(classes)))
def getM(mods):
    for m in mods:
        if isinstance(m, legacy.nn.SpatialConvolution):
            m.gradWeight[m.gradWeight.ne(m.gradWeight)] = 0
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Linear):
            l.append(torch.norm(m.gradWeight))
        elif isinstance(m, legacy.nn.Concat) or \
             isinstance(m, legacy.nn.Sequential):
            getM(m.modules)
Example #9
 def test_importance_prior(self):
     posterior = pyro.infer.Importance(self.model, guide=None, num_samples=10000)
     marginal = pyro.infer.Marginal(posterior)
     posterior_samples = [marginal() for i in range(1000)]
     posterior_mean = torch.mean(torch.cat(posterior_samples))
     posterior_stddev = torch.std(torch.cat(posterior_samples), 0)
     self.assertEqual(0, torch.norm(posterior_mean - self.mu_mean).data[0],
                      prec=0.01)
     self.assertEqual(0, torch.norm(posterior_stddev - self.mu_stddev).data[0],
                      prec=0.1)
Example #10
    def calc_peak_length_dice_pytorch(HP, y_pred, y_true, max_angle_error=[0.9], max_length_error=0.1):
        '''
        Calculate a Dice-like (F1) score per bundle: a voxel counts as a hit if both its angle error and its length error are below the given thresholds.

        :param y_pred:
        :param y_true:
        :param max_angle_error:  0.7 ->  angle error of 45° or less; 0.9 ->  angle error of 23° or less
                                 Can be list with several values -> calculate for several thresholds
        :return:
        '''
        import torch
        from tractseg.libs.PytorchEinsum import einsum
        from tractseg.libs.PytorchUtils import PytorchUtils

        y_true = y_true.permute(0, 2, 3, 1)
        y_pred = y_pred.permute(0, 2, 3, 1)

        def angle_last_dim(a, b):
            '''
            Calculate the angle between two nd-arrays (array of vectors) along the last dimension

            without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
            np.arccos -> returns the angle in radians (90°: 0.5*pi)

            return: one dimension less than the input
            '''
            return torch.abs(einsum('abcd,abcd->abc', a, b) / (torch.norm(a, 2., -1) * torch.norm(b, 2, -1) + 1e-7))

        #Single threshold
        score_per_bundle = {}
        bundles = ExpUtils.get_bundle_names(HP.CLASSES)[1:]
        for idx, bundle in enumerate(bundles):
            # if bundle == "CST_right":
            y_pred_bund = y_pred[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()
            y_true_bund = y_true[:, :, :, (idx * 3):(idx * 3) + 3].contiguous()      # [x,y,z,3]

            angles = angle_last_dim(y_pred_bund, y_true_bund)

            lengths_pred = torch.norm(y_pred_bund, 2, -1)
            lengths_true = torch.norm(y_true_bund, 2, -1)
            lengths_binary = torch.abs(lengths_pred - lengths_true) < (max_length_error * lengths_true)
            lengths_binary = lengths_binary.view(-1)

            gt_binary = y_true_bund.sum(dim=-1) > 0
            gt_binary = gt_binary.view(-1)  # [bs*x*y]

            angles_binary = angles > max_angle_error[0]
            angles_binary = angles_binary.view(-1)

            combined = lengths_binary * angles_binary

            f1 = PytorchUtils.f1_score_binary(gt_binary, combined)
            score_per_bundle[bundle] = f1
        return score_per_bundle
Example #11
def th_pearsonr(x, y):
    """
    mimics scipy.stats.pearsonr
    """
    mean_x = th.mean(x)
    mean_y = th.mean(y)
    xm = x.sub(mean_x)
    ym = y.sub(mean_y)
    r_num = xm.dot(ym)
    r_den = th.norm(xm, 2) * th.norm(ym, 2)
    r_val = r_num / r_den
    return r_val
Example #12
    def angle_last_dim(a, b):
        '''
        Calculate the angle between two nd-arrays (array of vectors) along the last dimension

        without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
        np.arccos -> returns the angle in radians (90°: 0.5*pi)

        return: one dimension less than the input
        '''
        from tractseg.libs.PytorchEinsum import einsum

        return torch.abs(einsum('abcd,abcd->abc', a, b) / (torch.norm(a, 2., -1) * torch.norm(b, 2, -1) + 1e-7))
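
A hedged sketch of the same computation with torch.einsum standing in for the project-local einsum helper (assumed equivalent for this subscript pattern): comparing a field of unit vectors with itself gives values close to 1.

import torch

a = torch.nn.functional.normalize(torch.randn(2, 4, 4, 3), dim=-1)   # hypothetical (batch, x, y, 3) peaks
cos_abs = torch.abs(torch.einsum('abcd,abcd->abc', a, a)
                    / (torch.norm(a, 2, -1) * torch.norm(a, 2, -1) + 1e-7))
print(cos_abs.min(), cos_abs.max())   # both ~1 for parallel vectors
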
Example #13
 def triplet_loss(self, z_p, z_n, z_d, margin=0.1, l2=0):
     l_n = torch.sqrt(((z_p - z_n) ** 2).sum(dim=1))
     l_d = - torch.sqrt(((z_p - z_d) ** 2).sum(dim=1))
     l_nd = l_n + l_d
     loss = F.relu(l_n + l_d + margin)
     l_n = torch.mean(l_n)
     l_d = torch.mean(l_d)
     l_nd = torch.mean(l_n + l_d)
     loss = torch.mean(loss)
     if l2 != 0:
         loss += l2 * (torch.norm(z_p) + torch.norm(z_n) + torch.norm(z_d))
     return loss, l_n, l_d, l_nd
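
A minimal standalone sketch of the hinge part of this triplet loss, on random embeddings with hypothetical sizes:

import torch
import torch.nn.functional as F

z_p, z_n, z_d = torch.randn(8, 16), torch.randn(8, 16), torch.randn(8, 16)
l_n = torch.sqrt(((z_p - z_n) ** 2).sum(dim=1))    # distance to the "near" example
l_d = torch.sqrt(((z_p - z_d) ** 2).sum(dim=1))    # distance to the "distant" example
loss = F.relu(l_n - l_d + 0.1).mean()              # penalized when near is not closer than distant by the margin
print(loss)
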
Example #14
    def _transform(self, x):
        bands = self.dct_transform.frequency_decomposition(
            x, self.factors, axis=-1)

        norms = [torch.norm(b, dim=-1, keepdim=True) for b in bands]
        bands = [b / (n + 1e-8) for (b, n) in zip(bands, norms)]
        fine = torch.cat(bands, dim=-1)

        coarse = torch.cat(norms, dim=-1)
        coarse_norms = torch.norm(coarse, dim=-1, keepdim=True)
        coarse = coarse / (coarse_norms + 1e-8)

        return fine, coarse
 def distance_calcu(self, gallery, query):
     # x = torch.autograd.Variable(torch.cat([gallery,query]))
     gallery = gallery.expand_as(query).contiguous()
     x = torch.cat([gallery, query],1)
     W = self.adpW(x)
     num = query.size(0)
     dist1 = torch.norm(torch.matmul(W,gallery.view(num,-1,1))
                       -torch.matmul(W,query.view(num,-1,1)),2,1)
     x = torch.cat([query,gallery],1)
     W = self.adpW(x)
     dist2 = torch.norm(torch.matmul(W, gallery.view(num, -1, 1))
                       - torch.matmul(W, query.view(num, -1, 1)), 2, 1)
     dist = 0.5*(dist1+dist2)
     return dist
Example #16
    def angle_second_dim(a, b):
        '''
        Not working !
        RuntimeError: invalid argument 2: input is not contiguous

        Calculate the angle between two nd-arrays (array of vectors) along the second dimension

        without anything further: 1->0°, 0.9->23°, 0.7->45°, 0->90°
        np.arccos -> returns the angle in radians (90°: 0.5*pi)

        return: one dimension less than the input
        '''
        from tractseg.libs.PytorchEinsum import einsum

        return torch.abs(einsum('abcd,abcd->acd', a, b) / (torch.norm(a, 2., 1) * torch.norm(b, 2, 1) + 1e-7))
Example #17
def mpjpe(predicted, target):
    """
    Mean per-joint position error (i.e. mean Euclidean distance),
    often referred to as "Protocol #1" in many papers.
    """
    assert predicted.shape == target.shape
    return torch.mean(torch.norm(predicted - target, dim=len(target.shape)-1))
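
A minimal check on hypothetical pose tensors of shape (batch, joints, 3): identical poses give zero error.

import torch

predicted = torch.randn(2, 17, 3)
target = predicted.clone()
print(mpjpe(predicted, target))   # tensor(0.)
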
Example #18
def batchwise_unit_norm(x, epsilon=1e-8):
    batch_size = x.shape[0]
    flattened = x.view(batch_size, -1)
    norm = torch.norm(flattened, dim=1, keepdim=True)
    expanded = norm.view(batch_size, *((1,) * (x.dim() - 1)))
    normed = x / (expanded + epsilon)
    return normed
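
A quick check on a hypothetical 4-D input: every sample in the batch ends up with approximately unit L2 norm.

import torch

x = torch.randn(5, 3, 8, 8)
normed = batchwise_unit_norm(x)
print(torch.norm(normed.view(5, -1), dim=1))   # all values close to 1
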
def extract_feature(model,dataloaders):
    features = torch.FloatTensor()
    count = 0
    for data in dataloaders:
        img, label = data
        n, c, h, w = img.size()
        count += n
        print(count)
        if opt.use_dense:
            ff = torch.FloatTensor(n,1024).zero_()
        else:
            ff = torch.FloatTensor(n,2048).zero_()
        for i in range(2):
            if(i==1):
                img = fliplr(img)
            input_img = Variable(img.cuda())
            outputs = model(input_img) 
            f = outputs.data.cpu()
            #print(f.size())
            ff = ff+f
        # norm feature
        fnorm = torch.norm(ff, p=2, dim=1, keepdim=True)
        ff = ff.div(fnorm.expand_as(ff))
        features = torch.cat((features,ff), 0)
    return features
Example #20
    def encode(self, indices, lengths, noise):
        embeddings = self.embedding(indices)
        packed_embeddings = pack_padded_sequence(input=embeddings,
                                                 lengths=lengths,
                                                 batch_first=True)

        # Encode
        packed_output, state = self.encoder(packed_embeddings)

        hidden, cell = state
        # batch_size x nhidden
        hidden = hidden[-1]  # get hidden state of last layer of encoder

        # normalize to unit ball (l2 norm of 1) - p=2, dim=1
        norms = torch.norm(hidden, 2, 1)
        
        # For older versions of PyTorch use:
        hidden = torch.div(hidden, norms.expand_as(hidden))
        # For newest version of PyTorch (as of 8/25) use this:
        # hidden = torch.div(hidden, norms.unsqueeze(1).expand_as(hidden))

        if noise and self.noise_radius > 0:
            gauss_noise = torch.normal(means=torch.zeros(hidden.size()),
                                       std=self.noise_radius)
            hidden = hidden + to_gpu(self.gpu, Variable(gauss_noise))

        return hidden
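
A hedged sketch of just the unit-ball normalization, written for current PyTorch where keepdim=True sidesteps the expand_as/unsqueeze difference mentioned in the comments:

import torch

hidden = torch.randn(4, 300)                   # hypothetical batch_size x nhidden
norms = torch.norm(hidden, 2, 1, keepdim=True)
hidden = torch.div(hidden, norms)
print(torch.norm(hidden, dim=1))               # every row now has L2 norm ~1
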
def calculate_variance_term(pred, gt, means, n_objects, delta_v, norm=2):
    """pred: bs, height * width, n_filters
       gt: bs, height * width, n_instances
       means: bs, n_instances, n_filters"""

    bs, n_loc, n_filters = pred.size()
    n_instances = gt.size(2)

    # bs, n_loc, n_instances, n_filters
    means = means.unsqueeze(1).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    pred = pred.unsqueeze(2).expand(bs, n_loc, n_instances, n_filters)
    # bs, n_loc, n_instances, n_filters
    gt = gt.unsqueeze(3).expand(bs, n_loc, n_instances, n_filters)

    _var = (torch.clamp(torch.norm((pred - means), norm, 3) -
                        delta_v, min=0.0) ** 2) * gt[:, :, :, 0]

    var_term = 0.0
    for i in range(bs):
        _var_sample = _var[i, :, :n_objects[i]]  # n_loc, n_objects
        _gt_sample = gt[i, :, :n_objects[i], 0]  # n_loc, n_objects

        var_term += torch.sum(_var_sample) / torch.sum(_gt_sample)
    var_term = var_term / bs

    return var_term
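
A toy example with a hypothetical 1-sample batch (4 locations, 2 instances, 2 filters): when every pixel embedding equals its instance mean, the pull-together penalty is zero.

import torch

pred = torch.tensor([[[0., 0.], [0., 0.], [1., 1.], [1., 1.]]])   # bs=1, n_loc=4, n_filters=2
gt = torch.tensor([[[1., 0.], [1., 0.], [0., 1.], [0., 1.]]])     # one-hot instance masks
means = torch.tensor([[[0., 0.], [1., 1.]]])                       # bs=1, n_instances=2, n_filters=2
print(calculate_variance_term(pred, gt, means, n_objects=[2], delta_v=0.5))   # tensor(0.)
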
def calculate_distance_term(means, n_objects, delta_d, norm=2, usegpu=True):
    """means: bs, n_instances, n_filters"""

    bs, n_instances, n_filters = means.size()

    dist_term = 0.0
    for i in range(bs):
        _n_objects_sample = n_objects[i]

        if _n_objects_sample <= 1:
            continue

        _mean_sample = means[i, : _n_objects_sample, :]  # n_objects, n_filters
        means_1 = _mean_sample.unsqueeze(1).expand(
            _n_objects_sample, _n_objects_sample, n_filters)
        means_2 = means_1.permute(1, 0, 2)

        diff = means_1 - means_2  # n_objects, n_objects, n_filters

        _norm = torch.norm(diff, norm, 2)

        margin = 2 * delta_d * (1.0 - torch.eye(_n_objects_sample))
        if usegpu:
            margin = margin.cuda()
        margin = Variable(margin)

        _dist_term_sample = torch.sum(
            torch.clamp(margin - _norm, min=0.0) ** 2)
        _dist_term_sample = _dist_term_sample / \
            (_n_objects_sample * (_n_objects_sample - 1))
        dist_term += _dist_term_sample

    dist_term = dist_term / bs

    return dist_term
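
A hedged sketch of the push-apart logic on its own, with two hypothetical cluster means and delta_d = 1.5: once means are more than 2*delta_d apart, the hinge term is zero.

import torch

means = torch.tensor([[0., 0.], [4., 0.]])        # n_objects=2, n_filters=2
diff = means.unsqueeze(1) - means.unsqueeze(0)     # pairwise differences
dist = torch.norm(diff, 2, 2)
margin = 2 * 1.5 * (1.0 - torch.eye(2))
print(torch.clamp(margin - dist, min=0.0) ** 2)    # all zeros
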
Example #23
def weighted_mpjpe(predicted, target, w):
    """
    Weighted mean per-joint position error (i.e. mean Euclidean distance)
    """
    assert predicted.shape == target.shape
    assert w.shape[0] == predicted.shape[0]
    return torch.mean(w * torch.norm(predicted - target, dim=len(target.shape)-1))
Example #24
def EPE(predicted_edge, gt_edge, sparse=False, mean=True):
    EPE_map = torch.norm(gt_edge-predicted_edge,2,1)
    if sparse:
        EPE_map = EPE_map[gt_edge != 0]
    if mean:
        return EPE_map.mean()
    else:
        return EPE_map.sum()
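
A minimal check on hypothetical 2-channel edge maps of shape (B, 2, H, W): with a constant offset of 1 in both channels, the per-pixel L2 error is sqrt(2).

import torch

gt_edge = torch.zeros(1, 2, 4, 4)
predicted_edge = torch.ones(1, 2, 4, 4)
print(EPE(predicted_edge, gt_edge))   # ~1.4142
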
def normalize(x, axis=-1):
  """Normalizing to unit length along the specified dimension.
  Args:
    x: pytorch Variable
  Returns:
    x: pytorch Variable, same shape as input      
  """
  x = 1. * x / (torch.norm(x, 2, axis, keepdim=True).expand_as(x) + 1e-12)
  return x
def printM(mods):
    for m in mods:
        if isinstance(m, legacy.nn.SpatialConvolution):
            print('Conv2d norm: {}'.format(torch.norm(m.output)))
        elif isinstance(m, legacy.nn.Linear):
            pass
        elif isinstance(m, legacy.nn.Concat) or \
             isinstance(m, legacy.nn.Sequential):
            printM(m.modules)
Example #27
def optimize(vertices, steps, lr=5e-2):
    adam = optim.Adam([vertices], lr=lr)
    for i in range(steps):
        adam.zero_grad()
        center_loss = torch.norm(torch.mean(vertices, dim=0))
        pent_losses = [pent_loss(vertices, inds) for inds in pentagons]
        loss = center_loss + torch.sum(torch.stack(pent_losses))
        loss.backward()
        adam.step()
Example #28
def addOrthov2Regularizer(loss,model, regParam, targetLayers) :
    for i in range( len(targetLayers) ) :
        layerParams =   model[targetLayers[i]].named_parameters() 
        for param in layerParams:  # don't regularize bias params
            if 'bias' not in param[0]: 
                W = param[1].t()
                dotproducts = torch.mm( W.t(),  W) # the lower triangle (excluding diagonal) is the dot products between all neurons
                norms = torch.norm(W, dim=0, keepdim = True)
                cosinesimilarities = dotproducts / norms / norms.t()
                C = (  regParam * 0.5) * torch.sum( torch.tril(cosinesimilarities, diagonal =-1)**2 )
                loss += C
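
A standalone sketch of the penalty this regularizer adds, on a hypothetical 4x4 weight with regParam = 1: for an orthonormal weight the off-diagonal cosine similarities, and hence the penalty, are zero.

import torch

W = torch.eye(4)                                        # columns are orthonormal
dotproducts = torch.mm(W.t(), W)
norms = torch.norm(W, dim=0, keepdim=True)
cosinesimilarities = dotproducts / norms / norms.t()
print(0.5 * torch.sum(torch.tril(cosinesimilarities, diagonal=-1) ** 2))   # tensor(0.)
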
Example #29
def th_matrixcorr(x, y):
    """
    return a correlation matrix between
    columns of x and columns of y.

    So, if X.size() == (1000,4) and Y.size() == (1000,5),
    then the result will be of size (4,5) with the
    (i,j) value equal to the pearsonr correlation coeff
    between column i in X and column j in Y
    """
    mean_x = th.mean(x, 0)
    mean_y = th.mean(y, 0)
    xm = x.sub(mean_x.expand_as(x))
    ym = y.sub(mean_y.expand_as(y))
    r_num = xm.t().mm(ym)
    r_den1 = th.norm(xm,2,0)
    r_den2 = th.norm(ym,2,0)
    r_den = r_den1.t().mm(r_den2)
    r_mat = r_num.div(r_den)
    return r_mat
Example #30
    def on_batch_end(self, state):
        embeddings = state.output["embeddings"]
        logits = state.output["logits"]

        loss = state.criterion(logits.float(), state.input["targets"].long())

        if self.emb_l2_reg > 0:
            loss += torch.mean(
                torch.norm(embeddings.float(), dim=1)) * self.emb_l2_reg

        state.loss = loss
Example #31
 def _compute_scores(self, h_embs, r_embs, t_embs):
     # Add the vector element wise
     sum_res = h_embs + r_embs - t_embs
     norms = torch.norm(sum_res, dim=1, p=self.scoring_fct_norm).view(size=(-1,))
     scores = torch.mul(norms, norms)
     return scores
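
A hedged sketch of the same TransE-style scoring with plain tensors and hypothetical embedding sizes (p = 2): the score is the squared norm of h + r - t, so smaller means a more plausible triple.

import torch

h, r, t = torch.randn(8, 50), torch.randn(8, 50), torch.randn(8, 50)
norms = torch.norm(h + r - t, dim=1, p=2)
scores = torch.mul(norms, norms)
print(scores.shape)   # torch.Size([8])
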
def normalize(inp):
    return inp / torch.norm(inp, dim=-1)[:, None]
Example #33
    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    msg = ('Lamb does not support sparse gradients, '
                           'please consider SparseAdam instead')
                    raise RuntimeError(msg)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)

                # Paper v3 does not use debiasing.
                if self.debias:
                    bias_correction = math.sqrt(1 - beta2**state['step'])
                    bias_correction /= (1 - beta1**state['step'])
                else:
                    bias_correction = 1

                # Apply bias to lr to avoid broadcast.
                step_size = group['lr'] * bias_correction

                weight_norm = torch.norm(p.data).clamp(0, self.clamp_value)

                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(group['weight_decay'], p.data)

                adam_norm = torch.norm(adam_step)
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1

                p.data.add_(-step_size * trust_ratio, adam_step)

        return loss
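
A toy illustration of the layer-wise trust ratio only, not the full optimizer step (hypothetical parameter and Adam step, clamp value of 10 assumed):

import torch

p = torch.randn(100) * 0.1
adam_step = torch.randn(100) * 0.01
weight_norm = torch.norm(p).clamp(0, 10)
adam_norm = torch.norm(adam_step)
trust_ratio = 1.0 if weight_norm == 0 or adam_norm == 0 else float(weight_norm / adam_norm)
print(trust_ratio)   # the Adam update is scaled by this factor before being applied
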
Example #34
def run_epoch(model, data, is_train=False, lr=1.0):
    """
    One epoch of training/validation (depending on flag is_train).
    """
    if is_train:
        model.train()
    else:
        model.eval()
    epoch_size = ((len(data) // model.batch_size) - 1) // model.seq_len
    start_time = time.time()
    hidden = model.init_hidden()
    hidden = hidden.to(device)
    costs = 0.0
    iters = 0
    losses = []

    # LOOP THROUGH MINIBATCHES
    for step, (x, y) in enumerate(
            ptb_iterator(data, model.batch_size, model.seq_len)):

        inputs = torch.from_numpy(x.astype(np.int64)).transpose(
            0, 1).contiguous().to(device)  #.cuda()
        model.zero_grad()
        hidden = repackage_hidden(hidden)
        outputs, hidden, saved_hiddens = model(inputs, hidden)

        targets = torch.from_numpy(y.astype(np.int64)).transpose(
            0, 1).contiguous().to(device)  #.cuda()

        tt = torch.squeeze(targets.view(-1, model.batch_size * model.seq_len))

        # LOSS COMPUTATION
        # This line currently averages across all the sequences in a mini-batch
        # and all time-steps of the sequences.
        # For problem 4.1, you will (instead) need to compute the average loss
        # at each time-step separately. Hint: use the method retain_grad to keep
        # gradients for intermediate nodes of the computational graph.
        #

        #Only need to run loss function on last time step

        loss_fn = nn.CrossEntropyLoss()
        #loss = loss_fn(outputs.contiguous().view(-1, model.vocab_size), tt)

        loss = []
        for i in range(outputs.size(0)):
            loss.append(
                loss_fn(outputs[i].contiguous().view(-1, model.vocab_size),
                        targets[i]))

        #print(loss_2[-1])

        #print(saved_hiddens[-1])

        #print(hidden)

        #print(torch.autograd.grad(loss_2[-1], saved_hiddens[-1], retain_graph=True, allow_unused=True)[0])

        #print(torch.mean(torch.norm(torch.autograd.grad(loss[-1], saved_hiddens[i], retain_graph=True)[0], dim=(0, 2))))

        loss_grad_norms = []
        for i in range(outputs.size(0)):
            loss_grad_norms.append(
                torch.norm(
                    torch.norm(torch.autograd.grad(loss[-1],
                                                   saved_hiddens[i],
                                                   retain_graph=True)[0],
                               dim=(0, 2))))

        break

    return loss_grad_norms
	def _calc(self, h, t, r):
		return torch.norm(h + r - t, self.config.p_norm, -1)
Example #36
    precision = min_dist_2[min_dist_2 < threshold].size(0) / min_dist_2.size(0)
    recall = min_dist_1[min_dist_1 < threshold].size(0) / min_dist_1.size(0)
    return 2 * (precision * recall) / (precision + recall + 1e-8)


if __name__ == "__main__":
    print("Testing loss")
    v = torch.Tensor([[0, 0, 1], [0, 2, 1], [3, 1, 0], [1, 1, 1]])
    vb = torch.Tensor([[0, 1, 1], [0, 2, 1], [3, 1, 0], [1, 1, 1]])
    e = torch.Tensor([[1, 1, 2, 3, 3, 4], [3, 2, 1, 1, 4, 3]]).long()
    n = torch.Tensor([[1, 2, 0], [3, -1, 2], [-0.5, 4, -6], [1, -1, 1]])

    from graph import Graph
    graph = Graph("./ellipsoid/init_info.pickle")
    map = torch.Tensor([[2, 1], [0, 0], [0, 3], [2, 0]]).long()
    graph.maps[0] = map
    graph.len_maps[0] = torch.Tensor([2, 1, 2, 1]).view(-1, 1)
    graph.adjacency_mat[0] = e
    graph.adjacency_mat[0][0] -= 1
    v_n = v[e - 1]
    p_minus_k = v_n[0, :, :] - v_n[1, :, :]
    edge_loss = torch.mean(torch.norm(p_minus_k, dim=1)**2)
    min_idx, chamf_loss = chamfer_loss(v, vb)
    norm_loss = normal_loss(n, min_idx, p_minus_k, graph, 0)

    print("F1 Score", f1_score(v, vb, threshold=1e-4))
    print("Chamfer Loss", chamf_loss)
    print("Laplacian loss", laplace_loss(v, vb, graph, 0))
    print("Edge loss", edge_loss)
    print("Normal loss", norm_loss)
Example #37
def quaternion_to_angle_axis(quaternion, eps=1e-6):
    """Convert quaternion vector to angle axis of rotation

    Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h

    Args:
        quaternion (Tensor): batch with quaternions

    Return:
        Tensor: batch with angle axis of rotation

    Shape:
        - Input: :math:`(N, 4)`
        - Output: :math:`(N, 3)`

    Example:
        >>> input = torch.rand(2, 4)  # Nx4
        >>> output = tgm.quaternion_to_angle_axis(input)  # Nx3
    """
    if not torch.is_tensor(quaternion):
        raise TypeError("Input type is not a torch.Tensor. Got {}".format(
            type(quaternion)))

    input_shape = quaternion.shape
    if len(input_shape) == 1:
        quaternion = torch.unsqueeze(quaternion, dim=0)

    assert quaternion.size(1) == 4, 'Input must be a vector of length 4'
    normalizer = 1 / torch.norm(quaternion, dim=1)
    q1 = quaternion[:, 1] * normalizer
    q2 = quaternion[:, 2] * normalizer
    q3 = quaternion[:, 3] * normalizer

    sin_squared = q1 * q1 + q2 * q2 + q3 * q3
    mask = (sin_squared > eps).to(sin_squared.device)
    mask_pos = (mask).type_as(sin_squared)
    mask_neg = (mask == False).type_as(sin_squared)  # noqa
    batch_size = quaternion.size(0)
    angle_axis = torch.zeros(batch_size, 3,
                             dtype=quaternion.dtype).to(quaternion.device)

    sin_theta = torch.sqrt(sin_squared)
    cos_theta = quaternion[:, 0] * normalizer
    mask_theta = (cos_theta < eps).view(1, -1)
    mask_theta_neg = (mask_theta).type_as(cos_theta)
    mask_theta_pos = (mask_theta == False).type_as(cos_theta)  # noqa

    theta = torch.atan2(-sin_theta, -cos_theta) * mask_theta_neg \
        + torch.atan2(sin_theta, cos_theta) * mask_theta_pos

    two_theta = 2 * theta
    k_pos = two_theta / sin_theta
    k_neg = 2.0
    k = k_neg * mask_neg + k_pos * mask_pos

    angle_axis[:, 0] = q1 * k
    angle_axis[:, 1] = q2 * k
    angle_axis[:, 2] = q3 * k

    if len(input_shape) == 1:
        angle_axis = angle_axis.squeeze(0)

    return angle_axis
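
A quick check with a hypothetical input: the quaternion (cos 45°, 0, 0, sin 45°), i.e. a 90° rotation about z, should map to the angle-axis vector [0, 0, pi/2].

import torch

quaternion = torch.tensor([[0.7071, 0.0, 0.0, 0.7071]])
print(quaternion_to_angle_axis(quaternion))   # approximately [[0.0000, 0.0000, 1.5708]]
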
Example #38
    def forward(self, ray_batch, bound_batch, radii, **kwargs):
        """Render rays
        Args:
          ray_batch: array of shape [2, batch_size, 3]. Ray origin and direction for
            each example in batch.
        Returns:
          ret_all includes the following returned values:
          rgb_map: [batch_size, 3]. Predicted RGB values for rays.
          raw: [batch_size, N_sample, C]. Raw data of each point.
          weight_map: [batch_size, N_sample, C]. Convert raw to weight scale (0-1).
          acc_map: [batch_size]. Accumulated opacity (alpha) along a ray.
        """

        # Render settings
        if self.training:
            render_kwargs = self.render_kwargs_train.copy()
            render_kwargs.update(kwargs)
        else:
            render_kwargs = self.render_kwargs_test.copy()
            render_kwargs.update(kwargs)

        # Disentangle ray batch
        rays_o, rays_d = ray_batch
        assert rays_o.shape == rays_d.shape

        # Flatten ray batch
        old_shape = rays_d.shape  # [..., 3(+id)]
        rays_o = torch.reshape(rays_o, [-1, rays_o.shape[-1]]).float()
        rays_d = torch.reshape(rays_d, [-1, rays_d.shape[-1]]).float()

        # Provide ray directions as input
        if self.use_viewdirs:
            viewdirs = rays_d
            viewdirs = viewdirs / torch.norm(viewdirs, dim=-1, keepdim=True)
            viewdirs = torch.reshape(viewdirs,
                                     [-1, viewdirs.shape[-1]]).float()

        # Disentangle bound batch
        near, far = bound_batch
        if isinstance(near, (int, float)):
            near = near * torch.ones_like(rays_d[..., :1], dtype=torch.float)
        if isinstance(far, (int, float)):
            far = far * torch.ones_like(rays_d[..., :1], dtype=torch.float)

        # Extract radius
        if isinstance(radii, (int, float)):
            radii = radii * torch.ones_like(rays_d[..., :1], dtype=torch.float)

        # Batchify rays
        all_ret = {}
        for i in range(0, rays_o.shape[0], self.chunk):
            end = min(i + self.chunk, rays_o.shape[0])
            chunk_o, chunk_d = rays_o[i:end], rays_d[i:end]
            chunk_n, chunk_f, chunk_r = near[i:end], far[i:end], radii[i:end]
            chunk_v = viewdirs[i:end] if self.use_viewdirs else None

            # Render function
            ret = self.render_rays(chunk_o,
                                   chunk_d,
                                   chunk_n,
                                   chunk_f,
                                   chunk_r,
                                   viewdirs=chunk_v,
                                   **render_kwargs)
            for k in ret:
                if k not in all_ret:
                    all_ret[k] = []
                all_ret[k].append(ret[k])
        all_ret = {k: torch.cat(all_ret[k], 0) for k in all_ret}

        # Unflatten
        for k in all_ret:
            k_sh = list(old_shape[:-1]) + list(all_ret[k].shape[1:])
            all_ret[k] = torch.reshape(
                all_ret[k], k_sh)  # [input_rays_shape, per_ray_output_shape]

        return all_ret
 def regularization(self, gama_, eta_):
     l2_h_regularization = gama_ * torch.norm(self.h.data, 2)**2
     l2_W_regularization = eta_ * torch.norm(self.linear_layer.weight, 2)**2
     l2_bias_regularization = eta_ * torch.norm(self.linear_layer.bias.data,
                                                2)**2
     return l2_h_regularization + l2_W_regularization + l2_bias_regularization
Example #40
    def one_class_adv_loss(self, x_train_data):
        """Computes the adversarial loss:
        1) Sample points initially at random around the positive training
            data points
        2) Gradient ascent to find the optimal point in the set N_i(r)
            classified as +ve (label=0). This is done by maximizing 
            the CE loss wrt label 0
        3) Project the points between spheres of radius R and gamma * R 
            (set N_i(r))
        4) Pass the calculated adversarial points through the model, 
            and calculate the CE loss wrt target class 0
        
        Parameters
        ----------
        x_train_data: Batch of data to compute loss on.
        """
        batch_size = len(x_train_data)
        # Randomly sample points around the training data
        # We will perform SGD on these to find the adversarial points
        x_adv = torch.randn(x_train_data.shape).to(
            self.device).detach().requires_grad_()
        x_adv_sampled = x_adv + x_train_data

        for step in range(self.ascent_num_steps):
            with torch.enable_grad():

                new_targets = torch.zeros(batch_size, 1).to(self.device)
                new_targets = torch.squeeze(new_targets)
                new_targets = new_targets.to(torch.float)

                logits = self.model(x_adv_sampled)
                logits = torch.squeeze(logits, dim=1)
                new_loss = F.binary_cross_entropy_with_logits(
                    logits, new_targets)

                grad = torch.autograd.grad(new_loss, [x_adv_sampled])[0]
                grad_norm = torch.norm(grad,
                                       p=2,
                                       dim=tuple(range(1, grad.dim())))
                grad_norm = grad_norm.view(-1, *[1] * (grad.dim() - 1))
                grad_normalized = grad / grad_norm
            with torch.no_grad():
                x_adv_sampled.add_(self.ascent_step_size * grad_normalized)

            if (step + 1) % 10 == 0:
                # Project the normal points to the set N_i(r)
                h = x_adv_sampled - x_train_data
                norm_h = torch.sqrt(
                    torch.sum(h**2, dim=tuple(range(1, h.dim()))))
                alpha = torch.clamp(norm_h, self.radius,
                                    self.gamma * self.radius).to(self.device)
                # Make use of broadcast to project h
                proj = (alpha / norm_h).view(-1, *[1] * (h.dim() - 1))
                h = proj * h
                x_adv_sampled = x_train_data + h  #These adv_points are now on the surface of hyper-sphere

        adv_pred = self.model(x_adv_sampled)
        adv_pred = torch.squeeze(adv_pred, dim=1)
        adv_loss = F.binary_cross_entropy_with_logits(adv_pred,
                                                      (new_targets * 0))

        return adv_loss
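
A hedged sketch of the projection in step 3 alone, with hypothetical radius R = 1 and gamma = 2: the perturbations h are rescaled so that their norms land in [R, gamma * R].

import torch

R, gamma = 1.0, 2.0
h = torch.randn(4, 10) * 5.0                       # raw perturbations around the training points
norm_h = torch.sqrt(torch.sum(h ** 2, dim=1))
alpha = torch.clamp(norm_h, R, gamma * R)
h = (alpha / norm_h).view(-1, 1) * h               # rescale via broadcasting
print(torch.norm(h, dim=1))                        # all values now lie in [1.0, 2.0]
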
Example #41
 def get_normalized_data(embedding, num_embeddings, p=2, dim=1):
     norms = torch.norm(embedding.weight, p, dim).data
     return embedding.weight.data.div(
         norms.view(num_embeddings, 1).expand_as(embedding.weight))
Example #42
 def get_normalized_data(embedding, p=2, dim=1):
     norms = torch.norm(embedding, p, dim)
     return embedding.div(norms.view(-1, 1).expand_as(embedding))
 def test_initialize_power(self):
     kernel = LinearTruncatedFidelityKernel(fidelity_dims=[1, 2], dimension=3)
     kernel.initialize(power=1)
     actual_value = torch.tensor(1, dtype=torch.float).view_as(kernel.power)
     self.assertLess(torch.norm(kernel.power - actual_value), 1e-5)
Example #44
    x.shape[1], num_unmatches
]).long().cuda()  # contains sorted unmatched indices for each reference patch
for epoch in range(0, Nepoch):
    tx_loss = 0
    cyc_loss = 0
    epoch_loss = 0
    epoch_fitloss = 0
    index_matrix = np.arange(0, num_steps, 1)  # Initial reference patches
    index_subset = np.random.choice(index_matrix, size=subset_steps)

    # Update best matches for each reference patch by computing loss wrt all selected patches and finding out the minimum K ones
    Npatches = x.shape[1]
    for step in range(subset_steps):
        ref_ind = copy.copy(index_subset[step])
        x_ref_patch = x[:, ref_ind:ref_ind + 1]
        loss_patch_sorted = torch.argsort(torch.norm((x_ref_patch - x), dim=0))
        match_inds[ref_ind, :] = loss_patch_sorted[1:num_matches + 1]
        unmatch_inds[ref_ind, :] = loss_patch_sorted[
            1 + num_matches + gap:1 + num_matches + num_unmatches +
            gap]  # sort the norms, pick best matching K to K + D ones as unmatching

    # Optimize the filter W wrt selected K patches
    for step in range(subset_steps):  # batch size

        ref_ind = index_subset[step]

        x_ref1 = x_noisy[:, ref_ind:ref_ind + 1].view(1, x_noisy.shape[0])
        x_matched = x_noisy[:, match_inds[ref_ind, :]].view(
            num_matches, x_noisy.shape[0])
        x_unmatched = x_noisy[:, unmatch_inds[ref_ind, :]].view(
            num_unmatches, x_noisy.shape[0])
def alignment_loss(predictions,
                   target,
                   label_sizes,
                   alpha_alignment=1000.0,
                   alpha_backprop=100.0):
    batch_size = predictions.size(0)
    # This should probably be computed using the log_softmax
    confidences = predictions[:, :, 0]
    log_one_minus_confidences = torch.log(1.0 - confidences + 1e-10)

    if target is None:
        return -log_one_minus_confidences.sum()

    locations = predictions[:, :, 1:3]
    target = target[:, :, 0:2]

    log_confidences = torch.log(confidences + 1e-10)

    expanded_locations = locations[:, :, None, :]
    expanded_target = target[:, None, :, :]

    expanded_locations = expanded_locations.expand(locations.size(0),
                                                   locations.size(1),
                                                   target.size(1),
                                                   locations.size(2))
    expanded_target = expanded_target.expand(target.size(0), locations.size(1),
                                             target.size(1), target.size(2))

    #Compute All Deltas
    location_deltas = (expanded_locations - expanded_target)

    normed_difference = torch.norm(location_deltas, 2, 3)**2
    expanded_log_confidences = log_confidences[:, :, None].expand(
        locations.size(0), locations.size(1), target.size(1))
    expanded_log_one_minus_confidences = log_one_minus_confidences[:, :,
                                                                   None].expand(
                                                                       locations
                                                                       .size(
                                                                           0),
                                                                       locations
                                                                       .size(
                                                                           1),
                                                                       target.
                                                                       size(1))

    C = alpha_alignment / 2.0 * normed_difference - expanded_log_confidences + expanded_log_one_minus_confidences

    C = C.data.cpu().numpy()
    X = np.zeros_like(C)
    for b in range(C.shape[0]):
        l = label_sizes[b]
        if l == 0:
            continue

        C_i = C[b, :, :l]
        row_ind, col_ind = linear_sum_assignment(C_i.T)
        X[b][(col_ind, row_ind)] = 1.0

    X = Variable(torch.from_numpy(X).type(predictions.data.type()),
                 requires_grad=False)
    X2 = 1.0 - torch.sum(X, 2)

    location_loss = (alpha_backprop / 2.0 * normed_difference * X).sum()
    confidence_loss = -(expanded_log_confidences * X).sum() - (
        log_one_minus_confidences * X2).sum()

    loss = confidence_loss + location_loss

    loss = loss / batch_size

    return loss
Example #46
    def forward(self,
                src_inputs,
                src_mask,
                src_langs,
                tgt_inputs,
                tgt_mask,
                tgt_langs,
                src_neg_inputs=None,
                src_neg_mask=None,
                src_neg_langs=None,
                tgt_neg_inputs=None,
                tgt_neg_mask=None,
                tgt_neg_langs=None,
                normalize: bool = False):
        "Take in and process masked src and target sequences."
        device = self.encoder.embeddings.word_embeddings.weight.device
        src_langs = src_langs.unsqueeze(-1).expand(-1, src_inputs.size(-1))
        src_inputs = src_inputs.to(device)
        src_langs = src_langs.to(device)

        if src_mask.device != device:
            src_mask = src_mask.to(device)
        src_embed = self.encode(src_inputs, src_mask, src_langs)

        tgt_langs = tgt_langs.unsqueeze(-1).expand(
            -1, tgt_inputs.size(-1)).to(device)
        if tgt_inputs.device != device:
            tgt_inputs = tgt_inputs.to(device)
            tgt_mask = tgt_mask.to(device)
        tgt_embed = self.encode(tgt_inputs, tgt_mask, tgt_langs)

        src_norm = torch.norm(src_embed, dim=-1, p=2).unsqueeze(-1) + 1e-4
        src_embed = torch.div(src_embed, src_norm)
        tgt_norm = torch.norm(tgt_embed, dim=-1, p=2).unsqueeze(-1) + 1e-4
        tgt_embed = torch.div(tgt_embed, tgt_norm)
        if normalize:
            if src_neg_langs is not None:
                src_neg_langs = src_neg_langs.unsqueeze(-1).expand(
                    -1, src_neg_inputs.size(-1))
                src_neg_inputs = src_neg_inputs.to(device)
                src_neg_langs = src_neg_langs.to(device)

                if src_neg_mask.device != device:
                    src_neg_mask = src_neg_mask.to(device)
                src_neg_embed = self.encode(src_neg_inputs, src_neg_mask,
                                            src_neg_langs)
                src_neg_norm = torch.norm(src_neg_embed, dim=-1,
                                          p=2).unsqueeze(-1) + 1e-4
                src_neg_embed = torch.div(src_neg_embed, src_neg_norm)

                tgt_neg_langs = tgt_neg_langs.unsqueeze(-1).expand(
                    -1, tgt_neg_inputs.size(-1))
                tgt_neg_inputs = tgt_neg_inputs.to(device)
                tgt_neg_langs = tgt_neg_langs.to(device)

                if tgt_neg_mask.device != device:
                    tgt_neg_mask = tgt_neg_mask.to(device)
                tgt_neg_embed = self.encode(tgt_neg_inputs, tgt_neg_mask,
                                            tgt_neg_langs)
                tgt_neg_norm = torch.norm(tgt_neg_embed, dim=-1,
                                          p=2).unsqueeze(-1) + 1e-4
                tgt_neg_embed = torch.div(tgt_neg_embed, tgt_neg_norm)

                tgt_neg_embd = torch.cat([tgt_neg_embed, tgt_embed])
                src_neg_embd = torch.cat([src_neg_embed, src_embed])

                nominator = torch.sum(src_embed * tgt_embed, dim=-1) + 1e-4

                cross_dot = torch.mm(src_embed, tgt_neg_embd.T)
                cross_dot_rev = torch.mm(tgt_embed, src_neg_embd.T)
                cross_dot_all = torch.cat([cross_dot, cross_dot_rev], dim=1)
                denom = torch.log(
                    torch.sum(torch.exp(cross_dot_all), dim=-1) + 1e-4)
                log_neg = torch.sum(denom - nominator) / len(cross_dot)
            else:
                cross_dot = torch.mm(src_embed, tgt_embed.T)
                denom = torch.log(
                    torch.sum(torch.exp(cross_dot), dim=-1) + 1e-4)
                nominator = torch.diagonal(cross_dot[:, :], 0) + 1e-4
                log_neg = torch.sum(denom - nominator) / len(cross_dot)

            return log_neg
        else:
            dot_prod = torch.sum(src_embed * tgt_embed, dim=-1)
            return dot_prod
Example #47
 def get_struct_loss(self):
     a = self.balstm_encoder.attention.weight
     aa = a.matmul(a.t())
     i = torch.eye(aa.shape[0], dtype=torch.float, device=config.CUDA)
     p = torch.norm(aa-i, p='fro')
     return p*p
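
A small check of this penalty in isolation, with a hypothetical attention matrix: when the rows of a are orthonormal, a @ a.t() equals the identity and the Frobenius penalty is zero.

import torch

a = torch.eye(5)                          # stand-in for the attention weight
aa = a.matmul(a.t())
i = torch.eye(aa.shape[0], dtype=torch.float)
p = torch.norm(aa - i, p='fro')
print(p * p)                              # tensor(0.)
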
Example #48
def SMPLD_register(args):
    cfg = config.load_config(args.config, 'configs/default.yaml')
    out_dir = cfg['training']['out_dir']
    generation_dir = os.path.join(out_dir, cfg['generation']['generation_dir'])
    is_cuda = (torch.cuda.is_available() and not args.no_cuda)
    device = torch.device("cuda" if is_cuda else "cpu")

    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        logger, _ = create_logger(generation_dir, phase='reg_subject{}_sequence{}'.format(args.subject_idx, args.sequence_idx), create_tf_logs=False)
    else:
        logger, _ = create_logger(generation_dir, phase='reg_all', create_tf_logs=False)

    # Get dataset
    if args.subject_idx >= 0 and args.sequence_idx >= 0:
        dataset = config.get_dataset('test', cfg, sequence_idx=args.sequence_idx, subject_idx=args.subject_idx)
    else:
        dataset = config.get_dataset('test', cfg)

    batch_size = cfg['generation']['batch_size']

    # Loader
    test_loader = torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, num_workers=1, shuffle=False)

    model_counter = defaultdict(int)

    # Set optimization hyper parameters
    iterations, pose_iterations, steps_per_iter, pose_steps_per_iter = 3, 2, 30, 30

    inner_dists = []
    outer_dists = []

    for it, data in enumerate(tqdm(test_loader)):
        idxs = data['idx'].cpu().numpy()
        loc = data['points.loc'].cpu().numpy()
        batch_size = idxs.shape[0]
        # Directories to load corresponding informations
        mesh_dir = os.path.join(generation_dir, 'meshes')   # directory for posed and (optionally) unposed implicit outer/inner meshes
        label_dir = os.path.join(generation_dir, 'labels')   # directory for part labels
        register_dir = os.path.join(generation_dir, 'registrations')   # directory for registered meshes

        if args.use_raw_scan:
            scan_dir = dataset.dataset_folder   # this is the folder that contains CAPE raw scans
        else:
            scan_dir = None

        all_posed_minimal_meshes = []
        all_posed_cloth_meshes = []
        all_posed_vertices = []
        all_unposed_vertices = []
        scan_part_labels = []

        for idx in idxs:
            model_dict = dataset.get_model_dict(idx)

            subset = model_dict['subset']
            subject = model_dict['subject']
            sequence = model_dict['sequence']
            gender = model_dict['gender']
            filebase = os.path.basename(model_dict['data_path'])[:-4]

            folder_name = os.path.join(subset, subject, sequence)
            # TODO: we assume batch size stays the same if one resumes the job
            # can be more flexible to support different batch sizes before and
            # after resume
            register_file = os.path.join(register_dir, folder_name, filebase + 'minimal.registered.ply')
            if os.path.exists(register_file):
                # batch already computed, break
                break

            # points_dict = np.load(model_dict['data_path'])
            # gender = str(points_dict['gender'])

            mesh_dir_ = os.path.join(mesh_dir, folder_name)
            label_dir_ = os.path.join(label_dir, folder_name)

            if scan_dir is not None:
                scan_dir_ = os.path.join(scan_dir, subject, sequence)

            # Load part labels and vertex translations
            label_file_name = filebase + '.minimal.npz'
            label_dict = dict(np.load(os.path.join(label_dir_, label_file_name)))
            labels = torch.tensor(label_dict['part_labels'].astype(np.int64)).to(device)   # part labels for each vertex (14 or 24)
            scan_part_labels.append(labels)

            # Load minimal implicit surfaces
            mesh_file_name = filebase + '.minimal.posed.ply'
            # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
            posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
            posed_vertices = np.array(posed_mesh.vertices)
            all_posed_vertices.append(posed_vertices)

            posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                    torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
            all_posed_minimal_meshes.append(posed_mesh)

            mesh_file_name = filebase + '.minimal.unposed.ply'
            if os.path.exists(os.path.join(mesh_dir_, mesh_file_name)) and args.init_pose:
                # unposed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                unposed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)
                unposed_vertices = np.array(unposed_mesh.vertices)
                all_unposed_vertices.append(unposed_vertices)

            if args.use_raw_scan:
                # Load raw scans
                mesh_file_name = filebase + '.ply'
                # posed_mesh = Mesh(filename=os.path.join(scan_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(scan_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32') / 1000, requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)
            else:
                # Load clothed implicit surfaces
                mesh_file_name = filebase + '.cloth.posed.ply'
                # posed_mesh = Mesh(filename=os.path.join(mesh_dir_, mesh_file_name))
                posed_mesh = trimesh.load(os.path.join(mesh_dir_, mesh_file_name), process=False)

                posed_mesh = tm.from_tensors(torch.tensor(posed_mesh.vertices.astype('float32'), requires_grad=False, device=device),
                        torch.tensor(posed_mesh.faces.astype('int64'), requires_grad=False, device=device))
                all_posed_cloth_meshes.append(posed_mesh)

        if args.num_joints == 24:
            bm = BodyModel(bm_path='body_models/smpl/male/model.pkl', num_betas=10, batch_size=batch_size).to(device)
            parents = bm.kintree_table[0].detach().cpu().numpy()
            labels = bm.weights.argmax(1)
            # Convert 24 parts to 14 parts
            smpl2ipnet = torch.from_numpy(SMPL2IPNET_IDX).to(device)
            labels = smpl2ipnet[labels].clone().unsqueeze(0)
            del bm
        elif args.num_joints == 14:
            with open('body_models/misc/smpl_parts_dense.pkl', 'rb') as f:
                part_labels = pkl.load(f)

            labels = np.zeros((6890,), dtype=np.int64)
            for n, k in enumerate(part_labels):
                labels[part_labels[k]] = n
            labels = torch.tensor(labels).to(device).unsqueeze(0)
        else:
            raise ValueError('Got {} joints but the number of joints can only be either 14 or 24'.format(args.num_joints))

        th_faces = torch.tensor(smpl_faces.astype('float32'), dtype=torch.long).to(device)

        # We assume loaded meshes are properly scaled and offset to the original SMPL space
        if len(all_posed_minimal_meshes) > 0 and len(all_unposed_vertices) == 0:
            # IPNet optimization without vertex translation
            # raise NotImplementedError('Optimization for IPNet is not implemented yet.')
            if args.num_joints == 24:
                for idx in range(len(scan_part_labels)):
                    scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            prior = get_prior(gender=gender, precomputed=True)
            pose_init = torch.zeros((batch_size, 72))
            pose_init[:, 3:] = prior.mean
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, pose with mean smpl pose, as in ch.registration
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        elif len(all_posed_minimal_meshes) > 0:
            # NASA+PTFs optimization with vertex translations
            # Compute poses from implicit surfaces and correspondences
            # TODO: we could also compute bone-lengths if we train PTFs to predict A-pose with a global translation
            # that equals to the centroid of the pointcloud
            poses = compute_poses(all_posed_vertices, all_unposed_vertices, scan_part_labels, parents, args)
            # Convert 24 parts to 14 parts
            for idx in range(len(scan_part_labels)):
                scan_part_labels[idx] = smpl2ipnet[scan_part_labels[idx]].clone()

            pose_init = torch.from_numpy(poses).float()
            betas, pose, trans = torch.zeros((batch_size, 10)), pose_init, torch.zeros((batch_size, 3))

            # Init SMPL, with the pose initialized from the poses computed above
            smpl = th_batch_SMPL(batch_size, betas, pose, trans, faces=th_faces, gender=gender).to(device)
            smpl_part_labels = torch.cat([labels] * batch_size, axis=0)

            # Optimize pose first
            optimize_pose_only(all_posed_minimal_meshes, smpl, pose_iterations, pose_steps_per_iter, scan_part_labels,
                               smpl_part_labels, None, args)

            # Optimize pose and shape
            optimize_pose_shape(all_posed_minimal_meshes, smpl, iterations, steps_per_iter, scan_part_labels, smpl_part_labels,
                                None, args)

            inner_vertices, _, _, _ = smpl()

            # Optimize vertices for SMPLD
            init_smpl_meshes = [tm.from_tensors(vertices=v.clone().detach(),
                                                faces=smpl.faces) for v in inner_vertices]
            optimize_offsets(all_posed_cloth_meshes, smpl, init_smpl_meshes, 5, 10, args)

            outer_vertices, _, _, _ = smpl()
        else:
            inner_vertices = outer_vertices = None

        if args.use_raw_scan:
            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
        else:
            # Evaluate registered mesh
            gt_smpl_mesh = data['points.minimal_smpl_vertices'].to(device)
            gt_smpld_mesh = data['points.smpl_vertices'].to(device)
            if inner_vertices is None:
                # if vertices are None, we assume the registered meshes already exist from previous runs
                inner_vertices = []
                outer_vertices = []
                for i, idx in enumerate(idxs):

                    model_dict = dataset.get_model_dict(idx)

                    subset = model_dict['subset']
                    subject = model_dict['subject']
                    sequence = model_dict['sequence']
                    filebase = os.path.basename(model_dict['data_path'])[:-4]

                    folder_name = os.path.join(subset, subject, sequence)
                    register_dir_ = os.path.join(register_dir, folder_name)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'minimal.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'minimal.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    inner_vertices.append(registered_v)

                    # registered_mesh = Mesh(filename=os.path.join(register_dir_, filebase + 'cloth.registered.ply'))
                    registered_mesh = trimesh.load(os.path.join(register_dir_, filebase + 'cloth.registered.ply'), process=False)
                    registered_v = torch.tensor(registered_mesh.vertices.astype(np.float32), requires_grad=False, device=device)
                    outer_vertices.append(registered_v)

                inner_vertices = torch.stack(inner_vertices, dim=0)
                outer_vertices = torch.stack(outer_vertices, dim=0)

            inner_dist = torch.norm(gt_smpl_mesh - inner_vertices, dim=2).mean(-1)
            outer_dist = torch.norm(gt_smpld_mesh - outer_vertices, dim=2).mean(-1)

            for i, idx in enumerate(idxs):
                model_dict = dataset.get_model_dict(idx)

                subset = model_dict['subset']
                subject = model_dict['subject']
                sequence = model_dict['sequence']
                filebase = os.path.basename(model_dict['data_path'])[:-4]

                folder_name = os.path.join(subset, subject, sequence)
                register_dir_ = os.path.join(register_dir, folder_name)
                if not os.path.exists(register_dir_):
                    os.makedirs(register_dir_)

                logger.info('Inner distance for input {}: {} cm'.format(filebase, inner_dist[i].item()))
                logger.info('Outer distance for input {}: {} cm'.format(filebase, outer_dist[i].item()))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'minimal.registered.ply')):
                    registered_mesh = trimesh.Trimesh(inner_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'minimal.registered.ply'))

                if not os.path.exists(os.path.join(register_dir_, filebase + 'cloth.registered.ply')):
                    registered_mesh = trimesh.Trimesh(outer_vertices[i].detach().cpu().numpy().astype(np.float64), smpl_faces, process=False)
                    registered_mesh.export(os.path.join(register_dir_, filebase + 'cloth.registered.ply'))

            inner_dists.extend(inner_dist.detach().cpu().numpy())
            outer_dists.extend(outer_dist.detach().cpu().numpy())

    logger.info('Mean inner distance: {} cm'.format(np.mean(inner_dists)))
    logger.info('Mean outer distance: {} cm'.format(np.mean(outer_dists)))
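The evaluation above scores a registration by the mean per-vertex Euclidean distance between the registered vertices and the ground-truth SMPL(+D) vertices. A minimal sketch of that metric, assuming both inputs are (batch, num_vertices, 3) tensors in the same units:

import torch

def mean_vertex_to_vertex_distance(pred_vertices, gt_vertices):
    # per-vertex L2 distance, averaged over vertices -> one value per batch element
    return torch.norm(gt_vertices - pred_vertices, dim=2).mean(-1)

# illustrative call with random meshes of 6890 SMPL vertices
pred = torch.rand(2, 6890, 3)
gt = torch.rand(2, 6890, 3)
print(mean_vertex_to_vertex_distance(pred, gt))  # shape (2,)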
Beispiel #49
0
def transE_l2(head, edge, tail, gamma):
    score = head + edge - tail
    return gamma - th.norm(score, p=2, dim=-1)
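transE_l2 scores a knowledge-graph triple as gamma minus the L2 distance between head + relation and tail, so more plausible triples get higher scores. A small usage sketch with random embeddings, assuming the function above is in scope (the sizes and margin below are illustrative, not taken from the original code):

import torch as th

head = th.randn(4, 128)   # 4 head-entity embeddings
edge = th.randn(4, 128)   # 4 relation embeddings
tail = th.randn(4, 128)   # 4 tail-entity embeddings
gamma = 12.0

score = transE_l2(head, edge, tail, gamma)  # equals gamma - ||head + edge - tail||_2
print(score.shape)  # torch.Size([4])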
def train_engine(__C, dataset, dataset_eval=None):

    data_size = dataset.data_size
    token_size = dataset.token_size
    ans_size = dataset.ans_size
    pretrained_emb = dataset.pretrained_emb

    net = ModelLoader(__C).Net(
        __C,
        pretrained_emb,
        token_size,
        ans_size
    )
    net.cuda()
    net.train()

    if __C.N_GPU > 1:
        net = nn.DataParallel(net, device_ids=__C.DEVICES)

    # Define Loss Function
    loss_fn = eval('torch.nn.' + __C.LOSS_FUNC_NAME_DICT[__C.LOSS_FUNC] + "(reduction='" + __C.LOSS_REDUCTION + "').cuda()")

    # Load checkpoint if resume training
    if __C.RESUME:
        print(' ========== Resume training')

        if __C.CKPT_PATH is not None:
            print('Warning: CKPT_PATH is set, so '
                  'CKPT_VERSION and CKPT_EPOCH will be ignored')

            path = __C.CKPT_PATH
        else:
            path = __C.CKPTS_PATH + \
                   '/ckpt_' + __C.CKPT_VERSION + \
                   '/epoch' + str(__C.CKPT_EPOCH) + '.pkl'

        # Load the network parameters
        print('Loading ckpt from {}'.format(path))
        ckpt = torch.load(path)
        print('Finish!')

        if __C.N_GPU > 1:
            net.load_state_dict(ckpt_proc(ckpt['state_dict']))
        else:
            net.load_state_dict(ckpt['state_dict'])
        start_epoch = ckpt['epoch']

        # Load the optimizer parameters
        optim = get_optim(__C, net, data_size, ckpt['lr_base'])
        optim._step = int(data_size / __C.BATCH_SIZE * start_epoch)
        optim.optimizer.load_state_dict(ckpt['optimizer'])
        
        if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH):
            os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)

    else:
        if ('ckpt_' + __C.VERSION) not in os.listdir(__C.CKPTS_PATH):
            #shutil.rmtree(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)
            os.mkdir(__C.CKPTS_PATH + '/ckpt_' + __C.VERSION)

        optim = get_optim(__C, net, data_size)
        start_epoch = 0

    loss_sum = 0
    named_params = list(net.named_parameters())
    grad_norm = np.zeros(len(named_params))

    # Define multi-thread dataloader
    # if __C.SHUFFLE_MODE in ['external']:
    #     dataloader = Data.DataLoader(
    #         dataset,
    #         batch_size=__C.BATCH_SIZE,
    #         shuffle=False,
    #         num_workers=__C.NUM_WORKERS,
    #         pin_memory=__C.PIN_MEM,
    #         drop_last=True
    #     )
    # else:
    dataloader = Data.DataLoader(
        dataset,
        batch_size=__C.BATCH_SIZE,
        shuffle=True,
        num_workers=__C.NUM_WORKERS,
        pin_memory=__C.PIN_MEM,
        drop_last=True
    )

    logfile = open(
        __C.LOG_PATH +
        '/log_run_' + __C.VERSION + '.txt',
        'a+'
    )
    logfile.write(str(__C))
    logfile.close()

    # Training script
    for epoch in range(start_epoch, __C.MAX_EPOCH):

        # Save log to file
        logfile = open(
            __C.LOG_PATH +
            '/log_run_' + __C.VERSION + '.txt',
            'a+'
        )
        logfile.write(
            '=====================================\nnowTime: ' +
            datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') +
            '\n'
        )
        logfile.close()

        # Learning Rate Decay
        if epoch in __C.LR_DECAY_LIST:
            adjust_lr(optim, __C.LR_DECAY_R)

        # Externally shuffle data list
        # if __C.SHUFFLE_MODE == 'external':
        #     dataset.shuffle_list(dataset.ans_list)

        time_start = time.time()
        # Iteration
        for step, (
                frcn_feat_iter,
                grid_feat_iter,
                bbox_feat_iter,
                ques_ix_iter,
                ans_iter
        ) in enumerate(dataloader):

            optim.zero_grad()

            frcn_feat_iter = frcn_feat_iter.cuda()
            grid_feat_iter = grid_feat_iter.cuda()
            bbox_feat_iter = bbox_feat_iter.cuda()
            ques_ix_iter = ques_ix_iter.cuda()
            ans_iter = ans_iter.cuda()

            loss_tmp = 0
            for accu_step in range(__C.GRAD_ACCU_STEPS):
                loss_tmp = 0

                sub_frcn_feat_iter = \
                    frcn_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
                                  (accu_step + 1) * __C.SUB_BATCH_SIZE]
                sub_grid_feat_iter = \
                    grid_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
                                  (accu_step + 1) * __C.SUB_BATCH_SIZE]
                sub_bbox_feat_iter = \
                    bbox_feat_iter[accu_step * __C.SUB_BATCH_SIZE:
                                  (accu_step + 1) * __C.SUB_BATCH_SIZE]
                sub_ques_ix_iter = \
                    ques_ix_iter[accu_step * __C.SUB_BATCH_SIZE:
                                 (accu_step + 1) * __C.SUB_BATCH_SIZE]
                sub_ans_iter = \
                    ans_iter[accu_step * __C.SUB_BATCH_SIZE:
                             (accu_step + 1) * __C.SUB_BATCH_SIZE]

                pred = net(
                    sub_frcn_feat_iter,
                    sub_grid_feat_iter,
                    sub_bbox_feat_iter,
                    sub_ques_ix_iter
                )

                loss_item = [pred, sub_ans_iter]
                loss_nonlinear_list = __C.LOSS_FUNC_NONLINEAR[__C.LOSS_FUNC]
                for item_ix, loss_nonlinear in enumerate(loss_nonlinear_list):
                    if loss_nonlinear in ['flat']:
                        loss_item[item_ix] = loss_item[item_ix].view(-1)
                    elif loss_nonlinear:
                        loss_item[item_ix] = eval('F.' + loss_nonlinear + '(loss_item[item_ix], dim=1)')

                loss = loss_fn(loss_item[0], loss_item[1])
                if __C.LOSS_REDUCTION == 'mean':
                    # only mean reduction needs to be divided by grad_accu_steps
                    loss /= __C.GRAD_ACCU_STEPS
                loss.backward()

                loss_tmp += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS
                loss_sum += loss.cpu().data.numpy() * __C.GRAD_ACCU_STEPS

            if __C.VERBOSE:
                if dataset_eval is not None:
                    mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['val']
                else:
                    mode_str = __C.SPLIT['train'] + '->' + __C.SPLIT['test']

                print("\r[Version %s][Model %s][Dataset %s][Epoch %2d][Step %4d/%4d][%s] Loss: %.4f, Lr: %.2e" % (
                    __C.VERSION,
                    __C.MODEL_USE,
                    __C.DATASET,
                    epoch + 1,
                    step,
                    int(data_size / __C.BATCH_SIZE),
                    mode_str,
                    loss_tmp / __C.SUB_BATCH_SIZE,
                    optim._rate
                ), end='          ')

            # Gradient norm clipping
            if __C.GRAD_NORM_CLIP > 0:
                nn.utils.clip_grad_norm_(
                    net.parameters(),
                    __C.GRAD_NORM_CLIP
                )

            # Save the gradient information
            for name in range(len(named_params)):
                norm_v = torch.norm(named_params[name][1].grad).cpu().data.numpy() \
                    if named_params[name][1].grad is not None else 0
                grad_norm[name] += norm_v * __C.GRAD_ACCU_STEPS
                # print('Param %-3s Name %-80s Grad_Norm %-20s'%
                #       (str(grad_wt),
                #        params[grad_wt][0],
                #        str(norm_v)))

            optim.step()

        time_end = time.time()
        elapse_time = time_end-time_start
        print('Finished in {}s'.format(int(elapse_time)))
        epoch_finish = epoch + 1

        # Save checkpoint
        if __C.N_GPU > 1:
            state = {
                'state_dict': net.module.state_dict(),
                'optimizer': optim.optimizer.state_dict(),
                'lr_base': optim.lr_base,
                'epoch': epoch_finish
            }
        else:
            state = {
                'state_dict': net.state_dict(),
                'optimizer': optim.optimizer.state_dict(),
                'lr_base': optim.lr_base,
                'epoch': epoch_finish
            }
        torch.save(
            state,
            __C.CKPTS_PATH +
            '/ckpt_' + __C.VERSION +
            '/epoch' + str(epoch_finish) +
            '.pkl'
        )

        # Logging
        logfile = open(
            __C.LOG_PATH +
            '/log_run_' + __C.VERSION + '.txt',
            'a+'
        )
        logfile.write(
            'Epoch: ' + str(epoch_finish) +
            ', Loss: ' + str(loss_sum / data_size) +
            ', Lr: ' + str(optim._rate) + '\n' +
            'Elapsed time: ' + str(int(elapse_time)) + 
            ', Speed(s/batch): ' + str(elapse_time / step) +
            '\n\n'
        )
        logfile.close()

        # Eval after every epoch
        if dataset_eval is not None:
            test_engine(
                __C,
                dataset_eval,
                state_dict=net.state_dict(),
                validation=True
            )

        # if self.__C.VERBOSE:
        #     logfile = open(
        #         self.__C.LOG_PATH +
        #         '/log_run_' + self.__C.VERSION + '.txt',
        #         'a+'
        #     )
        #     for name in range(len(named_params)):
        #         logfile.write(
        #             'Param %-3s Name %-80s Grad_Norm %-25s\n' % (
        #                 str(name),
        #                 named_params[name][0],
        #                 str(grad_norm[name] / data_size * self.__C.BATCH_SIZE)
        #             )
        #         )
        #     logfile.write('\n')
        #     logfile.close()

        loss_sum = 0
        grad_norm = np.zeros(len(named_params))
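The inner loop above implements gradient accumulation: each batch is split into GRAD_ACCU_STEPS sub-batches, every mean-reduced sub-batch loss is divided by the number of accumulation steps before backward, and the optimizer steps once per full batch. A stripped-down, self-contained sketch of the same pattern (model, data and hyperparameters below are illustrative):

import torch
import torch.nn as nn

model = nn.Linear(16, 2)
loss_fn = nn.CrossEntropyLoss(reduction='mean')
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

x = torch.randn(32, 16)
y = torch.randint(0, 2, (32,))

accu_steps = 4
sub_batch = x.shape[0] // accu_steps

optimizer.zero_grad()
for step in range(accu_steps):
    sub_x = x[step * sub_batch:(step + 1) * sub_batch]
    sub_y = y[step * sub_batch:(step + 1) * sub_batch]
    # a mean-reduced loss must be divided by the number of accumulation steps
    loss = loss_fn(model(sub_x), sub_y) / accu_steps
    loss.backward()  # gradients accumulate in .grad across sub-batches
optimizer.step()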
Beispiel #51
0
def trajectory_empty(pos0,
                     game,
                     Stim,
                     reward_control=0,
                     action_=[],
                     e=0,
                     open_loop=True,
                     init_hidden=True,
                     hidden=torch.zeros(512, 512),
                     context=torch.zeros(1, 38)):
    game.reset(reward_control=reward_control, size=15)
    done = False
    if init_hidden == True:
        game.hidden = game.net.initHidden()
    else:
        game.hidden = hidden.clone()
    game.action = torch.zeros(1, 4)
    Hidden = []
    Action = []
    Y = []
    X = []
    dH = []
    hidden0 = game.hidden.clone()
    game.agent.pos = pos0
    stim = game.visible_state
    #     print (game.visible_state)
    y, x = 0, 0
    action = action_
    for stim in Stim:
        if open_loop == True:
            action = game.step_empty(stim=stim,
                                     action_=action_,
                                     epsilon=e,
                                     open_loop=open_loop,
                                     context=context)  # Down
        else:
            action = game.step_empty(stim=stim,
                                     action_=action,
                                     epsilon=e,
                                     open_loop=open_loop,
                                     context=context)  # Down
        # up
        if action == 0:
            y -= 1
            # right
        elif action == 1:
            x += 1
            # down
        elif action == 2:
            y += 1
            # left
        elif action == 3:
            x -= 1
        game.agent.pos = (y, x)
        Y.append(game.agent.pos[0])
        X.append(game.agent.pos[1])
        Hidden.append(game.hidden.clone().data.numpy().squeeze())  # need a copy to avoid storing the same hidden-state reference
        dH.append(torch.norm(game.hidden - hidden0))
        Action.append(action)
        hidden0 = game.hidden.clone()
    dH = [dh.data.numpy() for dh in dH]
    return (np.array(Y),
            np.array(X)), np.array(Hidden), np.array(dH), np.array(Action)
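trajectory_empty records, among other things, the L2 change of the recurrent hidden state between consecutive steps (dH). A tiny self-contained sketch of that measurement with random hidden states (the 512-dimensional size is illustrative only):

import torch

hidden_states = [torch.randn(1, 512) for _ in range(5)]
dH = [torch.norm(hidden_states[i + 1] - hidden_states[i]).item() for i in range(4)]
print(dH)  # per-step hidden-state drift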
    def forward(
        self,
        B_lab_map,
        A_relu2_1,
        A_relu3_1,
        A_relu4_1,
        A_relu5_1,
        B_relu2_1,
        B_relu3_1,
        B_relu4_1,
        B_relu5_1,
        temperature=0.001 * 5,
        detach_flag=False,
        WTA_scale_weight=1,
        feature_noise=0,
    ):
        batch_size = B_lab_map.shape[0]
        channel = B_lab_map.shape[1]
        image_height = B_lab_map.shape[2]
        image_width = B_lab_map.shape[3]
        feature_height = int(image_height / 4)
        feature_width = int(image_width / 4)

        # scale feature size to 44*44
        A_feature2_1 = self.layer2_1(A_relu2_1)
        B_feature2_1 = self.layer2_1(B_relu2_1)
        A_feature3_1 = self.layer3_1(A_relu3_1)
        B_feature3_1 = self.layer3_1(B_relu3_1)
        A_feature4_1 = self.layer4_1(A_relu4_1)
        B_feature4_1 = self.layer4_1(B_relu4_1)
        A_feature5_1 = self.layer5_1(A_relu5_1)
        B_feature5_1 = self.layer5_1(B_relu5_1)

        # concatenate features
        if A_feature5_1.shape[2] != A_feature2_1.shape[2] or A_feature5_1.shape[3] != A_feature2_1.shape[3]:
            A_feature5_1 = F.pad(A_feature5_1, (0, 0, 1, 1), "replicate")
            B_feature5_1 = F.pad(B_feature5_1, (0, 0, 1, 1), "replicate")
        A_features = self.layer(torch.cat((A_feature2_1, A_feature3_1, A_feature4_1, A_feature5_1), 1))
        B_features = self.layer(torch.cat((B_feature2_1, B_feature3_1, B_feature4_1, B_feature5_1), 1))

        # pairwise cosine similarity
        theta = self.theta(A_features).view(batch_size, self.inter_channels, -1)  # 2*256*(feature_height*feature_width)
        theta = theta - theta.mean(dim=-1, keepdim=True)  # center the feature
        theta_norm = torch.norm(theta, 2, 1, keepdim=True) + sys.float_info.epsilon
        theta = torch.div(theta, theta_norm)
        theta_permute = theta.permute(0, 2, 1)  # 2*(feature_height*feature_width)*256
        phi = self.phi(B_features).view(batch_size, self.inter_channels, -1)  # 2*256*(feature_height*feature_width)
        phi = phi - phi.mean(dim=-1, keepdim=True)  # center the feature
        phi_norm = torch.norm(phi, 2, 1, keepdim=True) + sys.float_info.epsilon
        phi = torch.div(phi, phi_norm)
        f = torch.matmul(theta_permute, phi)  # 2*(feature_height*feature_width)*(feature_height*feature_width)
        if detach_flag:
            f = f.detach()

        f_similarity = f.unsqueeze_(dim=1)
        similarity_map = torch.max(f_similarity, -1, keepdim=True)[0]
        similarity_map = similarity_map.view(batch_size, 1, feature_height, feature_width)

        # f can be negative
        f_WTA = f if WTA_scale_weight == 1 else WTA_scale.apply(f, WTA_scale_weight)
        f_WTA = f_WTA / temperature
        f_div_C = F.softmax(f_WTA.squeeze_(), dim=-1)  # 2*1936*1936;

        # downsample the reference color
        B_lab = F.avg_pool2d(B_lab_map, 4)
        B_lab = B_lab.view(batch_size, channel, -1)
        B_lab = B_lab.permute(0, 2, 1)  # 2*1936*channel

        # multiply the corr map with color
        y = torch.matmul(f_div_C, B_lab)  # 2*1936*channel
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, channel, feature_height, feature_width)  # 2*3*44*44
        y = self.upsampling(y)
        similarity_map = self.upsampling(similarity_map)

        return y, similarity_map
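The correspondence step above centers each feature map over its spatial positions, L2-normalizes it along the channel dimension, and multiplies the two results to obtain a pairwise cosine-similarity matrix between all positions of image A and image B. A small self-contained sketch of that normalize-then-matmul pattern (shapes chosen for illustration only):

import sys
import torch

batch, channels, positions = 2, 256, 44 * 44
theta = torch.randn(batch, channels, positions)
phi = torch.randn(batch, channels, positions)

# center over spatial positions, then normalize along channels
theta = theta - theta.mean(dim=-1, keepdim=True)
theta = theta / (torch.norm(theta, 2, 1, keepdim=True) + sys.float_info.epsilon)
phi = phi - phi.mean(dim=-1, keepdim=True)
phi = phi / (torch.norm(phi, 2, 1, keepdim=True) + sys.float_info.epsilon)

f = torch.matmul(theta.permute(0, 2, 1), phi)
print(f.shape)  # torch.Size([2, 1936, 1936]), one similarity per position pair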
    def project(self, param_name, param_data, epsilon):
        r = param_data - self.emb_backup[param_name]
        if torch.norm(r) > epsilon:
            r = epsilon * r / torch.norm(r)
        return self.emb_backup[param_name] + r
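project clips an embedding perturbation back onto an L2 ball of radius epsilon around the backed-up parameter, the usual projection step in PGD-style adversarial training of embeddings. A minimal standalone sketch of the same projection (all names here are illustrative):

import torch

def project_onto_ball(param, reference, epsilon):
    # keep r = param - reference inside an L2 ball of radius epsilon
    r = param - reference
    norm = torch.norm(r)
    if norm > epsilon:
        r = epsilon * r / norm
    return reference + r

reference = torch.zeros(10)
perturbed = torch.randn(10) * 5.0
projected = project_onto_ball(perturbed, reference, epsilon=1.0)
print(torch.norm(projected - reference))  # at most 1.0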
Beispiel #54
0
def compute_distance_matrix(x, p=2):
    x_flat = x.view(x.size(0), -1)
    distances = torch.norm(x_flat[:, None] - x_flat, dim=2, p=p)
    return distances
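compute_distance_matrix flattens every sample and returns the full pairwise p-norm distance matrix; for p=2 the result should match torch.cdist on the flattened inputs up to numerical precision. A short usage sketch, assuming the function above is in scope:

import torch

x = torch.randn(8, 3, 4, 4)              # 8 samples
d = compute_distance_matrix(x, p=2)      # (8, 8), d[i, j] = ||x_i - x_j||_2
x_flat = x.view(x.size(0), -1)
print(torch.allclose(d, torch.cdist(x_flat, x_flat), atol=1e-5))
print(d.diagonal())                      # zeros on the diagonal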
    def traverse(self, x, m, mac=0.7, force_function=gravity_function):
        force = torch.zeros_like(x)

        # pair each point with all nodes of the first level
        pairs_o = torch.cat([
            torch.arange(x.shape[0], dtype=torch.long,
                         device=self.device).unsqueeze(1).repeat(
                             1, self.num_o).view(-1, 1),
            torch.arange(self.num_o, dtype=torch.long,
                         device=self.device).unsqueeze(1).repeat(
                             x.shape[0], 1)
        ],
                            dim=1)

        refine = torch.stack([
            torch.arange(x.shape[0], dtype=torch.long, device=self.device),
            torch.zeros(x.shape[0], dtype=torch.long, device=self.device)
        ],
                             dim=1)

        for l in range(len(self.node_indexing)):
            refinement_factor = self.node_indexing[l].shape[1]
            refine[:, 1] *= refinement_factor
            refine = refine.unsqueeze(1).repeat(1, refinement_factor, 1)
            refine[:, :, 1] = refine[:, :, 1] + torch.arange(
                refinement_factor, dtype=torch.long,
                device=self.device).unsqueeze(0)
            pairs_o = refine.view(-1, 2)

            indexing = self.node_indexing[l]
            pairs = pairs_o.clone()

            # adjust indexing of the nodes
            pairs[:, 1] = indexing[pairs_o[:, 1] / refinement_factor,
                                   pairs_o[:, 1] % refinement_factor]
            # remove nodes with index -1
            pairs = pairs[pairs[:, 1] >= 0, :]

            this_com = self.center_of_mass[l][pairs[:, 1], :]
            this_mass = self.node_mass[l][pairs[:, 1]]

            diff = x[pairs[:, 0], :] - this_com
            dist = torch.norm(diff, 2, dim=1)
            if l < self.num_levels:
                section_size = self.size / 2**(l + 1)
            else:
                section_size = 0.
                #print("truncation level pairs:", pairs.shape[0])
            d2r = section_size / dist

            relative_weight_difference = torch.abs(
                (m[pairs[:, 0]] - this_mass) * this_mass)
            different_mass = relative_weight_difference > 0.01

            mac_accept = (d2r < mac)
            end_node = self.is_end_node[l][pairs[:, 1]]
            accept = torch.max(mac_accept,
                               end_node * (dist > 1e-5 * section_size))

            this_f = force_function(m1=this_mass[accept],
                                    m2=m[pairs[:, 0]][accept],
                                    difference=diff[accept],
                                    distance=dist[accept])
            force[:, 0].scatter_add_(0, pairs[:, 0][accept], this_f[:, 0])
            force[:, 1].scatter_add_(0, pairs[:, 0][accept], this_f[:, 1])

            # get pairs that were not accepted
            refine = pairs[(accept == 0).nonzero(), :].squeeze(1)

            #if torch.max(force) > 1e4:
            #    print("error?!")

            # expand the indexing of the nodes for the next level
            #if l < len(self.node_indexing) - 1:
            #    refinement_factor = self.node_indexing[l+1].shape[1]
            #    refine[:, 1] *= refinement_factor
            #    refine = refine.unsqueeze(1).repeat(1, refinement_factor, 1)
            #    refine[:, :, 1] = refine[:, :, 1] + torch.arange(refinement_factor, dtype=torch.long, device=self.device).unsqueeze(0)
            #    pairs_o = refine.view(-1, 2)

        #if len(self.node_indexing) > self.num_levels:
#
#    pass

        return force
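traverse expects a force_function(m1, m2, difference, distance) callback; gravity_function itself is not shown in this snippet, but an inverse-square attraction with the same signature could look roughly like the sketch below (this is an assumption for illustration, not the original implementation):

import torch

def gravity_function_sketch(m1, m2, difference, distance, eps=1e-6):
    # hypothetical inverse-square attraction pulling points toward the nodes
    magnitude = m1 * m2 / (distance ** 2 + eps)                # (num_pairs,)
    direction = -difference / (distance.unsqueeze(1) + eps)    # (num_pairs, 2) unit vectors
    return magnitude.unsqueeze(1) * direction                  # (num_pairs, 2) force vectors

diff = torch.randn(5, 2)
f = gravity_function_sketch(torch.rand(5), torch.rand(5), diff, torch.norm(diff, dim=1))
print(f.shape)  # torch.Size([5, 2])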
Beispiel #56
0
def l2_norm(input, axis=1):
  norm = torch.norm(input, 2, axis, True)
  output = torch.div(input, norm)
  return output
Beispiel #57
0
def normalize_tensor(tensor, eps=1e-10):
    norm_factor = torch.norm(tensor, dim=-1, keepdim=True)
    return tensor / (norm_factor + eps)
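Both helpers rescale vectors to unit L2 norm: l2_norm divides along a chosen axis, while normalize_tensor works on the last dimension and guards against zero vectors with eps. A short usage sketch, assuming both functions above are in scope:

import torch
import torch.nn.functional as F

x = torch.randn(4, 8)
print(torch.norm(l2_norm(x, axis=1), dim=1))          # all ones
print(torch.norm(normalize_tensor(x), dim=-1))        # approximately all ones
print(torch.norm(F.normalize(x, p=2, dim=1), dim=1))  # the built-in equivalent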
Beispiel #58
0
for i, data in enumerate(testdataloader, 0):
    points, choose, img, target, model_points, idx = data
    if len(points.size()) == 2:
        print('No.{0} NOT Pass! Lost detection!'.format(i))
        fw.write('No.{0} NOT Pass! Lost detection!\n'.format(i))
        continue
    points, choose, img, target, model_points, idx = Variable(points).cuda(), \
                                                     Variable(choose).cuda(), \
                                                     Variable(img).cuda(), \
                                                     Variable(target).cuda(), \
                                                     Variable(model_points).cuda(), \
                                                     Variable(idx).cuda()

    pred_r, pred_t, pred_c, emb = estimator(img, points, choose, idx)
    pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
    pred_c = pred_c.view(bs, num_points)
    how_max, which_max = torch.max(pred_c, 1)
    pred_t = pred_t.view(bs * num_points, 1, 3)

    my_r = pred_r[0][which_max[0]].view(-1).cpu().data.numpy()
    my_t = (points.view(bs * num_points, 1, 3) + pred_t)[which_max[0]].view(-1).cpu().data.numpy()
    my_pred = np.append(my_r, my_t)

    for ite in range(0, iteration):
        T = Variable(torch.from_numpy(my_t.astype(np.float32))).cuda().view(1, 3).repeat(num_points, 1).contiguous().view(1, num_points, 3)
        my_mat = quaternion_matrix(my_r)
        R = Variable(torch.from_numpy(my_mat[:3, :3].astype(np.float32))).cuda().view(1, 3, 3)
        my_mat[0:3, 3] = my_t
        
        new_points = torch.bmm((points - T), R).contiguous()
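The per-point rotation predictions above are rescaled to unit quaternions before the most confident point is selected and converted to a rotation matrix. A minimal sketch of that normalization on random values (num_points is illustrative):

import torch

num_points = 500
pred_r = torch.randn(1, num_points, 4)                             # raw quaternion predictions
pred_r = pred_r / torch.norm(pred_r, dim=2).view(1, num_points, 1)
print(torch.norm(pred_r, dim=2))                                   # all ones: unit quaternions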
Beispiel #59
0
                                               gamma=args.lr_decay)
elif args.opt == "adam":
    opt = optim.Adam(ae.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(opt, milestones=[1e6])

for j in range(NUM_EPOCHS):
    for i, (images, labels) in enumerate(trainloader):
        # Shape of images: (BATCH_SIZE x channels x width x height)
        # Shape of labels: (BATCH_SIZE)
        opt.zero_grad()
        images, labels = images.cuda(), labels.cuda()
        rec_images = ae(images.clone())
        if args.bce:
            rec_loss = F.binary_cross_entropy(rec_images, images)
        else:
            rec_loss = ch.norm(images - rec_images).pow(2) / images.shape[0]
        loss_str = ""
        loss = rec_loss
        if args.at is not None:
            attack_ims = norm_sep_attack(ae, images, args.at_eps / 8,
                                         args.at_eps, args.at_ns)
            _diff = ae(attack_ims) - ae(images)
            if args.lse:
                diff = ch.norm(_diff, dim=1).pow(2).exp().sum().log()
            elif args.iso:
                diff = (ch.norm(_diff, dim=1) - args.at_eps).pow(2).mean()
            else:
                diff = ch.norm(_diff).pow(2).sum() / images.shape[0]
            max_diff = ch.norm(_diff, dim=1).max()
            loss = loss + args.at * diff
            loss_str += "AT Loss: {at} | AT Max Diff: {md} | ".format(
Beispiel #60
0
    def forward(self, x, seg_gt=torch.zeros(1), feature_alignment=False):
        x2s, x4s, x8s, x16s, x32s, xfc = self.resnet18_8s(x)
        encoder = [x2s, x4s, x8s, x16s, x32s, xfc]
        decoder = []

        fm = self.conv8s(torch.cat([xfc, x8s], 1))
        decoder.append(fm.clone())

        fm = self.up8sto4s(fm)
        if fm.shape[2] == 136:
            fm = nn.functional.interpolate(fm, (135, 180),
                                           mode='bilinear',
                                           align_corners=False)

        fm = self.conv4s(torch.cat([fm, x4s], 1))
        decoder.append(fm.clone())

        fm = self.up4sto2s(fm)
        fm = self.conv2s(torch.cat([fm, x2s], 1))
        decoder.append(fm.clone())

        fm = self.up2storaw(fm)
        x = self.convraw(torch.cat([fm, x], 1))

        # save encoder, decoder features
        #         encoder = [f.detach().cpu().numpy() for f in encoder]
        #         decoder = [f.detach().cpu().numpy() for f in decoder]
        #         with open('/mbrdi/sqnap1_colomirror/gupansh/encoder_holepuncher.pkl','wb') as fp:
        #             pickle.dump(encoder, fp)
        #         with open('/mbrdi/sqnap1_colomirror/gupansh/decoder_holepuncher.pkl','wb') as fp:
        #             pickle.dump(decoder, fp)
        #         input()

        seg_pred = x[:, :self.seg_dim, :, :]
        ver_pred = x[:, self.seg_dim:, :, :]

        #####################################################################
        # single stage model
        #####################################################################

        # x refers to horizontal coord and y refers to vertical coord
        # refer to /lib/utils/pvnet/pvnet_data_utils.py 'compute_vertex' function
        pred_x = []
        pred_y = []
        pred_dx = []
        pred_dy = []

        ver_pred_reshape = ver_pred.permute(0, 2, 3, 1)
        batch_size, h, w, vn_2 = ver_pred_reshape.shape
        ver_pred_reshape = ver_pred_reshape.reshape(batch_size, h, w,
                                                    vn_2 // 2, 2)

        if seg_gt.dim() != 1:
            batch_seg_mask = seg_gt
        else:
            conf, batch_seg_mask = torch.max(seg_pred, dim=1)

        # save segmentation, confidence
#         with open('/mbrdi/sqnap1_colomirror/gupansh/segmentation.pkl','wb') as fp:
#             pickle.dump(batch_seg_mask.detach().cpu().numpy(), fp)
#         with open('/mbrdi/sqnap1_colomirror/gupansh/confidence.pkl','wb') as fp:
#             pickle.dump(conf.detach().cpu().numpy(), fp)
#         input()

        batch_idx_used = []
        image_features = []
        triplet_loss = torch.zeros(batch_size).cuda()
        for b in range(batch_size):
            seg_mask = batch_seg_mask[b]
            seg_indices = seg_mask.nonzero()
            #             data = {}
            #             data['mask_gt'] = seg_gt[b].detach().cpu().numpy()
            #             data['mask_pred'] = seg_mask.detach().cpu().numpy()
            #             with open('mask.pkl','wb') as fp:
            #                 pickle.dump(data, fp)
            #             print(seg_indices)
            #             input()

                # randomly sample 250 foreground pixels: 200 anchor points plus 50 paired points
            if seg_indices.shape[0] >= 250:
                batch_idx_used.append(b)

                sampled_indices_total = np.random.choice(range(
                    seg_indices.shape[0]),
                                                         250,
                                                         replace=False)
                sampled_indices = sampled_indices_total[:200]

                dx = ver_pred_reshape[b, seg_indices[sampled_indices, 0],
                                      seg_indices[sampled_indices, 1], :, 0]
                dx = dx.transpose(0, 1).contiguous()
                dx = dx.view(-1, 1).squeeze()
                dy = ver_pred_reshape[b, seg_indices[sampled_indices, 0],
                                      seg_indices[sampled_indices, 1], :, 1]
                dy = dy.transpose(0, 1).contiguous()
                dy = dy.view(-1, 1).squeeze()
                dxy = torch.stack([dx, dy], 1)
                dxy_norm = torch.norm(dxy, dim=1)
                dx = dx / dxy_norm
                dy = dy / dxy_norm

                px = seg_indices[sampled_indices, 1].float()
                px = px.repeat(vn_2 // 2)
                #                 px = px.repeat_interleave(vn_2//2)
                py = seg_indices[sampled_indices, 0].float()
                py = py.repeat(vn_2 // 2)
                #                 py = py.repeat_interleave(vn_2//2)

                sampled_indices = sampled_indices_total[200:]
                sampled_indices = sampled_indices.repeat(4)
                dx_pair = ver_pred_reshape[b, seg_indices[sampled_indices, 0],
                                           seg_indices[sampled_indices,
                                                       1], :, 0]
                dx_pair = dx_pair.transpose(0, 1).contiguous()
                dx_pair = dx_pair.view(-1, 1).squeeze()
                dy_pair = ver_pred_reshape[b, seg_indices[sampled_indices, 0],
                                           seg_indices[sampled_indices,
                                                       1], :, 1]
                dy_pair = dy_pair.transpose(0, 1).contiguous()
                dy_pair = dy_pair.view(-1, 1).squeeze()
                dxy_pair = torch.stack([dx_pair, dy_pair], 1)
                dxy_pair_norm = torch.norm(dxy_pair, dim=1)
                dx_pair = dx_pair / dxy_pair_norm
                dy_pair = dy_pair / dxy_pair_norm

                px_pair = seg_indices[sampled_indices, 1].float()
                px_pair = px_pair.repeat(vn_2 // 2)
                #                 px_pair = px_pair.repeat_interleave(vn_2//2)
                py_pair = seg_indices[sampled_indices, 0].float()
                py_pair = py_pair.repeat(vn_2 // 2)
                #                 py_pair = py_pair.repeat_interleave(vn_2//2)

                A = torch.stack([dx_pair, -dx, dy_pair, -dy], 1)
                A = A.reshape(1800, 2, 2)

                B = torch.stack([px - px_pair, py - py_pair], 1)
                B = B.unsqueeze(2)

                X, _ = torch.solve(B, A)
                X_1 = X[:, 1].squeeze()
                #                 dx.retain_grad()
                #                 print('dx', dx.grad)
                #                 dx.register_hook(lambda x: print('dx', x))
                #                 X_1.retain_grad()
                #                 print('X_1', X_1.grad)
                #                 X_1.register_hook(lambda x: print('X_1', x))
                #                 input()
                delx = X_1 * dx
                dely = X_1 * dy

                pred_x.append(px / w)
                pred_y.append(py / h)
                pred_dx.append(delx / w)
                pred_dy.append(dely / w)
#                 print('px', px.view(200,9)[:5] + delx.view(200,9)[:5])
#                 print('py', py.view(200,9)[:5] + dely.view(200,9)[:5])
#                 input()

        if len(batch_idx_used) != 0:
            pred_dx = torch.stack(pred_dx, 0)
            pred_dy = torch.stack(pred_dy, 0)
            pred_x = torch.stack(pred_x, 0) - 0.5
            pred_y = torch.stack(pred_y, 0) - 0.5
            #             if self.training:
            #                 # add noise
            #                 outlierRatio = np.random.uniform(self.minOutlier, self.maxOutlier)
            #                 outlierCnt = int(len(pred_dx) * outlierRatio + 0.5)
            #                 outlierChoice = np.random.choice(len(pred_dx), outlierCnt, replace=False)

            #                 pred_x[:, outlierChoice] = torch.from_numpy(np.random.uniform(0, 1, size=[pred_x.shape[0], outlierCnt])).float().cuda()
            #                 pred_y[:, outlierChoice] = torch.from_numpy(np.random.uniform(0, 1, size=[pred_y.shape[0], outlierCnt])).float().cuda()
            #                 #
            #                 pred_dx[:, outlierChoice] = torch.from_numpy(np.random.uniform(-1, 1, size=[pred_dx.shape[0], outlierCnt])).float().cuda()
            #                 pred_dy[:, outlierChoice] = torch.from_numpy(np.random.uniform(-1, 1, size=[pred_dy.shape[0], outlierCnt])).float().cuda()

            #                 noiseSigma = np.random.uniform(self.minNoiseSigma, self.maxNoiseSigma)
            #                 noise = np.clip(np.random.normal(0, noiseSigma, pred_dx.shape).astype(np.float32), -0.1*w, 0.1*w)
            #                 noise /= w
            #                 pred_dx = pred_dx + torch.from_numpy(noise).cuda()

            #                 noise = np.clip(np.random.normal(0, noiseSigma, pred_dy.shape).astype(np.float32), -0.1*h, 0.1*h)
            #                 noise /= h
            #                 pred_dy = pred_dy + torch.from_numpy(noise).cuda()

            pred_xydxdy = torch.stack([pred_x, pred_y, pred_dx, pred_dy], 2)
            pred_xydxdy = pred_xydxdy.permute(0, 2, 1)

            pred_pose, triplet_loss = self.single_stage(pred_xydxdy.detach())
#             pred_pose = self.single_stage(pred_xydxdy.detach())

        batch_pred_pose = torch.zeros(batch_size, 3, 4).cuda()
        #         batch_pred_xy = torch.zeros(batch_size, 1800, 2).cuda()
        mask_pose = torch.zeros(batch_size).cuda()

        if len(batch_idx_used) != 0:
            pred_pose_rot = quaternion2rotation(pred_pose[:, :4])
            pred_pose_trans = pred_pose[:, 4:].unsqueeze(2)
            pred_pose_rt = torch.cat([pred_pose_rot, pred_pose_trans], 2)
            batch_pred_pose[batch_idx_used] = pred_pose_rt
            mask_pose[batch_idx_used] = 1


#             batch_pred_xy[batch_idx_used] = pred_xy.permute(0,2,1)

        ret = {
            'seg': seg_pred,
            'vertex': ver_pred,
            'pred_pose': batch_pred_pose,
            'mask_pose': mask_pose,
            'triplet_loss': triplet_loss
        }
        #         ret = {'seg': seg_pred, 'vertex': ver_pred, 'pred_pose':batch_pred_pose, 'mask_pose':mask_pose, 'pred_xy': batch_pred_xy}
        #         ret = {'seg': seg_pred, 'vertex': ver_pred}

        if not self.training:
            with torch.no_grad():
                self.decode_keypoint(ret)

        return ret
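The keypoint branch above intersects pairs of predicted direction rays by solving a batched 2x2 linear system; torch.solve used there is deprecated in newer PyTorch in favor of torch.linalg.solve (note the swapped argument order). A small self-contained sketch of the same intersection for a single pair of rays (values are purely illustrative):

import torch

# two 2D rays p + t * d and p_pair + s * d_pair; find their intersection
px, py, dx, dy = 10.0, 20.0, 1.0, 0.0                         # ray 1, horizontal
px_pair, py_pair, dx_pair, dy_pair = 15.0, 25.0, 0.0, -1.0    # ray 2, vertical

# same system as in the forward pass: A @ [s, t]^T = B
A = torch.tensor([[dx_pair, -dx],
                  [dy_pair, -dy]])
B = torch.tensor([[px - px_pair],
                  [py - py_pair]])
st = torch.linalg.solve(A, B)
t = st[1, 0]
print((px + t * dx).item(), (py + t * dy).item())  # intersection at (15.0, 20.0)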