Example #1
    def write(self, z, time, debug=False):
        # update usage indicator
        self.u = self.u + T.matmul(Variable(T.from_numpy(np.ones((1, Kr), dtype=np.float32))), self.W_predictor)

        # update writing weights
        prev_v_wr = self.v_wr
        v_wr = np.zeros((N_mem, 1), dtype=np.float32)
        if time < N_mem:
            v_wr[time][0] = 1
        else:
            waste_index = int(T.argmin(self.u).data)
            v_wr[waste_index][0] = 1
        self.v_wr = Variable(T.from_numpy(v_wr))

        # writing
        # z: (1, Z_DIM)
        if debug:
            print(self.M)
        if USE_RETROACTIVE:
            # update retroactive weights
            self.v_ret = GAMMA*self.v_ret + (1-GAMMA)*prev_v_wr
            z_wr = T.cat([z, Variable(T.from_numpy(np.zeros((1, Z_DIM), dtype=np.float32)))], 1)
            z_ret = T.cat([Variable(T.from_numpy(np.zeros((1, Z_DIM), dtype=np.float32))), z], 1)
            self.M = self.M + T.matmul(self.v_wr, z_wr) + T.matmul(self.v_ret, z_ret)
        else:
            self.M = self.M + T.matmul(self.v_wr, z)
        if debug:
            return self.M
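
Example #1 fills memory slots in order and, once full, evicts the slot whose usage indicator is smallest via argmin (Kr, N_mem and W_predictor come from the surrounding project). A minimal standalone sketch of just that slot-selection rule, with hypothetical sizes and plain tensors in place of the legacy Variable API:

import torch

N_MEM, Z = 4, 3                       # hypothetical slot count and code size
memory = torch.zeros(N_MEM, Z)
usage = torch.zeros(N_MEM)

def write_slot(z, time):
    # next free slot while filling, least-used slot once memory is full
    idx = time if time < N_MEM else torch.argmin(usage).item()
    memory[idx] = z
    usage[idx] += 1.0

for t in range(6):                    # writes to slots 0-3, then two evictions
    write_slot(torch.randn(Z), t)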
Example #2
    def _projection(self, Vs, Vt):
        '''compute projection from source to target'''
        VsN = Vs.size(0)
        VtN = Vt.size(0)
        diff = Vt[None, :, :] - Vs[:, None, :]
        dist = (diff**2).sum(dim=2).sqrt()
        idx = torch.argmin(dist, dim=1)
        proj = Vt[idx, :]  # nearest target vertex for each source vertex
        minDist = dist[np.arange(VsN), idx]

        return proj, minDist
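
A standalone check of the same nearest-neighbour lookup on toy point sets (assumed shapes: Vs is (Ns, d), Vt is (Nt, d)):

import torch

Vs = torch.tensor([[0., 0.], [2., 2.]])
Vt = torch.tensor([[0.1, 0.], [1.9, 2.1], [5., 5.]])
diff = Vt[None, :, :] - Vs[:, None, :]   # (Ns, Nt, d)
dist = (diff ** 2).sum(dim=2).sqrt()     # (Ns, Nt) pairwise distances
idx = torch.argmin(dist, dim=1)          # tensor([0, 1])
proj = Vt[idx, :]                        # closest target point per source point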
Example #3
    def match_gt_pred(self, center_gts_info, center_preds_info, device):
        vgt_batch_ids, vgt_person_ids, center_gts = center_gts_info
        vpred_batch_ids, flat_inds, cyxs, top_score = center_preds_info
        mc = {key: [] for key in ['batch_ids', 'flat_inds', 'person_ids']}

        for batch_id, person_id, center_gt in zip(vgt_batch_ids,
                                                  vgt_person_ids, center_gts):
            if batch_id in vpred_batch_ids:
                center_pred = cyxs[vpred_batch_ids == batch_id]
                center_gt = center_pred[torch.argmin(
                    torch.norm(center_pred.float() -
                               center_gt[None].float().to(device),
                               dim=-1))].long()
                cy, cx = torch.clamp(center_gt, 0, self.map_size - 1)
                flat_ind = cy * args.centermap_size + cx
                mc['batch_ids'].append(batch_id)
                mc['flat_inds'].append(flat_ind)
                mc['person_ids'].append(person_id)
        keys_list = list(mc.keys())
        for key in keys_list:
            mc[key] = torch.Tensor(mc[key]).long().to(device)

        return mc
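
The flat index above is plain row-major linearisation of (cy, cx); a quick check with a hypothetical 64x64 centre map:

import torch

centermap_size = 64                      # hypothetical map size
cy, cx = torch.tensor(3), torch.tensor(5)
flat_ind = cy * centermap_size + cx      # tensor(197) == 3 * 64 + 5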
Example #4
    def _to_deformed(self, gd):
        if not self.__backward:
            pixel_grid = pixels2points(
                self.__pixel_extent.fill_count(
                    self.__shape, device=self.silent_module.device),
                self.__shape, self.__extent)

            normdiff = torch.sum(
                (gd.unsqueeze(0).transpose(1, 2) - pixel_grid.unsqueeze(2))**2,
                dim=1)
            # _, ind_nearest = torch.topk(normdiff, k=1, dim=1, largest=False)
            ind_nearest = torch.argmin(normdiff, dim=1, keepdim=True)

            gd = torch.mean(pixel_grid[ind_nearest], 1)

        if self.__output == 'bitmap':
            return (deformed_intensities(gd, self.__bitmap, self.__extent), )
        elif self.__output == 'points':
            deformed_bitmap = deformed_intensities(gd, self.__bitmap,
                                                   self.__extent)
            return (gd, deformed_bitmap.flatten() / torch.sum(deformed_bitmap))
        else:
            raise ValueError("unknown output type: {}".format(self.__output))
Example #5
def generate_action(encoder, trans, current_image, goal_image):
    locations = 2 * (get_seg_idxs(preprocess_image(current_image), COLOR) /
                     63.) - 1

    if False:  # toggle: take a purely random action instead of searching with the model
        action = sample_actions(locations, 1)[0]
    else:
        current_image = preprocess_image(current_image, to_torch=True)

        z_current, z_goal = run_single(encoder, current_image), run_single(
            encoder, goal_image)
        z_current, z_goal = z_current.unsqueeze(0), z_goal.unsqueeze(0)
        n_trials = 1000
        with torch.no_grad():
            actions = torch.FloatTensor(sample_actions(locations,
                                                       n_trials)).cuda()
            zs = trans(z_current.repeat(n_trials, 1), actions)
            dists = torch.norm((zs - z_goal).view(n_trials, -1), dim=-1)
            idx = torch.argmin(dists)
        action = actions[idx].cpu().numpy()

    location, delta = action[:2], action[2:4]

    location = (location * 0.5 + 0.5) * 63

    delta[1] = -delta[1]
    delta = delta[[1, 0]]

    # action[3] = -action[3]
    # action[[2, 3]] = action[[3, 2]]

    # print("z current:", z_current)
    # print("z_goal:", z_goal)
    # print("z next:", zs[idx])
    # print("action:", action)

    return location, delta
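
The else branch above is a random-shooting search: sample many candidate actions, push each through the latent transition model, and keep the one whose predicted latent lands closest to the goal. A minimal sketch with stand-in tensors (the real encoder and trans models are assumed):

import torch

n_trials, z_dim, a_dim = 1000, 8, 4      # hypothetical sizes
z_goal = torch.randn(1, z_dim)
actions = torch.randn(n_trials, a_dim)   # candidate actions
zs = torch.randn(n_trials, z_dim)        # stand-in for trans(z_current, actions)
dists = torch.norm(zs - z_goal, dim=-1)
best_action = actions[torch.argmin(dists)]   # smallest predicted distance to goal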
Example #6
def compute_bilateral_loss_with_repulsion(pred_point, gt_patch_pts,
                                          gt_patch_normals, support_radius,
                                          support_angle, alpha):

    # Our Loss
    pred_point = pred_point.unsqueeze(1).repeat(1, gt_patch_pts.size(1), 1)
    dist_square = ((pred_point - gt_patch_pts)**2).sum(2)
    weight_theta = torch.exp(-1 * dist_square / (support_radius**2))

    nearest_idx = torch.argmin(dist_square, dim=1)
    pred_point_normal = torch.cat(
        [gt_patch_normals[i, index, :] for i, index in enumerate(nearest_idx)])
    pred_point_normal = pred_point_normal.view(-1, 3)
    pred_point_normal = pred_point_normal.unsqueeze(1)
    pred_point_normal = pred_point_normal.repeat(1, gt_patch_normals.size(1),
                                                 1)

    normal_proj_dist = (pred_point_normal * gt_patch_normals).sum(2)
    weight_phi = torch.exp(-1 * ((1 - normal_proj_dist) /
                                 (1 - np.cos(support_angle)))**2)

    # avoid division by zero
    weight = weight_theta * weight_phi + 1e-12
    weight = weight / weight.sum(1, keepdim=True)

    # key loss
    project_dist = torch.abs(
        ((pred_point - gt_patch_pts) * gt_patch_normals).sum(2))
    imls_dist = (project_dist * weight).sum(1)

    # repulsion loss
    max_dist = torch.max(dist_square, 1)[0]

    # final loss
    dist = torch.mean((alpha * imls_dist) + (1 - alpha) * max_dist)

    return dist
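
A shape check for the loss above, assuming B predicted points are matched against patches of K ground-truth points with unit normals (torch and numpy in scope):

import numpy as np
import torch

B, K = 16, 32
pred = torch.randn(B, 3)
pts = torch.randn(B, K, 3)
nrm = torch.nn.functional.normalize(torch.randn(B, K, 3), dim=2)
loss = compute_bilateral_loss_with_repulsion(
    pred, pts, nrm, support_radius=0.3,
    support_angle=np.pi / 4, alpha=0.9)  # scalar tensor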
Example #7
    def generate_parameters(self, parameter_id, **kwargs):
        if not self._model_initialized:
            return _random_config(self.searchspace_json, self.random_state)
        else:
            # random samples and pick best with model
            candidate_x = [
                _random_config(self.searchspace_json, self.random_state)
                for _ in range(self.sample_size)
            ]

            # The model yields NaNs when all the candidates are the same;
            # returning early also saves prediction time in that case
            if all(x == candidate_x[0] for x in candidate_x):
                return candidate_x[0]

            x_test = np.array(
                [np.array(list(xi.values())) for xi in candidate_x])
            m, v = self.model.predict(x_test)

            # The model yields NaNs when all the candidates are very close
            if np.isnan(m).any() or np.isnan(v).any():
                return candidate_x[0]

            mean = torch.Tensor(m)
            sigma = torch.Tensor(v)
            u = (mean - torch.Tensor([0.95]).expand_as(mean)) / sigma
            normal = Normal(torch.zeros_like(u), torch.ones_like(u))
            ucdf = normal.cdf(u)
            updf = torch.exp(normal.log_prob(u))
            ei = sigma * (updf + u * ucdf)

            if self.optimize_mode == 'maximize':
                ind = torch.argmax(ei)
            else:
                ind = torch.argmin(ei)
            new_x = candidate_x[ind]
            return new_x
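
The acquisition value above is the standard expected improvement, EI = sigma * (phi(u) + u * Phi(u)) with u = (mean - incumbent) / sigma and the incumbent hardcoded to 0.95. A toy check with made-up posterior moments:

import torch
from torch.distributions import Normal

mean = torch.tensor([0.90, 0.97, 0.80])  # hypothetical predicted means
sigma = torch.tensor([0.05, 0.05, 0.05])
u = (mean - 0.95) / sigma
normal = Normal(torch.zeros_like(u), torch.ones_like(u))
ei = sigma * (torch.exp(normal.log_prob(u)) + u * normal.cdf(u))
print(torch.argmax(ei))                  # tensor(1): the only mean above 0.95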
Example #8
    def create_e_mid_mask(self, entity_ids):
        # Dims
        b, g, l = entity_ids.shape
        entity_ids = entity_ids.reshape(b * g, l)

        # E1 start:
        ent_ids_copy = entity_ids.detach().clone()
        ent_ids_copy[ent_ids_copy == 2] = 0  # zero out e2
        e1_s = torch.argmax(ent_ids_copy,
                            dim=1)  # index of first '1' (start of head ent)

        # E2 start:
        ent_ids_copy = entity_ids.detach().clone()
        ent_ids_copy[ent_ids_copy == 1] = 0  # zero out e1
        e2_s = torch.argmax(ent_ids_copy,
                            dim=1)  # index of first '2' (start of tail ent)

        # Concat E1 and E2 start
        e_starts = torch.stack((e1_s, e2_s), dim=1)
        e_starts_max = torch.max(e_starts, dim=1)

        # Inverse mask
        e_mid_mask = entity_ids.detach().clone()
        e_mid_mask[e_mid_mask == 1] = 2
        e_mid_mask[e_mid_mask == 0] = 1
        e_mid_mask[e_mid_mask == 2] = 0

        # Index of first zero in inverse mask
        argmin = torch.argmin(e_mid_mask, dim=1)

        for i, idx in enumerate(argmin):
            e_mid_mask[i, 0:idx] = 0  # zero everything before first zero
            e_mid_mask[i, e_starts_max.
                       values[i]:] = 0  # everything after start of 2nd entity

        e_mid_mask = e_mid_mask.reshape(b, g, l)
        return e_mid_mask.to(self.on_device)
Example #9
def zero_row_min(x):
    """
    Return a copy of x, where the minimum value along each row has been set to 0.

    For example, if x is:
    x = torch.tensor([
          [10, 20, 30],
          [ 2,  5,  1]
        ])

    Then y = zero_row_min(x) should be:
    torch.tensor([
      [0, 20, 30],
      [2,  5,  0]
    ])

    Your implementation should use reduction and indexing operations; you should
    not use any explicit loops. The input tensor should not be modified.

    Inputs:
    - x: Tensor of shape (M, N)

    Returns:
    - y: Tensor of shape (M, N) that is a copy of x, except the minimum value
         along each row is replaced with 0.
    """
    y = None
    #############################################################################
    #                    TODO: Implement this function                          #
    #############################################################################
    # Replace "pass" statement with your code
    y = x.clone()
    y[range(x.shape[0]), torch.argmin(x, dim=1)] = 0
    #############################################################################
    #                            END OF YOUR CODE                               #
    #############################################################################
    return y
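
A quick check against the docstring example:

import torch

x = torch.tensor([[10, 20, 30], [2, 5, 1]])
print(zero_row_min(x))  # tensor([[ 0, 20, 30], [ 2,  5,  0]])
print(x)                # the input is unchanged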
Example #10
def get_new_categories(user_df):
    tmp_df = user_df.copy()
    tmp_df['category'] = tmp_df[user_df.columns[2:]].apply(
        lambda x: torch.tensor(x.values), axis=1)
    assignments = defaultdict(int)
    category_counts = torch.zeros(len(user_df.columns[2:]), dtype=torch.long)

    # first go over singles to get initial distribution
    for key, categories in zip(tmp_df['item_id'].values,
                               tmp_df['category'].values):
        nonzeroes = categories.nonzero()
        if len(nonzeroes) > 1:
            continue

        if len(nonzeroes) == 1:
            cat = nonzeroes[0]
            assignments[key] = int(cat)
            category_counts[cat] += 1

    for key, categories in zip(tmp_df['item_id'].values,
                               tmp_df['category'].values):
        # for key, categories in zip(tmp_df['item_id'].values[:100], tmp_df['category'].values[:100]):
        nonzeroes = categories.nonzero()
        if len(nonzeroes) <= 1:
            continue

        # if this movie is already assigned, just increment its category count
        if assignments[key] != 0:
            category_counts[assignments[key]] += 1
        else:
            nonzeroes = nonzeroes.squeeze()
            counts = category_counts[nonzeroes]
            assign_to = torch.argmin(counts).tolist()
            assign_to = nonzeroes[assign_to]
            category_counts[assign_to] += 1
            assignments[key] = int(assign_to)
    return assignments
Example #11
    def forward(self, inputs):
        """
        Apply vector quantization.

        If the module is in training mode, this will also
        update the usage tracker and re-initialize dead
        dictionary entries.

        Args:
            inputs: the input Tensor. Either [N x C] or
              [N x C x H x W].

        Returns:
            A tuple (embedded, embedded_pt, idxs):
              embedded: the new [N x C x H x W] Tensor
                which passes gradients to the dictionary.
              embedded_pt: like embedded, but with a
                passthrough gradient estimator. Gradients
                through this pass directly to the inputs.
              idxs: a [N x H x W] Tensor of Longs
                indicating the chosen dictionary entries.
        """
        channels_last = inputs
        if len(inputs.shape) == 4:
            # NCHW to NHWC
            channels_last = inputs.permute(0, 2, 3, 1).contiguous()

        diffs = embedding_distances(self.dictionary, channels_last)
        idxs = torch.argmin(diffs, dim=-1)
        embedded = self.embed(idxs)
        embedded_pt = embedded.detach() + (inputs - inputs.detach())

        if self.training:
            self._update_tracker(idxs)
            self._last_batch = channels_last.detach()

        return embedded, embedded_pt, idxs
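
The forward pass above assumes an embedding_distances helper; a plausible sketch (not necessarily this project's actual implementation) computes squared L2 distances between every input vector and every dictionary row:

import torch

def embedding_distances(dictionary, tensor):
    # dictionary: [D x C], tensor: [... x C] -> distances: [... x D]
    flat = tensor.reshape(-1, tensor.shape[-1])
    d2 = (flat.pow(2).sum(1, keepdim=True)
          - 2 * flat @ dictionary.t()
          + dictionary.pow(2).sum(1))
    return d2.reshape(*tensor.shape[:-1], -1)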
Example #12
    def deform_clothed_smpl_w_normals(self, theta, J, v_smpl, v_cloth, v_normals):
        assert len(theta) == 1, 'currently we only support batchsize=1'
        num_batch=1

        device = theta.device
        self.cur_device = torch.device(device.type, device.index)

        Rs = batch_rodrigues(theta.view(-1, 3)).view(-1, 24, 3, 3)
        pose_feature = (Rs[:, 1:, :, :]).sub(1.0, self.e3).view(-1, 207)

        pose_params = torch.matmul(pose_feature, self.posedirs).view(-1, self.size[0], self.size[1])
        v_posed_smpl = pose_params + v_smpl

        # Calculate closest SMPL vertex for each vertex of the cloth mesh
        with torch.no_grad():
            dists = ((v_smpl.unsqueeze(1) - v_cloth.unsqueeze(2))**2).sum(-1)
            correspondance = torch.argmin(dists, 2)

        v_posed_cloth = pose_params[0, correspondance[0]] + v_cloth
        self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents, rotate_base = False)

        W=self.weight.expand(num_batch,*self.weight.shape[1:])
        T = torch.matmul(W, A.view(num_batch, 24, 16)).view(num_batch, -1, 4, 4)

        v_posed_homo_smpl = torch.cat([v_posed_smpl, torch.ones(num_batch, v_posed_smpl.shape[1], 1, device = self.cur_device)], dim = 2)
        v_posed_homo_cloth = torch.cat([v_posed_cloth, torch.ones(num_batch, v_posed_cloth.shape[1], 1, device = self.cur_device)], dim = 2)
        v_homo_smpl = torch.matmul(T, torch.unsqueeze(v_posed_homo_smpl, -1))
        v_homo_cloth = torch.matmul(T[0, correspondance[0]], torch.unsqueeze(v_posed_homo_cloth, -1))

        v_normals_posed = torch.cat([v_normals, torch.ones(num_batch, v_normals.shape[1], 1, device=self.cur_device)], dim=2)
        v_normals_posed = torch.matmul(T[0, correspondance[0]], torch.unsqueeze(v_normals_posed, -1))

        verts_smpl = v_homo_smpl[:, :, :3, 0]
        verts_cloth = v_homo_cloth[:, :, :3, 0]
        verts_normals = v_normals_posed[:, :, :3, 0]

        return verts_smpl, verts_cloth, verts_normals
Example #13
    def acc_metric(self, targets, predictions):
        '''
        Compute the Accuracy

        @param targets: torch array with targets
        @param predictions: torch array with predictions
        @return acc: Accuracy
        '''

        # Accuracy
        if self.config['output'] == 'softmax':
            predicted_classes = torch.argmax(
                predictions, dim=1).type(dtype=torch.cuda.FloatTensor)
            acc = torch.sum(targets == predicted_classes)
        elif self.config['output'] == 'attribute':
            # Classes from the distances to the attribute representation:
            # torch.argmin(predictions, dim=1) computes the index with the
            # minimal distance, and self.atts[torch.argmin(predictions, dim=1), 0]
            # maps that index to its corresponding class.
            # self.atts.size() = [# of windows, class + 19 attributes] = [# of windows, 20]
            predicted_classes = self.atts[torch.argmin(predictions, dim=1), 0]
            logging.info(
                '            Metric:    Acc:    Target     class {}'.format(
                    targets[0, 0]))
            logging.info(
                '            Metric:    Acc:    Prediction class {}'.format(
                    predicted_classes[0]))
            acc = torch.sum(targets[:, 0] == predicted_classes.type(
                dtype=torch.cuda.FloatTensor))
        elif self.config['output'] == 'identity':
            predicted_classes = torch.argmax(
                predictions, dim=1).type(dtype=torch.cuda.FloatTensor)
            acc = torch.sum(targets == predicted_classes)
        acc = acc.item() / float(targets.size()[0])

        # returning accuracy and predicted classes
        return acc, predicted_classes
Example #14
def subgraph_filter(x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet,
                    x_triplet_angle, args):
    D = sqdist(x_atom_pos[:, :, :3], x_atom_pos[:, :, :3])
    x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle = \
        x_atom.clone().detach(), x_atom_pos.clone().detach(), x_bond.clone().detach(), x_bond_dist.clone().detach(), x_triplet.clone().detach(), x_triplet_angle.clone().detach()
    bsz = x_atom.shape[0]
    bonds_mask = torch.ones(bsz, x_bond.shape[1], 1).to(x_atom.device)
    for mol_id in range(bsz):
        if np.random.uniform(0, 1) > args.cutout:
            continue
        assert not args.use_quad, "Quads are NOT cut out yet"
        atom_dists = D[mol_id]
        atoms = x_atom[mol_id, :, 0]
        n_valid_atoms = (atoms > 0).sum().item()
        if n_valid_atoms < 10:
            continue
        idx_to_drop = np.random.randint(n_valid_atoms - 1)
        dist_row = atom_dists[idx_to_drop]
        neighbor_to_drop = torch.argmin(
            (dist_row[dist_row > 0])[:n_valid_atoms - 1]).item()
        if neighbor_to_drop >= idx_to_drop:
            neighbor_to_drop += 1
        x_atom[mol_id, idx_to_drop] = 0
        x_atom[mol_id, neighbor_to_drop] = 0
        x_atom_pos[mol_id, idx_to_drop] = 0
        x_atom_pos[mol_id, neighbor_to_drop] = 0
        bond_pos_to_drop = (x_bond[mol_id, :, 3] == idx_to_drop) | (x_bond[mol_id, :, 3] == neighbor_to_drop) \
                           | (x_bond[mol_id, :, 4] == idx_to_drop) | (x_bond[mol_id, :, 4] == neighbor_to_drop)
        trip_pos_to_drop = (x_triplet[mol_id, :, 2] == idx_to_drop) | (x_triplet[mol_id, :, 2] == neighbor_to_drop) \
                           | (x_triplet[mol_id, :, 3] == idx_to_drop) | (x_triplet[mol_id, :, 3] == neighbor_to_drop) \
                           | (x_triplet[mol_id, :, 4] == idx_to_drop) | (x_triplet[mol_id, :, 4] == neighbor_to_drop)
        x_bond[mol_id, bond_pos_to_drop] = 0
        x_bond_dist[mol_id, bond_pos_to_drop] = 0
        bonds_mask[mol_id, bond_pos_to_drop] = 0
        x_triplet[mol_id, trip_pos_to_drop] = 0
        x_triplet_angle[mol_id, trip_pos_to_drop] = 0
    return x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle, bonds_mask
Example #15
def get_anchor_point_to_mask_dist_wside(starts, ends, points, mask_gts,
                                        slope_epsilon=1e-5):
    if len(points.shape) == 1:
        points = points.unsqueeze(0)
    sample_num = len(points)
    points_num = points.shape[1] // 2
    points_binary_mask = points.new_zeros(sample_num, points_num)
    mask_gts_x = mask_gts[0::2]
    mask_gts_y = mask_gts[1::2]
    mask_gts_x_shift = torch.roll(mask_gts_x, -1)
    mask_gts_y_shift = torch.roll(mask_gts_y, -1)
    slope = (mask_gts_y_shift - mask_gts_y) / \
             (mask_gts_x_shift - mask_gts_x + slope_epsilon)
    bias = mask_gts_y_shift - slope * mask_gts_x_shift

    points_x = points[:, 0::2].unsqueeze(-1)
    points_y = points[:, 1::2].unsqueeze(-1)
    points_x_filter = ((points_x >= mask_gts_x) & (points_x <= mask_gts_x_shift)) |\
                      ((points_x <= mask_gts_x) & (points_x >= mask_gts_x_shift))
    inter_y = slope * points_x + bias
    inter_y2point_y_dist = inter_y - points_y
    # set the distance of unsatisfied points to a big number
    inter_y2point_y_dist[~points_x_filter] = 1e8
    inter_y2point_y_absdist_min_idx = torch.argmin(torch.abs(inter_y2point_y_dist), dim=-1).unsqueeze(-1)
    points_match_res = torch.gather(inter_y2point_y_dist, 2, inter_y2point_y_absdist_min_idx).squeeze(-1)

    # TODO: use starts and ends to limit point regression???
    # TODO: set unsatisfied points in points_match_res to 0???
    points_binary_mask_filter = (points_x.squeeze(-1) > starts.unsqueeze(-1)) &\
                                (points_x.squeeze(-1) < ends.unsqueeze(-1))
    points_binary_mask[points_binary_mask_filter] = 1

    # set unmatched points to 0
    unmatched_idx = points_match_res == 1e8
    points_match_res[unmatched_idx] = 0
    points_binary_mask[unmatched_idx] = 0
    return points_match_res, points_binary_mask
Example #16
    def forward(self, inputs):
        # convert inputs from BCHW -> BHWC
        inputs = inputs.permute(0, 2, 3, 1).contiguous()
        input_shape = inputs.shape

        # Flatten input
        flat_input = inputs.view(-1, self._embedding_dim)

        # Calculate distances
        distances = (torch.sum(flat_input**2, dim=1, keepdim=True) +
                     torch.sum(self._embedding.weight**2, dim=1) -
                     2 * torch.matmul(flat_input, self._embedding.weight.t()))

        # Encoding
        encoding_indices = torch.argmin(distances, dim=1).unsqueeze(1)
        encodings = torch.zeros(encoding_indices.shape[0],
                                self._num_embeddings,
                                device=inputs.device)
        encodings.scatter_(1, encoding_indices, 1)

        # Quantize and unflatten
        quantized = torch.matmul(encodings,
                                 self._embedding.weight).view(input_shape)

        # Loss
        e_latent_loss = F.mse_loss(quantized.detach(), inputs)
        q_latent_loss = F.mse_loss(quantized, inputs.detach())
        loss = q_latent_loss + self._commitment_cost * e_latent_loss

        quantized = inputs + (quantized - inputs).detach()
        avg_probs = torch.mean(encodings, dim=0)
        perplexity = torch.exp(-torch.sum(avg_probs *
                                          torch.log(avg_probs + 1e-10)))

        # convert quantized from BHWC -> BCHW
        return loss, quantized.permute(0, 3, 1,
                                       2).contiguous(), perplexity, encodings
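
The core of the quantiser above as a standalone sketch with hypothetical sizes: nearest-codeword lookup via argmin, then a one-hot matmul back into the codebook:

import torch

num_embeddings, dim = 8, 4
codebook = torch.randn(num_embeddings, dim)
flat_input = torch.randn(10, dim)

distances = (flat_input.pow(2).sum(1, keepdim=True)
             + codebook.pow(2).sum(1)
             - 2 * flat_input @ codebook.t())
indices = torch.argmin(distances, dim=1).unsqueeze(1)
encodings = torch.zeros(10, num_embeddings).scatter_(1, indices, 1)
quantized = encodings @ codebook         # (10, dim): nearest codeword per row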
Example #17
def WeightedLinkPrediction(G, clusters, LinkPredictionMethod, VectorPairs):
    PartitionClassi = set([*clusters.keys()])
    predLinkWeight=[]
    AddLinkGraph=nx.Graph()
    for OneClassi in PartitionClassi:
        oneClassNodes = clusters[OneClassi]
        SubGraph=nx.Graph()
        SubGraph.add_nodes_from(oneClassNodes)
        for (i,j) in G.edges:
            if (i in SubGraph.nodes()) and (j in SubGraph.nodes()) and 'weight' in G.get_edge_data(i,j):
                    SubGraph.add_weighted_edges_from([(i,j,G.get_edge_data(i,j)['weight'])])
            else:
                continue
        if SubGraph.number_of_edges()>=2:
            diag,vector=Compute_fiedler_vector(SubGraph)
            for iter1 in range(VectorPairs):
                if torch.min(vector)<0:
                    locx=torch.argmax(vector).tolist()
                    locy=torch.argmin(vector).tolist()
                    StartNode=oneClassNodes[locx]
                    EndNode=oneClassNodes[locy]
                    WrongLink=[tuple(sorted([StartNode,EndNode]))]
                    vector=np.delete(vector,locx-1)
                    vector=np.delete(vector,locy-1)
                    #AddLinkGraph.add_edge(StartNode,EndNode)
                    preds=getattr(nx,LinkPredictionMethod)(SubGraph,WrongLink)
                    for u,v,p in preds:
                        predLinkWeight.append((u,v,p))
        else:
            continue

        ## save
        """AddedLinkGraphResultsFiles= "Results/PartitionResults/AddedLindedGraph.pkl"
        fwG=open(AddedLinkGraphResultsFiles,'wb')
        pickle.dump(G,fwG)"""
    return predLinkWeight
Example #18
    def unnormalize_cloth_pose(self, v_cloth_posed, theta, beta, theta_in_rodrigues=True):
        device = theta.device
        self.cur_device = torch.device(device.type, device.index)
        num_batch = beta.shape[0]

        v_shaped = torch.matmul(beta, self.shapedirs).view(-1, self.size[0], self.size[1]) + self.v_template
        Jx = torch.matmul(v_shaped[:, :, 0], self.J_regressor)
        Jy = torch.matmul(v_shaped[:, :, 1], self.J_regressor)
        Jz = torch.matmul(v_shaped[:, :, 2], self.J_regressor)
        J = torch.stack([Jx, Jy, Jz], dim = 2)
        if theta_in_rodrigues:
            Rs = batch_rodrigues(theta.view(-1, 3)).view(-1, 24, 3, 3)
        else: #theta is already rotations
            Rs = theta.view(-1,24,3,3)

        pose_feature = (Rs[:, 1:, :, :]).sub(1.0, self.e3).view(-1, 207)
        pose_displ = torch.matmul(pose_feature, self.posedirs).view(-1, self.size[0], self.size[1])
        v_posed = pose_displ + v_shaped
        self.J_transformed, A = batch_global_rigid_transformation(Rs, J, self.parents, rotate_base = False)

        W=self.weight.expand(num_batch,*self.weight.shape[1:])
        T = torch.matmul(W, A.view(num_batch, 24, 16)).view(num_batch, -1, 4, 4)
        
        v_posed_homo = torch.cat([v_posed, torch.ones(num_batch, v_posed.shape[1], 1, device = self.cur_device)], dim = 2)
        v_homo = torch.matmul(T, torch.unsqueeze(v_posed_homo, -1))

        v_smpl = v_homo[:, :, :3, 0]
        with torch.no_grad():
            dists = ((v_smpl - v_cloth_posed.unsqueeze(1))**2).sum(-1)
            correspondance = torch.argmin(dists, 1)

        invT = torch.inverse(T[0, correspondance])
        v = torch.cat([v_cloth_posed, torch.ones(len(v_cloth_posed), 1, device=self.cur_device)], 1)
        v = torch.matmul(invT, v.unsqueeze(-1))[:, :3, 0]
        unposed_v = v - pose_displ[0, correspondance]

        return unposed_v
Example #19
    def accumulateVec(self, vec):
        #assert(len(vec.size()) == 1 and vec.size()[0] == self.d)
        
        # the general case, where hashes are not precomputed
        h1, h2, h3, h4, h5, h6 = self.hashes[:,0:1], self.hashes[:,1:2],\
                                 self.hashes[:,2:3], self.hashes[:,3:4],\
                                 self.hashes[:,4:5], self.hashes[:,5:6]
        
        vals = torch.zeros(self.r, vec.size()[0], dtype=torch.int64, device=self.device)
        coords = torch.tensor(vec)  # snapshot of the indices; the in-place ops below mutate vec
        for r in range(self.r):
            buckets = (vec.mul_(h1[r]).add_(h2[r]) % LARGEPRIME % self.c)
            signs = ((vec.mul_(h3[r]).add_(h4[r]).mul_(vec).add_(h5[r])\
                      .mul_(vec).add_(h6[r])) % LARGEPRIME % 2).float().mul_(2).add_(-1)
            self.table[r,:] += torch.bincount(input=buckets,
                                              weights=signs,
                                              minlength=self.c)

            vals[r] = self.table[r, buckets] * signs

        vals = vals.median(dim=0)[0]  # these are the estimates
        vals = torch.stack((vals, coords), dim=1)
        print(self.topk)
        for val in vals:
            in_heap = False
            for el in self.topk:
                if el[1] == val[1]:
                    # update existing value
                    # might double count if a lot of the same id are next to
                    # each other, but this should be the same for everything
                    el[0] += 1
                    in_heap = True
                    break
            cutoff = torch.argmin(self.topk[:,0])
            if (not in_heap) and val[0] > self.topk[cutoff][0]:
                self.topk[cutoff] = val
Example #20
def global_Center_cosine(feat_vec):
    b, t, _ = feat_vec.size()
    refine_feature = torch.zeros(b, t - 1, feat_vec.size(2))
    similar_matrix = torch.zeros(b,t,t)

    for i in range(t):
        for j in range(t):
            similar_matrix[:,i,j] = torch.cosine_similarity(feat_vec[:,i,:],feat_vec[:,j,:])

    similar_score = torch.sum(similar_matrix,2,keepdim=True)
    remove_id = torch.argmin(similar_score,1)

    for i in range(b):
        refine_feature[i] = feat_vec[i, torch.arange(t) != remove_id[i], :]

    cosine_sum_similar = 0
    for i in range(t - 1):
        for j in range(i + 1, t - 1):
            cosine_similar_score = torch.cosine_similarity(refine_feature[:,i,:],refine_feature[:,j,:])
            cosine_similar_score = torch.div(cosine_similar_score + 1, 2)
            cosine_similar_score = -torch.log(cosine_similar_score)
            cosine_sum_similar = cosine_sum_similar + cosine_similar_score

    return refine_feature , cosine_sum_similar
Example #21
def global_KNN_cosine(feat_vec):  # to verify: can cosine_similarity operate across dimensions? reply: yes, it works across dimensions, i.e. the dimension is preserved
    b,t,_ = feat_vec.size()
    refine_feature = torch.zeros(b, t - 1, feat_vec.size(2))
    feat_vec_avg = torch.mean(feat_vec, 1)
    similar_matrix = torch.zeros(b, t)

    for i in range(t):
        similar_score = torch.cosine_similarity(feat_vec_avg, feat_vec[:, i, :])
        similar_matrix[:, i] = similar_score

    remove_id = torch.argmin(similar_matrix, 1)

    for i in range(b):
        refine_feature[i] = feat_vec[i, torch.arange(t) != remove_id[i], :]  # b x (t-1) x 1024

    cosine_sum_similar = 0
    for i in range(t-1):
        for j in range(i+1,t):
            cosine_similar_score = torch.cosine_similarity(refine_feature[:,i,:],refine_feature[:,j,:])
            cosine_similar_score = torch.div(cosine_similar_score + 1, 2)  # proportionally compress into the (0, 1) interval
            cosine_similar_score = -torch.log(cosine_similar_score)
            cosine_sum_similar = cosine_sum_similar + cosine_similar_score

    return refine_feature, cosine_sum_similar
Example #22
def calculate_partitions(partitions_count, cluster_partitions, types):
    partition_distribution = torch.ones((partitions_count,
                                         len(torch.unique(types))),
                                        dtype=torch.long)
    partition_assignments = torch.zeros(cluster_partitions.shape[0],
                                        dtype=torch.long)

    for i in torch.unique(cluster_partitions):
        cluster_positions = (cluster_partitions == i).nonzero()
        cluster_types = types[cluster_positions]
        unique_types_in_cluster, type_count = torch.unique(cluster_types, return_counts=True)
        tmp_distribution = partition_distribution.clone()
        tmp_distribution[:, unique_types_in_cluster] += type_count
        relative_distribution = partition_distribution.double() / tmp_distribution.double()
        min_relative_distribution_group = torch.argmin(torch.sum(relative_distribution, dim=1))
        partition_distribution[min_relative_distribution_group,
                               unique_types_in_cluster] += type_count
        partition_assignments[cluster_positions] = min_relative_distribution_group

    write_out("Loaded data into the following partitions")
    write_out("[[  TM  SP+TM  SP Glob]")
    write_out(partition_distribution - torch.ones(partition_distribution.shape,
                                                  dtype=torch.long))
    return partition_assignments
Example #23
    def _l2_dist_metric(self, H):
        """
        Given an array of data, compute the indices of the two closest samples.
        :param H: an Nxd array of data (PyTorch Tensor)
        :return: the two indices of the closest samples
        """
        with torch.no_grad():
            M, d = H.shape
            H2 = torch.reshape(H, (M, 1, d))  # reshaping for broadcasting
            inside = H2 - H
            square_sub = torch.mul(inside, inside)  # square all elements
            psi = torch.sum(square_sub, dim=2)  # capacity x batch_size

            # infinity on diagonal
            mb = psi.shape[0]
            diag_vec = torch.ones(mb).cuda(self.gpu_id) * np.inf
            mask = torch.diag(torch.ones_like(diag_vec).cuda(self.gpu_id))
            psi = mask * torch.diag(diag_vec) + (1. - mask) * psi

            # grab indices
            idx = torch.argmin(psi)
            idx_row = idx // mb  # integer division; plain / would give a float tensor
            idx_col = idx % mb
        return torch.min(idx_row, idx_col), torch.max(idx_row, idx_col)
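
torch.argmin without dim flattens its input first, which is why the row and column have to be recovered by integer division and modulo, as above. A small check:

import torch

psi = torch.tensor([[9., 1.], [4., 7.]])
idx = torch.argmin(psi)                  # tensor(1): index into the flattened matrix
row, col = idx // psi.shape[1], idx % psi.shape[1]   # (0, 1)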
Example #24
def kmeans_fun_gpu(X, K=10, max_iter=1000, batch_size=8096, tol=1e-40):
    N = X.shape[0]

    indices = torch.randperm(N)[:K]
    init_centers = X[indices]

    batchs = N // batch_size
    last = 1 if N % batch_size != 0 else 0

    choice_cluster = torch.zeros([N]).cuda()
    for _ in range(max_iter):
        for bn in range(batchs + last):
            if bn == batchs and last == 1:
                _end = N  # slice to the end; -1 would drop the last sample
            else:
                _end = (bn + 1) * batch_size
            X_batch = X[bn * batch_size:_end]

            dis_batch = euclidean_metric_gpu(X_batch, init_centers)
            choice_cluster[bn * batch_size:_end] = torch.argmin(dis_batch,
                                                                dim=1)

        init_centers_pre = init_centers.clone()
        for index in range(K):
            selected = torch.nonzero(choice_cluster == index).squeeze().cuda()
            selected = torch.index_select(X, 0, selected)
            init_centers[index] = selected.mean(dim=0)

        center_shift = torch.sum(
            torch.sqrt(torch.sum((init_centers - init_centers_pre)**2, dim=1)))
        if center_shift < tol:
            break

    k_mean = init_centers.detach().cpu().numpy()
    choice_cluster = choice_cluster.detach().cpu().numpy()
    return k_mean, choice_cluster
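
A hypothetical call (assumes a CUDA device and the euclidean_metric_gpu helper used above):

import torch

X = torch.randn(10000, 16).cuda()
k_mean, choice = kmeans_fun_gpu(X, K=10, batch_size=2048)
# k_mean: (10, 16) numpy array of centers, choice: (10000,) cluster ids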
Example #25
    def compress(self, vec):

        vec = vec.view(-1, self.dim)

        # calculate probability, complexity: O(d*K)
        p = torch.mm(self.c_dagger, vec.transpose(0, 1)).transpose(0, 1)
        l1_norms = torch.norm(p, p=1, dim=1, keepdim=True)
        probability = torch.abs(p) / l1_norms

        # choose codeword with probability
        r = torch.rand(probability.size()[0])
        if self.cuda:
            r = r.cuda()
        rs = r.view(-1, 1).expand_as(probability)

        comp = torch.cumsum(probability, dim=1) >= rs - (1e-5)
        codes = torch.argmin(comp, dim=1) + 1

        selected_p = p.gather(dim=1, index=codes.view(-1, 1))
        u = torch.mul(torch.sign(selected_p.view(-1)), l1_norms.view(-1))

        if self.compressed_norm:
            u = self.norm_compressor.compress(u)
        return [u, codes.type(self.code_dtype)]
Example #26
async def hist_match_pytorch_async(source, template, index, storage):

    oldshape = source.size()
    source = source.view(-1)
    template = template.view(-1)

    max_val = max(source.max().item(), template.max().item())
    min_val = min(source.min().item(), template.min().item())

    num_bins = 400
    hist_step = (max_val - min_val) / num_bins

    if hist_step == 0:
        storage[0, index, :, :] = source.reshape(oldshape)
        return

    hist_bin_centers = torch.arange(start=min_val, end=max_val, step=hist_step).to(source.device)
    hist_bin_centers = hist_bin_centers + hist_step / 2.0

    source_hist = torch.histc(input=source, min=min_val, max=max_val, bins=num_bins)
    template_hist = torch.histc(input=template, min=min_val, max=max_val, bins=num_bins)

    source_quantiles = torch.cumsum(input=source_hist, dim=0)
    source_quantiles = source_quantiles / source_quantiles[-1]

    template_quantiles = torch.cumsum(input=template_hist, dim=0)
    template_quantiles = template_quantiles / template_quantiles[-1]

    nearest_indices = torch.argmin(torch.abs(template_quantiles.repeat(len(source_quantiles), 1) - source_quantiles.view(-1, 1).repeat(1, len(template_quantiles))), dim=1)

    source_bin_index = torch.clamp(input=torch.round(source / hist_step), min=0, max=num_bins - 1).long()

    mapped_indices = torch.gather(input=nearest_indices, dim=0, index=source_bin_index)
    matched_source = torch.gather(input=hist_bin_centers, dim=0, index=mapped_indices)

    storage[0, index, :, :] = matched_source.reshape(oldshape)
Example #27
def lloyd(X, n_clusters, device=0, tol=1e-4):
    X = X.float().cuda(device)

    initial_state = forgy(X, n_clusters)

    while True:
        dis = pairwise_distance(X, initial_state)

        choice_cluster = torch.argmin(dis, dim=1)

        initial_state_pre = initial_state.clone()

        for index in range(n_clusters):
            selected = torch.nonzero(choice_cluster == index).squeeze()

            selected = torch.index_select(X, 0, selected)
            initial_state[index] = selected.mean(dim=0)

        center_shift = torch.sum(torch.sqrt(torch.sum((initial_state - initial_state_pre) ** 2, dim=1)))

        if center_shift ** 2 < tol:
            break

    return choice_cluster.cpu().numpy(), initial_state.cpu().numpy()
Example #28
    def shrink_buffer(self, class_id, sampled_memory_data, sampled_memory_labs, sampled_memory_counter, sampled_memory_features, pretrained):
        
        total_size=sampled_memory_counter.size(0)
        exceed_num=total_size-self.class_buffer_size
        old_features=sampled_memory_features[:self.class_buffer_size]
        new_features=sampled_memory_features[-exceed_num:]
        dist=self.dist_matrix(new_features,old_features)

        # idx0 is the index in the new feature list
        # idx1 is the index in the old feature list
        for idx0 in range(exceed_num):
            idx1=torch.argmin(dist[idx0])
            pt = sampled_memory_data[idx0+self.class_buffer_size]
            w = sampled_memory_counter[idx1]

            sampled_memory_data[idx1]=pt
            sampled_memory_counter[idx1]=w+1
            sampled_memory_features[idx1]=(new_features[idx0]+sampled_memory_features[idx1]*w)/(w+1)


        self.sampled_class_data[class_id]=sampled_memory_data[:self.class_buffer_size]
        self.sampled_class_labs[class_id]=sampled_memory_labs[:self.class_buffer_size]
        self.sampled_class_counter[class_id]=sampled_memory_counter[:self.class_buffer_size]
        self.sampled_class_features[class_id]=sampled_memory_features[:self.class_buffer_size]
Example #29
    def select_action(self, state, eval=False):
        '''
            Get action from current task policy
        '''
        state = torch.FloatTensor(state).to(self.device).unsqueeze(0)
        # Action Sampling for SQRL
        if self.use_constraint_sampling:
            self.safe_samples = 100  # TODO: don't hardcode
            if not self.cnn:
                state_batch = state.repeat(self.safe_samples, 1)
            else:
                state_batch = state.repeat(self.safe_samples, 1, 1, 1)
            pi, log_pi, _ = self.policy.sample(state_batch)
            max_qf_constraint_pi = self.safety_critic.get_value(
                state_batch, pi)

            thresh_idxs = (max_qf_constraint_pi <= self.eps_safe).nonzero()[:, 0]
            # Note: these are auto-normalized
            thresh_probs = torch.exp(log_pi[thresh_idxs])
            thresh_probs = thresh_probs.flatten()

            if list(thresh_probs.size())[0] == 0:
                min_q_value_idx = torch.argmin(max_qf_constraint_pi)
                action = pi[min_q_value_idx, :].unsqueeze(0)
            else:
                prob_dist = torch.distributions.Categorical(thresh_probs)
                sampled_idx = prob_dist.sample()
                action = pi[sampled_idx, :].unsqueeze(0)
        # Action Sampling for all other algorithms
        else:
            if eval is False:
                action, _, _ = self.policy.sample(state)
            else:
                _, _, action = self.policy.sample(state)
        return action.detach().cpu().numpy()[0]
Example #30
    def apply(self, world, preds, waypoint, probs):
        index, waypoint = self._get_reference_index(world.player.curr_wp)
        probs = torch.Tensor(1. / probs)
        preds = torch.from_numpy(preds)
        ego_preds = preds[:, 0, :]
        # poses = ego_preds.view(-1, self.NPOS)

        errorcost = ego_preds[:, self.rollout_size - 1, :2].sub(torch.Tensor(waypoint[:2])).norm(dim=1).mul(self.prox_w)

        smoothness = (
            ((ego_preds[:, 1:, 3] - ego_preds[:, : self.rollout_size - 1, 3]))
            .abs()
            .mul(self.discount)
            .sum(dim=1)
        ).mul(self.smooth_w)

        result = errorcost.add(smoothness)

        smoothness2 = (
            ((ego_preds[:, 1:, 2] - ego_preds[:, : self.rollout_size - 1, 2]))
            .abs()
            .mul(self.discount)
            .sum(dim=1)
        ).mul(self.smooth_w)
        result = errorcost.add(smoothness2)

        if self.check_collision:
            collisions = self.collision_checker.collision_check(ego_preds, world)
            # collision_cost = torch.from_numpy(collisions).sum(dim=1).mul(self.collision_cost)
            collision_cost = torch.from_numpy(collisions).mul(self.collision_cost)
            result = result.add(collision_cost)

        prob_cost = 0.1 * probs
        result = result.add(prob_cost)
        min_cost_id = torch.argmin(result).item()
        return min_cost_id
Example #31
def patch_match(x, y, mask, patch_size=3, radius=3, stride=1):
    batch, channels, height, width = x.size()

    y_pad = F.pad(y, (radius // 2, radius // 2, radius // 2, radius // 2))  # Left, right, up, down
    distance_all = []
    for i in range(0, radius, stride):  # Searching/matching in row-major order
        for j in range(0, radius, stride):
            distance_pix = torch.sum((y_pad[:, :, i:i + height, j:j + width] - x) ** 2, dim=1, keepdim=True)
            distance_all += [F.avg_pool2d(distance_pix, patch_size, stride=1, padding=patch_size // 2)]

    distance_all = torch.cat(distance_all, dim=1)      # Thus this stack of distances will be in row major order
    location_min = torch.argmin(distance_all, dim=1)   # get the pixel/patch with the minimal distance
    location_min = location_min * mask                  # Only need to match within the mask
    distance_min_x = torch.fmod(location_min, radius) - radius // 2  # column offset; adjusts for searching behind
    distance_min_y = location_min // radius - radius // 2            # row offset (integer division)

    grid_x = torch.arange(width).cuda().unsqueeze(0).unsqueeze(0) + distance_min_x.type(torch.float32)
    grid_y = torch.arange(height).cuda().unsqueeze(1).unsqueeze(0) + distance_min_y.type(torch.float32)
    grid_x = torch.clamp(grid_x.float() / width, 0, 1) * 2 - 1
    grid_y = torch.clamp(grid_y.float() / height, 0, 1) * 2 - 1

    grid = torch.stack([grid_x, grid_y], dim=3)
    out = F.grid_sample(y, grid)
    return out
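
Decoding the row-major search offset above: with the radius x radius window enumerated row by row, fmod recovers the column shift and integer division the row shift. A toy check for radius = 3:

import torch

radius = 3
location_min = torch.tensor([0, 4, 8])               # top-left, centre, bottom-right
dx = torch.fmod(location_min, radius) - radius // 2  # tensor([-1, 0, 1])
dy = location_min // radius - radius // 2            # tensor([-1, 0, 1])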
Example #32
def compute_basic_stats(
    times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
    data = torch.as_tensor(times, dtype=torch.float32)
    # compute on non-zero data:
    data = data[data > 0]
    total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered"  # type: Union[str, float]
    min_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
    max_index = ("None", "None")  # type: Tuple[Union[str, float], Union[str, float]]
    mean = "None"  # type: Union[str, float]
    std = "None"  # type: Union[str, float]
    if len(data) > 0:
        min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
        max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
        mean = round(torch.mean(data).item(), 5)
        if len(data) > 1:
            std = round(torch.std(data).item(), 5)
    return [total, min_index, max_index, mean, std]
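
An example call (zeros are treated as not-triggered events and filtered out; values are rounded to five places, so the results below are approximate):

compute_basic_stats([0.0, 0.12, 0.31, 0.05])
# [0.48, (0.05, 2), (0.31, 1), 0.16, 0.13454]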
Example #33
    def argmin(self, dim=None, keepdim=False):
        r"""See :func:`torch.argmin`"""
        return torch.argmin(self, dim, keepdim)
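
The semantics being delegated: with no dim, torch.argmin indexes into the flattened tensor; with dim it reduces along that axis, and keepdim retains the reduced axis with size 1.

import torch

x = torch.tensor([[4, 1], [2, 3]])
torch.argmin(x)                          # tensor(1): index into the flattened tensor
torch.argmin(x, dim=1)                   # tensor([1, 0])
torch.argmin(x, dim=1, keepdim=True)     # tensor([[1], [0]])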