Example #1
def _get_most_activated_patch_idxs_from_channels(z, channel_idxs):
    """
    z: feature map of shape C x H x W
    channel_idxs: LongTensor of K channel indices
    """

    k = channel_idxs.shape[0]

    most_activated_patch_idxs = \
        z[channel_idxs].view(k, -1).max(1)[1]

    return torch.unique(most_activated_patch_idxs)
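A hypothetical usage sketch for the helper above (shapes follow the docstring; the values are made up):

import torch

z = torch.randn(64, 7, 7)                      # C x H x W feature map
channel_idxs = torch.tensor([3, 10, 10, 42])   # K channel indices (duplicates allowed)
patch_idxs = _get_most_activated_patch_idxs_from_channels(z, channel_idxs)
# patch_idxs holds flattened spatial positions in [0, H*W), deduplicated by torch.unique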
Example #2
    def __init__(self, sampler, group_ids, batch_size, drop_uneven=False):
        if not isinstance(sampler, Sampler):
            raise ValueError(
                "sampler should be an instance of "
                "torch.utils.data.Sampler, but got sampler={}".format(sampler)
            )
        self.sampler = sampler
        self.group_ids = torch.as_tensor(group_ids)
        assert self.group_ids.dim() == 1
        self.batch_size = batch_size
        self.drop_uneven = drop_uneven

        self.groups = torch.unique(self.group_ids).sort(0)[0]

        self._can_reuse_batches = False
Example #3
def compute_jaccard_dist(target_features,
                         k1=20,
                         k2=6,
                         print_flag=True,
                         lambda_value=0,
                         source_features=None,
                         use_gpu=False):
    end = time.time()
    N = target_features.size(0)
    if (use_gpu):
        # accelerate matrix distance computing
        target_features = target_features.cuda()
        if (source_features is not None):
            source_features = source_features.cuda()

    if ((lambda_value > 0) and (source_features is not None)):
        M = source_features.size(0)
        sour_tar_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True).expand(N, M) + \
                           torch.pow(source_features, 2).sum(dim=1, keepdim=True).expand(M, N).t()
        sour_tar_dist.addmm_(target_features, source_features.t(), beta=1, alpha=-2)
        sour_tar_dist = 1 - torch.exp(-sour_tar_dist)
        sour_tar_dist = sour_tar_dist.cpu()
        source_dist_vec = sour_tar_dist.min(1)[0]
        del sour_tar_dist
        source_dist_vec /= source_dist_vec.max()
        source_dist = torch.zeros(N, N)
        for i in range(N):
            source_dist[i, :] = source_dist_vec + source_dist_vec[i]
        del source_dist_vec

    if print_flag:
        print('Computing original distance...')
    original_dist = torch.pow(target_features, 2).sum(dim=1, keepdim=True) * 2
    original_dist = original_dist.expand(
        N, N) - 2 * torch.mm(target_features, target_features.t())
    original_dist /= original_dist.max(0)[0]
    original_dist = original_dist.t()
    del target_features
    initial_rank = torch.argsort(original_dist, dim=-1)

    original_dist = original_dist.cpu()
    initial_rank = initial_rank.cpu()
    all_num = gallery_num = original_dist.size(0)

    #del target_features
    if (source_features is not None):
        del source_features

    if print_flag:
        print('Computing Jaccard distance...')

    nn_k1 = []
    nn_k1_half = []
    for i in range(all_num):
        nn_k1.append(k_reciprocal_neigh(initial_rank, i, k1))
        nn_k1_half.append(
            k_reciprocal_neigh(initial_rank, i, int(np.around(k1 / 2))))

    V = torch.zeros(all_num, all_num)
    for i in range(all_num):
        k_reciprocal_index = nn_k1[i]
        k_reciprocal_expansion_index = k_reciprocal_index
        for candidate in k_reciprocal_index:
            candidate_k_reciprocal_index = nn_k1_half[candidate]
            if (len(
                    np.intersect1d(candidate_k_reciprocal_index,
                                   k_reciprocal_index)) >
                    2 / 3 * len(candidate_k_reciprocal_index)):
                k_reciprocal_expansion_index = torch.cat(
                    (k_reciprocal_expansion_index,
                     candidate_k_reciprocal_index))

        k_reciprocal_expansion_index = torch.unique(
            k_reciprocal_expansion_index)  ## element-wise unique
        weight = torch.exp(-original_dist[i, k_reciprocal_expansion_index])
        V[i, k_reciprocal_expansion_index] = weight / torch.sum(weight)

    if k2 != 1:
        k2_rank = initial_rank[:, :k2].clone().view(-1)
        V_qe = V[k2_rank]
        V_qe = V_qe.view(initial_rank.size(0), k2, -1).sum(1)
        V_qe /= k2
        V = V_qe
        del V_qe
    del initial_rank

    invIndex = []
    for i in range(gallery_num):
        invIndex.append(torch.nonzero(V[:, i])[:, 0])  #len(invIndex)=all_num

    jaccard_dist = torch.zeros_like(original_dist)

    #del original_dist  # added line to save memory

    for i in range(all_num):
        temp_min = torch.zeros(1, gallery_num)
        indNonZero = torch.nonzero(V[i, :])[:, 0]
        indImages = []
        indImages = [invIndex[ind] for ind in indNonZero]
        for j in range(len(indNonZero)):
            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + torch.min(
                V[i, indNonZero[j]], V[indImages[j], indNonZero[j]])
        jaccard_dist[i] = 1 - temp_min / (2 - temp_min)
    del invIndex

    del V

    pos_bool = (jaccard_dist < 0)
    jaccard_dist[pos_bool] = 0.0
    if print_flag:
        print("Time cost: {}".format(time.time() - end))

    if (lambda_value > 0):
        original_dist[original_dist < 0] = 0.0
        return jaccard_dist * (1 - lambda_value) + original_dist * lambda_value
    else:
        return jaccard_dist  #.cpu()
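The helper k_reciprocal_neigh used above is not included in this example; a minimal sketch consistent with how it is called (the standard k-reciprocal nearest-neighbor lookup on the precomputed initial_rank) might look like:

import torch

def k_reciprocal_neigh(initial_rank, i, k1):
    # forward k1-nearest neighbors of sample i (including i itself at rank 0)
    forward_k_neigh_index = initial_rank[i, :k1 + 1]
    # neighbors of those neighbors
    backward_k_neigh_index = initial_rank[forward_k_neigh_index, :k1 + 1]
    # keep only neighbors that also have i among their own k1-nearest neighbors
    fi = torch.nonzero(backward_k_neigh_index == i)[:, 0]
    return forward_k_neigh_index[fi]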
Example #4
    def compute_adjacency_info(vertices: torch.Tensor, faces: torch.Tensor):
        """Build data structures to help speed up connectivity queries. Assumes
        a homogeneous mesh, i.e., each face has the same number of vertices.

        The outputs have the following format: AA, AA_count
        AA_count: [count_0, ..., count_n]
        with AA:
        [[aa_{0,0}, ..., aa_{0,count_0} (, -1, ..., -1)],
         [aa_{1,0}, ..., aa_{1,count_1} (, -1, ..., -1)],
                    ...
         [aa_{n,0}, ..., aa_{n,count_n} (, -1, ..., -1)]]
        """

        device = vertices.device
        facesize = faces.shape[1]
        nb_vertices = vertices.shape[0]
        nb_faces = faces.shape[0]
        edges = torch.cat([faces[:, i:i + 2]
                           for i in range(facesize - 1)] + [faces[:, [-1, 0]]],
                          dim=0)
        # Sort the vertex of edges in increasing order
        edges = torch.sort(edges, dim=1)[0]
        # id of corresponding face in edges
        face_ids = torch.arange(nb_faces, device=device,
                                dtype=torch.long).repeat(facesize)
        # remove multiple occurrences and sort by the first vertex
        # the edge key / id is fixed from now as the first axis position
        # edges_ids will give the key of the edges on the original vector
        edges, edges_ids = torch.unique(edges,
                                        sorted=True,
                                        return_inverse=True,
                                        dim=0)
        nb_edges = edges.shape[0]

        # EDGE2FACE
        sorted_edges_ids, order_edges_ids = torch.sort(edges_ids)
        sorted_faces_ids = face_ids[order_edges_ids]
        # indices of first occurrences of each key
        idx_first = torch.where(
            torch.nn.functional.pad(
                sorted_edges_ids[1:] != sorted_edges_ids[:-1], (1, 0),
                value=1))[0]
        nb_faces_per_edge = idx_first[1:] - idx_first[:-1]
        # compute sub_idx (2nd axis indices to store the faces)
        offsets = torch.zeros(sorted_edges_ids.shape[0],
                              device=device,
                              dtype=torch.long)
        offsets[idx_first[1:]] = nb_faces_per_edge
        sub_idx = (torch.arange(
            sorted_edges_ids.shape[0], device=device, dtype=torch.long) -
                   torch.cumsum(offsets, dim=0))
        # TODO(cfujitsang): potential way to compute sub_idx differently
        #                   to test with bigger model
        #sub_idx = torch.ones(sorted_edges_ids.shape[0], device=device, dtype=torch.long)
        #sub_idx[0] = 0
        #sub_idx[idx_first[1:]] = 1 - nb_faces_per_edge
        #sub_idx = torch.cumsum(sub_idx, dim=0)
        nb_faces_per_edge = torch.cat(
            [nb_faces_per_edge, sorted_edges_ids.shape[0] - idx_first[-1:]],
            dim=0)
        max_sub_idx = torch.max(nb_faces_per_edge)
        ef = torch.zeros(
            (nb_edges, max_sub_idx), device=device, dtype=torch.long) - 1
        ef[sorted_edges_ids, sub_idx] = sorted_faces_ids
        # FACE2FACES
        nb_faces_per_face = torch.stack([
            nb_faces_per_edge[edges_ids[i * nb_faces:(i + 1) * nb_faces]]
            for i in range(facesize)
        ],
                                        dim=1).sum(dim=1) - facesize
        ff = torch.cat([
            ef[edges_ids[i * nb_faces:(i + 1) * nb_faces]]
            for i in range(facesize)
        ],
                       dim=1)
        # remove self occurrences
        ff[ff == torch.arange(nb_faces, device=device, dtype=torch.long).view(
            -1, 1)] = -1
        ff = torch.sort(ff, dim=-1, descending=True)[0]
        to_del = (ff[:, 1:] == ff[:, :-1]) & (ff[:, 1:] != -1)
        ff[:, 1:][to_del] = -1
        nb_faces_per_face = nb_faces_per_face - torch.sum(to_del, dim=1)
        max_sub_idx = torch.max(nb_faces_per_face)
        ff = torch.sort(ff, dim=-1, descending=True)[0][:, :max_sub_idx]

        # VERTEX2VERTICES and VERTEX2EDGES
        npy_edges = edges.cpu().numpy()
        edge2key = {tuple(npy_edges[i]): i for i in range(nb_edges)}
        #_edges and double_edges 2nd axis correspond to the triplet:
        # [left vertex, right vertex, edge key]
        _edges = torch.cat(
            [edges, torch.arange(nb_edges, device=device).view(-1, 1)], dim=1)
        double_edges = torch.cat([_edges, _edges[:, [1, 0, 2]]], dim=0)
        double_edges = torch.unique(double_edges, sorted=True, dim=0)
        # TODO(cfujitsang): potential improvement, to test with bigger model:
        #double_edges0, order_double_edges = torch.sort(double_edges[0])
        nb_double_edges = double_edges.shape[0]
        # indices of first occurrences of each key
        idx_first = torch.where(
            torch.nn.functional.pad(
                double_edges[1:, 0] != double_edges[:-1, 0], (1, 0),
                value=1))[0]
        nb_edges_per_vertex = idx_first[1:] - idx_first[:-1]
        # compute sub_idx (2nd axis indices to store the edges)
        offsets = torch.zeros(nb_double_edges, device=device, dtype=torch.long)
        offsets[idx_first[1:]] = nb_edges_per_vertex
        sub_idx = (
            torch.arange(nb_double_edges, device=device, dtype=torch.long) -
            torch.cumsum(offsets, dim=0))
        nb_edges_per_vertex = torch.cat(
            [nb_edges_per_vertex, nb_double_edges - idx_first[-1:]], dim=0)
        max_sub_idx = torch.max(nb_edges_per_vertex)
        vv = torch.zeros(
            (nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
        vv[double_edges[:, 0], sub_idx] = double_edges[:, 1]
        ve = torch.zeros(
            (nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
        ve[double_edges[:, 0], sub_idx] = double_edges[:, 2]
        # EDGE2EDGES
        ee = torch.cat([ve[edges[:, 0], :], ve[edges[:, 1], :]], dim=1)
        nb_edges_per_edge = nb_edges_per_vertex[
            edges[:, 0]] + nb_edges_per_vertex[edges[:, 1]] - 2
        max_sub_idx = torch.max(nb_edges_per_edge)
        # remove self occurrences
        ee[ee == torch.arange(nb_edges, device=device, dtype=torch.long).view(
            -1, 1)] = -1
        ee = torch.sort(ee, dim=-1, descending=True)[0][:, :max_sub_idx]
        # VERTEX2FACES
        vertex_ordered, order_vertex = torch.sort(faces.view(-1))
        face_ids_in_vertex_order = order_vertex // facesize
        # indices of first occurrences of each id
        idx_first = torch.where(
            torch.nn.functional.pad(vertex_ordered[1:] != vertex_ordered[:-1],
                                    (1, 0),
                                    value=1))[0]
        nb_faces_per_vertex = idx_first[1:] - idx_first[:-1]
        # compute sub_idx (2nd axis indices to store the faces)
        offsets = torch.zeros(vertex_ordered.shape[0],
                              device=device,
                              dtype=torch.long)
        offsets[idx_first[1:]] = nb_faces_per_vertex
        sub_idx = (torch.arange(
            vertex_ordered.shape[0], device=device, dtype=torch.long) -
                   torch.cumsum(offsets, dim=0))
        # TODO(cfujitsang): it seems that nb_faces_per_vertex == nb_edges_per_vertex ?
        nb_faces_per_vertex = torch.cat(
            [nb_faces_per_vertex, vertex_ordered.shape[0] - idx_first[-1:]],
            dim=0)
        max_sub_idx = torch.max(nb_faces_per_vertex)
        vf = torch.zeros(
            (nb_vertices, max_sub_idx), device=device, dtype=torch.long) - 1
        vf[vertex_ordered, sub_idx] = face_ids_in_vertex_order

        return edge2key, edges, vv, nb_edges_per_vertex, ve, nb_edges_per_vertex, vf, \
            nb_faces_per_vertex, ff, nb_faces_per_face, ee, nb_edges_per_edge, ef, nb_faces_per_edge
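A toy illustration (hypothetical data) of the padded-scatter pattern used repeatedly above: group a sorted key vector, compute per-key counts, and scatter the associated values into a -1-padded matrix of shape (num_keys, max_count), which is exactly the AA / AA_count layout described in the docstring:

import torch

sorted_keys = torch.tensor([0, 0, 1, 2, 2, 2])   # e.g. sorted_edges_ids
values = torch.tensor([7, 8, 9, 4, 5, 6])        # e.g. sorted_faces_ids
# index of the first occurrence of each key
idx_first = torch.where(
    torch.nn.functional.pad(sorted_keys[1:] != sorted_keys[:-1], (1, 0), value=1))[0]
counts = idx_first[1:] - idx_first[:-1]
offsets = torch.zeros_like(sorted_keys)
offsets[idx_first[1:]] = counts
sub_idx = torch.arange(len(sorted_keys)) - torch.cumsum(offsets, dim=0)
counts = torch.cat([counts, len(sorted_keys) - idx_first[-1:]])
out = torch.full((int(sorted_keys.max()) + 1, int(counts.max())), -1, dtype=torch.long)
out[sorted_keys, sub_idx] = values
# out == [[7, 8, -1], [9, -1, -1], [4, 5, 6]], counts == [2, 1, 3]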
Example #5
    def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix0, cendis=None):
        """
        Produce additional matches for predictions that have only low-quality matches.
        Specifically, for each ground-truth find the set of predictions that have
        maximum overlap with it (including ties); for each prediction in that set, if
        it is unmatched, then match it to the ground-truth with which it has the highest
        quality value.
        """
        match_quality_matrix = match_quality_matrix0.clone()

        if ENALE_SECOND_THIRD_MAX__ONLY_HIGHEST_IOU_TARGET:
            # If one anchor has the maximum IoU with several targets, some of those
            # targets may end up matched to no anchor, because an anchor can only be
            # matched to the single target with which it has the largest IoU. In that
            # case the target falls back to the anchor with its second- or
            # third-highest IoU, which guarantees that no target is missed.
            matched_vals_0, matches_0 = match_quality_matrix.max(dim=0)
            mask_only_max = match_quality_matrix*0
            tmp = torch.ones(matches_0.shape, device=matches_0.device)
            mask_only_max = mask_only_max.scatter(0, matches_0.view(1,-1), tmp.view(1,-1))
            match_quality_matrix *= mask_only_max

        # For each gt, find the prediction with which it has highest quality
        highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)
        # Find highest quality match available, even if it is low, including ties
        #print(f'highest_quality_foreach_gt: \n{highest_quality_foreach_gt}')

        if cendis is None or True:
          gt_pred_pairs_of_highest_quality = torch.nonzero(
              match_quality_matrix == highest_quality_foreach_gt[:, None]
          )
        else:
          high_mask = match_quality_matrix >= highest_quality_foreach_gt[:, None]*0.95
          #gt_pred_pairs_of_highest_quality0 = torch.nonzero( )
          cendis1 = cendis + (1-high_mask.float()) * 1000
          cendis_min = cendis1.min(dim=1)[0]
          cendis_min_mask = cendis == cendis_min.view(-1,1)
          gt_pred_pairs_of_highest_quality = torch.nonzero(cendis_min_mask * high_mask)
          if not gt_pred_pairs_of_highest_quality.shape[0] == cendis.shape[0]:
            import pdb; pdb.set_trace()  # XXX BREAKPOINT
            pass


        # Example gt_pred_pairs_of_highest_quality:
        #   tensor([[    0, 39796],
        #           [    1, 32055],
        #           [    1, 32070],
        #           [    2, 39190],
        #           [    2, 40255],
        #           [    3, 40390],
        #           [    3, 41455],
        #           [    4, 45470],
        #           [    5, 45325],
        #           [    5, 46390]])
        # Each row is a (gt index, prediction index)
        # Note how gt items 1, 2, 3, and 5 each have two ties

        pred_inds_to_update = gt_pred_pairs_of_highest_quality[:, 1]
        matches[pred_inds_to_update] = all_matches[pred_inds_to_update]

        if IGNORE_HIGHEST_MATCH_NEARBY:
            assert not POS_HIGHEST_MATCH_NEARBY
            ignore_threshold = highest_quality_foreach_gt - 0.05
            ignore_threshold = torch.max((ignore_threshold*0+1)*0.02, ignore_threshold)
            ignore_mask0 =  match_quality_matrix0 > ignore_threshold.view(-1,1)
            ignore_mask1 = ignore_mask0.any(dim=0)
            neg_mask = matches==Matcher.BELOW_LOW_THRESHOLD
            ignore_mask2 = ignore_mask1 * neg_mask
            ignore_ids = torch.nonzero(ignore_mask2).view(-1)
            matches[ignore_ids] = Matcher.BETWEEN_THRESHOLDS
        if POS_HIGHEST_MATCH_NEARBY:
            raise NotImplementedError
            assert not IGNORE_HIGHEST_MATCH_NEARBY
            ignore_threshold = highest_quality_foreach_gt - 0.05
            ignore_mask0 =  match_quality_matrix0 > ignore_threshold.view(-1,1)
            ignore_mask1 = ignore_mask0.any(dim=0)
            ignore_ids = torch.nonzero(ignore_mask1).view(-1)
            ignore_mask3 = torch.transpose( ignore_mask0[:,ignore_ids], 0, 1)
            gt_ids3 = torch.nonzero(ignore_mask3)
            matches[ignore_ids] = Matcher.BETWEEN_THRESHOLDS
            import pdb; pdb.set_trace()  # XXX BREAKPOINT
            pass

        if CHECK_SMAE_ANCHOR_MATCH_MULTI_TARGETS:
            one_anchor_multi_targets = pred_inds_to_update.shape[0] - torch.unique(pred_inds_to_update).shape[0]
            if one_anchor_multi_targets >0:
                import pdb; pdb.set_trace()  # XXX BREAKPOINT
                pass
Example #6
def main(args, devices):
    # load graph data
    ogb_dataset = False
    if args.dataset == 'aifb':
        dataset = AIFB()
    elif args.dataset == 'mutag':
        dataset = MUTAG()
    elif args.dataset == 'bgs':
        dataset = BGS()
    elif args.dataset == 'am':
        dataset = AM()
    else:
        raise ValueError()

    # Load from hetero-graph
    hg = dataset.graph

    num_rels = len(hg.canonical_etypes)
    num_of_ntype = len(hg.ntypes)
    category = dataset.predict_category
    num_classes = dataset.num_classes
    train_idx = dataset.train_idx
    test_idx = dataset.test_idx
    labels = dataset.labels

    # split dataset into train, validate, test
    if args.validation:
        val_idx = train_idx[:len(train_idx) // 5]
        train_idx = train_idx[len(train_idx) // 5:]
    else:
        val_idx = train_idx

    # calculate norm for each edge type and store in edge
    for canonical_etypes in hg.canonical_etypes:
        u, v, eid = hg.all_edges(form='all', etype=canonical_etypes)
        _, inverse_index, count = th.unique(v,
                                            return_inverse=True,
                                            return_counts=True)
        degrees = count[inverse_index]
        norm = th.ones(eid.shape[0]) / degrees
        norm = norm.unsqueeze(1)
        hg.edges[canonical_etypes].data['norm'] = norm

    # get target category id
    category_id = len(hg.ntypes)
    for i, ntype in enumerate(hg.ntypes):
        if ntype == category:
            category_id = i

    g = dgl.to_homo(hg)
    g.ndata[dgl.NTYPE].share_memory_()
    g.edata[dgl.ETYPE].share_memory_()
    g.edata['norm'].share_memory_()
    node_ids = th.arange(g.number_of_nodes())

    # find out the target node ids
    node_tids = g.ndata[dgl.NTYPE]
    loc = (node_tids == category_id)
    target_idx = node_ids[loc]
    target_idx.share_memory_()

    n_gpus = len(devices)
    # cpu
    if devices[0] == -1:
        run(0, 0, args, ['cpu'],
            (g, num_of_ntype, num_classes, num_rels, target_idx, train_idx,
             val_idx, test_idx, labels))
    # gpu
    elif n_gpus == 1:
        run(0, n_gpus, args, devices,
            (g, num_of_ntype, num_classes, num_rels, target_idx, train_idx,
             val_idx, test_idx, labels))
    # multi gpu
    else:
        procs = []
        num_train_seeds = train_idx.shape[0]
        tseeds_per_proc = num_train_seeds // n_gpus
        for proc_id in range(n_gpus):
            proc_train_seeds = train_idx[proc_id * tseeds_per_proc :
                                         (proc_id + 1) * tseeds_per_proc \
                                         if (proc_id + 1) * tseeds_per_proc < num_train_seeds \
                                         else num_train_seeds]
            p = mp.Process(target=run,
                           args=(proc_id, n_gpus, args, devices,
                                 (g, num_of_ntype, num_classes, num_rels,
                                  target_idx, proc_train_seeds, val_idx,
                                  test_idx, labels)))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()
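A small self-contained illustration (made-up edge list) of the per-edge norm computed in the loop above: th.unique with return_inverse/return_counts yields each destination node's in-degree, and the norm is its reciprocal:

import torch as th

v = th.tensor([0, 2, 2, 2, 5])                        # destination node of each edge
_, inverse_index, count = th.unique(v, return_inverse=True, return_counts=True)
degrees = count[inverse_index]                        # in-degree of each edge's destination
norm = th.ones(v.shape[0]) / degrees                  # tensor([1.0000, 0.3333, 0.3333, 0.3333, 1.0000])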
Example #7
def get_k_lowest_eig(adj, k):
    r"""
    Compute the k-lowest eigenvectors of the Laplacian matrix
    for each connected components of the graph. If there are disconnected
    graphs, then the first k eigenvectors are computed for each sub-graph
    separately.
    Parameters
    --------------
        adj: tensor(..., N, N)
            Batches of symmetric adjacency matrices
        k: int
            Compute the k-th smallest eigenvectors and eigenvalues.
        
        normalize_L: bool
            Whether to normalize the Laplacian matrix
            If `False`, then `L = D - A`
            If `True`, then `L = D^-1 (D - A)`
    Returns
    -------------
        eigvec: tensor(..., N, k)
            Resulting k-lowest eigenvectors of the Laplacian matrix of each sub-graph,
            with the same batching as the `adj` tensor.
            The dim==-1 represents the k-th vectors.
            The dim==-2 represents the N elements of each vector. 
            If a given graph is disconnected, it will give the first ``k`` eigenvectors
            of each sub-graph, and will force the first eigenvector to be 0-vectors.
            If there are ``m`` eigenvectors for a given sub-graph, with ``m < k``, it will
            return 0-vectors for all eigenvectors ``> m``
    """

    # Reshape as a 3D tensor for easier looping along batches
    device = adj.device
    shape = list(adj.shape)
    if adj.ndim == 2:
        adj = adj.unsqueeze(0)
    elif adj.ndim > 3:
        adj = adj.view(-1, shape[-2], shape[-1])

    L = get_laplacian_matrix(adj, normalize_L=False)

    # Compute and sort the eigenvectors

    eigval_all, eigvec_all = torch.linalg.eigh(L.cpu())
    eigval_all = eigval_all.to(device)
    eigvec_all = eigvec_all.to(device)
    sort_idx = torch.argsort(eigval_all.abs(), dim=-1, descending=False)
    sort_idx_vec = sort_idx.unsqueeze(-2).expand(eigvec_all.shape)
    eigval_sort = torch.gather(eigval_all, dim=-1, index=sort_idx)
    eigvec_sort = torch.gather(eigvec_all, dim=-1, index=sort_idx_vec)

    k_lowest_eigvec = []

    # Loop each graph to detect if some of them are disconnected. If they are disconnected,
    # then modify the eigenvectors such that the lowest k eigenvectors are returned for
    # each sub-graph
    for ii in range(adj.shape[0]):
        this_eigval = eigval_sort[ii]
        num_connected = torch.sum(this_eigval.abs() < EPS)

        # If there is a single connected graph, then return the k lowest eigen functions
        if num_connected <= 1:
            this_eigvec = eigvec_sort[ii, :, :k]
            if k > this_eigvec.shape[-1]:
                temp_eigvec = torch.zeros(this_eigvec.shape[0], k)
                temp_eigvec[:, :this_eigvec.shape[-1]] = this_eigvec
                this_eigvec = temp_eigvec
            k_lowest_eigvec.append(this_eigvec)

        # Otherwise, return the k lowest eigen functions for each sub-graph
        elif num_connected > 1:
            eigvec0 = eigvec_sort[ii, :, :num_connected]
            unique_idx = torch.zeros(1)
            factor = 100

            # Use the eigenvectors with 0 eigenvalues to find the unique sub-graphs
            # And loop to make sure the number of detected sub-graphs is consistent with the
            # Number of connected sub-graphs.
            while (max(unique_idx) + 1) != num_connected:
                eigvec0_round = torch.round(eigvec0 / (factor * EPS))
                _, unique_idx = torch.unique(eigvec0_round,
                                             return_inverse=True,
                                             dim=0)

                if (max(unique_idx) + 1) < num_connected:
                    factor = (factor / 2)
                elif (max(unique_idx) + 1) > num_connected:
                    factor = (factor * 3)

            # Find the eigenvectors associated to each sub-graph
            sub_graph_factors = torch.zeros(num_connected, len(this_eigval))
            for sub_ii in range(num_connected):
                sub_idx = torch.where(unique_idx == sub_ii)[0]
                sub_graph_factors[sub_ii, :] = torch.mean(torch.abs(
                    eigvec_sort[ii, sub_idx, :]),
                                                          dim=-2)
            max_idx = torch.argmax(sub_graph_factors, dim=0)[num_connected:]

            # Concatenate the k lowest eigenvectors of each sub-graph
            this_k_lowest_eigvec = torch.zeros(len(this_eigval), k)
            for sub_ii in range(num_connected):
                sub_idx = torch.where(unique_idx == sub_ii)[0]
                k_lowest_idx = torch.where(
                    max_idx == sub_ii)[0][:k - 1] + num_connected
                for kk_enum, kk in enumerate(k_lowest_idx):
                    this_k_lowest_eigvec[sub_idx,
                                         kk_enum + 1] = eigvec_sort[ii,
                                                                    sub_idx,
                                                                    kk]

            k_lowest_eigvec.append(this_k_lowest_eigvec)

    # Stack and Reshape to match the input batch shape
    k_lowest_eigvec = torch.stack(k_lowest_eigvec,
                                  dim=0).view(*(shape[:-2] + [-1, k]))

    return k_lowest_eigvec
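get_laplacian_matrix is assumed above but not shown (and EPS is assumed to be a small module-level tolerance constant); a minimal batched sketch consistent with the normalize_L=False call (L = D - A, with an optional row-normalized variant) might be:

import torch

def get_laplacian_matrix(adj, normalize_L=False):
    deg = adj.sum(dim=-1)                        # node degrees, shape (..., N)
    L = torch.diag_embed(deg) - adj              # unnormalized Laplacian L = D - A
    if normalize_L:
        L = L / deg.clamp(min=1).unsqueeze(-1)   # row-wise D^-1 (D - A), guarding isolated nodes
    return L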
Example #8
    def predict(self, prediction, nms_conf=0.4):
        """
        prediction:
            0:4 - x, y, w, h
            4 - confidence
            5: - class score
        """

        conf_mask = (prediction[:, :, 4] >
                     self.confidence).float().unsqueeze(2)
        prediction = prediction * conf_mask

        box_corner = prediction.new(*prediction.size())
        box_corner[:, :, 0] = (prediction[:, :, 0] - prediction[:, :, 2] / 2)
        box_corner[:, :, 1] = (prediction[:, :, 1] - prediction[:, :, 3] / 2)
        box_corner[:, :, 2] = (prediction[:, :, 0] + prediction[:, :, 2] / 2)
        box_corner[:, :, 3] = (prediction[:, :, 1] + prediction[:, :, 3] / 2)
        prediction[:, :, :4] = box_corner[:, :, :4]

        outputs = []

        for index, image_pred in enumerate(prediction):
            max_score, max_index = torch.max(image_pred[:, 5:],
                                             1,
                                             keepdim=True)
            image_pred = torch.cat(
                (image_pred[:, :5], max_score, max_index.float()),
                1)  # [10647, 7]

            non_zero_ind = (torch.nonzero(image_pred[:, 4])).view(-1)

            if non_zero_ind.size(0) == 0:
                continue

            image_pred_ = image_pred[non_zero_ind, :]
            img_classes = torch.unique(image_pred_[:, -1])

            objects, img_preds = [], []
            name = self.this_img_names[index].split("/")[-1]

            for c in img_classes:
                image_pred_class = image_pred_[image_pred_[:, -1] == c]

                _, conf_sort_index = torch.sort(image_pred_class[:, 4],
                                                descending=True)
                image_pred_class = image_pred_class[conf_sort_index]

                max_detections = []
                while image_pred_class.size(0):
                    max_detections.append(image_pred_class[0].unsqueeze(0))
                    if len(image_pred_class) == 1:
                        break
                    ious = bbox_iou(max_detections[-1], image_pred_class[1:])
                    image_pred_class = image_pred_class[1:][ious < nms_conf]
                img_preds.append(torch.cat(max_detections))
                objects += [
                    self.classes[int(x.squeeze()[-1])] for x in max_detections
                ]

            outputs.append((name, objects))
            img_preds = torch.cat(img_preds, dim=0)

            if self.rebuild:
                self.tensor2img(img_preds, index, name)

        return outputs
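bbox_iou is not defined in this example; a minimal corner-format (x1, y1, x2, y2) sketch matching how it is called above (one kept box against the remaining candidates) could be:

import torch

def bbox_iou(box, boxes):
    # box: [1, >=4], boxes: [M, >=4]; returns the IoU of `box` with each row of `boxes`
    x1 = torch.max(box[:, 0], boxes[:, 0])
    y1 = torch.max(box[:, 1], boxes[:, 1])
    x2 = torch.min(box[:, 2], boxes[:, 2])
    y2 = torch.min(box[:, 3], boxes[:, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area1 = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
    area2 = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area1 + area2 - inter + 1e-16)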
Example #9
def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
    """Performs non-maximum suppression in a batched fashion.
    Modified from https://github.com/pytorch/vision/blob
    /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
    In order to perform NMS independently per class, we add an offset to all
    the boxes. The offset is dependent only on the class idx, and is large
    enough so that boxes from different classes do not overlap.
    Arguments:
        boxes (torch.Tensor): boxes in shape (N, 4).
        scores (torch.Tensor): scores in shape (N, ).
        idxs (torch.Tensor): each index value corresponds to a bbox cluster,
            and NMS will not be applied between elements of different idxs,
            shape (N, ).
        nms_cfg (dict): specify nms type and other parameters like iou_thr.
            Possible keys include the following.
            - iou_thr (float): IoU threshold used for NMS.
            - split_thr (float): threshold number of boxes. In some cases the
                number of boxes is large (e.g., 200k). To avoid OOM during
                training, the users could set `split_thr` to a small value.
                If the number of boxes is greater than the threshold, it will
                perform NMS on each group of boxes separately and sequentially.
                Defaults to 10000.
        class_agnostic (bool): if true, nms is class agnostic,
            i.e. IoU thresholding happens over all boxes,
            regardless of the predicted class.
    Returns:
        tuple: kept dets and indices.
    """
    nms_cfg_ = nms_cfg.copy()
    class_agnostic = nms_cfg_.pop('class_agnostic', class_agnostic)
    if class_agnostic:
        boxes_for_nms = boxes
    else:
        max_coordinate = boxes.max()
        offsets = idxs.to(boxes) * (max_coordinate + 1)
        boxes_for_nms = boxes + offsets[:, None]

    nms_type = nms_cfg_.pop('type', 'nms')
    # nms_op = eval(nms_type)

    split_thr = nms_cfg_.pop('split_thr', 10000)
    if len(boxes_for_nms) < split_thr:
        # dets, keep = nms_op(boxes_for_nms, scores, **nms_cfg_)
        keep = nms(boxes_for_nms, scores, **nms_cfg_)
        boxes = boxes[keep]
        # scores = dets[:, -1]
        scores = scores[keep]
    else:
        total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
        for id in torch.unique(idxs):
            mask = (idxs == id).nonzero(as_tuple=False).view(-1)
            # dets, keep = nms_op(boxes_for_nms[mask], scores[mask], **nms_cfg_)
            keep = nms(boxes_for_nms[mask], scores[mask], **nms_cfg_)
            total_mask[mask[keep]] = True

        keep = total_mask.nonzero(as_tuple=False).view(-1)
        keep = keep[scores[keep].argsort(descending=True)]
        boxes = boxes[keep]
        scores = scores[keep]

    return torch.cat([boxes, scores[:, None]], -1), keep
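A standalone illustration of the class-offset trick described in the docstring, using torchvision's NMS instead of the `nms` helper assumed above (toy boxes):

import torch
from torchvision.ops import nms

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],
                      [0., 0., 10., 10.]])
scores = torch.tensor([0.9, 0.8, 0.7])
idxs = torch.tensor([0, 0, 1])                  # class id of each box

offsets = idxs.to(boxes) * (boxes.max() + 1)    # one large offset per class
keep = nms(boxes + offsets[:, None], scores, iou_threshold=0.5)
# keep == tensor([0, 2]): the two overlapping class-0 boxes suppress each other,
# while the identical class-1 box survives because it was shifted out of the way.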
Example #10
File: tensor.py Project: masknugget/dgl
def unique(input):
    return th.unique(input)
Example #11
def unique(input):
    if input.dtype == th.bool:
        input = input.type(th.int8)
    return th.unique(input)
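A usage sketch of the wrapper above; the int8 cast appears to exist because some older torch.unique builds rejected bool tensors:

import torch as th

mask = th.tensor([True, False, True, True])
print(unique(mask))   # tensor([0, 1], dtype=torch.int8)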
Example #12
def train_one(task, class_names, model, optG, optCLF, args, grad):
    '''
        Train the model on one sampled task.
    '''
    model['G'].train()
    model['clf'].train()

    support, query = task
    # print("support, query:", support, query)
    # print("class_names_dict:", class_names_dict)
    sampled_classes = torch.unique(support['label']).cpu().numpy().tolist()
    # print("sampled_classes:", sampled_classes)

    class_names_dict = {}
    class_names_dict['label'] = class_names['label'][sampled_classes]
    # print("class_names_dict['label']:", class_names_dict['label'])
    class_names_dict['text'] = class_names['text'][sampled_classes]
    class_names_dict['text_len'] = class_names['text_len'][sampled_classes]
    class_names_dict['is_support'] = False
    class_names_dict = utils.to_tensor(class_names_dict,
                                       args.cuda,
                                       exclude_keys=['is_support'])

    # Embedding the document
    XS = model['G'](support)  # XS:[N*K, 256(hidden_size*2)]
    # print("XS:", XS.shape)
    YS = support['label']
    # print('YS:', YS)

    CN = model['G'](class_names_dict)  # CN:[N, 256(hidden_size*2)]]
    # print("CN:", CN.shape)

    XQ = model['G'](query)
    YQ = query['label']
    # print('YQ:', YQ)

    YS, YQ = reidx_y(args, YS, YQ)

    for _ in range(args.train_iter):

        # Embedding the document
        XS_mlp = model['clf'](XS)  # [N*K, 256(hidden_size*2)] -> [N*K, 128]

        CN_mlp = model['clf'](CN)  # [N, 256(hidden_size*2)]] -> [N, 128]

        neg_d = neg_dist(XS_mlp, CN_mlp)  # [N*K, N]
        # print("neg_d:", neg_d.shape)

        mlp_loss = model['clf'].loss(neg_d, YS)
        # print("mlp_loss:", mlp_loss)

        optCLF.zero_grad()
        mlp_loss.backward(retain_graph=True)
        optCLF.step()

    XQ_mlp = model['clf'](XQ)
    CN_mlp = model['clf'](CN)
    neg_d = neg_dist(XQ_mlp, CN_mlp)
    g_loss = model['clf'].loss(neg_d, YQ)

    optG.zero_grad()
    g_loss.backward()
    optG.step()

    _, pred = torch.max(neg_d, 1)
    acc_q = model['clf'].accuracy(pred, YQ)

    # YQ_d = torch.ones(query['label'].shape, dtype=torch.long).to(query['label'].device)
    # print('YQ', set(YQ.numpy()))

    # XSource, XSource_inputD, _ = model['G'](source)
    # YSource_d = torch.zeros(source['label'].shape, dtype=torch.long).to(source['label'].device)

    # XQ_logitsD = model['D'](XQ_inputD)
    # XSource_logitsD = model['D'](XSource_inputD)
    #
    # d_loss = F.cross_entropy(XQ_logitsD, YQ_d) + F.cross_entropy(XSource_logitsD, YSource_d)
    # d_loss.backward(retain_graph=True)
    # grad['D'].append(get_norm(model['D']))
    # optD.step()
    #
    # # *****************update G****************
    # optG.zero_grad()
    # XQ_logitsD = model['D'](XQ_inputD)
    # XSource_logitsD = model['D'](XSource_inputD)
    # d_loss = F.cross_entropy(XQ_logitsD, YQ_d) + F.cross_entropy(XSource_logitsD, YSource_d)
    #
    # acc, d_acc, loss, _ = model['clf'](XS, YS, XQ, YQ, XQ_logitsD, XSource_logitsD, YQ_d, YSource_d)
    #
    # g_loss = loss - d_loss
    # if args.ablation == "-DAN":
    #     g_loss = loss
    #     print("%%%%%%%%%%%%%%%%%%%This is ablation mode: -DAN%%%%%%%%%%%%%%%%%%%%%%%%%%")
    # g_loss.backward(retain_graph=True)
    # grad['G'].append(get_norm(model['G']))
    # grad['clf'].append(get_norm(model['clf']))
    # optG.step()

    return g_loss, acc_q
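neg_dist and reidx_y come from the surrounding project; a minimal sketch of neg_dist consistent with its use above (negative squared Euclidean distance between every embedding row and every class-prototype row, used directly as classification logits) might be:

import torch

def neg_dist(x, y):
    # x: [M, D] embeddings, y: [N, D] class prototypes -> [M, N] logits
    return -torch.cdist(x, y, p=2) ** 2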
Example #13
def unique_class(classes):
    return torch.unique(classes, dim=-1)
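A quick illustration of what dim=-1 does here: torch.unique deduplicates whole slices along the last dimension, e.g. identical columns of a 2-D tensor (toy values):

import torch

classes = torch.tensor([[1, 2, 1],
                        [3, 4, 3]])
print(unique_class(classes))
# tensor([[1, 2],
#         [3, 4]])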
Example #14
def roc_figure(
    y_true: torch.Tensor, y_pred: torch.Tensor
) -> Tuple[Figure, dict]:
    """Draw a receiver operating characteristic curve into a matplotlib figure.

    Returns (figure, auroc)
    """

    _names = ("anomalies",)

    lw = 2
    fig, axs = plt.subplots(
        nrows=2, constrained_layout=True, figsize=(6.4, 9.6)
    )
    aurocs = {}

    x_labels = ("False Positive Rate", "Recall")
    y_labels = ("True Positive Rate", "Precision")
    positions = ("lower right", "lower left")
    titles = ("ROC", "PR")
    fcts = (roc_curve, precision_recall_curve)

    for (ax, x_lbl, y_lbl, pos, title, fct) in zip(
        axs, x_labels, y_labels, positions, titles, fcts
    ):
        aurocs[title] = {}

        for unique in torch.unique(y_true):
            if unique == 0:  # use normal label to test for all anomalies
                _y_true = y_true != 0
                _y_pred = y_pred
                _pos_label = None
            else:
                if len(torch.unique(y_true)) == 2:
                    break
                _y_pred = y_pred[(y_true == unique) ^ (y_true == 0)]
                _y_true = y_true[(y_true == unique) ^ (y_true == 0)]
                _pos_label = unique.numpy()

            if fct is roc_curve:
                fpr, tpr, _ = fct(_y_true, _y_pred, pos_label=_pos_label)
            else:
                # precision_recall_curve returns (precision, recall, thresholds),
                # so the unpacking order is swapped relative to roc_curve
                tpr, fpr, _ = fct(_y_true, _y_pred, pos_label=_pos_label)
            _auroc = auc(fpr, tpr)
            aurocs[title][_names[unique]] = _auroc

            ax.plot(
                fpr,
                tpr,
                color=_COLORS_ROC[unique],
                figure=fig,
                lw=lw,
                label="{} (AUC: {:0.2f})".format(_names[unique], _auroc),
            )
            if fct is roc_curve:
                ax.plot(
                    [0, 1],
                    [0, 1],
                    color="navy",
                    lw=lw,
                    linestyle="--",
                    figure=fig,
                )
            else:
                ax.plot(
                    [0, 1],
                    [1, 0],
                    color="navy",
                    lw=lw,
                    linestyle="--",
                    figure=fig,
                )

        ax.set_xlim([0.0, 1.0])
        ax.set_ylim([0.0, 1.0])
        ax.set_xlabel(x_lbl)
        ax.set_ylabel(y_lbl)
        ax.set_title(title)
        ax.legend(loc=pos)
    return fig, aurocs
Example #15
    def is_coalesced(self):
        """"""
        row, col = self.edge_index
        index = self.num_nodes * row + col
        return row.size(0) == torch.unique(index).size(0)
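A toy check of the linearized-index trick above: each edge (row, col) maps to the scalar num_nodes * row + col, so duplicate edges collapse under torch.unique:

import torch

num_nodes = 4
row = torch.tensor([0, 1, 1, 3])
col = torch.tensor([1, 2, 2, 0])                      # edge (1, 2) appears twice
index = num_nodes * row + col                         # tensor([ 1,  6,  6, 12])
print(row.size(0) == torch.unique(index).size(0))     # False -> edge_index is not coalesced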
Example #16
    def evaluation(self):
        """
        # run inference
        """
        for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
            img = img.to(device, non_blocking=True)
            img = img.half() # uint8 to fp16/32
            img /= 255.0  # 0 - 255 to 0.0 - 1.0
            targets = targets.to(device)
            nb, _, height, width = img.shape  # batch size, channels, height, width

            with torch.no_grad():
                # Run model
                inf_out, train_out = model(img, augment=opt.augment)  # inference and training outputs

                # Run NMS
                targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device)  # to pixels
                lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if opt.save_txt else []  # for autolabelling
                output = non_max_suppression(inf_out, conf_thres=opt.conf_thres, iou_thres=opt.iou_thres, labels=lb)

            # Statistics per image
            for si, pred in enumerate(output):
                labels = targets[targets[:, 0] == si, 1:]
                nl = len(labels)
                tcls = labels[:, 0].tolist() if nl else []  # target class
                path = Path(paths[si])
                seen += 1

                if len(pred) == 0:
                    if nl:
                        stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                    continue

                # Predictions
                predn = pred.clone()
                scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1])  # native-space pred

                # Assign all predictions as incorrect
                correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
                if nl:
                    detected = []  # target indices
                    tcls_tensor = labels[:, 0]

                    # target boxes
                    tbox = xywh2xyxy(labels[:, 1:5])
                    scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1])  # native-space labels

                    # Per target class
                    for cls in torch.unique(tcls_tensor):
                        ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1)  # prediction indices
                        pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1)  # target indices

                        # Search for detections
                        if pi.shape[0]:
                            # Prediction to target ious
                            ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1)  # best ious, indices

                            # Append detections
                            detected_set = set()
                            for j in (ious > iouv[0]).nonzero(as_tuple=False):
                                d = ti[i[j]]  # detected target
                                if d.item() not in detected_set:
                                    detected_set.add(d.item())
                                    detected.append(d)
                                    correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                    if len(detected) == nl:  # all targets already located in image
                                        break

                # Append statistics (correct, conf, pcls, tcls)
                stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))


        # Compute statistics
        stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy

        if len(stats) and stats[0].any():
            p, r, ap, f1, ap_class = ap_per_class(*stats, plot=None, save_dir=None, names=names)
            p, r, f1, ap50, ap = p[:, 0], r[:, 0], f1[:, 0], ap[:, 0], ap.mean(1)  # [P, R, mAP@.5, mAP@.5:.95]
            mp, mr, mf1, map50, map = p.mean(), r.mean(), f1.mean(), ap50.mean(), ap.mean()
            nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
        else:
            nt = torch.zeros(1)

        # Print results
        print(s)
        pf = '%20s' + '%12.3g' * 7  # print format
        print(pf % ('all', seen, nt.sum(), mp, mr, map50, map, mf1))
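xywh2xyxy and scale_coords come from the surrounding YOLOv5-style utilities; a minimal sketch of xywh2xyxy (center-format to corner-format boxes) consistent with its use above:

import torch

def xywh2xyxy(x):
    # [x_center, y_center, w, h] -> [x1, y1, x2, y2]
    y = x.clone()
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y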
Example #17
    def _unique_node_key(self):
        if self._packed_ids is None:
            return self.key[:4]

        uniq_ids = torch.unique(self._packed_ids)
        return (*self.tree._unpack_index(uniq_ids).T, )
Example #18
def uniq(a: Tensor) -> Set:
    return set(torch.unique(a.cpu()).numpy())
Example #19
File: test.py Project: Easy2Ride/yolov5
def test(
        data,
        weights=None,
        batch_size=32,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir=Path(''),  # for saving images
        save_txt=False,  # for auto-labelling
        save_hybrid=False,  # for hybrid auto-labelling
        save_conf=False,  # save auto-label confidences
        plots=True,
        log_imgs=0):  # number of logged images

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(
            increment_path(Path(opt.project) / opt.name,
                           exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(
            parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95,
                          10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # ceil
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       model.stride.max(),
                                       opt,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)

    names = {
        k: v
        for k, v in enumerate(
            model.names if hasattr(model, 'names') else model.module.names)
    }
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:
                loss += compute_loss([x.float() for x in train_out], targets,
                                     model)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width,
                                            height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)
                  ] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         labels=lb)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0],
                         shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0
                                                  ]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh

                    line = (cls, *xywh,
                            conf) if save_conf else (cls,
                                                     *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'),
                              'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{
                    "position": {
                        "minX": xyxy[0],
                        "minY": xyxy[1],
                        "maxX": xyxy[2],
                        "maxY": xyxy[3]
                    },
                    "class_id": int(cls),
                    "box_caption": "%s %.3f" % (names[cls], conf),
                    "scores": {
                        "class_score": conf
                    },
                    "domain": "pixel"
                } for *xyxy, conf, cls in pred.tolist()]
                boxes = {
                    "predictions": {
                        "box_data": box_data,
                        "class_labels": names
                    }
                }  # inference-space
                wandb_images.append(
                    wandb.Image(img[si], boxes=boxes, caption=path.name))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(
                    path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id':
                        image_id,
                        'category_id':
                        coco91class[int(p[5])] if is_coco else int(p[5]),
                        'bbox': [round(x, 3) for x in b],
                        'score':
                        round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0],
                             shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(
                        pred, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(
                        -1)  # prediction indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(
                        -1)  # target indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images,
                   args=(img, targets, paths, f, names),
                   daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images,
                   args=(img, output_to_target(output), paths, f, names),
                   daemon=True).start()

    # W&B logging
    if wandb_images:
        wandb.log({"outputs": wandb_images})

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats,
                                              plot=plots,
                                              save_dir=save_dir,
                                              names=names)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(
            1)  # [P, R, mAP@.5, mAP@.5:.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            wandb.log({"Images": wandb_images})
            wandb.log({
                "Validation": [
                    wandb.Image(str(f), caption=f.name)
                    for f in sorted(save_dir.glob('test*.jpg'))
                ]
            })

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights
                 ).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [
                    int(Path(x).stem) for x in dataloader.dataset.img_files
                ]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:
                                    2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
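For reference, nt above simply counts how many ground-truth objects of each class were seen, padded out to nc entries. A tiny sketch of that np.bincount call with a hypothetical target-class column:

import numpy as np

tcls = np.array([0., 2., 2., 5.])                     # stands in for stats[3]
nt = np.bincount(tcls.astype(np.int64), minlength=8)  # assume nc = 8 classes
print(nt)                                             # [1 0 2 0 0 1 0 0]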
Example #20
File: bench_rgcn.py  Project: merria28/dgl
def track_acc(data):
    # args
    if data == 'aifb':
        num_bases = -1
        l2norm = 0.
    elif data == 'mutag':
        num_bases = 30
        l2norm = 5e-4
    elif data == 'am':
        num_bases = 40
        l2norm = 5e-4
    else:
        raise ValueError(f"unsupported dataset: {data}")

    data = utils.process_data(data)
    device = utils.get_bench_device()

    g = data[0]

    num_rels = len(g.canonical_etypes)
    category = data.predict_category
    num_classes = data.num_classes
    train_mask = g.nodes[category].data.pop('train_mask').bool().to(device)
    test_mask = g.nodes[category].data.pop('test_mask').bool().to(device)
    labels = g.nodes[category].data.pop('labels').to(device)

    # calculate norm for each edge type and store in edge
    for canonical_etype in g.canonical_etypes:
        u, v, eid = g.all_edges(form='all', etype=canonical_etype)
        _, inverse_index, count = torch.unique(v,
                                               return_inverse=True,
                                               return_counts=True)
        degrees = count[inverse_index]
        norm = 1. / degrees.float()
        norm = norm.unsqueeze(1)
        g.edges[canonical_etype].data['norm'] = norm

    # get target category id
    category_id = len(g.ntypes)
    for i, ntype in enumerate(g.ntypes):
        if ntype == category:
            category_id = i

    g = dgl.to_homogeneous(g, edata=['norm']).to(device)
    num_nodes = g.number_of_nodes()
    edge_norm = g.edata['norm']
    edge_type = g.edata[dgl.ETYPE].long()

    # find out the target node ids in g
    target_idx = torch.where(g.ndata[dgl.NTYPE] == category_id)[0]
    train_idx = target_idx[train_mask]
    test_idx = target_idx[test_mask]
    train_labels = labels[train_mask]
    test_labels = labels[test_mask]

    # since the nodes are featureless, the input feature is then the node id.
    feats = torch.arange(num_nodes, device=device)

    # create model
    model = RGCN(num_nodes, 16, num_classes, num_rels, num_bases, 0,
                 0).to(device)

    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=1e-2,
                                 weight_decay=l2norm)

    model.train()
    for epoch in range(30):
        logits = model(g, feats, edge_type, edge_norm)
        loss = F.cross_entropy(logits[train_idx], train_labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    acc = evaluate(model, g, feats, edge_type, edge_norm, test_labels,
                   test_idx)
    return acc
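The edge-norm block above relies on torch.unique with return_inverse and return_counts to obtain the in-degree of each edge's destination node. A minimal standalone sketch of that pattern, on a hypothetical list of destination ids:

import torch

v = torch.tensor([0, 2, 2, 1, 2, 0])  # destination node id of each edge (toy graph)

# unique ids, position of each edge's id in the unique list, and occurrence counts
_, inverse_index, count = torch.unique(v, return_inverse=True, return_counts=True)
degrees = count[inverse_index]                 # in-degree of each edge's destination
norm = (1. / degrees.float()).unsqueeze(1)     # per-edge norm, shape (num_edges, 1)
print(norm.squeeze(1))                         # tensor([0.5000, 0.3333, 0.3333, 1.0000, 0.3333, 0.5000])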
Example #21
    def finalize_hypos(
        self,
        step: int,
        bbsz_idx,
        eos_scores,
        tokens,
        scores,
        finalized: List[List[Dict[str, Tensor]]],
        finished: List[bool],
        beam_size: int,
        attn: Optional[Tensor],
        src_lengths,
        max_len: int,
    ):
        """Finalize hypothesis, store finalized information in `finalized`, and change `finished` accordingly.
        A sentence is finalized when {beam_size} finished items have been collected for it.

        Returns number of sentences (not beam items) being finalized.
        These will be removed from the batch and not processed further.
        Args:
            bbsz_idx (Tensor): flattened (batch * beam) indices of the beam
                items that produced EOS at this step.
        """
        assert bbsz_idx.numel() == eos_scores.numel()

        # clone relevant token and attention tensors.
        # tokens is (batch * beam, max_len). So the index_select
        # gets the newly EOS rows, then selects cols 1..{step + 2}
        tokens_clone = tokens.index_select(
            0, bbsz_idx)[:, 1:step + 2]  # skip the first index, which is EOS

        tokens_clone[:, step] = self.eos
        attn_clone = (attn.index_select(0, bbsz_idx)[:, :, 1:step + 2]
                      if attn is not None else None)

        # compute scores per token position
        pos_scores = scores.index_select(0, bbsz_idx)[:, :step + 1]
        pos_scores[:, step] = eos_scores
        # convert from cumulative to per-position scores
        pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1]

        # normalize sentence-level scores
        if self.normalize_scores:
            eos_scores /= (step + 1)**self.len_penalty

        # cum_unfin records which sentences in the batch are finished.
        # It helps match indexing between (a) the original sentences
        # in the batch and (b) the current, possibly-reduced set of
        # sentences.
        cum_unfin: List[int] = []
        prev = 0
        for f in finished:
            if f:
                prev += 1
            else:
                cum_unfin.append(prev)
        cum_fin_tensor = torch.tensor(cum_unfin, dtype=torch.int).to(bbsz_idx)

        unfin_idx = torch.div(bbsz_idx, beam_size, rounding_mode="trunc")
        sent = unfin_idx + torch.index_select(cum_fin_tensor, 0, unfin_idx)

        # Create a set of "{sent}{unfin_idx}", where
        # "unfin_idx" is the index in the current (possibly reduced)
        # list of sentences, and "sent" is the index in the original,
        # unreduced batch
        # For every finished beam item
        # sentence index in the current (possibly reduced) batch
        seen = (sent << 32) + unfin_idx
        unique_seen: List[int] = torch.unique(seen).tolist()

        if self.match_source_len:
            condition = step > torch.index_select(src_lengths, 0, unfin_idx)
            eos_scores = torch.where(condition, torch.tensor(-math.inf),
                                     eos_scores)
        sent_list: List[int] = sent.tolist()
        for i in range(bbsz_idx.size()[0]):
            # An input sentence (among those in a batch) is finished when
            # beam_size hypotheses have been collected for it
            if len(finalized[sent_list[i]]) < beam_size:
                if attn_clone is not None:
                    # remove padding tokens from attn scores
                    hypo_attn = attn_clone[i]
                else:
                    hypo_attn = torch.empty(0)

                finalized[sent_list[i]].append({
                    "tokens":
                    tokens_clone[i],
                    "score":
                    eos_scores[i],
                    "attention":
                    hypo_attn,  # src_len x tgt_len
                    "alignment":
                    torch.empty(0),
                    "positional_scores":
                    pos_scores[i],
                })

        newly_finished: List[int] = []
        for unique_s in unique_seen:
            # check termination conditions for this sentence
            unique_sent: int = unique_s >> 32
            unique_unfin_idx: int = unique_s - (unique_sent << 32)

            if not finished[unique_sent] and self.is_finished(
                    step, unique_unfin_idx, max_len, len(
                        finalized[unique_sent]), beam_size):
                finished[unique_sent] = True
                newly_finished.append(unique_unfin_idx)

        return newly_finished
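The "(sent << 32) + unfin_idx" trick above packs two small indices into a single int64 so that torch.unique can deduplicate (sentence, batch-slot) pairs in one call. A minimal sketch of the round trip, with hypothetical index tensors:

import torch

sent = torch.tensor([3, 3, 7, 7, 7])         # index in the original, unreduced batch
unfin_idx = torch.tensor([0, 0, 1, 1, 1])    # index in the current, reduced batch

seen = (sent << 32) + unfin_idx              # pack both indices into one int64 key
for key in torch.unique(seen).tolist():
    s = key >> 32                            # recover the original sentence index
    u = key - (s << 32)                      # recover the reduced-batch index
    print(s, u)                              # prints 3 0 and 7 1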
Example #22
def match_label_crop(initial_masks, labels_crop, out_label_crop, rois, depth_crop):
    num = labels_crop.shape[0]
    for i in range(num):
        mask_ids = torch.unique(labels_crop[i])
        for index, mask_id in enumerate(mask_ids):
            mask = (labels_crop[i] == mask_id).float()
            overlap = mask * out_label_crop[i]
            percentage = torch.sum(overlap) / torch.sum(mask)
            if percentage < 0.5:
                labels_crop[i][labels_crop[i] == mask_id] = -1

    # sort the local labels
    sorted_ids = []
    for i in range(num):
        if depth_crop is not None:
            if torch.sum(labels_crop[i] > -1) > 0:
                roi_depth = depth_crop[i, 2][labels_crop[i] > -1]
            else:
                roi_depth = depth_crop[i, 2]
            avg_depth = torch.mean(roi_depth[roi_depth > 0])
            sorted_ids.append((i, avg_depth))
        else:
            x_min = rois[i, 0]
            y_min = rois[i, 1]
            x_max = rois[i, 2]
            y_max = rois[i, 3]
            orig_H = y_max - y_min + 1
            orig_W = x_max - x_min + 1
            roi_size = orig_H * orig_W
            sorted_ids.append((i, roi_size))

    sorted_ids = sorted(sorted_ids, key=lambda x : x[1], reverse=True)
    sorted_ids = [x[0] for x in sorted_ids]

    # combine the local labels
    refined_masks = torch.zeros_like(initial_masks).float()
    count = 0
    for index in sorted_ids:

        mask_ids = torch.unique(labels_crop[index])
        if mask_ids[0] == -1:
            mask_ids = mask_ids[1:]

        # mapping
        label_crop = torch.zeros_like(labels_crop[index])
        for mask_id in mask_ids:
            count += 1
            label_crop[labels_crop[index] == mask_id] = count

        # resize back to original size
        x_min = int(rois[index, 0].item())
        y_min = int(rois[index, 1].item())
        x_max = int(rois[index, 2].item())
        y_max = int(rois[index, 3].item())
        orig_H = int(y_max - y_min + 1)
        orig_W = int(x_max - x_min + 1)
        mask = label_crop.unsqueeze(0).unsqueeze(0).float()
        resized_mask = F.interpolate(mask, size=(orig_H, orig_W), mode='nearest')[0, 0]

        # Set refined mask
        h_idx, w_idx = torch.nonzero(resized_mask).t()
        refined_masks[0, y_min:y_max+1, x_min:x_max+1][h_idx, w_idx] = resized_mask[h_idx, w_idx].cpu()

    return refined_masks, labels_crop
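The combination loop above keeps only the surviving mask ids returned by torch.unique and relabels them to consecutive integers. The core remapping pattern, on a hypothetical 2-D label map that uses -1 as the ignore label:

import torch

labels = torch.tensor([[-1, 4, 4],
                       [ 9, 9, -1]])

mask_ids = torch.unique(labels)          # sorted: tensor([-1, 4, 9])
if mask_ids[0] == -1:                    # drop the ignore label if present
    mask_ids = mask_ids[1:]

relabeled = torch.zeros_like(labels)
for new_id, mask_id in enumerate(mask_ids, start=1):
    relabeled[labels == mask_id] = new_id
print(relabeled)                         # tensor([[0, 1, 1], [2, 2, 0]])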
Example #23
    def forward(self,
                input_,
                target,
                edge_ids=None,
                weights=None,
                *args,
                **kwargs):
        """
        Args:
             input_ (torch.tensor): embeddings predicted by the network (NxExDxHxW) (E - embedding dims)
                                    expects float32 tensor
             target (torch.tensor): ground truth instance segmentation (NxDxHxW)
                                    expects int64 tensor

        Returns:
            Combined loss defined as: alpha * variance_term + beta * distance_term + gamma * regularization_term
        """
        input_ = input_[:, :, None]
        n_batches = input_.shape[0]
        # compute the loss per each instance in the batch separately
        # and sum it up in the per_instance variable
        per_instance_loss = 0.
        for idx, (single_input,
                  single_target) in enumerate(zip(input_, target)):
            # add singleton batch dimension required for further computation
            if weights is None:
                ew = None
                eid = None
            else:
                ew = weights[idx]
                eid = edge_ids[idx]
            single_input = single_input.unsqueeze(0)
            single_target = single_target.unsqueeze(0)

            # get number of instances in the batch instance
            instances = torch.unique(single_target)
            assert check_consecutive(instances)
            C = instances.size()[0]

            # SPATIAL = D X H X W in 3d case, H X W in 2d case
            # expand each label as a one-hot vector: N x SPATIAL -> N x C x SPATIAL
            single_target = expand_as_one_hot(single_target, C)

            # compare spatial dimensions
            assert single_input.dim() in (4, 5)
            assert single_input.dim() == single_target.dim()
            assert single_input.size()[2:] == single_target.size()[2:]
            spatial_dims = single_input.dim() - 2

            # compute mean embeddings and assign embeddings to instances
            cluster_means, embeddings_per_instance = self._compute_cluster_means(
                single_input, single_target, spatial_dims)
            variance_term = self._compute_variance_term(
                cluster_means, embeddings_per_instance, single_target,
                spatial_dims)
            distance_term = self._compute_distance_term(
                cluster_means, eid, ew, C, spatial_dims)
            # regularization_term = self._compute_regularizer_term(cluster_means, C, spatial_dims)
            # compute total loss and sum it up
            loss = self.alpha * variance_term + self.beta * distance_term  # + self.gamma * regularization_term
            per_instance_loss += loss

        # reduce across the batch dimension
        return per_instance_loss.div(n_batches)
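Here torch.unique is only used to count the instances present in a single ground-truth map before expanding it to one-hot form. A minimal sketch of that step, using torch.nn.functional.one_hot in place of the expand_as_one_hot helper assumed by the code above:

import torch
import torch.nn.functional as F

target = torch.tensor([[[0, 0, 1],
                        [2, 2, 1]]])     # hypothetical instance map, N x H x W

instances = torch.unique(target)         # tensor([0, 1, 2]); must be consecutive
C = instances.size(0)                    # number of instances, background included

one_hot = F.one_hot(target, num_classes=C).permute(0, 3, 1, 2).float()
print(one_hot.shape)                     # torch.Size([1, 3, 2, 3]) = N x C x H x W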
Example #24
        def run_test(device, dtype):
            x = torch.tensor([[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
                              [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                             dtype=dtype,
                             device=device)
            x_empty = torch.empty(5, 0, dtype=dtype, device=device)
            x_ill_formed_empty = torch.empty(5,
                                             0,
                                             0,
                                             dtype=dtype,
                                             device=device)
            x_ill_formed_empty_another = torch.empty(5,
                                                     0,
                                                     5,
                                                     dtype=dtype,
                                                     device=device)
            expected_unique_dim0 = torch.tensor(
                [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                dtype=dtype,
                device=device)
            expected_inverse_dim0 = torch.tensor([0, 0])
            expected_counts_dim0 = torch.tensor([2])
            expected_unique_dim1 = torch.tensor(
                [[[0., 1.], [1., 1.], [2., 1.]], [[0., 1.], [1., 1.], [2., 1.]]
                 ],
                dtype=dtype,
                device=device)
            expected_unique_dim1_bool = torch.tensor(
                [[[False, True], [True, True]], [[False, True], [True, True]]],
                dtype=torch.bool,
                device=device)
            expected_inverse_dim1 = torch.tensor([1, 0, 2, 0])
            expected_inverse_dim1_bool = torch.tensor([1, 0, 1, 0])
            expected_counts_dim1 = torch.tensor([2, 1, 1])
            expected_counts_dim1_bool = torch.tensor([2, 2])
            expected_unique_dim2 = torch.tensor(
                [[[1., 1.], [0., 1.], [2., 1.], [0., 1.]],
                 [[1., 1.], [0., 1.], [2., 1.], [0., 1.]]],
                dtype=dtype,
                device=device)
            expected_inverse_dim2 = torch.tensor([0, 1])
            expected_counts_dim2 = torch.tensor([1, 1])
            expected_unique_empty = torch.tensor([],
                                                 dtype=dtype,
                                                 device=device)
            expected_inverse_empty = torch.tensor([],
                                                  dtype=torch.long,
                                                  device=device)
            expected_counts_empty = torch.tensor([],
                                                 dtype=torch.long,
                                                 device=device)
            # dim0
            x_unique = torch.unique(x, dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)

            x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_inverse_dim0, x_inverse)

            x_unique, x_counts = torch.unique(x,
                                              return_inverse=False,
                                              return_counts=True,
                                              dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_counts_dim0, x_counts)

            x_unique, x_inverse, x_counts = torch.unique(x,
                                                         return_inverse=True,
                                                         return_counts=True,
                                                         dim=0)
            self.assertEqual(expected_unique_dim0, x_unique)
            self.assertEqual(expected_inverse_dim0, x_inverse)
            self.assertEqual(expected_counts_dim0, x_counts)

            # dim1
            x_unique = torch.unique(x, dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)

            x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_inverse_dim1_bool, x_inverse)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_inverse_dim1, x_inverse)

            x_unique, x_counts = torch.unique(x,
                                              return_inverse=False,
                                              return_counts=True,
                                              dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_counts_dim1_bool, x_counts)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_counts_dim1, x_counts)

            x_unique, x_inverse, x_counts = torch.unique(x,
                                                         return_inverse=True,
                                                         return_counts=True,
                                                         dim=1)
            if x.dtype == torch.bool:
                self.assertEqual(expected_unique_dim1_bool, x_unique)
                self.assertEqual(expected_inverse_dim1_bool, x_inverse)
                self.assertEqual(expected_counts_dim1_bool, x_counts)
            else:
                self.assertEqual(expected_unique_dim1, x_unique)
                self.assertEqual(expected_inverse_dim1, x_inverse)
                self.assertEqual(expected_counts_dim1, x_counts)

            # dim2
            x_unique = torch.unique(x, dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)

            x_unique, x_inverse = torch.unique(x, return_inverse=True, dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_inverse_dim2, x_inverse)

            x_unique, x_counts = torch.unique(x,
                                              return_inverse=False,
                                              return_counts=True,
                                              dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_counts_dim2, x_counts)

            x_unique, x_inverse, x_counts = torch.unique(x,
                                                         return_inverse=True,
                                                         return_counts=True,
                                                         dim=2)
            self.assertEqual(expected_unique_dim2, x_unique)
            self.assertEqual(expected_inverse_dim2, x_inverse)
            self.assertEqual(expected_counts_dim2, x_counts)

            # test empty tensor
            x_unique, x_inverse, x_counts = torch.unique(x_empty,
                                                         return_inverse=True,
                                                         return_counts=True,
                                                         dim=1)
            self.assertEqual(expected_unique_empty, x_unique)
            self.assertEqual(expected_inverse_empty, x_inverse)
            self.assertEqual(expected_counts_empty, x_counts)

            # test not a well formed tensor
            # Checking for runtime error, as this is the expected behaviour
            with self.assertRaises(RuntimeError):
                torch.unique(x_ill_formed_empty,
                             return_inverse=True,
                             return_counts=True,
                             dim=1)

            # test along dim2
            with self.assertRaises(RuntimeError):
                torch.unique(x_ill_formed_empty_another,
                             return_inverse=True,
                             return_counts=True,
                             dim=2)

            # test consecutive version
            y = torch.tensor([[0, 1], [0, 1], [0, 1], [1, 2], [1, 2], [3, 4],
                              [0, 1], [0, 1], [3, 4], [1, 2]],
                             dtype=dtype,
                             device=device)
            expected_y_unique = torch.tensor(
                [[0, 1], [1, 2], [3, 4], [0, 1], [3, 4], [1, 2]],
                dtype=dtype,
                device=device)
            expected_y_inverse = torch.tensor([0, 0, 0, 1, 1, 2, 3, 3, 4, 5],
                                              dtype=torch.int64,
                                              device=device)
            expected_y_counts = torch.tensor([3, 2, 1, 2, 1, 1],
                                             dtype=torch.int64,
                                             device=device)
            expected_y_inverse_bool = torch.tensor(
                [0, 0, 0, 1, 1, 1, 2, 2, 3, 3],
                dtype=torch.int64,
                device=device)
            expected_y_counts_bool = torch.tensor([3, 3, 2, 2],
                                                  dtype=torch.int64,
                                                  device=device)
            y_unique, y_inverse, y_counts = torch.unique_consecutive(
                y, return_inverse=True, return_counts=True, dim=0)
            if x.dtype == torch.bool:
                self.assertEqual(expected_y_inverse_bool, y_inverse)
                self.assertEqual(expected_y_counts_bool, y_counts)
            else:
                self.assertEqual(expected_y_inverse, y_inverse)
                self.assertEqual(expected_y_counts, y_counts)
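The dim argument exercised throughout this test treats every slice along that dimension as one element, so torch.unique(x, dim=0) removes duplicate rows rather than duplicate scalars. A short sketch of the difference:

import torch

x = torch.tensor([[1, 2, 3],
                  [1, 2, 3],
                  [4, 5, 6]])

print(torch.unique(x))          # tensor([1, 2, 3, 4, 5, 6])        element-wise
print(torch.unique(x, dim=0))   # tensor([[1, 2, 3], [4, 5, 6]])    duplicate rows removed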
Example #25
def computePrologator(A, SUBNODES_PG):
    dim = A.size(1)
    SUBNODES = int(dim * SUBNODES_PG)
    S = computeStrongConnections(A, 0.25)
    F = np.array([], dtype='int')
    F = torch.from_numpy(F).cuda()
    C = np.array([], dtype='int')
    C = torch.from_numpy(C).cuda()
    U = torch.ones(dim, 1).cuda()

    #neighbour matrix
    N = (A != 0).cuda()
    N = N * (torch.eye(dim) < 1).cuda()
    #Compute lambdas according to the formula
    lambdas = computeLambdas(F, U, S)

    #SUBSPACE COARSENING
    for iteration in range(0, SUBNODES):
        index = torch.where(lambdas == torch.max(lambdas))[1][0]
        if (iteration == 0):
            C = index.unsqueeze(0)
        else:
            C = torch.cat((C, index.unsqueeze(0)))
        U[C] = 0
        concatenate1 = torch.where(S[index, :] == 1)[0]
        if (iteration == 0):
            F = concatenate1
        else:
            F = torch.unique(
                torch.cat((F.unsqueeze(0), concatenate1.unsqueeze(0)),
                          1)).cuda()
        U[F] = 0
        lambdas = computeLambdas(F, U, S)

    #Compute the Prolongation Operator
    Prol = torch.zeros(dim, dim)

    #Cnodes Mask
    CNodes = torch.zeros(dim, dim).cuda()
    CNodes[C, :] = 1
    CNodes[:, C] = 1

    #Compute P (as in the paper) as an element-wise AND between CNodes and
    #neighbour nodes
    P = torch.logical_and(N, CNodes).cuda()

    dotProduct1 = A * N
    secondProduct2 = A * P

    #Compute weights of the Prolongation operator for fine nodes
    for i in F:
        Js = torch.where(P[i, :] == 1)[0]

        for j in Js:
            firstDot = torch.sum(dotProduct1[i, :])
            secondDot = torch.sum(secondProduct2[i, :])
            Prol[i, j] = -(firstDot / secondDot) * (A[i, j] / A[i, i])

    #Compute weights of the Prolongation operator for coarse nodes
    for i in C:
        Prol[i, i] = A[i, i]

    #Keep only the columns that correspond to the coarse nodes

    Prol = Prol[:, C]

    Prol = Prol.type(torch.DoubleTensor)
    return Prol
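The coarsening loop above grows the fine-node index set F by concatenating the new indices with the old ones and calling torch.unique, i.e. a sorted set union on index tensors. The pattern in isolation, with hypothetical CPU tensors:

import torch

F_idx = torch.tensor([1, 4, 7])
new_idx = torch.tensor([4, 2, 9])

F_idx = torch.unique(torch.cat((F_idx, new_idx)))   # union, sorted, duplicates removed
print(F_idx)                                        # tensor([1, 2, 4, 7, 9])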
Example #26
    def test_unique(self, device, dtype):
        if dtype is torch.half and self.device_type == 'cpu':
            return  # CPU does not have half support

        def ensure_tuple(x):
            if isinstance(x, torch.Tensor):
                return (x, )
            return x

        if dtype is torch.bool:
            x = torch.tensor(
                [True, False, False, False, True, False, True, False],
                dtype=torch.bool,
                device=device)
            expected_unique = torch.tensor([False, True],
                                           dtype=torch.bool,
                                           device=device)
            expected_inverse = torch.tensor([1, 0, 0, 0, 1, 0, 1, 0],
                                            dtype=torch.long,
                                            device=device)
            expected_counts = torch.tensor([5, 3],
                                           dtype=torch.long,
                                           device=device)
        else:
            x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3],
                             dtype=dtype,
                             device=device)
            expected_unique = torch.tensor([1, 2, 3, 5, 8],
                                           dtype=dtype,
                                           device=device)
            expected_inverse = torch.tensor([0, 1, 2, 1, 4, 3, 1, 2],
                                            device=device)
            expected_counts = torch.tensor([1, 3, 2, 1, 1], device=device)

        # test sorted unique
        fs = [
            lambda x, **kwargs: torch.unique(x, sorted=True, **kwargs),
            lambda x, **kwargs: x.unique(sorted=True, **kwargs),
        ]
        for f in fs:
            self._test_unique_with_expects(device, dtype, f, x,
                                           expected_unique, expected_inverse,
                                           expected_counts, (2, 2, 2))
            self._test_unique_scalar_empty(dtype, device, f)

        # test unsorted unique
        fs = [
            lambda x, **kwargs: torch.unique(x, sorted=False, **kwargs),
            lambda x, **kwargs: x.unique(sorted=False, **kwargs)
        ]
        for f in fs:
            self._test_unique_scalar_empty(dtype, device, f)
            for return_inverse in [True, False]:
                for return_counts in [True, False]:
                    ret = ensure_tuple(
                        f(x,
                          return_inverse=return_inverse,
                          return_counts=return_counts))
                    self.assertEqual(
                        len(ret), 1 + int(return_inverse) + int(return_counts))
                    x_list = x.tolist()
                    x_unique_list = ret[0].tolist()
                    self.assertEqual(expected_unique.tolist(),
                                     sorted(x_unique_list))
                    if return_inverse:
                        x_inverse_list = ret[1].tolist()
                        for i, j in enumerate(x_inverse_list):
                            self.assertEqual(x_list[i], x_unique_list[j])
                    if return_counts:
                        count_index = 1 + int(return_inverse)
                        x_counts_list = ret[count_index].tolist()
                        for i, j in zip(x_unique_list, x_counts_list):
                            count = 0
                            for k in x_list:
                                if k == i:
                                    count += 1
                            self.assertEqual(j, count)
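The checks above boil down to the documented contract between the three outputs: indexing the unique values with the inverse indices reconstructs the input, and the counts sum to its length. A compact sketch of that invariant, with the same example values:

import torch

x = torch.tensor([1, 2, 3, 2, 8, 5, 2, 3])
uniq, inverse, counts = torch.unique(x, sorted=True,
                                     return_inverse=True, return_counts=True)

assert torch.equal(uniq[inverse], x)        # inverse maps every element back to its value
assert counts.sum().item() == x.numel()     # counts cover the whole input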
Example #27
    def test_unique(self):
        x = torch.randint(3, (2, 3, 4, 5)).float()
        self.assertONNX(lambda x: torch.unique(
            x, dim=0, sorted=True, return_inverse=False, return_counts=True),
                        x,
                        opset_version=11)
Example #28
    def train(self):
        writer = SummaryWriter(logdir=self.log_dir)
        device = "cuda:0"
        wu_cfg = self.cfg.fe.trainer
        model = UNet2D(**self.cfg.fe.backbone)
        model.cuda(device)
        # train_set = SpgDset(self.cfg.gen.data_dir_raw_train, patch_manager="no_cross", patch_stride=(10,10), patch_shape=(300,300), reorder_sp=True)
        # val_set = SpgDset(self.cfg.gen.data_dir_raw_val, patch_manager="no_cross", patch_stride=(10,10), patch_shape=(300,300), reorder_sp=True)
        train_set = SpgDset(self.cfg.gen.data_dir_raw_train, reorder_sp=True)
        val_set = SpgDset(self.cfg.gen.data_dir_raw_val, reorder_sp=True)
        # pm = StridedPatches2D(wu_cfg.patch_stride, wu_cfg.patch_shape, train_set.image_shape)
        pm = NoPatches2D()
        train_set.length = len(train_set.graph_file_names) * np.prod(
            pm.n_patch_per_dim)
        train_set.n_patch_per_dim = pm.n_patch_per_dim
        val_set.length = len(val_set.graph_file_names)
        gauss_kernel = GaussianSmoothing(1, 5, 3, device=device)
        # dset = LeptinDset(self.cfg.gen.data_dir_raw, self.cfg.gen.data_dir_affs, wu_cfg.patch_manager, wu_cfg.patch_stride, wu_cfg.patch_shape, wu_cfg.reorder_sp)
        train_loader = DataLoader(train_set,
                                  batch_size=wu_cfg.batch_size,
                                  shuffle=True,
                                  pin_memory=True,
                                  num_workers=0)
        val_loader = DataLoader(val_set,
                                batch_size=wu_cfg.batch_size,
                                shuffle=True,
                                pin_memory=True,
                                num_workers=0)
        optimizer = torch.optim.Adam(model.parameters(), lr=self.cfg.fe.lr)
        sheduler = ReduceLROnPlateau(optimizer,
                                     patience=80,
                                     threshold=1e-4,
                                     min_lr=1e-8,
                                     factor=0.1)
        slcs = [
            slice(None, self.cfg.fe.embeddings_separator),
            slice(self.cfg.fe.embeddings_separator, None)
        ]
        criterion = RegRagContrastiveWeights(delta_var=0.1,
                                             delta_dist=0.3,
                                             slices=slcs)
        acc_loss = 0
        valit = 0
        iteration = 0
        best_loss = np.inf

        while iteration <= wu_cfg.n_iterations:
            for it, (raw, gt, sp_seg, affinities, offs,
                     indices) in enumerate(train_loader):
                raw, gt, sp_seg, affinities = raw.to(device), gt.to(
                    device), sp_seg.to(device), affinities.to(device)
                sp_seg = sp_seg + 1
                edge_img = F.pad(get_contour_from_2d_binary(sp_seg),
                                 (2, 2, 2, 2),
                                 mode='constant')
                edge_img = gauss_kernel(edge_img.float())
                all = torch.cat([raw, gt, sp_seg, edge_img], dim=1)

                angle = float(torch.randint(-180, 180, (1, )).item())
                rot_all = tvF.rotate(all, angle, PIL.Image.NEAREST)
                rot_raw = rot_all[:, :1]
                rot_gt = rot_all[:, 1:2]
                rot_sp = rot_all[:, 2:3]
                rot_edge_img = rot_all[:, 3:]
                angle = abs(angle / 180)
                valid_sp = []
                for i in range(len(rot_sp)):
                    _valid_sp = torch.unique(rot_sp[i], sorted=True)
                    _valid_sp = _valid_sp[1:] if _valid_sp[
                        0] == 0 else _valid_sp
                    if len(_valid_sp) > self.cfg.gen.sp_samples_per_step:
                        inds = torch.multinomial(
                            torch.ones_like(_valid_sp),
                            self.cfg.gen.sp_samples_per_step,
                            replacement=False)
                        _valid_sp = _valid_sp[inds]
                    valid_sp.append(_valid_sp)

                _rot_sp, _sp_seg = [], []
                for val_sp, rsp, sp in zip(valid_sp, rot_sp, sp_seg):
                    mask = rsp == val_sp[:, None, None]
                    _rot_sp.append((mask * (torch.arange(
                        len(val_sp), device=rsp.device)[:, None, None] + 1)
                                    ).sum(0))
                    mask = sp == val_sp[:, None, None]
                    _sp_seg.append((mask * (torch.arange(
                        len(val_sp), device=sp.device)[:, None, None] + 1)
                                    ).sum(0))

                rot_sp = torch.stack(_rot_sp)
                sp_seg = torch.stack(_sp_seg)
                valid_sp = [
                    torch.unique(_rot_sp, sorted=True) for _rot_sp in rot_sp
                ]
                valid_sp = [
                    _valid_sp[1:] if _valid_sp[0] == 0 else _valid_sp
                    for _valid_sp in valid_sp
                ]

                inp = torch.cat([
                    torch.cat([raw, edge_img], 1),
                    torch.cat([rot_raw, rot_edge_img], 1)
                ], 0)
                offs = offs.numpy().tolist()
                edge_feat, edges = tuple(
                    zip(*[
                        get_edge_features_1d(seg.squeeze().cpu().numpy(), os,
                                             affs.squeeze().cpu().numpy())
                        for seg, os, affs in zip(sp_seg, offs, affinities)
                    ]))
                edges = [
                    torch.from_numpy(e.astype(np.int64)).to(device).T
                    for e in edges
                ]
                edge_weights = [
                    torch.from_numpy(ew.astype(np.float32)).to(device)[:,
                                                                       0][None]
                    for ew in edge_feat
                ]
                valid_edges_masks = [
                    (_edges[None] == _valid_sp[:, None,
                                               None]).sum(0).sum(0) == 2
                    for _valid_sp, _edges in zip(valid_sp, edges)
                ]
                edges = [
                    _edges[:, valid_edges_mask] - 1
                    for _edges, valid_edges_mask in zip(
                        edges, valid_edges_masks)
                ]
                edge_weights = [
                    _edge_weights[:, valid_edges_mask]
                    for _edge_weights, valid_edges_mask in zip(
                        edge_weights, valid_edges_masks)
                ]

                # put embeddings on unit sphere so we can use cosine distance
                loss_embeds = model(inp[:, :, None]).squeeze(2)
                loss_embeds = criterion.norm_each_space(loss_embeds, 1)

                loss = criterion(loss_embeds,
                                 sp_seg.long(),
                                 rot_sp.long(),
                                 edges,
                                 edge_weights,
                                 valid_sp,
                                 angle,
                                 chunks=int(sp_seg.max().item() //
                                            self.cfg.gen.train_chunk_size))

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                print(f"step {iteration}: {loss.item()}")
                writer.add_scalar("fe_train/lr",
                                  optimizer.param_groups[0]['lr'], iteration)
                writer.add_scalar("fe_train/loss", loss.item(), iteration)
                if (iteration) % 100 == 0:
                    with torch.set_grad_enabled(False):
                        model.eval()
                        print("####start validation####")
                        for it, (raw, gt, sp_seg, affinities, offs,
                                 indices) in enumerate(val_loader):
                            raw, gt, sp_seg, affinities = raw.to(
                                device), gt.to(device), sp_seg.to(
                                    device), affinities.to(device)
                            sp_seg = sp_seg + 1
                            edge_img = F.pad(
                                get_contour_from_2d_binary(sp_seg),
                                (2, 2, 2, 2),
                                mode='constant')
                            edge_img = gauss_kernel(edge_img.float())
                            all = torch.cat([raw, gt, sp_seg, edge_img], dim=1)

                            angle = float(
                                torch.randint(-180, 180, (1, )).item())
                            rot_all = tvF.rotate(all, angle, PIL.Image.NEAREST)
                            rot_raw = rot_all[:, :1]
                            rot_gt = rot_all[:, 1:2]
                            rot_sp = rot_all[:, 2:3]
                            rot_edge_img = rot_all[:, 3:]
                            angle = abs(angle / 180)
                            valid_sp = [
                                torch.unique(_rot_sp, sorted=True)
                                for _rot_sp in rot_sp
                            ]
                            valid_sp = [
                                _valid_sp[1:]
                                if _valid_sp[0] == 0 else _valid_sp
                                for _valid_sp in valid_sp
                            ]

                            _rot_sp, _sp_seg = [], []
                            for val_sp, rsp, sp in zip(valid_sp, rot_sp,
                                                       sp_seg):
                                mask = rsp == val_sp[:, None, None]
                                _rot_sp.append((mask * (torch.arange(
                                    len(val_sp), device=rsp.device)[:, None,
                                                                    None] + 1)
                                                ).sum(0))
                                mask = sp == val_sp[:, None, None]
                                _sp_seg.append((mask * (torch.arange(
                                    len(val_sp), device=sp.device)[:, None,
                                                                   None] + 1)
                                                ).sum(0))

                            rot_sp = torch.stack(_rot_sp)
                            sp_seg = torch.stack(_sp_seg)
                            valid_sp = [
                                torch.unique(_rot_sp, sorted=True)
                                for _rot_sp in rot_sp
                            ]
                            valid_sp = [
                                _valid_sp[1:]
                                if _valid_sp[0] == 0 else _valid_sp
                                for _valid_sp in valid_sp
                            ]

                            inp = torch.cat([
                                torch.cat([raw, edge_img], 1),
                                torch.cat([rot_raw, rot_edge_img], 1)
                            ], 0)
                            offs = offs.numpy().tolist()
                            edge_feat, edges = tuple(
                                zip(*[
                                    get_edge_features_1d(
                                        seg.squeeze().cpu().numpy(), os,
                                        affs.squeeze().cpu().numpy())
                                    for seg, os, affs in zip(
                                        sp_seg, offs, affinities)
                                ]))
                            edges = [
                                torch.from_numpy(e.astype(
                                    np.int64)).to(device).T for e in edges
                            ]
                            edge_weights = [
                                torch.from_numpy(ew.astype(
                                    np.float32)).to(device)[:, 0][None]
                                for ew in edge_feat
                            ]
                            valid_edges_masks = [
                                (_edges[None] == _valid_sp[:, None, None]
                                 ).sum(0).sum(0) == 2
                                for _valid_sp, _edges in zip(valid_sp, edges)
                            ]
                            edges = [
                                _edges[:, valid_edges_mask] - 1
                                for _edges, valid_edges_mask in zip(
                                    edges, valid_edges_masks)
                            ]
                            edge_weights = [
                                _edge_weights[:, valid_edges_mask]
                                for _edge_weights, valid_edges_mask in zip(
                                    edge_weights, valid_edges_masks)
                            ]

                            # put embeddings on unit sphere so we can use cosine distance
                            embeds = model(inp[:, :, None]).squeeze(2)
                            embeds = criterion.norm_each_space(embeds, 1)

                            ls = criterion(
                                embeds,
                                sp_seg.long(),
                                rot_sp.long(),
                                edges,
                                edge_weights,
                                valid_sp,
                                angle,
                                chunks=int(sp_seg.max().item() //
                                           self.cfg.gen.train_chunk_size))

                            acc_loss += ls
                            writer.add_scalar("fe_val/loss", ls, valit)
                            print(f"step {it}: {ls.item()}")
                            valit += 1

                    acc_loss = acc_loss / len(val_loader)
                    if acc_loss < best_loss:
                        print(self.save_dir)
                        torch.save(
                            model.state_dict(),
                            os.path.join(self.save_dir, "best_val_model.pth"))
                        best_loss = acc_loss
                    sheduler.step(acc_loss)
                    acc_loss = 0
                    fig, ((a1, a2), (a3, a4)) = plt.subplots(2,
                                                             2,
                                                             sharex='col',
                                                             sharey='row',
                                                             gridspec_kw={
                                                                 'hspace': 0,
                                                                 'wspace': 0
                                                             })
                    a1.imshow(raw[0].cpu().permute(1, 2, 0).squeeze())
                    a1.set_title('raw')
                    a2.imshow(
                        cm.prism(sp_seg[0].cpu().squeeze() /
                                 sp_seg[0].cpu().squeeze().max()))
                    a2.set_title('sp')
                    a3.imshow(pca_project(embeds[0, slcs[0]].detach().cpu()))
                    a3.set_title('embed', y=-0.01)
                    a4.imshow(pca_project(embeds[0, slcs[1]].detach().cpu()))
                    a4.set_title('embed rot', y=-0.01)
                    plt.show()
                    writer.add_figure("examples", fig, iteration // 100)
                    # model.train()
                    print("####end validation####")
                iteration += 1
                if iteration > wu_cfg.n_iterations:
                    print(self.save_dir)
                    torch.save(model.state_dict(),
                               os.path.join(self.save_dir, "last_model.pth"))
                    break
        return
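Both the training and the validation branch above collect the superpixel ids that survive rotation with torch.unique(sorted=True), drop the background id 0, and optionally subsample them. The id-extraction step on its own, with a hypothetical rotated superpixel map:

import torch

rot_sp = torch.tensor([[0., 0., 3.],
                       [5., 5., 3.]])            # 0 marks pixels introduced by the rotation

valid_sp = torch.unique(rot_sp, sorted=True)     # tensor([0., 3., 5.])
valid_sp = valid_sp[1:] if valid_sp[0] == 0 else valid_sp

k = 1                                            # stands in for cfg.gen.sp_samples_per_step
if len(valid_sp) > k:
    inds = torch.multinomial(torch.ones_like(valid_sp), k, replacement=False)
    valid_sp = valid_sp[inds]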
Example #29
def test(
        data,
        weights=None,
        batch_size=16,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir='',
        merge=False,
        save_txt=False):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        device = select_device(opt.device, batch_size=batch_size)
        merge, save_txt = opt.merge, opt.save_txt  # use Merge NMS, save *.txt labels
        if save_txt:
            out = Path('inference/output')
            if os.path.exists(out):
                shutil.rmtree(out)  # delete output folder
            os.makedirs(out)  # make new output folder

        # Remove previous
        for f in glob.glob(str(Path(save_dir) / 'test_batch*.jpg')):
            os.remove(f)

        # Load model
        model = Darknet(opt.cfg).to(device)

        # load model
        try:
            ckpt = torch.load(weights[0],
                              map_location=device)  # load checkpoint
            ckpt['model'] = {
                k: v
                for k, v in ckpt['model'].items()
                if model.state_dict()[k].numel() == v.numel()
            }
            model.load_state_dict(ckpt['model'], strict=False)
        except:
            load_darknet_weights(model, weights[0])
        imgsz = check_img_size(imgsz, s=32)  # check img_size

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
        _ = model(img.half() if half else img
                  ) if device.type != 'cpu' else None  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        dataloader = create_dataloader(path,
                                       imgsz,
                                       batch_size,
                                       32,
                                       opt,
                                       hyp=None,
                                       augment=False,
                                       cache=False,
                                       pad=0.5,
                                       rect=True)[0]

    seen = 0
    try:
        names = model.names if hasattr(model, 'names') else model.module.names
    except:
        names = load_classes(opt.names)
    coco91class = coco80_to_coco91_class()
    coco91class = [*coco91class, 79, 80]
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss([x.float() for x in train_out], targets,
                                     model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         merge=merge)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0
                                                  ]]  # normalization gain whwh
                txt_path = str(out / Path(paths[si]).stem)
                pred[:, :4] = scale_coords(img[si].shape[1:], pred[:, :4],
                                           shapes[si][0],
                                           shapes[si][1])  # to original
                for *xyxy, conf, cls in pred:
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh
                    with open(txt_path + '.txt', 'a') as f:
                        f.write(
                            ('%g ' * 5 + '\n') % (cls, *xywh))  # label format

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = Path(paths[si]).stem
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id':
                        int(image_id) if image_id.isnumeric() else image_id,
                        'category_id':
                        coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score':
                        round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(
                        -1)  # target indices for this class
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(
                        -1)  # prediction indices for this class

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if batch_i < 1:
            f = Path(save_dir) / ('test_batch%g_gt.jpg' % batch_i)  # filename
            plot_images(img, targets, paths, str(f), names)  # ground truth
            f = Path(save_dir) / ('test_batch%g_pred.jpg' % batch_i)
            plot_images(img, output_to_target(output, width, height), paths,
                        str(f), names)  # predictions

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, mAP@.5, mAP@.5:.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Save JSON
    if save_json and len(jdict):
        fjson = 'detections_val2017_%s_results.json' % \
            (weights.split(os.sep)[-1].replace('.pt', '') if isinstance(weights, str) else '')  # filename
        print('\nCOCO mAP with pycocotools... saving %s...' % fjson)

        with open('data/light_coco/annotations/instances_val2017.json',
                  'r') as f:
            images = json.loads(f.read())['images']
        ying = {}
        for img in images:
            ying[int(img['file_name'].split('.')[0])] = img['id']
        # print(ying)
        for d in jdict:
            d['image_id'] = ying[d['image_id']]

        with open(fjson, 'w') as file:
            json.dump(jdict, file)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files]
            cocoGt = COCO(
                glob.glob('data/light_coco/annotations/instances_val*.json')
                [0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes(fjson)  # initialize COCO pred api
            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # image IDs to evaluate
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print('ERROR: pycocotools unable to run: %s' % e)

    # Return results
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
Example #30
0
    x_dev).to(device)
# In this case we don't set x_train.requires_grad = True because the embedding layer acts as a trainable look-up table:
# the output of the embedding layer has grad_fn=EmbeddingBackward, so the embedding weights are updated by the gradient
# computed through this function during the backward pass, instead of the usual grad_fn=AddmmBackward. The difference is
# that a look-up is more efficient than a matrix multiplication, because model.embedding.weight holds one row per id in
# the vocabulary; with a one-hot-encoded input vector instead of a token id, all the remaining multiplications would be
# by zeros. Mathematically, however, it is the exact same weight update.
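# --- Illustration (a minimal sketch added for clarity; not part of the original script) ---
# It demonstrates the claim above: an nn.Embedding lookup gives the same result as multiplying a
# one-hot row vector by the embedding weight matrix, so the gradient (and hence the weight update)
# is identical; the lookup is simply cheaper. Imports are repeated so the snippet stands alone.
import torch
import torch.nn as nn

_emb = nn.Embedding(num_embeddings=5, embedding_dim=3)   # small toy vocabulary
_token_id = torch.tensor([2])                             # one token id
_one_hot = nn.functional.one_hot(_token_id, num_classes=5).float()
assert torch.allclose(_emb(_token_id), _one_hot @ _emb.weight)  # lookup == one-hot matmul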

# %% -------------------------------------- Training Prep ----------------------------------------------------------
model = MLP(len(token_ids), args.n_neurons).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
criterion = nn.CrossEntropyLoss()

# %% -------------------------------------- Training Loop ----------------------------------------------------------
labels_distrib = torch.unique(y_dev, return_counts=True)
print("The no information rate is {:.2f}%".format(
    100 * labels_distrib[1].max().item() / len(y_dev)))
if args.train:
    acc_dev_best = 0
    print("Starting training loop...")
    for epoch in range(args.n_epochs):

        loss_train = 0
        model.train()
        for batch in range(len(x_train) // args.batch_size + 1):
            inds = slice(batch * args.batch_size,
                         (batch + 1) * args.batch_size)
            optimizer.zero_grad()
            logits = model(x_train[inds])
            loss = criterion(logits, y_train[inds])
Example #31
0
    def fit(self, X_l, X_u, patClassId):
        """
        X_l          Input data lower bounds (rows = objects, columns = features)
        X_u          Input data upper bounds (rows = objects, columns = features)
        patClassId  Input data class labels (crisp)
        """

        if self.isNorm == True:
            X_l, X_u = self.dataPreprocessing(X_l, X_u)

        if isinstance(X_l, torch.Tensor) == False:
            X_l = torch.from_numpy(X_l).float()
            X_u = torch.from_numpy(X_u).float()
            patClassId = torch.from_numpy(patClassId).long()

        time_start = time.perf_counter()

        isUsingGPU = False
        if is_Have_GPU and X_l.size(0) * X_l.size(1) >= GPU_Computing_Threshold:
            self.V = X_l.cuda()
            self.W = X_u.cuda()
            self.classId = patClassId.cuda()
            isUsingGPU = True
        else:
            self.V = X_l
            self.W = X_u
            self.classId = patClassId

        # yX, xX = X_l.size()


#        if len(self.cardin) == 0 or len(self.clusters) == 0:
#            self.cardin = np.ones(yX)
#            self.clusters = np.empty(yX, dtype=object)
#            for i in range(yX):
#                self.clusters[i] = np.array([i], dtype = np.int32)
#
        if self.isDraw:
            mark_col = np.array(['r', 'g', 'b', 'y', 'c', 'm', 'k'])
            drawing_canvas = self.initializeCanvasGraph(
                "GFMM - AGGLO-SM-Fast version")

            # plot initial hyperbox
            Vt, Wt = self.pcatransform()
            color_ = np.empty(len(self.classId), dtype=object)
            for c in range(len(self.classId)):
                color_[c] = mark_col[self.classId[c]]
            drawbox(Vt, Wt, drawing_canvas, color_)
            self.delay()

        # training
        isTraining = True
        while isTraining:
            isTraining = False

            # calculate class masks
            yX, xX = self.V.size()
            labList = torch.unique(
                self.classId[self.classId != UNLABELED_CLASS])
            if isUsingGPU == False:
                clMask = torch.zeros((yX, len(labList)), dtype=torch.uint8)
            else:
                clMask = torch.cuda.ByteTensor(yX, len(labList)).fill_(0)

            for i in range(len(labList)):
                clMask[:,
                       i] = (self.classId == labList[i]) | (self.classId
                                                            == UNLABELED_CLASS)

            # calculate pairwise memberships *ONLY* within each class (faster!)
            if isUsingGPU == False:
                b = torch.zeros((yX, yX))
            else:
                b = torch.cuda.FloatTensor(yX, yX).fill_(0)

            if isUsingGPU:
                els = torch.arange(len(labList)).cuda()
            else:
                els = torch.arange(len(labList))

            for i in els:
                Vi = self.V[
                    clMask[:, i]]  # get bounds of patterns with class label i
                Wi = self.W[clMask[:, i]]
                clSize = torch.sum(
                    clMask[:, i])  # get number of patterns of class i
                clIdxs = torch.nonzero(
                    clMask[:, i]
                )[:,
                  0]  # get position of patterns with class label i in the training set

                if self.simil == 'short':
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j],
                              clIdxs] = torch_memberG(Wi[j], Vi[j], Vi, Wi,
                                                      self.gamma, self.oper)
                        else:
                            b[clIdxs[j],
                              clIdxs] = gpu_memberG(Wi[j], Vi[j], Vi, Wi,
                                                    self.gamma, self.oper)
                elif self.simil == 'long':
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j],
                              clIdxs] = torch_memberG(Vi[j], Wi[j], Wi, Vi,
                                                      self.gamma, self.oper)
                        else:
                            b[clIdxs[j],
                              clIdxs] = gpu_memberG(Vi[j], Wi[j], Wi, Vi,
                                                    self.gamma, self.oper)
                else:
                    for j in range(clSize):
                        if isUsingGPU == False:
                            b[clIdxs[j],
                              clIdxs] = torch_memberG(Vi[j], Wi[j], Vi, Wi,
                                                      self.gamma, self.oper)
                        else:
                            b[clIdxs[j],
                              clIdxs] = gpu_memberG(Vi[j], Wi[j], Vi, Wi,
                                                    self.gamma, self.oper)

            if yX == 1:
                maxb = torch.FloatTensor([])
            else:
                maxb = self.torch_splitSimilarityMaxtrix(
                    b, self.sing, False, isUsingGPU)
                if len(maxb) > 0:
                    maxb = maxb[(maxb[:, 2] >= self.bthres), :]

                    if len(maxb) > 0:
                        # sort maxb in descending order of the last column
                        values, idx_smaxb = torch.sort(maxb[:, 2],
                                                       descending=True)
                        maxb = torch.cat((maxb[idx_smaxb, 0].reshape(
                            -1, 1), maxb[idx_smaxb, 1].reshape(
                                -1, 1), maxb[idx_smaxb, 2].reshape(-1, 1)),
                                         dim=1)
                        #maxb = maxb[idx_smaxb]

            while len(maxb) > 0:
                curmaxb = maxb[0, :]  # current position handling

                # calculate new coordinates of curmaxb(0)-th hyperbox by including curmaxb(1)-th box, scrap the latter and leave the rest intact
                newV = torch.cat(
                    (self.V[0:curmaxb[0].long(), :],
                     torch.min(self.V[curmaxb[0].long(), :],
                               self.V[curmaxb[1].long(), :]).reshape(1, -1),
                     self.V[curmaxb[0].long() + 1:curmaxb[1].long(), :],
                     self.V[curmaxb[1].long() + 1:, :]),
                    dim=0)
                newW = torch.cat(
                    (self.W[0:curmaxb[0].long(), :],
                     torch.max(self.W[curmaxb[0].long(), :],
                               self.W[curmaxb[1].long(), :]).reshape(1, -1),
                     self.W[curmaxb[0].long() + 1:curmaxb[1].long(), :],
                     self.W[curmaxb[1].long() + 1:, :]),
                    dim=0)
                newClassId = torch.cat((self.classId[0:curmaxb[1].long()],
                                        self.classId[curmaxb[1].long() + 1:]))
                if (newClassId[curmaxb[0].long()] == UNLABELED_CLASS):
                    newClassId[curmaxb[0].long()] = newClassId[
                        curmaxb[1].long()]
                #print('Type newV = ', newV.type())
                # adjust the hyperbox if no overlap and maximum hyperbox size is not violated
                if ((((newW[curmaxb[0].long()] - newV[curmaxb[0].long()]) <=
                      self.teta).all() == True) and
                    (not torch_modifiedIsOverlap(newV, newW, curmaxb[0].long(),
                                                 newClassId, isUsingGPU))):
                    isTraining = True
                    self.V = newV
                    self.W = newW
                    self.classId = newClassId

                    #                    self.cardin[int(curmaxb[0])] = self.cardin[int(curmaxb[0])] + self.cardin[int(curmaxb[1])]
                    #                    self.cardin = np.append(self.cardin[0:int(curmaxb[1])], self.cardin[int(curmaxb[1]) + 1:])
                    #
                    #                    self.clusters[int(curmaxb[0])] = np.append(self.clusters[int(curmaxb[0])], self.clusters[int(curmaxb[1])])
                    #                    self.clusters = np.append(self.clusters[0:int(curmaxb[1])], self.clusters[int(curmaxb[1]) + 1:])
                    #
                    # remove the joined pair from the list, as well as any pair with lower membership that involves either of the joined boxes
                    mask = (maxb[:, 0] != curmaxb[0]) & (
                        maxb[:, 1] != curmaxb[0]
                    ) & (maxb[:, 0] != curmaxb[1]) & (
                        maxb[:, 1] != curmaxb[1]) & (maxb[:, 2] >= curmaxb[2])
                    maxb = maxb[mask, :]

                    # update indices to accommodate the removed hyperbox:
                    # indices of V and W larger than curmaxb[1] are decreased by 1, since the hyperbox at position curmaxb[1] was removed
                    if len(maxb) > 0:
                        maxb[maxb[:, 0] > curmaxb[1],
                             0] = maxb[maxb[:, 0] > curmaxb[1], 0] - 1
                        maxb[maxb[:, 1] > curmaxb[1],
                             1] = maxb[maxb[:, 1] > curmaxb[1], 1] - 1

                    if self.isDraw:
                        Vt, Wt = self.pcatransform()
                        color_ = np.empty(len(self.classId), dtype=object)
                        for c in range(len(self.classId)):
                            color_[c] = mark_col[self.classId[c]]
                        drawing_canvas.cla()
                        drawbox(Vt, Wt, drawing_canvas, color_)
                        self.delay()
                else:
                    maxb = maxb[1:, :]  # scrap examined pair from the list

            if isTraining == True and isUsingGPU == True and self.V.size(
                    0) * self.V.size(1) < GPU_Computing_Threshold:
                isUsingGPU = False
                self.V = self.V.cpu()
                self.W = self.W.cpu()
                self.classId = self.classId.cpu()

        time_end = time.perf_counter()
        self.elapsed_training_time = time_end - time_start

        return self
Example #32
0
def main(args, devices):
    # load graph data
    ogb_dataset = False
    if args.dataset == 'aifb':
        dataset = AIFBDataset()
    elif args.dataset == 'mutag':
        dataset = MUTAGDataset()
    elif args.dataset == 'bgs':
        dataset = BGSDataset()
    elif args.dataset == 'am':
        dataset = AMDataset()
    elif args.dataset == 'ogbn-mag':
        dataset = DglNodePropPredDataset(name=args.dataset)
        ogb_dataset = True
    else:
        raise ValueError('Unknown dataset: {}'.format(args.dataset))

    if ogb_dataset is True:
        split_idx = dataset.get_idx_split()
        train_idx = split_idx["train"]['paper']
        val_idx = split_idx["valid"]['paper']
        test_idx = split_idx["test"]['paper']
        hg_orig, labels = dataset[0]
        subgs = {}
        for etype in hg_orig.canonical_etypes:
            u, v = hg_orig.all_edges(etype=etype)
            subgs[etype] = (u, v)
            subgs[(etype[2], 'rev-' + etype[1], etype[0])] = (v, u)
        hg = dgl.heterograph(subgs)
        hg.nodes['paper'].data['feat'] = hg_orig.nodes['paper'].data['feat']
        labels = labels['paper'].squeeze()

        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        num_classes = dataset.num_classes
        if args.dataset == 'ogbn-mag':
            category = 'paper'
        print('Number of relations: {}'.format(num_rels))
        print('Number of class: {}'.format(num_classes))
        print('Number of train: {}'.format(len(train_idx)))
        print('Number of valid: {}'.format(len(val_idx)))
        print('Number of test: {}'.format(len(test_idx)))

        if args.node_feats:
            node_feats = []
            for ntype in hg.ntypes:
                if len(hg.nodes[ntype].data) == 0:
                    node_feats.append(None)
                else:
                    assert len(hg.nodes[ntype].data) == 1
                    feat = hg.nodes[ntype].data.pop('feat')
                    node_feats.append(feat.share_memory_())
        else:
            node_feats = [None] * num_of_ntype
    else:
        # Load from hetero-graph
        hg = dataset[0]

        num_rels = len(hg.canonical_etypes)
        num_of_ntype = len(hg.ntypes)
        category = dataset.predict_category
        num_classes = dataset.num_classes
        train_mask = hg.nodes[category].data.pop('train_mask')
        test_mask = hg.nodes[category].data.pop('test_mask')
        labels = hg.nodes[category].data.pop('labels')
        train_idx = th.nonzero(train_mask, as_tuple=False).squeeze()
        test_idx = th.nonzero(test_mask, as_tuple=False).squeeze()
        node_feats = [None] * num_of_ntype

        # AIFB, MUTAG, BGS and AM datasets do not provide validation set split.
        # Split train set into train and validation if args.validation is set
        # otherwise use train set as the validation set.
        if args.validation:
            val_idx = train_idx[:len(train_idx) // 5]
            train_idx = train_idx[len(train_idx) // 5:]
        else:
            val_idx = train_idx

    # calculate norm for each edge type and store in edge
    if args.global_norm is False:
        for canonical_etype in hg.canonical_etypes:
            u, v, eid = hg.all_edges(form='all', etype=canonical_etype)
            _, inverse_index, count = th.unique(v,
                                                return_inverse=True,
                                                return_counts=True)
            degrees = count[inverse_index]
            norm = th.ones(eid.shape[0]) / degrees
            norm = norm.unsqueeze(1)
            hg.edges[canonical_etype].data['norm'] = norm

    # get target category id
    category_id = len(hg.ntypes)
    for i, ntype in enumerate(hg.ntypes):
        if ntype == category:
            category_id = i

    g = dgl.to_homogeneous(hg, edata=['norm'])
    if args.global_norm:
        u, v, eid = g.all_edges(form='all')
        _, inverse_index, count = th.unique(v,
                                            return_inverse=True,
                                            return_counts=True)
        degrees = count[inverse_index]
        norm = th.ones(eid.shape[0]) / degrees
        norm = norm.unsqueeze(1)
        g.edata['norm'] = norm

    g.ndata[dgl.NTYPE].share_memory_()
    g.edata[dgl.ETYPE].share_memory_()
    g.edata['norm'].share_memory_()
    node_ids = th.arange(g.number_of_nodes())

    # find out the target node ids
    node_tids = g.ndata[dgl.NTYPE]
    loc = (node_tids == category_id)
    target_idx = node_ids[loc]
    target_idx.share_memory_()
    train_idx.share_memory_()
    val_idx.share_memory_()
    test_idx.share_memory_()
    # Create csr/coo/csc formats before launching training processes with multi-gpu.
    # This avoids creating certain formats in each sub-process, which saves memory and CPU.
    g.create_formats_()

    n_gpus = len(devices)
    # cpu
    if devices[0] == -1:
        run(0, 0, args, ['cpu'],
            (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
             train_idx, val_idx, test_idx, labels), None, None)
    # gpu
    elif n_gpus == 1:
        run(0, n_gpus, args, devices,
            (g, node_feats, num_of_ntype, num_classes, num_rels, target_idx,
             train_idx, val_idx, test_idx, labels), None, None)
    # multi gpu
    else:
        queue = mp.Queue(n_gpus)
        procs = []
        num_train_seeds = train_idx.shape[0]
        num_valid_seeds = val_idx.shape[0]
        num_test_seeds = test_idx.shape[0]
        train_seeds = th.randperm(num_train_seeds)
        valid_seeds = th.randperm(num_valid_seeds)
        test_seeds = th.randperm(num_test_seeds)
        tseeds_per_proc = num_train_seeds // n_gpus
        vseeds_per_proc = num_valid_seeds // n_gpus
        tstseeds_per_proc = num_test_seeds // n_gpus
        for proc_id in range(n_gpus):
            # we have multi-gpu for training, evaluation and testing
            # so split the train, valid and test sets into num-of-gpu parts.
            proc_train_seeds = train_seeds[proc_id * tseeds_per_proc :
                                           (proc_id + 1) * tseeds_per_proc \
                                           if (proc_id + 1) * tseeds_per_proc < num_train_seeds \
                                           else num_train_seeds]
            proc_valid_seeds = valid_seeds[proc_id * vseeds_per_proc :
                                           (proc_id + 1) * vseeds_per_proc \
                                           if (proc_id + 1) * vseeds_per_proc < num_valid_seeds \
                                           else num_valid_seeds]
            proc_test_seeds = test_seeds[proc_id * tstseeds_per_proc :
                                         (proc_id + 1) * tstseeds_per_proc \
                                         if (proc_id + 1) * tstseeds_per_proc < num_test_seeds \
                                         else num_test_seeds]
            p = mp.Process(target=run,
                           args=(proc_id, n_gpus, args, devices,
                                 (g, node_feats, num_of_ntype, num_classes,
                                  num_rels, target_idx, train_idx, val_idx,
                                  test_idx, labels), (proc_train_seeds,
                                                      proc_valid_seeds,
                                                      proc_test_seeds), queue))
            p.start()
            procs.append(p)
        for p in procs:
            p.join()