Example #1
def k_core(f, ft, gnx):
    start = timer.start(ft, 'K-Core')
    result = nx.core_number(gnx)
    timer.stop(ft, start)
    for k in result:
        f.writelines(str(k) + ',' + str(result[k]) + '\n')
    return result
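For reference, nx.core_number returns a dict mapping each node to its core number (the largest k such that the node belongs to the k-core). A minimal stand-alone sketch, plain networkx with no timer or output file:

import networkx as nx

g = nx.Graph([(0, 1), (0, 2), (1, 2), (2, 3)])  # a triangle with one pendant node
print(nx.core_number(g))  # {0: 2, 1: 2, 2: 2, 3: 1}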
Example #2
def louvainCommunityDetection(f, ft, gnx):
    start = timer.start(ft, 'Louvain')
    bp = community.best_partition(gnx)
    comSizeBp = getCommunitySize(gnx, bp)
    timer.stop(ft, start)
    writeTofile(comSizeBp, f)
    return comSizeBp
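getCommunitySize and writeTofile are helpers not shown here. Assuming the python-louvain package (imported as community), a rough sketch of computing a partition and its community sizes:

import networkx as nx
import community  # python-louvain

g = nx.karate_club_graph()
partition = community.best_partition(g)          # node -> community id
sizes = {}
for node, com in partition.items():
    sizes[com] = sizes.get(com, 0) + 1
print(sizes)                                     # community id -> member count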
Example #3
def page_rank(gnx, f, ft):
    start = timer.start(ft, 'Page Rank')
    page_rank_values = nx.pagerank(gnx, alpha=0.9)
    timer.stop(ft, start)

    for k in page_rank_values.keys():
        f.writelines(str(k) + ',' + str(page_rank_values[k]) + '\n')
    return page_rank_values
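A minimal stand-alone sketch of the same nx.pagerank call (no timer or output file), just to show the shape of the result:

import networkx as nx

g = nx.DiGraph([(0, 1), (1, 2), (2, 0), (2, 1)])
ranks = nx.pagerank(g, alpha=0.9)   # node -> PageRank score; scores sum to 1
print(ranks)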
Example #4
def hierarchy_energy(gnx, f, ft):
    start = timer.start(ft, 'hierarchyEnergy')
    hierarchyEnergy_list, vet_index = calculate_hierarchyEnergy_index(gnx)
    timer.stop(ft, start)
    # write the results to file
    for n in range(0, len(vet_index)):
        f.writelines(
            str(vet_index[n]) + ',' + str(hierarchyEnergy_list[n][0]) + '\n')
    return hierarchyEnergy_list
Example #5
def attractor_basin(gnx, f, ft):
    if not gnx.is_directed():
        return
    start = timer.start(ft, 'Attractor Basin')
    attractor_dict = calc_attractor_basin(gnx)
    timer.stop(ft, start)
    for k in attractor_dict.keys():
        f.writelines(str(k) + ',' + str(attractor_dict[k]) + '\n')
    return attractor_dict
Example #6
def general_information_undirected(gnx, f, ft):
    degrees = []
    start = timer.start(ft, 'General information')
    nodes = gnx.nodes()
    for n in nodes:
        degrees.append([n, gnx.degree(n)])
    timer.stop(ft, start)
    for degree in degrees:
        f.writelines(str(degree[0]) + ',' + str(degree[1]) + '\n')
    map_degree = {}
    for degree in degrees:
        map_degree[degree[0]] = [degree[1]]
    return map_degree
Example #7
def bfs_distance_distribution(f, ft, gnx):
    start = timer.start(ft, 'BFS distance distribution')
    bfs_dist = calc_bfs_dist(gnx)
    dist_moments = {}
    for key in bfs_dist.keys():
        lst = []
        lst.append(float(np.mean(bfs_dist[key])))
        lst.append(float(np.std(bfs_dist[key])))
        dist_moments[key] = lst
    timer.stop(ft, start)
    write_bfs_moments_to_file(dist_moments, f)
    return dist_moments
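calc_bfs_dist is a helper not shown here. Assuming it returns, for each node, the list of BFS distances to the other reachable nodes, a rough networkx-based sketch of the same first two moments:

import networkx as nx
import numpy as np

g = nx.path_graph(4)  # 0-1-2-3
dist_moments = {}
for node in g.nodes():
    lengths = nx.single_source_shortest_path_length(g, node)
    dists = [d for target, d in lengths.items() if target != node]
    dist_moments[node] = [float(np.mean(dists)), float(np.std(dists))]
print(dist_moments)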
Example #8
def flow_mesure(f, ft, gnx):

    start = timer.start(ft, 'Flow Measure')

    flow_map = calculate_flow_index(gnx)

    timer.stop(ft, start)

    for n in flow_map:
        f.writelines(str(n)+','+str(flow_map[n]) + '\n')

    return flow_map
Example #9
def find_all_motifs(f, ft, ggt, motifs_number):
    motifs_veriations = get_motif_veriation_list(motifs_number)

    start = timer.start(ft, 'Find Motifs ' + str(motifs_number) + ' ')
    result = gt.clustering.motifs(ggt,
                                  motif_list=motifs_veriations,
                                  k=motifs_number,
                                  return_maps=True)
    timer.stop(ft, start)

    return parse_motif_result(f, ft, ggt, motifs_number, result,
                              motifs_veriations)
Example #10
def find_all_circuits(f, ft, ggt):
    start = timer.start(ft, 'Find Cycles')
    circuits = graph_tool.topology.all_circuits(ggt)
    timer.stop(ft, start)
    for c in circuits:
        first = True
        for v in c:
            if first:
                f.writelines('[' + str(ggt.vp.id[v]))
                first = False
            else:
                f.writelines(',' + str(ggt.vp.id[v]))
        f.writelines(']\n')
Example #11
def general_information_directed(gnx, f, ft):
    out_deg = {}
    in_deg = {}
    start = timer.start(ft, 'General information')
    nodes = gnx.nodes()
    for n in nodes:
        out_deg[n] = gnx.out_degree(n)
        in_deg[n] = gnx.in_degree(n)
    timer.stop(ft, start)
    for n in nodes:
        f.writelines(str(n) + ',' + str(in_deg[n]) + ',' + str(out_deg[n]) + '\n')
    map_degree = {}
    for n in nodes:
        map_degree[n] = [in_deg[n], out_deg[n]]
    return map_degree
Example #12
def betweenness_centrality(ggt, f, ft, normalized=False):
    b_prop = ggt.new_vertex_property('float')
    ggt.vp.bc = b_prop

    start = timer.start(ft, 'Betweenness Centrality')
    graph_tool.centrality.betweenness(ggt, vprop=b_prop, norm=normalized)
    timer.stop(ft, start)

    for v in ggt.vertices():
        f.writelines(str(ggt.vp.id[v]) + ',' + str(ggt.vp.bc[v]) + '\n')

Example #13
def mask_iou(mask1, mask2, iscrowd=False):
    """
    Inputs are matrices of size _ x N. Output is size _1 x _2.
    Note: if iscrowd is True, then mask2 should be the crowd.
    """
    timer.start('Mask IoU')

    intersection = torch.matmul(mask1, mask2.t())
    area1 = torch.sum(mask1, dim=1).view(1, -1)
    area2 = torch.sum(mask2, dim=1).view(1, -1)
    union = (area1.t() + area2) - intersection

    if iscrowd:
        # Make sure to broadcast to the right dimension
        ret = intersection / area1.t()
    else:
        ret = intersection / union
    timer.stop('Mask IoU')
    return ret.cpu()
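The matmul computes all pairwise intersections at once: for flattened binary masks, the dot product of two rows counts the pixels covered by both. A minimal stand-alone sketch of the same arithmetic (no timer, tiny hand-made masks):

import torch

mask1 = torch.tensor([[1., 1., 0., 0.],
                      [0., 1., 1., 1.]])   # 2 masks over 4 pixels
mask2 = torch.tensor([[1., 0., 0., 0.],
                      [0., 1., 1., 0.]])   # 2 masks over 4 pixels

intersection = torch.matmul(mask1, mask2.t())   # [2, 2] pairwise pixel overlaps
area1 = torch.sum(mask1, dim=1).view(-1, 1)     # areas of mask1 as a column
area2 = torch.sum(mask2, dim=1).view(1, -1)     # areas of mask2 as a row
union = area1 + area2 - intersection
print(intersection / union)                     # pairwise IoU matrix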
Example #14
def find_all_motifs(f, ft, gnx, motif_path, motifs_number=3):
    gnx_copy = gnx.copy()
    start = timer.start(ft, 'Find Motifs ' + str(motifs_number) + ' ')

    if motifs_number == 3:
        motifsHist = find_motifs_3(gnx_copy, motif_path)
    elif motifs_number == 4:
        motifsHist = find_motifs_4(gnx_copy, motif_path)
    else:
        raise ValueError('motifs_number must be 3 or 4')

    timer.stop(ft, start)

    print('start write to file:  ' + str(datetime.now()))
    for i in motifsHist:
        line = str(i)
        for h in motifsHist[i]:
            line = line + ',' + str(h)
        f.writelines(line + '\n')
    print('finish write to file:  ' + str(datetime.now()))

    return motifsHist
Example #15
def prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, image_id, make_json, cocoapi):
    """ Returns a list of APs for this image, with each element being for a class  """

    with timer.env('After NMS'):
        class_ids, classes, boxes, masks = after_nms(nms_outs, h, w)

        if class_ids.size(0) == 0:
            return

        class_ids = list(class_ids.cpu().numpy().astype(int))
        classes = list(classes.cpu().numpy().astype(float))
        masks = masks.view(-1, h * w).cuda() if cuda else masks.view(-1, h * w)
        boxes = boxes.cuda() if cuda else boxes

    if cocoapi:
        with timer.env('Output json'):
            boxes = boxes.cpu().numpy()
            masks = masks.view(-1, h, w).cpu().numpy()

            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] - boxes[i, 0]) > 0:
                    make_json.add_bbox(image_id, class_ids[i], boxes[i, :], classes[i])
                    make_json.add_mask(image_id, class_ids[i], masks[i, :, :], classes[i])
        return

    with timer.env('Prepare gt'):
        gt_boxes = torch.Tensor(gt[:, :4])
        gt_boxes[:, [0, 2]] *= w
        gt_boxes[:, [1, 3]] *= h
        gt_classes = list(gt[:, 4].astype(int))
        gt_masks = torch.Tensor(gt_masks).view(-1, h * w)

        if num_crowd > 0:
            split = lambda x: (x[-num_crowd:], x[:-num_crowd])
            crowd_boxes, gt_boxes = split(gt_boxes)
            crowd_masks, gt_masks = split(gt_masks)
            crowd_classes, gt_classes = split(gt_classes)

    with timer.env('Eval Setup'):
        num_pred = len(class_ids)
        num_gt = len(gt_classes)

        mask_iou_cache = mask_iou(masks, gt_masks)
        bbox_iou_cache = bbox_iou(boxes.float(), gt_boxes.float())

        if num_crowd > 0:
            crowd_mask_iou_cache = mask_iou(masks, crowd_masks, iscrowd=True)
            crowd_bbox_iou_cache = bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None

        iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(), lambda i, j: crowd_bbox_iou_cache[i, j].item()),
                     ('mask', lambda i, j: mask_iou_cache[i, j].item(), lambda i, j: crowd_mask_iou_cache[i, j].item())]

    timer.start('Main loop')
    for _class in set(class_ids + gt_classes):
        num_gt_for_class = sum([1 for x in gt_classes if x == _class])

        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]

            for iou_type, iou_func, crowd_func in iou_types:
                gt_used = [False] * len(gt_classes)
                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)

                for i in range(num_pred):
                    if class_ids[i] != _class:
                        continue

                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue

                        iou = iou_func(i, j)

                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j

                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(classes[i], True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False

                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue

                                iou = crowd_func(i, j)

                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break

                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(classes[i], False)
    timer.stop('Main loop')
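The main loop relies on the per-class accumulators stored in ap_data (add_gt_positives and push); their implementation is not shown above. As a rough sketch of what such an accumulator could look like (a hypothetical SimpleAP class, not the project's own object), using the plain precision-recall definition of average precision:

class SimpleAP:
    def __init__(self):
        self.data_points = []        # (score, is_true_positive) pairs
        self.num_gt_positives = 0

    def add_gt_positives(self, num_positives):
        self.num_gt_positives += num_positives

    def push(self, score, is_true):
        self.data_points.append((score, is_true))

    def get_ap(self):
        if self.num_gt_positives == 0:
            return 0.0
        self.data_points.sort(key=lambda x: -x[0])   # highest score first
        tp = fp = 0
        ap = prev_recall = 0.0
        for _, is_true in self.data_points:
            if is_true:
                tp += 1
            else:
                fp += 1
            precision = tp / float(tp + fp)
            recall = tp / float(self.num_gt_positives)
            ap += precision * (recall - prev_recall)  # area under the PR steps
            prev_recall = recall
        return ap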
Example #16
def prep_metrics(ap_data,
                 dets,
                 img,
                 gt,
                 gt_masks,
                 h,
                 w,
                 num_crowd,
                 image_id,
                 detections: Detections = None):
    """ Returns a list of APs for this image, with each element being for a class  """
    if not args.output_coco_json:
        with timer.env('Prepare gt'):
            gt_boxes = jt.array(gt[:, :4])
            gt_boxes[:, [0, 2]] *= w
            gt_boxes[:, [1, 3]] *= h
            gt_classes = list(gt[:, 4].astype(int))
            gt_masks = jt.array(gt_masks).view(-1, h * w)

            if num_crowd > 0:
                split = lambda x: (x[-num_crowd:], x[:-num_crowd])
                crowd_boxes, gt_boxes = split(gt_boxes)
                crowd_masks, gt_masks = split(gt_masks)
                crowd_classes, gt_classes = split(gt_classes)
    with timer.env('Postprocess'):
        classes, scores, boxes, masks = postprocess(
            dets,
            w,
            h,
            crop_masks=args.crop,
            score_threshold=args.score_threshold)

        if classes.size(0) == 0:
            return

        classes = list(classes.numpy().astype(int))
        if isinstance(scores, list):
            box_scores = list(scores[0].numpy().astype(float))
            mask_scores = list(scores[1].numpy().astype(float))
        else:
            scores = list(scores.numpy().astype(float))
            box_scores = scores
            mask_scores = scores
        masks = masks.view(-1, h * w)

    if args.output_coco_json:
        with timer.env('JSON Output'):
            boxes = boxes.numpy()
            masks = masks.view(-1, h, w).numpy()
            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] -
                                                  boxes[i, 0]) > 0:
                    detections.add_bbox(image_id, classes[i], boxes[i, :],
                                        box_scores[i])
                    detections.add_mask(image_id, classes[i], masks[i, :, :],
                                        mask_scores[i])
            return

    with timer.env('Eval Setup'):
        num_pred = len(classes)
        num_gt = len(gt_classes)

        mask_iou_cache = _mask_iou(masks, gt_masks).numpy()
        bbox_iou_cache = _bbox_iou(boxes.float(), gt_boxes.float()).numpy()
        if num_crowd > 0:
            crowd_mask_iou_cache = _mask_iou(masks, crowd_masks,
                                             iscrowd=True).numpy()
            crowd_bbox_iou_cache = _bbox_iou(boxes.float(),
                                             crowd_boxes.float(),
                                             iscrowd=True).numpy()
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None

        box_indices = sorted(range(num_pred), key=lambda i: -box_scores[i])
        mask_indices = sorted(box_indices, key=lambda i: -mask_scores[i])

        iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(),
                      lambda i, j: crowd_bbox_iou_cache[i, j].item(),
                      lambda i: box_scores[i], box_indices),
                     ('mask', lambda i, j: mask_iou_cache[i, j].item(),
                      lambda i, j: crowd_mask_iou_cache[i, j].item(),
                      lambda i: mask_scores[i], mask_indices)]

    timer.start('Main loop')
    for _class in set(classes + gt_classes):
        ap_per_iou = []
        num_gt_for_class = sum([1 for x in gt_classes if x == _class])

        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]

            for iou_type, iou_func, crowd_func, score_func, indices in iou_types:
                gt_used = [False] * len(gt_classes)

                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)

                for i in indices:
                    if classes[i] != _class:
                        continue

                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue

                        iou = iou_func(i, j)

                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j

                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(score_func(i), True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False

                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue

                                iou = crowd_func(i, j)

                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break

                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(score_func(i), False)
    timer.stop('Main loop')
Example #17
def nms(boxes, scores, overlap=0.5, top_k=200, force_cpu=True):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class pred scores for the img, Shape: [num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors.
    """

    cuda_enabled = boxes.is_cuda

    if force_cpu:
        boxes = boxes.cpu()
        scores = scores.cpu()

    timer.start('NMS')
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        timer.stop()
        return keep, 0
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
        
    if cuda_enabled:
        keep = keep.cuda()

    timer.stop()
    return keep, count
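As a quick cross-check of the greedy loop above, a minimal sketch that runs the equivalent suppression through torchvision.ops.nms (assuming torchvision is installed; it is not used by the code above):

import torch
from torchvision.ops import nms as tv_nms

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 52., 52.],     # heavy overlap with the first box
                      [100., 100., 140., 140.]])
scores = torch.tensor([0.9, 0.8, 0.7])

keep = tv_nms(boxes, scores, iou_threshold=0.5)  # kept indices, highest score first
print(keep)  # tensor([0, 2]) -- the second box is suppressed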