Example 1
 def draw(self, ctx, area, fill_or_stroke=True):
     """Draw the complete drawing by drawing each object in turn."""
     
     self._painter.background(ctx, area, color=(1,1,1), clip=True) # paint white background
     self._painter.setup(ctx, transform={"translate":self.get_offset(), "scale": self.get_ratio()})
     
     # draw only the points that fall inside the user's window;
     # device_area has to be converted to user coordinates first
     x, y = self.device_to_user(Point(area.x, area.y))
     x2, y2 = self.device_to_user(Point(area.x + area.width, area.y + area.height))
     
     radius = 0 #self.pixel_radius + self.line_width # the radius should be added not here, but where we put shapes onto the canvas
     # then again, it is simpler here: asking to draw a slightly larger chunk beats computing the exact size of e.g. line symbols...
     
     elements = self._index_rtree.intersection((x-radius, y-radius, x2+radius, y2+radius))
     elements_zindex = self._styler.create_zindex(elements) # a complete list of all possible z-indexes - initially some of them may be empty []
     
     timer.start("draw")
     for zindex in sorted(elements_zindex.keys()):
         for element in elements_zindex[zindex]:
             #print "element: ", element[0][0]
             self._painter.draw(element, update=elements_zindex, fill_or_stroke=fill_or_stroke) # always draws something - but may additionally insert elements with a higher z-index;
             # those elements will be drawn later.
         
     timer.end("draw")
Example 2
    def update_surface_buffer(self, area, offset=Point(0, 0)):
        """update self._surface_buffer"""
        #self._surface_buffer = None
        if not self._surface_buffer:
            self._surface_buffer = cairo.ImageSurface(cairo.FORMAT_ARGB32, area.width,  area.height)
            #self._surface_buffer = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0,0, area.width, area.height))

            context = cairo.Context(self._surface_buffer)
            timer.start("expose full redraw")
            self._canvas.draw(context, area)
            timer.end("expose full redraw")
        else: # need to shift the buffer, or simply repaint from it
            #print "expose buffer"
            # paint the buffer (shifted by the offset if needed)
            merged_surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, area.width, area.height)
            #merged_surface = cairo.RecordingSurface(cairo.CONTENT_COLOR_ALPHA, (0,0, area.width, area.height))
            merged_ctx = cairo.Context(merged_surface)

            merged_ctx.set_source_surface(self._surface_buffer, offset.x, offset.y)
            merged_ctx.paint()
            # the old dragged chunk has now been added

            # next, add the extra chunks if a drag took place
            self.invalidate_drag_regions(area, offset) # compute what needs repainting after a drag at the top/bottom/sides
            self.draw_invalid_regions(merged_surface) # repaint everything that was recorded in self._redraw_regions
            self._surface_buffer = merged_surface
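The shift-and-patch trick above uses only the standard pycairo API. A minimal self-contained sketch of the same pattern - blitting the old buffer into a fresh surface at an offset and leaving a stale strip to repaint - with made-up sizes and offset:

import cairo

old = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 100)
# ... assume `old` already holds rendered content ...

merged = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 100)
ctx = cairo.Context(merged)
ctx.set_source_surface(old, 30, 0)  # paint the old buffer shifted 30 px to the right
ctx.paint()
# the leftmost 30 px of `merged` are now stale and must be redrawn separately,
# which is what invalidate_drag_regions()/draw_invalid_regions() handle above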
Example 3
        def test(obj, tool):
            if obj[0] == 4:
                #data = Shape.decompress(obj).get_data()
                #data1 = geometry.curve2ocad(data)
                #data2 = geometry.triplets2bezier(data)
                
                geometry.bezier_points = []
                
                timer.start("BezierLength") # praleidziama pirma ir paskutines koordinates
                length = geometry.bezier_length(obj[3:], maxdepth=4, distance_tolerance=0.25)
                # at least twice as fast as with distance_tolerance=None, and similar to maxdepth=3
                # need to decide which is better: (maxdepth=4, distance_tolerance=0.25) or (maxdepth=3, distance_tolerance=None)
                # it looks like maxdepth=3 yields more points
                # maxdepth=3 - the result is good enough for me; can it be sped up by adding distance_tolerance?
                timer.end("BezierLength")
                print "bezier length: ", length
                
                timer.start("CairoLength")
                length2 = geometry.cairo_curve_length(obj[3:])
                timer.end("CairoLength")
                print "cairo length: ", length2
                
                
                if tool._curve_id:
                    screen.replace(tool._curve_id, ScreenLine(geometry.bezier_points))
                else:
                    tool._curve_id = screen.add(ScreenLine(geometry.bezier_points))

                #timer.start("BezierLength22")
                #print "lnegth: ", geometry.bezier_length(data2, maxdepth=4, distance_tolerance=None)
                #timer.end("BezierLength22")
                
                return "Length: " + str(length)
            else:
                return str(obj[1]) + " - z-index: " + str(screen.get_canvas().get_styler().get_style(obj).get("z-index", "?")) #obj[1]
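geometry.bezier_length() is project code, but the maxdepth/distance_tolerance trade-off weighed in the comments is the classic adaptive de Casteljau subdivision. A minimal sketch of that algorithm for a single cubic segment (the project's version takes a flat coordinate list instead of four control points):

from math import hypot

def _dist(a, b):
    return hypot(b[0] - a[0], b[1] - a[1])

def bezier_length(p0, p1, p2, p3, maxdepth=4, distance_tolerance=0.25):
    """Approximate the arc length of one cubic Bezier segment by recursive subdivision."""
    chord = _dist(p0, p3)
    polygon = _dist(p0, p1) + _dist(p1, p2) + _dist(p2, p3)
    # stop once the control polygon hugs the chord closely enough, or the depth runs out
    if maxdepth <= 0 or (distance_tolerance is not None
                         and polygon - chord <= distance_tolerance):
        return (polygon + chord) / 2.0
    mid = lambda a, b: ((a[0] + b[0]) / 2.0, (a[1] + b[1]) / 2.0)
    p01, p12, p23 = mid(p0, p1), mid(p1, p2), mid(p2, p3)  # de Casteljau split at t=0.5
    p012, p123 = mid(p01, p12), mid(p12, p23)
    p0123 = mid(p012, p123)
    return (bezier_length(p0, p01, p012, p0123, maxdepth - 1, distance_tolerance) +
            bezier_length(p0123, p123, p23, p3, maxdepth - 1, distance_tolerance))

print(bezier_length((0, 0), (1, 0), (2, 0), (3, 0)))  # degenerate straight segment -> 3.0

Raising maxdepth or tightening distance_tolerance trades speed for accuracy, which is exactly the choice the comments above are debating.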
Example 4
    def paint_buffer(self, zoom=0):
        """zoom:0, tai atliekam iprasta perpaisyma (zoom nebuvo paspaustas)
            zoom:1 - pradinis perpaisymas po zoom paspaudimo, nupaisome tik vartotojui matoma dali
            zoom:2 - perpaisymas po expose ivykdymo, dapaisome likusia buferio dali
        """
        #print "paint_buffer :", zoom
        self.ui_set_cursor("wait_arrow")
        area = self._device_area
        self._canvas.drag2(Point(area.width, area.height)) # shift the canvas; it is shifted back at the end
        if not zoom:
            #timer.start("update 3x3")
            #self._surface_buffer = None - a full repaint while dragging only pays off occasionally, at the lowest zoom level - because then
            # the same shape lands on both the left and the right side of the drag and gets repainted twice
            self.update_surface_buffer(Area(0, 0, area.width*3, area.height*3),  self._buffer_offset)
            #timer.end("update 3x3")
        elif zoom == 1: # after a zoom press, repaint the top-left part
            #self._surface_buffer = None
            timer.start("update 2x2")
            self.update_surface_buffer(Area(0, 0, area.width*2, area.height*2)) 
            timer.end("update 2x2")
            self._zoom = True # after expose
        elif zoom == 2: # repaint the remaining part
            timer.start("update invalid")
            self.invalidate(Area(area.width*2, 0, area.width, area.height*2)) # invalidate the right side of the buffer
            self.invalidate(Area(0, area.height*2, area.width*3, area.height)) # invalidate the bottom of the buffer
            self.update_surface_buffer(Area(0, 0, area.width*3, area.height*3))
            timer.end("update invalid")
            self._zoom = False

        self._canvas.drag2(Point(-area.width, -area.height))
        self.ui_reset_cursor()
Example 5
 def load_fcad_file(self, file_path):
     timer.start("loading shapes.fcad")
     self._shapes = ShapeFile.read_fcad_file(file_path)
 
     if len(self._shapes):
         def generator_function(points):
             for i, obj in enumerate(points):
                 if obj is None: continue
                 yield (i, self._styler.get_bbox(fshape.Shape.decompress(obj)), obj)
         self._index_rtree = Rtree(generator_function(self._shapes))
     timer.end("loading shapes.fcad")
Example 6
 def press(self, screen, point):
     timer.start("press")
     if self._object_index is not None:
         id = self._selected_objects[self._object_index]
         found = list(reversed(screen.get_canvas().find_objects_at_position(point))) # reverse so that the objects drawn last come first
         if id in found:
             self._drag_object_id = id # start dragging object
             self._drag_object_point = point # start of the dragging
         else:
             self._drag_object_id = None
     timer.end("press")
Example 7
 def load_pickle(self, file_path):
     timer.start("loading shapes.p")
     if os.path.exists(file_path):
         self._shapes = pickle.load(open(file_path, "rb"))
         
         if len(self._shapes):
             def generator_function(points):
                 for i, obj in enumerate(points):
                     if obj is None: continue
                     yield (i, self._styler.get_bbox(fshape.Shape.decompress(obj)), obj)
             self._index_rtree = Rtree(generator_function(self._shapes))
     timer.end("loading shapes.p")
Example 8
 def draw_object(self, ctx, id, fill_or_stroke=True):
     #self.apply_transforms(ctx) # drag and scale
     self._painter.setup(ctx, transform={"translate":self.get_offset(), "scale": self.get_ratio()})
     
     elements_zindex = self._styler.create_zindex([id])
     
     timer.start("draw single object")
     for zindex in sorted(elements_zindex.keys()):
         for element in elements_zindex[zindex]:
             self._painter.draw(element, update=elements_zindex, fill_or_stroke=fill_or_stroke) # always draws something - but may additionally insert elements with a higher z-index
         
     timer.end("draw single object")
Example 9
def louvainCommunityDetection(f, ft, gnx):
    start = timer.start(ft, 'Louvain')
    bp = community.best_partition(gnx)
    comSizeBp = getCommunitySize(gnx, bp)
    timer.stop(ft, start)
    writeTofile(comSizeBp, f)
    return comSizeBp
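Examples 9-13 and 16-20 share a different timer API from the earlier snippets: start(ft, name) logs to an open file handle ft and returns a token that stop(ft, start) later consumes. That module is not part of this listing; one plausible minimal shape, offered purely as an assumption consistent with these call sites:

import time

def start(ft, name):
    """Log that `name` started; return what stop() needs."""
    ft.writelines(name + ' started\n')
    return (name, time.time())

def stop(ft, token):
    """Log the elapsed time for a token returned by start()."""
    name, t0 = token
    ft.writelines('%s finished in %.3f s\n' % (name, time.time() - t0))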
Example 10
def k_core(f,ft,gnx):
    start = timer.start(ft, 'K-Core')
    result = nx.core_number(gnx)
    timer.stop(ft, start)
    for k in result:
        f.writelines(str(k) + ',' + str(result[k]) + '\n')
    return result
Example 11
def page_rank(gnx, f, ft):
    start = timer.start(ft, 'Page Rank')
    page_rank_values = nx.pagerank(gnx, alpha=0.9)
    timer.stop(ft, start)

    for k in page_rank_values.keys():
        f.writelines(str(k) + ',' + str(page_rank_values[k]) + '\n')
    return page_rank_values
Example 12
def attractor_basin(gnx, f, ft):
    if not gnx.is_directed():
        return
    start = timer.start(ft, 'Attractor Basin')
    attractor_dict = calc_attractor_basin(gnx)
    timer.stop(ft, start)
    for k in attractor_dict.keys():
        f.writelines(str(k) + ',' + str(attractor_dict[k]) + '\n')
    return attractor_dict
Example 13
def hierarchy_energy(gnx, f, ft):
    start = timer.start(ft, 'hierarchyEnergy')
    hierarchyEnergy_list, vet_index = calculate_hierarchyEnergy_index(gnx)
    timer.stop(ft, start)
    # write the results to the file
    for n in range(0, len(vet_index)):
        f.writelines(
            str(vet_index[n]) + ',' + str(hierarchyEnergy_list[n][0]) + '\n')
    return hierarchyEnergy_list
Example 14
def mask_iou(mask1, mask2, iscrowd=False):
    """
    Inputs inputs are matricies of size _ x N. Output is size _1 x _2.
    Note: if iscrowd is True, then mask2 should be the crowd.
    """
    timer.start('Mask IoU')

    intersection = torch.matmul(mask1, mask2.t())
    area1 = torch.sum(mask1, dim=1).view(1, -1)
    area2 = torch.sum(mask2, dim=1).view(1, -1)
    union = (area1.t() + area2) - intersection

    if iscrowd:
        # Make sure to broadcast to the right dimension
        ret = intersection / area1.t()
    else:
        ret = intersection / union
    timer.stop('Mask IoU')
    return ret.cpu()
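Because the intersection is a plain matrix product, mask1 and mask2 must be float tensors of flattened binary masks. A quick shape check (assuming the timer module used above is importable):

import torch

mask1 = (torch.rand(3, 64) > 0.5).float()  # 3 masks, 64 pixels each
mask2 = (torch.rand(5, 64) > 0.5).float()  # 5 masks over the same pixels
print(mask_iou(mask1, mask2).shape)        # -> torch.Size([3, 5]): IoU of every pair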
Example 15
    def release(self, screen, point):
        timer.start("release")
        if self._drag_object_id is not None: # object drag end
            offset = (point.x - self._drag_object_point.x, point.y - self._drag_object_point.y)
            if offset != (0,0):
                Invoker.execute(MoveShapeCommand(self, screen, self._drag_object_id, offset))
                text = "Object %i was moved." % self._drag_object_id
                self._drag_object_id = None
                timer.end("release") # balance timer.start() on this early return as well
                return text

        shape_id = self.get_next(screen, point)
        
        if shape_id is not None:
            self.show_handlers(screen, ScreenSelectedObject(shape_id))
            timer.end("release")
            return "%i:%i of %i" % (self._object_index, shape_id, len(self._selected_objects))
        else: 
            timer.end("release")
            return "No objects found"
Example 16
def general_information_undirected(gnx, f, ft):
    start = timer.start(ft, 'General information')
    nodes = gnx.nodes()
    degrees = [[n, gnx.degree(n)] for n in nodes]
    timer.stop(ft, start)
    for degree in degrees:
        f.writelines(str(degree[0]) + ',' + str(degree[1]) + '\n')
    map_degree = {}
    for degree in degrees:
        map_degree[degree[0]] = [degree[1]]
    return map_degree
Example 17
def find_all_motifs(f, ft, ggt, motifs_number):
    motifs_veriations = get_motif_veriation_list(motifs_number)

    start = timer.start(ft, 'Find Motifs ' + str(motifs_number) + ' ')
    result = gt.clustering.motifs(ggt,
                                  motif_list=motifs_veriations,
                                  k=motifs_number,
                                  return_maps=True)
    timer.stop(ft, start)

    return parse_motif_result(f, ft, ggt, motifs_number, result,
                              motifs_veriations)
Example 18
def flow_mesure(f, ft, gnx):

    start = timer.start(ft, 'Flow Measure')

    flow_map = calculate_flow_index(gnx)

    timer.stop(ft, start)

    for n in flow_map:
        f.writelines(str(n)+','+str(flow_map[n]) + '\n')

    return flow_map
Example 19
def bfs_distance_distribution(f, ft, gnx):
    start = timer.start(ft, 'BFS distance distribution')
    bfs_dist = calc_bfs_dist(gnx)
    dist_moments = {}
    for key in bfs_dist.keys():
        lst = []
        lst.append(float(np.mean(bfs_dist[key])))
        lst.append(float(np.std(bfs_dist[key])))
        dist_moments[key] = lst
    timer.stop(ft, start)
    write_bfs_moments_to_file(dist_moments, f)
    return dist_moments
Example 20
def general_information_directed(gnx, f, ft):
    start = timer.start(ft, 'General information')
    nodes = gnx.nodes()
    out_deg = [[n, gnx.out_degree(n)] for n in nodes]
    in_deg = [[n, gnx.in_degree(n)] for n in nodes]
    timer.stop(ft,start)
    for i in nodes:
        f.writelines(str(i) + ',' + str(in_deg[i][1]) + ',' + str(out_deg[i][1]) + '\n')
    map_degree = {}
    for n in nodes:
        map_degree[n] = [in_deg[n][1], out_deg[n][1]]
    return map_degree
Example 21
    def draw_100(self, ctx, area):
        """100% draw for printing, no scale, area in user coordinates"""
        page_area = Area(0, 0, area.width, area.height)
        #ctx.rectangle(0, 0, area.width, area.height) 
        #ctx.clip() # so that nothing is drawn outside this rectangle (otherwise lines come out duplicated)
        #self.background(ctx, page_area) # paint white background
        
        self._painter.background(ctx, page_area, color=(1,1,1), clip=True) # paint white background
        self._painter.setup(ctx, transform={"translate":(-area.x, -area.y)})

        radius = 0 #self.pixel_radius + self.line_width # the radius should be added not here, but where we put shapes onto the canvas
        
        elements = self._index_rtree.intersection((area.x-radius, area.y-radius, area.x+area.width+radius, area.y+area.height+radius))
        elements_zindex = self._styler.create_zindex(elements) # a complete list of all possible z-indexes - initially some of them may be empty []
        
        timer.start("draw100")
        for zindex in sorted(elements_zindex.keys()):
            for element in elements_zindex[zindex]:
                self._painter.draw(element, update=elements_zindex) # always draws something - but may additionally insert elements with a higher z-index;
                # those elements will be drawn later.
            
        timer.end("draw100")
Example 22
def find_all_circuits(f, ft, ggt):
    start = timer.start(ft, 'Find Cycles')
    circuits = graph_tool.topology.all_circuits(ggt)
    timer.stop(ft, start)
    for c in circuits:
        first = True
        for v in c:
            if first:
                f.writelines('[' + str(ggt.vp.id[v]))
                first = False
            else:
                f.writelines(',' + str(ggt.vp.id[v]))
        f.writelines(']\n')
Example 23
def betweenness_centrality(ggt, f, ft, normalized=False):
    b_prop = ggt.new_vertex_property('float')
    ggt.vp.bc = b_prop

    start = timer.start(ft, 'Betweenness Centrality')
    graph_tool.centrality.betweenness(ggt, vprop=b_prop, norm=normalized)
    timer.stop(ft, start)

    for v in ggt.vertices():
        f.writelines(ggt.vp.id[v] + ',' + str(ggt.vp.bc[v]) + '\n')

Example 24
    def add_random_points(self, number, area, generator=True):
        """Kai generator=False - sunaudoja maziau atminties ikrovimo metu, bet trunka gerokai leciau
        """
        self._shapes = []
        timer.start("Adding random data")

        from random import randint
        for x in range(0, number):
            color = 65536 * randint(0,255) + 256 * randint(0,255) + randint(0,255) # RGBint
            x, y = randint(2, area.width), randint(2, area.height)

            if not generator: # call rtree.add for each point separately
                self.add(fshape.Shape(1, Point(x, y), color=color))
            else:
                self._shapes.append((1, color, x, y))
        
        if generator:
            def generator_function(points):
                for i, obj in enumerate(points):
                    yield (i, (obj[2], obj[3], obj[2], obj[3]), obj)
            self._index_rtree = Rtree(generator_function(self._shapes))
        
        timer.end("Adding random data")
Example 25
    def load_ocad_file(self, file_path, generator=True):
        #import ocadfile
        #self.add(Shape(1, Point(0, 0), symbol="graphics")) # coordinate origin

        self._prj = GisProjection()
        self._zoom_level = self._ratio_index.index(4)
        
        of = OcadFile(file_path, self._prj)
        #self._styler.load_ocad_symbols(of, prj)

        timer.start("Adding ocad symbols")
        self._styler.set_symbols_style(of.get_symbols_style())
        timer.end("Adding ocad symbols")
        
        
        timer.start("Adding ocad elements")
        shapes = of.get_shapes()
        print "Shapes: ", len(shapes)
        for shape in shapes:
            self.add(shape)
        timer.end("Adding ocad elements")
        
        self.center()
Example 26
    def load_shape_file(self, file_path, generator=True):
        import shapefile
        from random import randint

        sf = shapefile.Reader(file_path)
        print "Number of shapes: ", sf.numRecords
        
        self.add(fshape.Shape(1, Point(0, 0), style={"z-index":99})) # coordinate origin
        
        if self.prj.scale == 1: # first time
            prj = GisProjection(self, sf.bbox)
            # after this loop we will already have the map bounds
            center = prj.map_to_user(prj.get_center())
            self.center(center)
            self.prj = prj
                    
        timer.start("Adding shapes")
        symbol = sf.shapeName.split("/")[-1]
        self._styler.set_symbol_style(symbol, {"color": (randint(0,255),randint(0,255),randint(0,255))})
        
        for shape in sf.ogis_shapes(self.prj):
            self.add(fshape.Shape(shape.shapeType, shape.points, symbol=symbol))
        timer.end("Adding shapes")
Example 27
def find_all_motifs(f, ft, gnx, motif_path, motifs_number=3):
    gnx_copy = gnx.copy()
    start = timer.start(ft, 'Find Motifs ' + str(motifs_number) + ' ')

    if motifs_number == 3:
        motifsHist = find_motifs_3(gnx_copy, motif_path)
    if motifs_number == 4:
        motifsHist = find_motifs_4(gnx_copy, motif_path)

    timer.stop(ft, start)

    print 'start write to file:  ' + str(datetime.now())
    for i in motifsHist:
        line = str(i)
        for h in motifsHist[i]:
            line = line + ',' + str(h)
        f.writelines(line + '\n')
    print 'finish write to file:  ' + str(datetime.now())

    return motifsHist
Example 28
def prep_metrics(ap_data, nms_outs, gt, gt_masks, h, w, num_crowd, image_id, make_json, cocoapi):
    """ Returns a list of APs for this image, with each element being for a class  """

    with timer.env('After NMS'):
        class_ids, classes, boxes, masks = after_nms(nms_outs, h, w)

        if class_ids.size(0) == 0:
            return

        class_ids = list(class_ids.cpu().numpy().astype(int))
        classes = list(classes.cpu().numpy().astype(float))
        masks = masks.view(-1, h * w).cuda() if cuda else masks.view(-1, h * w)
        boxes = boxes.cuda() if cuda else boxes

    if cocoapi:
        with timer.env('Output json'):
            boxes = boxes.cpu().numpy()
            masks = masks.view(-1, h, w).cpu().numpy()

            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] - boxes[i, 0]) > 0:
                    make_json.add_bbox(image_id, class_ids[i], boxes[i, :], classes[i])
                    make_json.add_mask(image_id, class_ids[i], masks[i, :, :], classes[i])
        return

    with timer.env('Prepare gt'):
        gt_boxes = torch.Tensor(gt[:, :4])
        gt_boxes[:, [0, 2]] *= w
        gt_boxes[:, [1, 3]] *= h
        gt_classes = list(gt[:, 4].astype(int))
        gt_masks = torch.Tensor(gt_masks).view(-1, h * w)

        if num_crowd > 0:
            split = lambda x: (x[-num_crowd:], x[:-num_crowd])
            crowd_boxes, gt_boxes = split(gt_boxes)
            crowd_masks, gt_masks = split(gt_masks)
            crowd_classes, gt_classes = split(gt_classes)

    with timer.env('Eval Setup'):
        num_pred = len(class_ids)
        num_gt = len(gt_classes)

        mask_iou_cache = mask_iou(masks, gt_masks)
        bbox_iou_cache = bbox_iou(boxes.float(), gt_boxes.float())

        if num_crowd > 0:
            crowd_mask_iou_cache = mask_iou(masks, crowd_masks, iscrowd=True)
            crowd_bbox_iou_cache = bbox_iou(boxes.float(), crowd_boxes.float(), iscrowd=True)
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None

        iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(), lambda i, j: crowd_bbox_iou_cache[i, j].item()),
                     ('mask', lambda i, j: mask_iou_cache[i, j].item(), lambda i, j: crowd_mask_iou_cache[i, j].item())]

    timer.start('Main loop')
    for _class in set(class_ids + gt_classes):
        num_gt_for_class = sum([1 for x in gt_classes if x == _class])

        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]

            for iou_type, iou_func, crowd_func in iou_types:
                gt_used = [False] * len(gt_classes)
                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)

                for i in range(num_pred):
                    if class_ids[i] != _class:
                        continue

                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue

                        iou = iou_func(i, j)

                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j

                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(classes[i], True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False

                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue

                                iou = crowd_func(i, j)

                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break

                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(classes[i], False)
    timer.stop('Main loop')
Example 29
def evaluate(net, cfg, step=None):
    dataset = COCODetection(cfg, mode='val')
    data_loader = data.DataLoader(dataset,
                                  1,
                                  num_workers=4,
                                  shuffle=False,
                                  pin_memory=True,
                                  collate_fn=val_collate)
    ds = len(data_loader)
    progress_bar = ProgressBar(40, ds)
    timer.reset()

    ap_data = {
        'box': [[APDataObject() for _ in cfg.class_names] for _ in iou_thres],
        'mask': [[APDataObject() for _ in cfg.class_names] for _ in iou_thres]
    }

    with torch.no_grad():
        for i, (img, gt, gt_masks, img_h, img_w) in enumerate(data_loader):
            if i == 1:
                timer.start()

            if cfg.cuda:
                img, gt, gt_masks = img.cuda(), gt.cuda(), gt_masks.cuda()

            with timer.counter('forward'):
                class_p, box_p, coef_p, proto_p, anchors = net(img)

            with timer.counter('nms'):
                ids_p, class_p, box_p, coef_p, proto_p = nms(
                    class_p, box_p, coef_p, proto_p, anchors, cfg)

            with timer.counter('after_nms'):
                ids_p, class_p, boxes_p, masks_p = after_nms(
                    ids_p, class_p, box_p, coef_p, proto_p, img_h, img_w)
                if ids_p is None:
                    continue

            with timer.counter('metric'):
                ids_p = list(ids_p.cpu().numpy().astype(int))
                class_p = list(class_p.cpu().numpy().astype(float))

                if cfg.coco_api:
                    boxes_p = boxes_p.cpu().numpy()
                    masks_p = masks_p.cpu().numpy()

                    for j in range(masks_p.shape[0]):
                        if (boxes_p[j, 3] - boxes_p[j, 1]) * (
                                boxes_p[j, 2] - boxes_p[j, 0]) > 0:
                            make_json.add_bbox(dataset.ids[i], ids_p[j],
                                               boxes_p[j, :], class_p[j])
                            make_json.add_mask(dataset.ids[i], ids_p[j],
                                               masks_p[j, :, :], class_p[j])
                else:
                    prep_metrics(ap_data, ids_p, class_p, boxes_p, masks_p, gt,
                                 gt_masks, img_h, img_w, iou_thres)

            aa = time.perf_counter()
            if i > 0:
                batch_time = aa - temp
                timer.add_batch_time(batch_time)
            temp = aa

            if i > 0:
                t_t, t_d, t_f, t_nms, t_an, t_me = timer.get_times(
                    ['batch', 'data', 'forward', 'nms', 'after_nms', 'metric'])
                fps, t_fps = 1 / (t_d + t_f + t_nms + t_an), 1 / t_t
                bar_str = progress_bar.get_bar(i + 1)
                print(
                    f'\rTesting: {bar_str} {i + 1}/{ds}, fps: {fps:.2f} | total fps: {t_fps:.2f} | '
                    f't_t: {t_t:.3f} | t_d: {t_d:.3f} | t_f: {t_f:.3f} | t_nms: {t_nms:.3f} | '
                    f't_after_nms: {t_an:.3f} | t_metric: {t_me:.3f}',
                    end='')

        if cfg.coco_api:
            make_json.dump()
            print(
                f'\nJson files dumped, saved in: \'results/\', start evaluating.'
            )

            gt_annotations = COCO(cfg.val_ann)
            bbox_dets = gt_annotations.loadRes(f'results/bbox_detections.json')
            mask_dets = gt_annotations.loadRes(f'results/mask_detections.json')

            print('\nEvaluating BBoxes:')
            bbox_eval = COCOeval(gt_annotations, bbox_dets, 'bbox')
            bbox_eval.evaluate()
            bbox_eval.accumulate()
            bbox_eval.summarize()

            print('\nEvaluating Masks:')
            bbox_eval = COCOeval(gt_annotations, mask_dets, 'segm')
            bbox_eval.evaluate()
            bbox_eval.accumulate()
            bbox_eval.summarize()
        else:
            table, box_row, mask_row = calc_map(ap_data,
                                                iou_thres,
                                                len(cfg.class_names),
                                                step=step)
            print(table)
            return table, box_row, mask_row
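timer.counter('...') above behaves like a named context manager that accumulates per-stage wall time, which timer.get_times() then turns into per-call averages. The module itself is not in this listing; a minimal sketch of how such a counter could work, as an assumption rather than the project's actual code:

import time
from contextlib import contextmanager

_total, _calls = {}, {}

@contextmanager
def counter(name):
    t0 = time.perf_counter()
    try:
        yield
    finally:
        _total[name] = _total.get(name, 0.0) + (time.perf_counter() - t0)
        _calls[name] = _calls.get(name, 0) + 1

def get_times(names):
    """Mean seconds per call for each stage name."""
    return [_total.get(n, 0.0) / max(_calls.get(n, 1), 1) for n in names]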
Example 30
def main():
    parser = argparse.ArgumentParser(description='YOLACT Detection.')
    parser.add_argument('--weight', default='weights/best_30.5_res101_coco_392000.pth', type=str)
    parser.add_argument('--image', default=None, type=str, help='The folder of images for detecting.')
    parser.add_argument('--video', default=None, type=str, help='The path of the video to evaluate.')
    parser.add_argument('--img_size', type=int, default=544, help='The image size for validation.')
    parser.add_argument('--traditional_nms', default=False, action='store_true', help='Whether to use traditional nms.')
    parser.add_argument('--hide_mask', default=False, action='store_true', help='Hide masks in results.')
    parser.add_argument('--hide_bbox', default=False, action='store_true', help='Hide boxes in results.')
    parser.add_argument('--hide_score', default=False, action='store_true', help='Hide scores in results.')
    parser.add_argument('--cutout', default=False, action='store_true', help='Cut out each object and save.')
    parser.add_argument('--save_lincomb', default=False, action='store_true', help='Show the generating process of masks.')
    parser.add_argument('--no_crop', default=False, action='store_true',
                        help='Do not crop the output masks with the predicted bounding box.')
    parser.add_argument('--real_time', default=False, action='store_true', help='Show the detection results real-timely.')
    parser.add_argument('--visual_thre', default=0.3, type=float,
                        help='Detections with a score under this threshold will be removed.')

    args = parser.parse_args()
    prefix = re.findall(r'best_\d+\.\d+_', args.weight)[0]
    suffix = re.findall(r'_\d+\.pth', args.weight)[0]
    args.cfg = args.weight.split(prefix)[-1].split(suffix)[0]
    cfg = get_config(args, mode='detect')

    net = Yolact(cfg)
    net.load_weights(cfg.weight, cfg.cuda)
    net.eval()

    if cfg.cuda:
        cudnn.benchmark = True
        cudnn.fastest = True
        net = net.cuda()

    # detect images
    if cfg.image is not None:
        dataset = COCODetection(cfg, mode='detect')
        data_loader = data.DataLoader(dataset, 1, num_workers=2, shuffle=False, pin_memory=True, collate_fn=detect_collate)
        ds = len(data_loader)
        assert ds > 0, 'No .jpg images found.'
        progress_bar = ProgressBar(40, ds)
        timer.reset()

        for i, (img, img_origin, img_name) in enumerate(data_loader):
            if i == 1:
                timer.start()

            if cfg.cuda:
                img = img.cuda()

            img_h, img_w = img_origin.shape[0:2]

            with torch.no_grad(), timer.counter('forward'):
                class_p, box_p, coef_p, proto_p = net(img)

            with timer.counter('nms'):
                ids_p, class_p, box_p, coef_p, proto_p = nms(class_p, box_p, coef_p, proto_p, net.anchors, cfg)

            with timer.counter('after_nms'):
                ids_p, class_p, boxes_p, masks_p = after_nms(ids_p, class_p, box_p, coef_p, proto_p,
                                                            img_h, img_w, cfg, img_name=img_name)

            with timer.counter('save_img'):
                img_numpy = draw_img(ids_p, class_p, boxes_p, masks_p, img_origin, cfg, img_name=img_name)
                cv2.imwrite(f'results/images/{img_name}', img_numpy)

            aa = time.perf_counter()
            if i > 0:
                batch_time = aa - temp
                timer.add_batch_time(batch_time)
            temp = aa

            if i > 0:
                t_t, t_d, t_f, t_nms, t_an, t_si = timer.get_times(['batch', 'data', 'forward',
                                                                    'nms', 'after_nms', 'save_img'])
                fps, t_fps = 1 / (t_d + t_f + t_nms + t_an), 1 / t_t
                bar_str = progress_bar.get_bar(i + 1)
                print(f'\rTesting: {bar_str} {i + 1}/{ds}, fps: {fps:.2f} | total fps: {t_fps:.2f} | '
                    f't_t: {t_t:.3f} | t_d: {t_d:.3f} | t_f: {t_f:.3f} | t_nms: {t_nms:.3f} | '
                    f't_after_nms: {t_an:.3f} | t_save_img: {t_si:.3f}', end='')

        print('\nFinished, saved in: results/images.')

    # detect videos
    elif cfg.video is not None:
        vid = cv2.VideoCapture(cfg.video)

        target_fps = round(vid.get(cv2.CAP_PROP_FPS))
        frame_width = round(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_height = round(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        num_frames = round(vid.get(cv2.CAP_PROP_FRAME_COUNT))

        name = cfg.video.split('/')[-1]
        video_writer = cv2.VideoWriter(f'results/videos/{name}', cv2.VideoWriter_fourcc(*"mp4v"), target_fps,
                                    (frame_width, frame_height))

        progress_bar = ProgressBar(40, num_frames)
        timer.reset()
        t_fps = 0

        for i in range(num_frames):
            if i == 1:
                timer.start()

            frame_origin = vid.read()[1]
            img_h, img_w = frame_origin.shape[0:2]
            frame_trans = val_aug(frame_origin, cfg.img_size)

            frame_tensor = torch.tensor(frame_trans).float()
            if cfg.cuda:
                frame_tensor = frame_tensor.cuda()

            with torch.no_grad(), timer.counter('forward'):
                class_p, box_p, coef_p, proto_p = net(frame_tensor.unsqueeze(0))

            with timer.counter('nms'):
                ids_p, class_p, box_p, coef_p, proto_p = nms(class_p, box_p, coef_p, proto_p, net.anchors, cfg)

            with timer.counter('after_nms'):
                ids_p, class_p, boxes_p, masks_p = after_nms(ids_p, class_p, box_p, coef_p, proto_p, img_h, img_w, cfg)

            with timer.counter('save_img'):
                frame_numpy = draw_img(ids_p, class_p, boxes_p, masks_p, frame_origin, cfg, fps=t_fps)

            if cfg.real_time:
                cv2.imshow('Detection', frame_numpy)
                cv2.waitKey(1)
            else:
                video_writer.write(frame_numpy)

            aa = time.perf_counter()
            if i > 0:
                batch_time = aa - temp
                timer.add_batch_time(batch_time)
            temp = aa

            if i > 0:
                t_t, t_d, t_f, t_nms, t_an, t_si = timer.get_times(['batch', 'data', 'forward',
                                                                    'nms', 'after_nms', 'save_img'])
                fps, t_fps = 1 / (t_d + t_f + t_nms + t_an), 1 / t_t
                bar_str = progress_bar.get_bar(i + 1)
                print(f'\rDetecting: {bar_str} {i + 1}/{num_frames}, fps: {fps:.2f} | total fps: {t_fps:.2f} | '
                    f't_t: {t_t:.3f} | t_d: {t_d:.3f} | t_f: {t_f:.3f} | t_nms: {t_nms:.3f} | '
                    f't_after_nms: {t_an:.3f} | t_save_img: {t_si:.3f}', end='')

        if not cfg.real_time:
            print(f'\n\nFinished, saved in: results/videos/{name}')

        vid.release()
        video_writer.release()
Example 31
args = parser.parse_args()
cfg = Config(args=args.__dict__, mode='Detect')
cfg.show_config()

test_dataset = Seg_dataset(cfg)

model = DLASeg(cfg).cuda()
model.load_state_dict(torch.load(cfg.trained_model), strict=True)
model.eval()

timer.reset()
with torch.no_grad():
    for i, (data_tuple, img_name) in enumerate(test_dataset):
        if i == 1:
            timer.start()  # the timer does not time the first image.

        img_name = img_name.replace('tif', 'png')
        image = data_tuple[0].unsqueeze(0).cuda().detach()

        with timer.counter('forward'):
            output = model(image)

        with timer.counter('save result'):
            pred = torch.max(output, 1)[1].squeeze(0).cpu().numpy()

            if cfg.colorful:
                pred = PALLETE[pred].astype('uint8')
                cv2.imwrite(f'results/{img_name}', pred)
            if cfg.overlay:
                pred = PALLETE[pred].astype('uint8')
Example 32
sess = ort.InferenceSession(cfg.weight)
input_name = sess.get_inputs()[0].name

# detect images
if cfg.image is not None:
    dataset = COCODetection(cfg, mode='detect')
    data_loader = data.DataLoader(dataset, 1, num_workers=4, shuffle=False,
                                  pin_memory=True, collate_fn=detect_onnx_collate)
    ds = len(data_loader)
    assert ds > 0, 'No .jpg images found.'
    progress_bar = ProgressBar(40, ds)
    timer.reset()

    for i, (img, img_origin, img_name) in enumerate(data_loader):
        if i == 1:
            timer.start()

        img_h, img_w = img_origin.shape[0:2]

        with timer.counter('forward'):
            class_p, box_p, coef_p, proto_p, anchors = sess.run(None, {input_name: img})

        with timer.counter('nms'):
            ids_p, class_p, box_p, coef_p, proto_p = nms_numpy(class_p, box_p, coef_p, proto_p, anchors, cfg)

        with timer.counter('after_nms'):
            ids_p, class_p, boxes_p, masks_p = after_nms_numpy(ids_p, class_p, box_p, coef_p,
                                                               proto_p, img_h, img_w, cfg)

        with timer.counter('save_img'):
            img_numpy = draw_img(ids_p, class_p, boxes_p, masks_p, img_origin, cfg, img_name=img_name)
Example 33
# if cfg.MODEL.USE_SYNCBN:  # TODO: figure this out
#     model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

optim = Optimizer(model, cfg)
checkpointer = Checkpointer(cfg, model.module, optim.optimizer)
start_iter = int(cfg.resume.split('_')[-1].split('.')[0]) if cfg.resume else 0
data_loader = make_data_loader(cfg, start_iter=start_iter)
max_iter = len(data_loader) - 1
timer.reset()
main_gpu = dist.get_rank() == 0
num_gpu = dist.get_world_size()

for i, (img_list_batch, box_list_batch) in enumerate(data_loader, start_iter):
    if main_gpu and i == start_iter + 1:
        timer.start()

    optim.update_lr(step=i)

    img_tensor_batch = torch.stack([aa.img for aa in img_list_batch], dim=0).cuda()
    for box_list in box_list_batch:
        box_list.to_cuda()

    with timer.counter('for+loss'):
        category_loss, box_loss, iou_loss = model(img_tensor_batch, box_list_batch)
        all_loss = torch.stack([category_loss, box_loss, iou_loss], dim=0)
        dist.reduce(all_loss, dst=0)

        if main_gpu:  # get the mean loss across all GPUS
            l_c = all_loss[0].item() / num_gpu  # .item() seems to be needed when printing; not sure why
            l_b = all_loss[1].item() / num_gpu
Example 34
def inference(model, cfg, during_training=False):
    model.eval()
    predictions, coco_results = {}, []
    val_loader = make_data_loader(cfg, during_training=during_training)
    dataset = val_loader.dataset
    dl = len(val_loader)
    bar = ProgressBar(length=40, max_val=dl)
    timer.reset()

    with torch.no_grad():
        for i, (img_list_batch, _) in enumerate(val_loader):
            if i == 1:
                timer.start()

            with timer.counter('forward'):
                img_tensor_batch = torch.stack(
                    [aa.img for aa in img_list_batch], dim=0).cuda()
                c_pred, box_pred, iou_pred, anchors = model(img_tensor_batch)

            with timer.counter('post_process'):
                resized_size = [aa.resized_size for aa in img_list_batch]
                pred_batch = post_process(cfg, c_pred, box_pred, iou_pred,
                                          anchors, resized_size)

            with timer.counter('accumulate'):
                for pred in pred_batch:
                    pred.to_cpu()

                for img_list, pred in zip(img_list_batch, pred_batch):
                    if pred.box.shape[0] == 0:
                        continue

                    original_id = dataset.id_img_map[img_list.id]
                    pred.resize(img_list.ori_size)
                    pred.convert_mode("x1y1wh")

                    boxes = pred.box.tolist()
                    score = pred.score.tolist()
                    label = pred.label.tolist()

                    mapped_labels = [dataset.to_category_id[i] for i in label]
                    coco_results.extend([{
                        "image_id": original_id,
                        "category_id": mapped_labels[k],
                        "bbox": box,
                        "score": score[k]
                    } for k, box in enumerate(boxes)])

            aa = time.perf_counter()
            if i > 0:
                batch_time = aa - temp
                timer.add_batch_time(batch_time)

                time_name = [
                    'batch', 'data', 'forward', 'post_process', 'accumulate'
                ]
                t_t, t_d, t_f, t_pp, t_acc = timer.get_times(time_name)
                fps, t_fps = 1 / (t_d + t_f + t_pp), 1 / t_t
                bar_str = bar.get_bar(i + 1)
                print(
                    f'\rTesting: {bar_str} {i + 1}/{dl}, fps: {fps:.2f} | total fps: {t_fps:.2f} | t_t: {t_t:.3f} | '
                    f't_d: {t_d:.3f} | t_f: {t_f:.3f} | t_pp: {t_pp:.3f} | t_acc: {t_acc:.3f}',
                    end='')

            temp = aa

    print('\n\nTest ended, doing evaluation...')

    json_name = cfg.weight.split('/')[-1].split('.')[0]
    file_path = f'results/{json_name}.json'
    with open(file_path, "w") as f:
        json.dump(coco_results, f)

    coco_dt = dataset.coco.loadRes(file_path)

    if cfg.val_api == 'Improved COCO':
        from my_cocoeval.cocoeval import SelfEval
        bbox_eval = SelfEval(dataset.coco, coco_dt, all_points=True)
    else:
        from pycocotools.cocoeval import COCOeval
        bbox_eval = COCOeval(dataset.coco, coco_dt, iouType='bbox')

    bbox_eval.evaluate()
    bbox_eval.accumulate()
    bbox_eval.summarize()

    if not during_training:
        if cfg.val_api == 'Improved COCO':
            bbox_eval.draw_curve()
        else:
            compute_thre_per_class(bbox_eval)
Example 35
def prep_metrics(ap_data,
                 dets,
                 img,
                 gt,
                 gt_masks,
                 h,
                 w,
                 num_crowd,
                 image_id,
                 detections: Detections = None):
    """ Returns a list of APs for this image, with each element being for a class  """
    if not args.output_coco_json:
        with timer.env('Prepare gt'):
            gt_boxes = jt.array(gt[:, :4])
            gt_boxes[:, [0, 2]] *= w
            gt_boxes[:, [1, 3]] *= h
            gt_classes = list(gt[:, 4].astype(int))
            gt_masks = jt.array(gt_masks).view(-1, h * w)

            if num_crowd > 0:
                split = lambda x: (x[-num_crowd:], x[:-num_crowd])
                crowd_boxes, gt_boxes = split(gt_boxes)
                crowd_masks, gt_masks = split(gt_masks)
                crowd_classes, gt_classes = split(gt_classes)
    with timer.env('Postprocess'):
        classes, scores, boxes, masks = postprocess(
            dets,
            w,
            h,
            crop_masks=args.crop,
            score_threshold=args.score_threshold)

        if classes.size(0) == 0:
            return

        classes = list(classes.numpy().astype(int))
        if isinstance(scores, list):
            box_scores = list(scores[0].numpy().astype(float))
            mask_scores = list(scores[1].numpy().astype(float))
        else:
            scores = list(scores.numpy().astype(float))
            box_scores = scores
            mask_scores = scores
        masks = masks.view(-1, h * w)
        boxes = boxes

    #print('GG')
    if args.output_coco_json:
        with timer.env('JSON Output'):
            boxes = boxes.numpy()
            masks = masks.view(-1, h, w).numpy()
            for i in range(masks.shape[0]):
                # Make sure that the bounding box actually makes sense and a mask was produced
                if (boxes[i, 3] - boxes[i, 1]) * (boxes[i, 2] -
                                                  boxes[i, 0]) > 0:
                    detections.add_bbox(image_id, classes[i], boxes[i, :],
                                        box_scores[i])
                    detections.add_mask(image_id, classes[i], masks[i, :, :],
                                        mask_scores[i])
            return

    #print('GG')
    with timer.env('Eval Setup'):
        num_pred = len(classes)
        num_gt = len(gt_classes)

        mask_iou_cache = _mask_iou(masks, gt_masks).numpy()
        bbox_iou_cache = _bbox_iou(boxes.float(), gt_boxes.float()).numpy()
        if num_crowd > 0:
            crowd_mask_iou_cache = _mask_iou(masks, crowd_masks,
                                             iscrowd=True).numpy()
            crowd_bbox_iou_cache = _bbox_iou(boxes.float(),
                                             crowd_boxes.float(),
                                             iscrowd=True).numpy()
        else:
            crowd_mask_iou_cache = None
            crowd_bbox_iou_cache = None

        box_indices = sorted(range(num_pred), key=lambda i: -box_scores[i])
        mask_indices = sorted(box_indices, key=lambda i: -mask_scores[i])

        iou_types = [('box', lambda i, j: bbox_iou_cache[i, j].item(),
                      lambda i, j: crowd_bbox_iou_cache[i, j].item(),
                      lambda i: box_scores[i], box_indices),
                     ('mask', lambda i, j: mask_iou_cache[i, j].item(),
                      lambda i, j: crowd_mask_iou_cache[i, j].item(),
                      lambda i: mask_scores[i], mask_indices)]
    #print('GG')

    #print(bbox_iou_cache)

    timer.start('Main loop')
    for _class in set(classes + gt_classes):
        ap_per_iou = []
        num_gt_for_class = sum([1 for x in gt_classes if x == _class])

        for iouIdx in range(len(iou_thresholds)):
            iou_threshold = iou_thresholds[iouIdx]

            for iou_type, iou_func, crowd_func, score_func, indices in iou_types:
                gt_used = [False] * len(gt_classes)

                ap_obj = ap_data[iou_type][iouIdx][_class]
                ap_obj.add_gt_positives(num_gt_for_class)

                for i in indices:
                    if classes[i] != _class:
                        continue

                    max_iou_found = iou_threshold
                    max_match_idx = -1
                    for j in range(num_gt):
                        if gt_used[j] or gt_classes[j] != _class:
                            continue

                        iou = iou_func(i, j)

                        if iou > max_iou_found:
                            max_iou_found = iou
                            max_match_idx = j

                    if max_match_idx >= 0:
                        gt_used[max_match_idx] = True
                        ap_obj.push(score_func(i), True)
                    else:
                        # If the detection matches a crowd, we can just ignore it
                        matched_crowd = False

                        if num_crowd > 0:
                            for j in range(len(crowd_classes)):
                                if crowd_classes[j] != _class:
                                    continue

                                iou = crowd_func(i, j)

                                if iou > iou_threshold:
                                    matched_crowd = True
                                    break

                        # All this crowd code so that we can make sure that our eval code gives the
                        # same result as COCOEval. There aren't even that many crowd annotations to
                        # begin with, but accuracy is of the utmost importance.
                        if not matched_crowd:
                            ap_obj.push(score_func(i), False)
    timer.stop('Main loop')
Example 36
def nms(boxes, scores, overlap=0.5, top_k=200, force_cpu=True):
    """Apply non-maximum suppression at test time to avoid detecting too many
    overlapping bounding boxes for a given object.
    Args:
        boxes: (tensor) The location preds for the img, Shape: [num_priors,4].
        scores: (tensor) The class predscores for the img, Shape:[num_priors].
        overlap: (float) The overlap thresh for suppressing unnecessary boxes.
        top_k: (int) The Maximum number of box preds to consider.
    Return:
        The indices of the kept boxes with respect to num_priors.
    """

    cuda_enabled = boxes.is_cuda

    if force_cpu:
        boxes = boxes.cpu()
        scores = scores.cpu()

    timer.start('NMS')
    keep = scores.new(scores.size(0)).zero_().long()
    if boxes.numel() == 0:
        timer.stop()
        return keep, 0  # keep the (keep, count) return shape on the empty-input path
    x1 = boxes[:, 0]
    y1 = boxes[:, 1]
    x2 = boxes[:, 2]
    y2 = boxes[:, 3]
    area = torch.mul(x2 - x1, y2 - y1)
    v, idx = scores.sort(0)  # sort in ascending order
    # I = I[v >= 0.01]
    idx = idx[-top_k:]  # indices of the top-k largest vals
    xx1 = boxes.new()
    yy1 = boxes.new()
    xx2 = boxes.new()
    yy2 = boxes.new()
    w = boxes.new()
    h = boxes.new()

    # keep = torch.Tensor()
    count = 0
    while idx.numel() > 0:
        i = idx[-1]  # index of current largest val
        # keep.append(i)
        keep[count] = i
        count += 1
        if idx.size(0) == 1:
            break
        idx = idx[:-1]  # remove kept element from view
        # load bboxes of next highest vals
        torch.index_select(x1, 0, idx, out=xx1)
        torch.index_select(y1, 0, idx, out=yy1)
        torch.index_select(x2, 0, idx, out=xx2)
        torch.index_select(y2, 0, idx, out=yy2)
        # store element-wise max with next highest score
        xx1 = torch.clamp(xx1, min=x1[i])
        yy1 = torch.clamp(yy1, min=y1[i])
        xx2 = torch.clamp(xx2, max=x2[i])
        yy2 = torch.clamp(yy2, max=y2[i])
        w.resize_as_(xx2)
        h.resize_as_(yy2)
        w = xx2 - xx1
        h = yy2 - yy1
        # check sizes of xx1 and xx2.. after each iteration
        w = torch.clamp(w, min=0.0)
        h = torch.clamp(h, min=0.0)
        inter = w*h
        # IoU = i / (area(a) + area(b) - i)
        rem_areas = torch.index_select(area, 0, idx)  # load remaining areas
        union = (rem_areas - inter) + area[i]
        IoU = inter/union  # store result in iou
        # keep only elements with an IoU <= overlap
        idx = idx[IoU.le(overlap)]
        
    if cuda_enabled:
        keep = keep.cuda()

    timer.stop()
    return keep, count
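A small usage check for the routine above (the second box overlaps the first beyond the 0.5 threshold and is suppressed); this assumes the same timer module is importable:

import torch

boxes = torch.tensor([[0., 0., 10., 10.],
                      [1., 1., 11., 11.],     # IoU ~0.68 with the first box
                      [50., 50., 60., 60.]])
scores = torch.tensor([0.9, 0.8, 0.7])
keep, count = nms(boxes, scores, overlap=0.5)
print(keep[:count])  # -> tensor([0, 2])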