Example #1
    def __gt__(self, other):
        if self.source.point != other.source.point and self.to.point != other.to.point:
            line_level = max(self.source.point.y, other.source.point.y)
            line_p1 = Point(0, line_level)
            line_p2 = Point(1, line_level)
            x_self, _ = utils.intersect(self.source.point, self.to.point,
                                        line_p1, line_p2)
            x_other, _ = utils.intersect(other.source.point, other.to.point,
                                         line_p1, line_p2)
            if x_self is None:
                x_self = self.source.point.x
            if x_other is None:
                x_other = other.source.point.x
            return x_self > x_other

        if self.source.point == other.source.point:
            origin = self.source.point
            self_angle = utils.polar_angle(origin, self.to.point)
            other_angle = utils.polar_angle(origin, other.to.point)
            return self_angle < other_angle
        else:
            origin = self.to.point
            self_angle = utils.polar_angle(origin, self.source.point)
            other_angle = utils.polar_angle(origin, other.source.point)
            return self_angle > other_angle
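
The comparator above relies on a utils.intersect helper that takes two points per line and returns the intersection coordinates, or (None, None) when the lines are parallel. That helper is not part of the snippet; a minimal sketch under that assumed convention (Point being any object with x/y attributes) could look like this:

from collections import namedtuple

Point = namedtuple("Point", "x y")

def intersect(p1, p2, p3, p4):
    # Intersection of the infinite lines through (p1, p2) and (p3, p4);
    # (None, None) when the lines are parallel.
    d = (p2.x - p1.x) * (p4.y - p3.y) - (p2.y - p1.y) * (p4.x - p3.x)
    if d == 0:
        return None, None
    t = ((p3.x - p1.x) * (p4.y - p3.y) - (p3.y - p1.y) * (p4.x - p3.x)) / d
    return p1.x + t * (p2.x - p1.x), p1.y + t * (p2.y - p1.y)
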
    def search(self):
        db = connect()

        game_search = list(tokenize(self.search_box.get()))

        game_ids = None
        mechanics_ids = None
        categories_ids = None

        if len(game_search) > 0:
            ids = []
            for token in game_search:
                # Append the list of game ids matching this token
                ids.append(
                    list(
                        map(lambda g: g['id'],
                            db.get_by_posting('games', 'word', token))))

            game_ids = list(reduce(reduce_intersection, ids))

        if len(self.mechanics) > 0:
            ids = []
            for mechanic_item in self.mechanics:
                mechanic_id = mechanic_item.value
                ids.append(
                    list(
                        map(
                            lambda row: row[0],
                            db.get_by_posting('game_mechanic', 'mechanic',
                                              mechanic_id))))

            mechanics_ids = list(reduce(reduce_intersection, ids))

        if len(self.categories) > 0:
            ids = []
            for category_item in self.categories:
                category_id = category_item.value
                ids.append(
                    list(
                        map(
                            lambda row: row[0],
                            db.get_by_posting('game_category', 'category',
                                              category_id))))

            categories_ids = list(reduce(reduce_intersection, ids))

        results = list(
            intersect(intersect(game_ids, mechanics_ids), categories_ids)
            or [])

        self.present_results(results)
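
Each filter above builds one list of ids per token or selected item, and reduce(reduce_intersection, ids) collapses them to the ids common to all of them. reduce_intersection itself is not shown; a plausible sketch, assuming plain iterables of ids, is:

from functools import reduce

def reduce_intersection(acc, ids):
    # Keep only the ids present both in the accumulator and in the next list.
    return set(acc) & set(ids)
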
Example #3
    def pick_entity(self, pos, ray):
        """Picks an entity and returns it.

        :param pos: The origin of the ray
        :type pos: :class:`mathlib.Vec`

        :param ray: The normalized ray vector
        :type ray: :class:`mathlib.Vec`

        :returns: The entity picked
        :rtype: :class:`game.entities.entity.Entity` or None
        """
        # This is the list of the entities, sorted by z-axis (reverse order)
        entities = sorted(
            [
                self.entities[e_id]
                for e_id in self.server_entities_map.values()
                if self.entities[e_id].bounding_box
            ],
            key=lambda entity: entity.bounding_box[1].z, reverse=True)

        # Check eventual intersections between the ray and the bounding boxes
        for e in entities:
            if intersect(pos, ray, e.bounding_box):
                return e

        return None
 def otherSegmentConnected(self, other):
     '''Need to know if the other segment is connected to this segment.  If we test intersection including endpoints, then the other segment is connected if the intersection is at this segment's endpoints where s = 0 or s = 1'''
     s, t = utils.intersect(self.p1, other.p1, self.vector, other.vector)
     #print("connection check: " + str(s) + ", " + str(t))
     if (s == 0 or s == 1) and (t == 0 or t == 1):
         return True
     return False
Example #5
def crashing_moves(game):
    """Return banned moves where we could potentially crash into another snake."""
    banned_moves = []

    our_neigh = game.me.head().neighbours(False)

    # Find heads that are 8 directional neighbours to us
    for s in game.other_snakes:
        # If another snake's neighbours overlap with ours

        # Only do this if the snake's length is >= ours
        if s.length() < game.me.length():
            continue

        other_neigh = s.head().neighbours(False)
        same_neighbours = utils.intersect(our_neigh, other_neigh)

        # If we have some of the same neighbours,
        #   don't move in that direction
        if len(same_neighbours) > 0:
            for n in same_neighbours:
                # Rank how bad the move is
                # if we are sharing a neighbour that is a food, that is really bad
                # if we are just sharing a neighbour, less bad
                goodness = 0
                if n not in game.foods:
                    goodness = 0.5
                banned_moves += [Move(d, goodness, 'crashing') for d in game.me.moves_to(n)]

    return banned_moves
Example #6
    def most_frequent_old(self):
        freq_words = set(self.news[0].all_text)

        for i in range(1, len(self.news)):
            freq_words = intersect(freq_words, self.news[i].all_text)

        freq_words = {word for word in freq_words if word.upper() not in COUNTRIES}
        freq_dict = dict.fromkeys(freq_words, 0)

        for word in freq_words:
            for new in self.news:
                all_text = new.translated['title'] + new.translated['lead'] + new.translated['content']
                all_text = all_text.lower()
                c = all_text.count(word.lower())
                freq_dict[word] += c

        # Iterate over a snapshot, since entries may be added or removed below.
        for word, c in list(freq_dict.items()):
            for ent in self.name:
                if word in ent and len(ent) > len(word):
                    try:
                        freq_dict[ent] += freq_dict[word]
                        freq_dict[word] = 0
                    except KeyError:
                        try:
                            freq_dict[ent] = freq_dict[word]
                            del freq_dict[word]
                        except KeyError:
                            continue

        ans = sorted(freq_dict.items(), key=operator.itemgetter(1), reverse=True)
        ans = [a[0] for a in ans if a[1] != 0]

        self.frequent = ans

        return ans
Example #7
 def seg_intersect(self, segment: Tuple[Point, Point]) -> bool:
     """
     Return True if the segment intersects the polygon but does not share a vertex.
     """
     return any(
         utils.intersect(*segment, *edge, closed=False)
         for edge in self.segments)
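
The closed=False argument asks for a strict (open) intersection test, so segments that only touch at a shared endpoint do not count. utils.intersect is not shown; a sketch of such an open test using the standard orientation predicate (the names here are illustrative, not the project's actual helper) could be:

def _orient(a, b, c):
    # Twice the signed area of triangle (a, b, c): > 0 left turn, < 0 right turn, 0 collinear.
    return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0])

def intersect_open(p1, p2, q1, q2):
    # Proper crossing only: endpoint contact and collinear overlap return False.
    d1, d2 = _orient(q1, q2, p1), _orient(q1, q2, p2)
    d3, d4 = _orient(p1, p2, q1), _orient(p1, p2, q2)
    return d1 * d2 < 0 and d3 * d4 < 0
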
Example #8
def computeWordOvelap(imgc, word_gt, words, wordsOk, wordsFp):

    best_match = 0
    best_match2 = 0
    for det_word in words:
        try:
            cv2.rectangle(imgc, (det_word[0], det_word[1]),
                          (det_word[2], det_word[3]), (0, 0, 255))
            for gt_box in word_gt:
                rect_int = utils.intersect(det_word, gt_box)
                int_area = utils.area(rect_int)
                union_area = utils.area(utils.union(det_word, gt_box))

                ratio = int_area / float(union_area)
                ratio2 = int_area / utils.area(gt_box)
                if ratio > best_match:
                    best_match = ratio
                    w = det_word
                    best_match2 = ratio2

            if best_match2 > 0.3:
                wordsOk.append(det_word)
            elif best_match == 0:
                wordsFp.append(det_word)
        except:
            pass

    return (best_match, best_match2)
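
Here ratio is the usual intersection-over-union score, while ratio2 measures how much of the ground-truth box is covered by the detection. A tiny worked example, assuming axis-aligned (x_min, y_min, x_max, y_max) boxes as drawn by cv2.rectangle above:

det_word = (0, 0, 10, 10)             # detection, area 100
gt_box = (5, 0, 15, 10)               # ground truth, area 100
int_area = 50                         # overlap is the box (5, 0, 10, 10)
union_area = 100 + 100 - int_area     # 150
ratio = int_area / float(union_area)  # IoU ~= 0.33
ratio2 = int_area / 100.0             # fraction of the GT box covered = 0.5
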
Example #10
def ss(xq, xb, G):
    n, d = xb.shape
    ks = [1, 5, 10, 20, 50, 100, 1000]
    Ts = [2**i for i in range(2 + int(math.log2(n)))]

    nlist = 100
    m = 8
    quantizer = faiss.IndexFlatL2(d)  # this remains the same
    index = faiss.IndexIVFPQ(quantizer, d, nlist, m, 8)
    # 8 specifies that each sub-vector is encoded as 8 bits
    index.train(xb)
    index.add(xb)

    print("# Probed \t Items \t", end="")
    for top_k in ks:
        print("top-%d\t" % (top_k), end="")
    print()
    for t in Ts:
        index.nprobe = t  # make comparable with experiment above
        D, ids = index.search(xq, t)  # search
        items = np.mean([len(row) for row in ids])
        print("%6d \t %6d \t" % (t, items), end="")
        for top_k in ks:
            rc = intersect(G[:, :top_k], ids)
            print("%.4f \t" % (rc / float(top_k)), end="")
        print()
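
G holds the ground-truth neighbour ids for each query, so rc / top_k is the fraction of the true top-k neighbours that were recovered. The intersect helper is not shown in the snippet; one plausible reading (an assumption) is the average per-query overlap between the true top-k ids and the returned ids:

import numpy as np

def intersect(gt_topk, ids):
    # Average number of true neighbours recovered per query.
    return np.mean([len(set(g) & set(r)) for g, r in zip(gt_topk, ids)])
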
 def intersectSegmentRaw(self, segment):
     '''Same as above, but we only look ahead of the ray (not behind it) and we want the full range of t: 0 <= t <= 1'''
     s, t = utils.intersect(self.position, segment.p1, self.direction,
                            segment.vector)
     if s > 0:
         return t
     return None
    def search(self):
        self.results.clear()
        self.results.add_item('No items found!')

        string = self.search_bar.get()

        if string is None:
            return

        tokens = tokenize(string)

        if len(tokens) <= 0:
            return

        db = connect()
        indexes = None

        for token in tokens:
            values = db.postings['publishers_word'].get_values(token)
            if len(values) > 0:
                indexes = list(intersect(indexes, values))

        if indexes is None or len(indexes) == 0:
            return

        self.results.clear()
        for idx in indexes:
            publisher = db.tables['publishers'].load(idx)
            item = ListItem(publisher, publisher['name'])
            self.results.add_item(item)

        self.ui.move_focus(self.results)
Example #14
def test_split_rectangles():
    for tc in test_cases:
        result = split_rectangles(tc)
        info = f'{tc} {result}'
        assert not utils.intersect(result), info
        assert utils.total_area(tc) == sum(r.area for r in result), info
        assert utils.bounding_box(tc) == utils.bounding_box(result), info
Example #15
def find_sequence(layers, periods, symbols, global_start, global_target, args,
                  recorder):
    """Main Algorithm"""

    # number of scales available
    nscales = len(periods)

    # active symbols - everything on scale 0
    active_symbols = [global_start]  # layers[0].ts[symbols[global_start].t[0]].domain
    targets = [global_target]

    i = 0
    tick = 0
    scale = 0
    any_target_found = utils.intersect(targets, active_symbols)
    while not any_target_found:
        for s in active_symbols:
            symbols[s].retrieval_tick = tick

        # fetch all symbols that follow from a given set of active symbols
        next_symbols = layers[scale].expand(active_symbols, symbols, tick)

        # the next active symbols are all those that are not already in the set
        # of active symbols, and for which the retrieval tick is not yet set
        active_symbols = [
            s for s in next_symbols
            if s not in active_symbols and symbols[s].retrieval_tick < 0
        ]

        recorder.record_expansion(scale, tick, global_start, targets,
                                  active_symbols,
                                  utils.intersect(targets, active_symbols))

        # increase scale if we didn't find anything on this one
        if i >= args.max_i:
            scale += 1
            if scale >= nscales:
                scale = nscales - 1
            i = 0
        i += 1
        tick += 1

        # check if we reached the destination
        any_target_found = utils.intersect(targets, active_symbols) != []
        if any_target_found:
            break
 def intersectVector(self, pos, vec):
     '''Does this segment intersect a vector?'''
     s, t = utils.intersect(self.p1, pos, self.vector, vec)
     #print(self.name + " intersects " + other.name + " using (s, t) -> ("+str(s)+", "+str(t)+")")
     #print("s="+str(s)+", t="+str(t))
     if 0 < s < 1 and t >= 0:
         return True
     return False
 def intersectSegmentEndpoints(self, other):
     '''In a special case we want to know when this segment is intersecting the other segment only at this segment's endpoints'''
     #print("Checking endpoint intersections")
     #print(self.name + " ..... " + other.name)
     s, t = utils.intersect(self.p1, other.p1, self.vector, other.vector)
     if (s == 0 or s == 1) and 0 < t < 1:
         return t
     return None
Example #18
    def _is_simple_split(self, u: int, p: Point, v: int) -> bool:
        """
        Return True if connecting u->p and p->v creates two mutually simple 
        polygons within the original polygon.
        """
        u = self.pts[u]
        v = self.pts[v]

        # Checks if u->p or p->v intersects any side of the polygon.
        for i, j in self.segments:
            if u not in (i, j):
                if utils.intersect(u, p, i, j):
                    return False
            if v not in (i, j):
                if utils.intersect(v, p, i, j):
                    return False

        return True
Example #19
def find_sequence_on_scale(layers, scale, symbols, start, targets, args,
                           recorder):
    """Find a sequence from start to target on a certain scale."""

    # initialization
    # active_symbols = [start]

    # this is the horror...
    # XXX: change to [start] to prevent back-activation of symbols?
    active_symbols = layers[scale].ts[symbols[start].t[scale]].domain

    tick = 0
    any_target_found = False
    recorder.record_expansion(scale, tick, start, targets, active_symbols,
                              utils.intersect(targets, active_symbols))

    any_target_found = utils.intersect(targets, active_symbols) != []
    while not any_target_found:
        # update 'retrieval tick' -> this emulates refractory periods of place
        # cells. This comes from marking symbols as "expanded"
        for s in active_symbols:
            symbols[s].retrieval_tick = tick

        # predict next batch of symbols, and remove currently active ones
        next_symbols = layers[scale].expand(active_symbols, symbols, tick)
        active_symbols = [
            s for s in next_symbols
            if s not in active_symbols and symbols[s].retrieval_tick < 0
        ]

        # update time
        tick += 1

        # record everything!!1
        recorder.record_expansion(scale, tick, start, targets, active_symbols,
                                  utils.intersect(targets, active_symbols))

        # check if we reached the destination
        any_target_found = utils.intersect(targets, active_symbols) != []
        if any_target_found:
            break

    return utils.intersect(targets, active_symbols)
Example #20
def computeSegmOverlap(gt_rects, segmentations, MIN_SEGM_OVRLAP=0.6):

    segm2chars = 0

    for k in range(len(gt_rects)):
        gt_rect = gt_rects[k]
        best_match = 0
        best_match_line = 0
        if (gt_rect[4] == ',' or gt_rect[4] == '.' or gt_rect[4] == '\''
                or gt_rect[4] == ':'
                or gt_rect[4] == '-') and not evalPunctuation:
            continue

        best_match2 = 0
        for detId in range(segmentations.shape[0]):
            rectn = segmentations[detId, :]
            rect_int = utils.intersect(rectn, gt_rect)
            int_area = utils.area(rect_int)
            union_area = utils.area(utils.union(rectn, gt_rect))

            ratio = int_area / float(union_area)

            if ratio > best_match:
                best_match = ratio

            if ratio > best_match_line and rectn[7] == 1.0:
                best_match_line = ratio

            gt_rect[5] = best_match
            if best_match < MIN_SEGM_OVRLAP:
                if k < len(gt_rects) - 1:
                    gt_rect2 = gt_rects[k + 1]
                    chars2Rect = utils.union(gt_rect2, gt_rect)
                    rect_int = utils.intersect(rectn, chars2Rect)
                    int_area = utils.area(rect_int)
                    union_area = utils.area(utils.union(rectn, chars2Rect))
                    ratio = int_area / float(union_area)
                    if ratio > best_match2:
                        if ratio > MIN_SEGM_OVRLAP:
                            segm2chars += 1
                            best_match2 = ratio
                            gt_rect[5] = ratio
                            gt_rect2[5] = ratio
Example #21
def lower_envelop(intervals, key):
    assert (np.all(
        [float('inf') > key(interval, interval[0]) for interval in intervals]))
    assert (np.all([
        math.fabs(interval[0] - interval[1]) > eps for interval in intervals
    ]))

    start_points = [(interval[0], i) for (i, interval) in enumerate(intervals)]
    end_points = [(interval[1], i) for (i, interval) in enumerate(intervals)]
    points = sorted(start_points + end_points)
    active_intervals = []
    minimum = []
    while len(points) > 0:
        current_x = points[0][0]
        # process all events
        while len(points) > 0 and points[0][0] <= current_x + eps:
            x, index = points.pop(0)
            if index >= 0:
                interval = intervals[index]
                if interval[0] + eps >= current_x:
                    active_intervals.append(index)
                else:
                    assert (interval[1] + eps >= current_x)
                    active_intervals.remove(index)
        if len(active_intervals) > 0:
            next_x = points[0][0]
            epsilon_key = lambda a: (eps_round(a[0]), eps_round(a[1]))
            sorted_intervals = sorted([(key(intervals[index], current_x),
                                        key(intervals[index], next_x), index)
                                       for index in active_intervals],
                                      key=epsilon_key)
            current_y = sorted_intervals[0][0]
            minimum.append((current_x, sorted_intervals[0][2]))
            for (y_1, y_1_next, index_1), (y_2, y_2_next, index_2) in zip(
                    sorted_intervals[:-1], sorted_intervals[1:]):
                # FIXME this need to be replaced by a real intersection test
                #if y_1 < y_2 and y_1_next > y_2_next:
                x_1_min, x_1_max, _, f_1 = intervals[index_1]
                x_2_min, x_2_max, _, f_2 = intervals[index_2]
                x_min, x_max = intersect((x_1_min, x_1_max),
                                         (x_2_min, x_2_max))
                x_min = max(current_x, x_min)
                if x_min < x_max:
                    intersections = intersect_functions(
                        f_1, f_2, (x_min, x_max))
                    points += [(x, -1) for x in intersections]
                    points = sorted(points)

    xs = [x for x, _ in minimum] + [float('inf')]
    idxs = [index for _, index in minimum]

    result = []
    for x_min, x_max, idx in zip(xs[:-1], xs[1:], idxs):
        result.append((x_min, x_max, idx))
    return result
Example #22
 def getIntersectionMatrix(self):
     '''Get the sMatrix which shows how all of the segments intersect each other'''
     sMatrix = {}  # for each segment, the parameter t at which its line crosses every other segment
     for segment in self.segments:
         sMatrix[segment.name] = {}
         for other in self.segments:
             if other is not segment:
                 s, t = utils.intersect(segment.p1, other.p1,
                                        segment.vector, other.vector)
                 sMatrix[segment.name][other.name] = t
     return sMatrix
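
These Segment and Ray methods all call utils.intersect(p1, q1, v1, v2) and read the result as parameters (s, t) satisfying p1 + s*v1 == q1 + t*v2. The helper itself never appears in the snippets; a minimal sketch under that assumed convention (points and vectors as (x, y) pairs, (None, None) for parallel inputs) is:

def intersect(p1, q1, v1, v2):
    # Solve p1 + s*v1 == q1 + t*v2 for (s, t) using the 2D cross product.
    d = v1[0] * v2[1] - v1[1] * v2[0]
    if d == 0:
        return None, None
    dx, dy = q1[0] - p1[0], q1[1] - p1[1]
    s = (dx * v2[1] - dy * v2[0]) / d
    t = (dx * v1[1] - dy * v1[0]) / d
    return s, t
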
Example #23
def run():
    seed(time.time())
    g = lambda: randint(1, 100)
    while True:
        n = randint(2, 100)
        print(f'n = {n}')
        recs = [Rect(g(), g(), g(), g()) for _ in range(n)]
        result = split_rectangles(recs)
        assert not utils.intersect(result)
        assert utils.bounding_box(recs) == utils.bounding_box(result)
        assert utils.total_area(recs) == sum(r.area for r in result)
 def intersectSegment(self, other, includeEndpoints=False):
     '''Segment-Segment intersection.  Segments physically intersect each other'''
     s, t = utils.intersect(self.p1, other.p1, self.vector, other.vector)
     #print(self.name + " intersects " + other.name + " using (s, t) -> ("+str(s)+", "+str(t)+")")
     if includeEndpoints:
         if 0 < s < 1 and 0 <= t <= 1:
             return True
     else:
         if 0 < s < 1 and 0 < t < 1:
             return True
     return False
 def getOtherIntersectionValue(self, other, includeEndpoints=False):
     '''Get the value of intersection for the other segment where this segment has its endpoint on the other segment'''
     s, t = utils.intersect(self.p1, other.p1, self.vector, other.vector)
     #print(self.name + " intersects " + other.name + " using (s, t) -> ("+str(s)+", "+str(t)+")")
     if includeEndpoints:
         if (s == 0 or s == 1) and 0 <= t <= 1:
             return t
     else:
         if (s == 0 or s == 1) and 0 < t < 1:
             return t
     return None
 def intersectSegment(self, segment, includeEndpoints=False):
     '''Returns the s value indicating where the intersection occurs on the segment.  If there is no intersection, None is returned.  If s is negative, the intersection is behind it.'''
     s, t = utils.intersect(self.position, segment.p1, self.direction,
                            segment.vector)
     if includeEndpoints:
         #print("s, t = " + str(s) + ", " + str(t))
         if 0 <= t <= 1:
             if s != 0 and s != 1:  #still need to ignore endpoints where this segment is connected to
                 return s
     else:
         if 0 < t < 1:
             return s
     return None
    def split(self, other):
        '''Split this segment into 2 segments.  We really just create 1 new Segment and then modify this segment.'''
        segment2 = None  # stays None if neither endpoint of other lies on this segment
        s, t = utils.intersect(self.p1, other.p1, self.vector, other.vector)
        if t == 0:  # other.p1 intersects this segment
            segment2 = Segment(other.p1, self.p2, self.virtual, self.name + "_2")
            self.p2 = other.p1
            #segment1 = Segment(self.p1, other.p1, self.virtual)
        elif t == 1:  # other.p2 intersects this segment
            segment2 = Segment(other.p2, self.p2, self.virtual, self.name + "_2")
            self.p2 = other.p2
            #segment1 = Segment(self.p1, other.p2, self.virtual)
        return segment2
Example #29
def cutout_process(image, boxes, labels, fill_val=0, bbox_remove_thres=0.4):
    image = image[0]
    boxes = boxes[0]
    labels = labels[0]
    original_h = image.size(1)
    original_w = image.size(2)
    original_channel = image.size(0)

    new_image = image

    for _ in range(50):
        # Random cutout size: [0.15, 0.5] of original dimension
        cutout_size_h = random.uniform(0.15 * original_h, 0.5 * original_h)
        cutout_size_w = random.uniform(0.15 * original_w, 0.5 * original_w)

        # Random position for cutout
        left = random.uniform(0, original_w - cutout_size_w)
        right = left + cutout_size_w
        top = random.uniform(0, original_h - cutout_size_h)
        bottom = top + cutout_size_h
        cutout = torch.FloatTensor(
            [int(left), int(top), int(right),
             int(bottom)])

        # Calculate intersect between cutout and bounding boxes
        overlap_size = intersect(cutout.unsqueeze(0), boxes)
        area_boxes = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
        ratio = overlap_size / area_boxes
        # If every box overlaps the cutout by more than bbox_remove_thres, try again
        if ratio.min().item() > bbox_remove_thres:
            continue

        cutout_arr = torch.full(
            (original_channel, int(bottom) - int(top), int(right) - int(left)),
            fill_val)
        new_image[:, int(top):int(bottom), int(left):int(right)] = cutout_arr

        # Create new boxes and labels
        boolean = ratio < bbox_remove_thres

        new_boxes = boxes[boolean[0], :]

        new_labels = labels[boolean[0]]

        return new_image.unsqueeze(dim=0), new_boxes.unsqueeze(
            dim=0), new_labels.unsqueeze(dim=0)
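
overlap_size comes from an intersect helper that returns pairwise intersection areas between two sets of (x_min, y_min, x_max, y_max) boxes, so ratio is the fraction of each box that the cutout covers. The helper is not shown; a common formulation (a sketch, not necessarily this project's exact code) is:

import torch

def intersect(set_1, set_2):
    # Pairwise intersection areas, shape (n1, n2), for corner-format boxes.
    lower = torch.max(set_1[:, None, :2], set_2[None, :, :2])  # top-left corners of the overlaps
    upper = torch.min(set_1[:, None, 2:], set_2[None, :, 2:])  # bottom-right corners of the overlaps
    wh = (upper - lower).clamp(min=0)
    return wh[:, :, 0] * wh[:, :, 1]
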
Example #30
    def find_topics(self, mode={"country": 1, "not_country": 2}):

        """ Find initial topics """

        for row in self.data:
            others = [r for r in self.data if r.country != row.country]
            # others = [r for r in self.data if r.id != row.id]
            for ot in others:

                cw = intersect(row.all_text, ot.all_text)
                # cw = {w for w in cw if w[0].islower() or w in COUNTRIES}

                if count_not_countries(cw) >= mode["not_country"] and count_countries(cw) >= mode["country"]:
                    # if len(cw) >= 4 and count_countries(cw) >= 1:
                    news = [row, ot]
                    new_topic = Topic(cw, news)
                    self.topics.append(new_topic)

        self.topics = list(set(self.topics))
Example #32
 def narrow_docids(self, idx):
     m0 = [decode_array(idx[w]) for w in self.r0 if idx.has_key(w)]
     if self.r0 and not m0:
         return []
     m2 = [decode_array(idx[w]) for w in self.r2 if idx.has_key(w)]
     if self.r2 and not m2:
         return []
     if self.r1:
         try:
             refs = intersect(decode_array(idx[w]) for w in self.r1)
         except KeyError:
             return []
         refs = union(refs, [m for m in (m0, m2) if m])
     elif not self.r2:
         refs = merge(m0)
     else:
         refs = union(merge(m0), [m2])
     # Now: refs = [ docid1,sentid1, docid2,sentid2, ... ]
     locs = [(refs[i], refs[i + 1]) for i in xrange(0, len(refs), 2)]
     return locs
Example #33
    def encode(self, s, block=None):
        """ Encode a message.

        Parameters
        ----------
        s : str
            A message to encode.
        block : int, optional
            Divide output into blocks of this size.  All non-transcodable
            symbols will be stripped.  Specify the value `0` to strip all
            non-transcodable symbols and not divide into blocks.
            Specify the value `None` to disable chunking.  Default `None`.

        Returns
        -------
        out : str
            The encoded message.

        Notes
        -----
        Although this can invoke either `self._encode` or `super().encode`, it
        essentially falls prey to the "call super" antipattern and should
        probably be refactored. [TODO]

        """
        if block is not None:
            # filter message to characters in ciphertext alphabet
            s = intersect(s, self.alphabet)

            if block > 0:
                padding = upward_factor(block, len(s))
                s = s.ljust(padding, self.DEFAULT_NULLCHAR)

        out = super().encode(s)

        if block is not None and block > 0:
            out = ' '.join(chunks(out, block))

        return ''.join(out)
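
Here intersect(s, self.alphabet) filters the message down to transcodable characters, and the block handling pads the result and regroups it into fixed-size chunks. Those helpers are not shown; plausible sketches (assumptions, not the library's actual definitions) are:

def intersect(s, alphabet):
    # Keep only the characters of s that occur in the alphabet, preserving order.
    return ''.join(c for c in s if c in alphabet)

def upward_factor(block, n):
    # Smallest multiple of block that is >= n (used as the padded length).
    return ((n + block - 1) // block) * block

def chunks(s, size):
    # Split s into consecutive pieces of the given size.
    return [s[i:i + size] for i in range(0, len(s), size)]
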
Example #34
    def update_vision(self):
        agents = set()
        for tile in self.tile.neighbors(self.sight):
            agents.update(tile.agents)
        agents.remove(self)

        vision = []
        for sight_dir in utils.linspace(-self.aov / 2, self.aov / 2,
                                        self.n_cell):
            sight_dir = self.direction + sight_dir
            min_sight = 1.0
            for agent in agents:
                sight = utils.intersect(self.x, self.y, self.sight, sight_dir,
                                        agent.x, agent.y, agent.size / 2)
                min_sight = min(sight, min_sight)
            vision.append(min_sight)
        self.vision = vision

        self.reward = 0
        for agent in agents:
            if self.is_collide_with(agent):
                self.reward -= 1
	def handleCollisions(self):
		carNumber = 0
		for car in self.cars:
			if car.isAlive:
				car.update()
				if self.canBeUpdated(carNumber):
					rayPoints = [[], [], [], [], [], [], []]
					car.rayPoints = [[], [], [], [], [], [], []]
					
					for i in range(0, len(self.walls)-1):
						for j in range(0, len(car.edgesPointsAprox)-1):
							if utils.intersect(car.edgesPointsAprox[j], car.edgesPointsAprox[j+1], self.walls[i], self.walls[i+1]):
								if car.isAlive:
									car.isAlive = False
									self.deadCars += 1
								break
						
						# Cast the seven sensor rays against this wall segment and collect the hits per ray
						rayTargets = [car.leftPoint, car.frontLeft2Point, car.frontLeftPoint, car.frontPoint, car.frontRightPoint, car.frontRight2Point, car.rightPoint]
						for r, target in enumerate(rayTargets):
							points = utils.lineRayIntersectionPoint(car.position, (target[0] - car.position[0], target[1] - car.position[1]), self.walls[i], self.walls[i+1])
							if points:
								rayPoints[r] += points
					
					for i in range(0, len(rayPoints)):
						if rayPoints[i]:
							index = 0
							minDist = utils.distance(rayPoints[i][0], car.position)
							for j in range(0, len(rayPoints[i])):
								dist = utils.distance(rayPoints[i][j], car.position)
								if dist < minDist:
									minDist = dist;
									index = j
							car.inputs[i] = minDist
							car.rayPoints[i] = int(round(rayPoints[i][index][0])), int(round(rayPoints[i][index][1]))
						else:
							car.rayPoints[i] = (0, 0)
					
					car.inputs = [float(i)/max(car.inputs) for i in car.inputs]
					
					for i in range(0, len(self.cookies)):
						for j in range(0, len(car.edgesPointsAprox)-1):
							if utils.intersect(car.edgesPointsAprox[j], car.edgesPointsAprox[j+1], self.cookies[i][0], self.cookies[i][1]):
								f = lambda a,b: a if (a > b) else b
								maxCookie = -1
								if car.lastsCookies:
									maxCookie = functools.reduce(f, car.lastsCookies)
								if i not in car.lastsCookies and i > maxCookie:
									car.lastsCookies.append(i)
									car.incrementFitness()
									if len(car.lastsCookies) > 5:
										car.lastsCookies.pop(0)
			carNumber += 1
 def getGenerator(self):
     """Build and return a Generator instance from an abstract
     specification."""
     ### Instantiate (flatten) target model structured specification
     ## Flatten ModelSpec self.mspec using indepvarname global and inputs
     # and using connectivity bindings (latter not yet implemented)
     globalRefs = [self.indepvarname] + self.inputs.keys()
     try:
         flatspec = self.mspec.flattenSpec(self.unravelInfo, globalRefs, ignoreInputs=True)
     except:
         print "Problem flattening Model Spec '%s'" % self.mspec.name
         print "Global refs: ", globalRefs
         raise
     FScompatibleNames = flatspec["FScompatibleNames"]
     FScompatibleNamesInv = flatspec["FScompatibleNamesInv"]
     ## Check target Generator info
     if self.targetGen in self.mspec.compatibleGens and self.targetGen in theGenSpecHelper:
         gsh = theGenSpecHelper(self.targetGen)
         if gsh.lang not in self.mspec.targetLangs:
             raise ValueError("Incompatible target language between supplied" " ModelSpec and target Generator")
     else:
         print "ModelSpec's compatible Generators:", ", ".join(self.mspec.compatibleGens)
         print "ModelConstructor target Generator:", self.targetGen
         raise ValueError("Target Generator mismatch during generator " "construction")
     self.targetLang = gsh.lang
     ## Make Generator initialization argument dictionary
     a = args()
     if self.abseps is not None:
         a.abseps = self.abseps
     a.pars = {}
     parnames = flatspec["pars"].keys()
     for p, valstr in flatspec["pars"].iteritems():
         if valstr == "":
             if FScompatibleNamesInv(p) not in self.parvalues:
                 raise ValueError("Parameter %s is missing a value" % FScompatibleNamesInv(p))
         else:
             try:
                 a.pars[p] = float(valstr)
             except ValueError:
                 raise ValueError("Invalid parameter value set in ModelSpec" " for '%s'" % p)
     # override any par vals set in ModelSpec with those explicitly set
     # here
     for p, val in self.parvalues.iteritems():
         try:
             pr = FScompatibleNames(p)
         except KeyError:
             raise NameError("Parameter '%s' missing from ModelSpec" % p)
         if pr not in flatspec["pars"]:
             raise NameError("Parameter '%s' missing from ModelSpec" % p)
         a.pars[pr] = val
     if self.icvalues != {}:
         a.ics = {}
         for v, val in self.icvalues.iteritems():
             try:
                 vr = FScompatibleNames(v)
             except KeyError:
                 raise NameError("Variable '%s' missing from ModelSpec" % v)
             if vr not in flatspec["vars"]:
                 raise NameError("Variable '%s' missing from ModelSpec" % v)
             a.ics[vr] = val
     a.tdomain = self.indepvardomain
     # a.ttype = 'float' or 'int' ?
     a.inputs = self.inputs
     a.name = self.mspec.name
     xdomain = {}
     pdomain = {}
     for k, d in flatspec["domains"].iteritems():
         # e.g. d == (float, Continuous, [-Inf, Inf])
         assert d[1] == gsh.domain, "Domain mismatch with target Generator"
         if k in flatspec["vars"]:
             assert len(d[2]) == 2, "Domain spec must be a valid interval"
             xdomain[k] = d[2]
         elif k in flatspec["pars"]:
             assert len(d[2]) == 2, "Domain spec must be a valid interval"
             pdomain[k] = d[2]
     a.xdomain = xdomain
     a.pdomain = pdomain
     exp_vars = [v for (v, t) in flatspec["spectypes"].items() if t == "ExpFuncSpec"]
     rhs_vars = [v for (v, t) in flatspec["spectypes"].items() if t == "RHSfuncSpec"]
     imp_vars = [v for (v, t) in flatspec["spectypes"].items() if t == "ImpFuncSpec"]
     if gsh.specType == "RHSfuncSpec":
         assert imp_vars == [], "Cannot use implicitly defined variables"
         assert self.forcedAuxVars == [], "Cannot force auxiliary variables"
         varnames = rhs_vars
         auxvarnames = exp_vars
     elif gsh.specType == "ExpFuncSpec":
         assert imp_vars == [], "Cannot use implicitly defined variables"
         assert rhs_vars == [], "Cannot use RHS-type variables"
         varnames = exp_vars
         invalid_auxvars = remain(self.forcedAuxVars, varnames)
         if invalid_auxvars == []:
             # then all forced aux varnames were legitimate
             # so remove them from varnames and put them in auxvarnames
             varnames = remain(varnames, self.forcedAuxVars)
             auxvarnames = self.forcedAuxVars
         else:
             print "Invalid auxiliary variable names:"
             print invalid_auxvars
             raise ValueError("Forced auxiliary variable names were invalid")
     elif gsh.specType == "ImpFuncSpec":
         assert rhs_vars == [], "Cannot use RHS-type variables"
         varnames = imp_vars
         auxvarnames = exp_vars
     # search for explicit variable interdependencies and resolve by
     # creating 'reuseterms' declarations, substituting in the cross-ref'd
     # definitions
     # e.g. state variables v and w, and explicit aux vars are given by:
     #     x = 1+v
     #     y = f(x) + w
     # Here, y illegally depends on x, so define a 'reused' temporary
     # definition, and re-write in terms of that:
     #     temp = 1+v
     #     x = temp
     #     y = f(temp) + w
     # e.g. state variables v and w, aux var x:
     #     v' = 1-v -f(x)
     # Here, v illegally uses an auxiliary variable on the RHS, so make
     # a 'reused' substitution as before
     #
     # first pass to find which substitutions are needed
     reuseTerms, subsExpr = processReused(
         varnames + auxvarnames, auxvarnames, flatspec, self.mspec._registry, FScompatibleNames, FScompatibleNamesInv
     )
     clash_reused = intersect(reuseTerms.keys(), self.reuseTerms.keys())
     if clash_reused != []:
         print "Clashing terms:", clash_reused
         raise ValueError("User-supplied reused terms clash with auto-" "generated terms")
     # second pass, this time to actually make the substitutions
     for v in subsExpr:
         flatspec["vars"][v] = subsExpr[v]
     reuseTerms.update(self.reuseTerms)
     a.reuseterms = reuseTerms
     a.varspecs = dict(zip(varnames + auxvarnames, [flatspec["vars"][v] for v in varnames + auxvarnames]))
     a.auxvars = auxvarnames
     try:
         a.fnspecs = flatspec["auxfns"]
     except KeyError:
         # no aux fns defined!
         pass
     a.checklevel = self.checklevel
     a.algparams = self.algparams
     if self.vfcodeinsert_start != "":
         a.vfcodeinsert_start = self.vfcodeinsert_start
     if self.vfcodeinsert_end != "":
         a.vfcodeinsert_end = self.vfcodeinsert_end
     ## Events
     events = []
     # make events from bound constraints (activated accordingly)
     # (parameter bounds only useful for continuation with PyCont)
     domnames = varnames + parnames
     for xname in domnames:
         hier_name_lo = FScompatibleNamesInv(xname) + "_domlo"
         FScompatibleNames[hier_name_lo] = xname + "_domlo"
         FScompatibleNamesInv[xname + "_domlo"] = hier_name_lo
          hier_name_hi = FScompatibleNamesInv(xname) + "_domhi"
         FScompatibleNames[hier_name_hi] = xname + "_domhi"
         FScompatibleNamesInv[xname + "_domhi"] = hier_name_hi
     if self.activateAllBounds:
         a.activatedbounds = {}.fromkeys(domnames, (True, True))
     else:
         a.activatedbounds = self.activatedBounds
     a.enforcebounds = True
     # add events from user events
     for e in self.userevents:
         if e not in events:
             events.append(e)
         else:
             raise ValueError, "Repeated event definition!"
     #        events.extend(self.userevents)
     a.events = events
     # Add any additional special options (e.g. 'nobuild' directive)
     for k, v in self.optDict.iteritems():
         if hasattr(a, k):
             raise KeyError("'%s' already exists as a Generator argument" % k)
          setattr(a, k, v)
     # keep a copy of the arguments in self for users to see what was done
     self.conargs = a
     self.FScompatibleNames = FScompatibleNames
     self.FScompatibleNamesInv = FScompatibleNamesInv
     ## Make Generator
     try:
         return eval("Generator." + self.targetGen + "(a)")
     except:
         print "Problem initializing target Generator '%s'" % self.targetGen
         raise
Example #37
def evaluate_image(batch, detections, word_gto, iou_th=0.3, iou_th_vis=0.5, iou_th_eval=0.4):
    
  '''
  Summary : Returns end-to-end true-positives, detection true-positives, number of GT to be considered for eval (len > 2).
  Description : For each predicted bounding-box, a comparison is made with each GT entry. The numbers of end-to-end true
                positives, detection true positives, and GT entries to be considered for evaluation are computed.
  
  Parameters
  ----------
  iou_th_eval : float
      Threshold value of intersection-over-union used for evaluation of predicted bounding-boxes
  iou_th_vis : float
      Threshold value of intersection-over-union used for visualization when the transcription is correct but the IoU is lower.
  iou_th : float
      Threshold value of intersection-over-union between GT and prediction.
  word_gto : list of lists
      List of ground-truth bounding boxes along with transcription.
  batch : list of lists
      List containing data (input image, image file name, ground truth).
  detections : tuple of tuples
      Tuple of predicted bounding boxes along with transcriptions and text/no-text score.
  
  Returns
  -------
  tp : int
      Number of predicted bounding-boxes having IoU with GT greater than iou_th_eval.
  tp_e2e : int
      Number of predicted bounding-boxes having the same transcription as GT and len > 2.
  gt_e2e : int
      Number of GT entries for which transcription len > 2.
  '''
  
  gt_to_detection = {}
  tp = 0
  tp_e2e = 0
  gt_e2e = 0
  
  draw = batch[4][0]    
  normFactor = math.sqrt(draw.shape[1] * draw.shape[1] + draw.shape[0] * draw.shape[0]) # Normalization factor
  for i in range(0, len(detections)):
      
    det = detections[i]
    boxr = det[0]
    box = cv2.boxPoints(boxr) # Predicted bounding-box parameters
    box = np.array(box, dtype="int") # Convert predicted bounding-box to numpy array
    bbox = cv2.boundingRect(box)
    
    bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
    bbox[2] += bbox[0] # Convert width to right-coordinate
    bbox[3] += bbox[1] # Convert height to bottom-coordinate
    
    vis.draw_box_points(draw, box, color = (255, 0, 0))
    
    det_text = det[1][0] # Predicted transcription for bounding-box
    #print(det_text)
    
    for gt_no in range(len(word_gto)):
        
      gt = word_gto[gt_no]
      txt = gt[5] # GT transcription for given GT bounding-box
      gtbox  = ((gt[0] * draw.shape[1], gt[1] * draw.shape[0]), (gt[2] * normFactor, gt[3] * normFactor), gt[4] * 180 / 3.14) # Re-scaling GT values
      gtbox = cv2.boxPoints(gtbox)
      gtbox = np.array(gtbox, dtype="int")
      rect_gt = cv2.boundingRect(gtbox)
      
      
      rect_gt = [rect_gt[0], rect_gt[1], rect_gt[2], rect_gt[3]]
      rect_gt[2] += rect_gt[0] # Convert GT width to right-coordinate
      rect_gt[3] += rect_gt[1] # Convert GT height to bottom-coordinate 

      inter = intersect(bbox, rect_gt) # Intersection of predicted and GT bounding-boxes
      uni = union(bbox, rect_gt) # Union of predicted and GT bounding-boxes
      ratio = area(inter) / float(area(uni)) # IoU measure between predicted and GT bounding-boxes
      
      # 1). Visualize the predicted-bounding box if IoU with GT is higher than IoU threshold (iou_th) (Always required)
      # 2). Visualize the predicted-bounding box if transcription matches the GT and condition 1. holds
      # 3). Visualize the predicted-bounding box if transcription matches and IoU with GT is less than iou_th_vis and 1. and 2. hold
      if ratio > iou_th:
        vis.draw_box_points(draw, box, color = (0, 128, 0))
        if not gt_to_detection.has_key(gt_no):
          gt_to_detection[gt_no] = [0, 0]
            
        if txt.lower() == det_text.lower():
          to_cls_x.append([len(det_text), det[1][1], det[1][2], det[1][3]])
          to_cls_y.append(1)
          vis.draw_box_points(draw, box, color = (0, 255, 0), thickness=2)
          gt[7] = 1 # Change this parameter to 1 when predicted transcription is correct.
          
          if ratio < iou_th_vis:
              vis.draw_box_points(draw, box, color = (255, 255, 255), thickness=2)
              cv2.imshow('draw', draw) 
              #cv2.waitKey(0)
                
        else:
          to_cls_x.append([len(det_text), det[1][1], det[1][2], det[1][3]])
          to_cls_y.append(0)
          
        tupl = gt_to_detection[gt_no] 
        if tupl[0] < ratio:
          tupl[0] = ratio 
          tupl[1] = i   
                  
  # Count the number of end-to-end and detection true-positives
  for gt_no in range(len(word_gto)):
    gt = word_gto[gt_no]
    txt = gt[5]
    if len(txt) > 2:
      gt_e2e += 1
      if gt[7] == 1:
        tp_e2e += 1
            
    if gt_to_detection.has_key(gt_no):
      tupl = gt_to_detection[gt_no] 
      if tupl[0] > iou_th_eval: # Increment detection true-positive, if IoU is greater than iou_th_eval
        tp += 1             
          
  cv2.imshow('draw', draw)             
  return tp, tp_e2e, gt_e2e 
Example #38
def run_evaluation(inputDir, outputDir, process_color = 0, processTest = 0):
    
    if not os.path.exists(outputDir):
        os.mkdir(outputDir)

    edgeThreshold = 14
    fastex = FASTex(edgeThreshold = edgeThreshold)
    
    modelFile = '/home/busta/outModel.boost'
    model = cv2.Boost()
    model.load(modelFile)
    images = glob.glob('{0}/*.jpg'.format(inputDir))
    
    segmDir = '{0}/segmentations'.format(inputDir)
    
    precision = 0
    precisionDen = 0
    recall = 0
    recall05 = 0
    recallNonMax = 0
    recallDen = 0
    wordRecall = 0
    wordRecallDen = 0
    segm2chars = 0 
    
    regionsCount = 0
    regionsCountNonMax = 0
    missing_segmNonMaxCount = 0
    
    letterKeypointHistogram = defaultdict(lambda : defaultdict(float))
    octaveLetterKeypointHistogram = defaultdict(lambda : defaultdict(float))
    missing_letters = {}
    letterHistogram = defaultdict(int)
    missing_segm = {}
    missing_segm2 = {}
    missing_segmNonMax = {}
    diffMaxOctavesMap = {}
    diffScoreOctavesMap = {}
    segmHistogram = []
    segmWordHistogram = []
    
    results = []  
    hist = None
    histFp = None
    histDist = None
    histDistFp = None
    histDistMax = None
    histDistMaxWhite = None
    histDistMaxFp = None
    hist2dDist = None
    hist2dDistFp = None
    hist2dDistScore = None
    hist2dDistScoreFp = None
    histDistMaxWhiteFp = None
    
    histSegm = np.zeros((256), dtype = np.float)
    histSegmCount = np.zeros((256), dtype = np.int)
    stat = np.asarray([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=np.float)
    times = []
    gtSegmCount = 0
    wordsOk = []
    wordsFp = []
    
    keypointsTotal = 0
    keypointsTotalInside = 0
    orbTime = 0
    
    lineNo = 0
    perfectWords = 0
    perfectWordsNS = 0
    
    hasSegm = False
    
    for image in images:
        print('Processing {0}'.format(image))
        
        img = cv2.imread(image, 0)
        imgc = cv2.imread(image)
        imgcO = cv2.imread(image)
        if process_color == 1:
            imgproc = imgc
        else:
            imgproc = img
        
        baseName = os.path.basename(image)
        
        
        baseName = baseName[:-4]
        workPoint = 0.3
        segmentations = fastex.getCharSegmentations(imgproc, outputDir, baseName)
        segmentations = segmentations[:, 0:10]
    
        segmentations = np.column_stack( [ segmentations , np.zeros( (segmentations.shape[0], 2), dtype = np.float ) ] )
        maskDuplicates = segmentations[:, 8] == -1
        segmentationsDuplicates = segmentations[maskDuplicates, :]
        maskNoNei = segmentationsDuplicates[:, 9] > workPoint
        segmentationsNoNei = segmentationsDuplicates[maskNoNei, :]
        if segmentations.shape[0] > 0:
            print( 'Dupl ratio: {0} - {1}/ {2} - {3}'.format(segmentationsDuplicates.shape[0] / float(segmentations.shape[0]), segmentationsDuplicates.shape[0], segmentations.shape[0], segmentationsNoNei.shape[0] ) )
        keypoints = fastex.getLastDetectionKeypoints()
        keypointsTotal += keypoints.shape[0]
        statc =  fastex.getDetectionStat()
    
        times.append([ statc[1], statc[2], statc[3], statc[4], statc[5], statc[6], statc[7], statc[8], statc[9], statc[10]])
        stat += statc
        values = img[ keypoints[:, 1].astype(int), keypoints[:, 0].astype(int) ]
        valuesMax = img[keypoints[:, 6].astype(int), keypoints[:, 5].astype(int)]
        diffValMax = np.abs(values - valuesMax)
        
        
        regionsCount += segmentations.shape[0]
        regionsCountNonMax += segmentationsNoNei.shape[0]
       
        segmentations[:, 2] += segmentations[:, 0]
        segmentations[:, 3] += segmentations[:, 1]
        
        keypointsOrb = fastex.getLastDetectionOrbKeypoints()
        orbTime += keypointsOrb[0][9]
            
            
        segmGt = '{0}/{1}_GT.txt'.format(segmDir, baseName)
        pden = 0
        rden = 0
        if os.path.exists(segmGt):
            hasSegm = True
            (gt_rects, groups) = utls.read_icdar2013_segm_gt(segmGt)
            segmImg = '{0}/{1}_GT.bmp'.format(segmDir, baseName)
            if not os.path.exists(segmImg):
                segmImg = '{0}/gt_{1}.png'.format(segmDir, baseName)
            segmImg = cv2.imread(segmImg)
            
            try:
                (hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp, keypointsInside) = collect_histograms(img, segmImg, keypoints, values, diffValMax, keypointsTotalInside, diffMaxOctavesMap, diffScoreOctavesMap, hist, histFp, histDist, histDistMax, histDistMaxWhite, hist2dDist, hist2dDistScore, histDistFp, histDistMaxFp, histDistMaxWhiteFp, hist2dDistFp, hist2dDistScoreFp)
            except:
                pass
                    
            rcurrent = 0
            rcurrent05 = 0
            rcurrentNonMax = 0
            for k in range(len(gt_rects)):
                gt_rect = gt_rects[k]
                best_match = 0
                best_match_line = 0
                if (gt_rect[4] == ',' or gt_rect[4] == '.' or gt_rect[4] == '\'' or gt_rect[4] == ':' or gt_rect[4] == '-') and not evalPunctuation:
                    continue
                
                gtSegmCount += 1
                
                rectMask = np.bitwise_and(np.bitwise_and( keypointsInside[:, 0] >= gt_rect[0], keypointsInside[:, 0] <= gt_rect[2]), np.bitwise_and(keypointsInside[:, 1] >= gt_rect[1], keypointsInside[:, 1] <= gt_rect[3]))
                letterInside =  keypointsInside[rectMask, :]
                
                #make keypoints histogram 
                if letterInside.shape[0] > 0:
                    octaves = np.unique( letterInside[:, 2])
                    maxOctave = np.max(octaves)
                    maxOctavePoints = 0
                    
                    for i in range(int(maxOctave) + 1):
                        octavePoints = letterInside[letterInside[:, 2] == i, :]
                        maxOctavePoints = max(maxOctavePoints, octavePoints.shape[0])
                    if maxOctavePoints > 0:
                        octaveLetterKeypointHistogram[gt_rect[4]][0] += 1
                    if maxOctavePoints > 1:
                        octaveLetterKeypointHistogram[gt_rect[4]][1] += 1
                    if maxOctavePoints > 2:
                        octaveLetterKeypointHistogram[gt_rect[4]][2] += 1
                    if maxOctavePoints > 3:
                        octaveLetterKeypointHistogram[gt_rect[4]][3] += 1
                    
                    
                
                if letterInside.shape[0] == 0:
                    if not missing_letters.has_key(gt_rect[4]):
                        missing_letters[gt_rect[4]] = []
                    missing_letters[gt_rect[4]].append( (image, gt_rect) )  
                if letterInside.shape[0] > 0:
                    letterKeypointHistogram[gt_rect[4]][0] += 1
                if letterInside.shape[0] > 1:
                    letterKeypointHistogram[gt_rect[4]][1] += 1
                if letterInside.shape[0] > 2:
                    letterKeypointHistogram[gt_rect[4]][2] += 1
                if letterInside.shape[0] > 3:
                    letterKeypointHistogram[gt_rect[4]][3] += 1
                     
                letterHistogram[gt_rect[4]] += 1
                
                best_match2 = 0 
                minSingleOverlap = MIN_SEGM_OVRLAP
                if gt_rect[4] == 'i' or gt_rect[4] == '!':
                    minSingleOverlap = 0.5
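                # compare this GT character against every detected segmentation by intersection-over-union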
                 
                for detId in range(segmentations.shape[0]):
                    rectn = segmentations[detId, :]
                    rect_int =  utils.intersect( rectn, gt_rect )
                    int_area = utils.area(rect_int)
                    union_area = utils.area(utils.union(rectn, gt_rect))
                
                    ratio = int_area / float(union_area)
                    rectn[10] = max(ratio, rectn[10])
                    
                    if rectn[9] > workPoint:
                        gt_rect[6] =  max(ratio, gt_rect[6])
                    
                    if ratio > best_match:
                        best_match = ratio
                        best_segm = segmentations[detId, :]
                        
                    if ratio > best_match_line and rectn[7] == 1.0 :
                        best_match_line = ratio
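                    # a detection may span two characters: if no single-character match is good enough,
                    # also try the union of this GT box with the next one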
                        
                    if best_match < minSingleOverlap: 
                        if k < len(gt_rects) - 1:
                            gt_rect2 = gt_rects[k + 1]
                            chars2Rect = utils.union(gt_rect2, gt_rect)
                            rect_int = utils.intersect( rectn, chars2Rect )
                            int_area = utils.area(rect_int)
                            union_area = utils.area(utils.union(rectn, chars2Rect))
                            ratio = int_area / float(union_area)
                            rectn[10] = max(ratio, rectn[10]) 
                            if ratio > best_match2:
                                if ratio > MIN_SEGM_OVRLAP:
                                    segm2chars += 1
                                    best_match2 = ratio
                                    gt_rect[5] = ratio
                                    gt_rect2[5] = ratio
                       
                thickness = 1
                color = (255, 0, 255)
                if best_match >= minSingleOverlap:
                    color = (0, 255, 0)
                if best_match > 0.7:
                    thickness = 2
                cv2.rectangle(imgc, (gt_rect[0], gt_rect[1]), (gt_rect[2], gt_rect[3]), color, thickness)
                        
                recall += best_match
                recallNonMax += gt_rect[6]
                if best_match >= minSingleOverlap:
                    recall05 += best_match
                    rcurrent05 += best_match
                else:
                    if image not in missing_segm:
                        missing_segm[image] = []
                    missing_segm[image].append(gt_rect)
                    
                    if gt_rect[5] < MIN_SEGM_OVRLAP:
                        if image not in missing_segm2:
                            missing_segm2[image] = []
                        missing_segm2[image].append(gt_rect)
                        segm2chars += 1
                
                if gt_rect[6] < minSingleOverlap:
                    if image not in missing_segmNonMax:
                        missing_segmNonMax[image] = []
                    missing_segmNonMax[image].append(gt_rect)
                    missing_segmNonMaxCount += 1
                        
                    
                rcurrent += best_match
                rcurrentNonMax += gt_rect[6]
                recallDen +=  1   
                rden += 1
                
                if best_match > 0 and process_color != 1:
                    val = img[best_segm[5], best_segm[4]]
                    histSegm[val] += best_match
                    histSegmCount[val] += 1
                
            pcurrent = 0
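            # precision: for each detected segmentation, take its best IoU with any GT character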
            for detId in range(segmentations.shape[0]):
                best_match = 0
                rectn = segmentations[detId, :]
                
                for gt_rect in gt_rects:
                    rect_int =  utils.intersect( rectn, gt_rect )
                    int_area = utils.area(rect_int)
                    union_area = utils.area(utils.union(rectn, gt_rect))
                    
                    ratio = int_area / float(union_area)
                    
                    if ratio > best_match:
                        best_match = ratio
                
                precision += best_match
                pcurrent += best_match
                precisionDen +=  1   
                pden += 1
                
        
        if pden == 0:
            pcurrent = 0
        else:
            pcurrent = pcurrent / pden
            
        if rden == 0:
            rcurrent = 0
            rcurrent05 = 0
            rcurrentNonMax = 0
        else:
            rcurrent = rcurrent / rden
            rcurrent05 = rcurrent05 / rden
            rcurrentNonMax = rcurrentNonMax / rden
        
        
        segmHistogram.append([ segmentations.shape[0], segmentations[segmentations[:, 10] > 0.4].shape[0], segmentations[segmentations[:, 10] > 0.5].shape[0], segmentations[segmentations[:, 10] > 0.6].shape[0], segmentations[segmentations[:, 10] > 0.7].shape[0] ])
        
        segmWordHistogram.append([segmentations.shape[0], segmentations[np.bitwise_or(segmentations[:, 10] > 0.5, segmentations[:, 11] > 0.5 )].shape[0]])
        
        results.append((baseName, rcurrent, pcurrent, rcurrent05))

    
    if precisionDen == 0:
        precision = 0
    else:
        precision = precision / precisionDen
        
    if recallDen == 0:
        recall = 0
        recall05 = 0
        recallNonMax = 0
    else:
        recall = recall / recallDen
        recall05 = recall05 / recallDen
        recallNonMax = recallNonMax / recallDen
        
    wordRecall = wordRecall / max(1, wordRecallDen)
            
    try:
        histSegm = histSegm / max(1, histSegmCount)
    except ValueError:
        pass
    
    print('Evaluation Results:')
    print( 'recall: {0}, precision: {1}, recall 0.5: {2}, recall NonMax: {3}'.format(recall, precision, recall05, recallNonMax) )
    
    kpTimes = np.histogram(np.asarray(times)[:, 0], bins=20)
    print('Keypoint Time Histogram: {0}'.format(kpTimes))
    
    
    print('Detection statistics:')    
    print(stat)
    
    for letter in letterKeypointHistogram.keys():
        for num in letterKeypointHistogram[letter].keys():
            letterKeypointHistogram[letter][num] = letterKeypointHistogram[letter][num] / float(letterHistogram[letter])
        for num in octaveLetterKeypointHistogram[letter].keys():
            octaveLetterKeypointHistogram[letter][num] = octaveLetterKeypointHistogram[letter][num] / float(letterHistogram[letter])
        letterKeypointHistogram[letter] = dict(letterKeypointHistogram[letter])
        octaveLetterKeypointHistogram[letter] = dict(octaveLetterKeypointHistogram[letter])
    
    print('Perfect words: {0}'.format(perfectWords))
        
    eval_date = datetime.date.today()
    np.savez('{0}/evaluation'.format(outputDir), recall=recall, recall05 = recall05, recallNonMax=recallNonMax, precision=precision, eval_date=eval_date, regionsCount=regionsCount, inputDir = inputDir, hist = hist, histSegm = histSegm, stat=stat, letterKeypointHistogram = dict(letterKeypointHistogram), missing_letters=missing_letters, octaveLetterKeypointHistogram=dict(octaveLetterKeypointHistogram), missing_segm=missing_segm, 
             times=np.asarray(times), histFp = histFp, gtSegmCount = gtSegmCount, wordRecall=wordRecall, histDist=histDist, histDistFp = histDistFp, histDistMax=histDistMax, histDistMaxFp=histDistMaxFp, hist2dDist=hist2dDist, hist2dDistFp=hist2dDistFp, hist2dDistScore=hist2dDistScore, hist2dDistScoreFp=hist2dDistScoreFp, histDistMaxWhite=histDistMaxWhite, histDistMaxWhiteFp=histDistMaxWhiteFp, wordsOk=wordsOk, wordsFp=wordsFp, diffMaxOctavesMap = diffMaxOctavesMap, diffScoreOctavesMap = diffScoreOctavesMap, 
             missing_segm2=missing_segm2, segmHistogram=segmHistogram, segmWordHistogram=segmWordHistogram, regionsCountNonMax=regionsCountNonMax, missing_segmNonMax=missing_segmNonMax)
    
    print( "GT segmentations count {0}".format(gtSegmCount) )
    print('FasText Inside {0}/{1} ({2})'.format(keypointsTotalInside, keypointsTotal, keypointsTotalInside / float(keypointsTotal) ))
    print('FasText time: {0}, Orb time: {1} '.format( np.sum(times, 0)[0], orbTime))
    print('2 Chars Segmentation: {0}'.format(segm2chars) )
    print('NonMax Regions Count: {0}/{1}'.format(regionsCountNonMax, missing_segmNonMaxCount))
Example #39
0
def process_batch(nets, optim, optim2, image_size, args):
  global it, mean_loss, mean_rec
  
  net, net_ctc = nets
  
  net = net.net
  net_ctc = net_ctc.net
  
  
  net.blobs['data'].reshape(args.batch_size,1,image_size[1],image_size[0])
  net.reshape()
      
  it += 1 
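  # one solver step of the localization net (optim2) fills net.blobs ('data', 'boxes', GT blobs) for this batch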
  
  optim2.step(1)
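  # rebuild a displayable uint8 image from the normalized network input (values assumed to be roughly in [-1, 1])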
  
  im = net.blobs['data'].data[...]
  draw = np.swapaxes(im,2,3)
  draw = np.swapaxes(draw,1,3)
  im_ctc = np.copy(draw)
  draw += 1
  draw *= 128
  draw = np.array(draw, dtype="uint8").copy() 
  
  
  if args.debug:
    grid_step = 16
    line = 0
    while line < image_size[0]:
      cv2.line(draw[0], (0, line), (image_size[1], line), (128, 128, 128))
      line += grid_step
  
  boxes  =  net.blobs['boxes'].data[...]
                 
  word_gtob = net.blobs['gt_boxes'].data[...]
  word_txt = net.blobs['gt_labels'].data[...]
  
  lines_gtob = net.blobs['line_boxes'].data[...]
  lines_txt = net.blobs['line_labels'].data[...]
  
  #nms = boxeso[:, 0, 0, 8] == 0
  #boxes = boxes[:, :, nms, :]
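  # detected boxes are in normalized coordinates: x,y scaled by the image size, w,h by the image diagonal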
  
  boxes[:, 0, :, 0] *= image_size[0]
  boxes[:, 0, :, 1] *= image_size[1]
  normFactor = math.sqrt(image_size[1] * image_size[1] + image_size[0] * image_size[0])
  boxes[:, 0, :, 2] *= normFactor
  boxes[:, 0, :, 3] *= normFactor
  
  sum_cost = 0
  count = 0
  
  labels_gt = []
  labels_det = []
  
  gt_to_detection = {}
  net_ctc.clear_param_diffs()
  
  
  batch_buckets = []    
  dummy = {} 
  
  matched_detections = 0
  for bid in range(im.shape[0]):
    
    o_image = net.layers[0].get_image_file_name(bid)
    o_image = cv2.imread(o_image, cv2.IMREAD_GRAYSCALE)
    cx = net.layers[0].get_crop(bid, 0)
    cy = net.layers[0].get_crop(bid, 1)
    cmx = net.layers[0].get_crop(bid, 2)
    cmy = net.layers[0].get_crop(bid, 3)
    o_image = o_image[cy:cmy, cx:cmx]
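    # count valid detections; the box list ends at a zero box or a confidence below 0.01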
    
    boxes_count = 0
    for i in range(0, boxes.shape[2]):
      det_word = boxes[bid, 0, i]
      if (det_word[0] == 0 and det_word[1] == 0) or det_word[5] < 0.01:
          break
      boxes_count += 1
        
    x = [i for i in range(boxes_count)]
    #random.shuffle(x)
    
    bucket_images = {}
    batch_buckets.append(bucket_images)
    
    word_gto = word_gtob[bid]
    word_gto_txt = word_txt[bid]
    gt_count = 0 
    for gt_no in range(word_gto.shape[0]):
      gt = word_gto[gt_no, :]
      gt = gt.reshape(6)
      gtnum = 1000 * bid +  gt_no
      
      if gt[5] == -1:
        #print("ignore gt!")
        continue
      
      gt_count += 1
                  
      txt = word_gto_txt[gt_no, :]
      gtbox  = ((gt[0] * image_size[0], gt[1] * image_size[1]), (gt[2] * normFactor, gt[3] * normFactor), gt[4] * 180 / 3.14)
      gtbox = cv2.boxPoints(gtbox)
      
      gtbox = np.array(gtbox, dtype="int")
      rect_gt = cv2.boundingRect(gtbox)

      if rect_gt[0] == 0 or rect_gt[1] == 0 or  rect_gt[0] + rect_gt[2]  >= image_size[0] or rect_gt[1] + rect_gt[3]  >= image_size[1]:
        continue
      
      if gt[3] * normFactor <  3:
        if args.debug:
          #print('too small gt!')
          vis.draw_box_points(draw[bid], gtbox, color = (255, 255, 0))
          cv2.imshow('draw', draw[bid])
        continue
        
      if args.debug:
        vis.draw_box_points(draw[bid], gtbox, color = (0, 0, 0), thickness=2)
      
      #vis.draw_box_points(draw[bid], gtbox, color = (255, 255, 255))
      #cv2.imshow('draw', draw[bid])
      
      rect_gt = [rect_gt[0], rect_gt[1], rect_gt[2], rect_gt[3]]
      rect_gt[2] += rect_gt[0]
      rect_gt[3] += rect_gt[1]
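      # look for detections overlapping this GT word well enough to serve as OCR training crops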

      for i in range(0, min(100, boxes_count)):
        det_word = boxes[bid, 0, x[i], :]
        
        if (det_word[0] == 0 and det_word[1] == 0) or det_word[5] < 0.01:
          break
        
        # skip detections whose angle differs too much from the GT box
        if math.fabs(gt[4] - det_word[4]) > math.pi / 16:
          continue
        
        box  = ((det_word[0], det_word[1]), (det_word[2], det_word[3]), det_word[4] * 180 / 3.14)
        box = cv2.boxPoints(box)
        
        if args.debug:
          boxp = np.array(box, dtype="int")
          vis.draw_box_points(draw[bid], boxp, color = (0, 255, 0))
        
        box = np.array(box, dtype="int")
        bbox = cv2.boundingRect(box)
        bbox = [bbox[0], bbox[1], bbox[2], bbox[3]]
        bbox[2] += bbox[0]
        bbox[3] += bbox[1]
   
        #rectangle intersection ... 
        inter = intersect(bbox, rect_gt)
        uni = union(bbox, rect_gt)
        ratio = area(inter) / float(area(uni))
        
        ratio_gt = area(inter) / float(area(rect_gt))
        if ratio_gt < 0.95:
          continue 
        
        if ratio < 0.5:
          continue
        
        if gtnum not in gt_to_detection:
            gt_to_detection[gtnum] = [0, 0, 0]
        tupl = gt_to_detection[gtnum] 
        if tupl[0] < ratio:
          tupl[0] = ratio 
          tupl[1] = x[i]  
          tupl[2] = ratio_gt       
        
        det_word = boxes[bid, 0, x[i], :]
        box  = ([det_word[0], det_word[1]], [det_word[2], det_word[3]], det_word[4] * 180 / 3.14)
        
        boxO = get_obox(im_ctc[bid], o_image, box)
        boxO = ((boxO[0][0], boxO[0][1]), (boxO[1][0], boxO[1][1]), boxO[2])
        norm2, rot_mat = get_normalized_image(o_image, boxO)
        #norm3, rot_mat = get_normalized_image(im_ctc[bid], ([det_word[0], det_word[1]], [det_word[2] * 1.2, det_word[3] * 1.1], det_word[4] * 180 / 3.14))
        if norm2 is None:
          continue
        #if norm3 is None:
        #  continue
        #continue
        #cv2.imshow('ts', norm2)
        #cv2.imshow('ts3', norm3)
        #cv2.waitKey(1)
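        # scale the crop to height 32 and pick the width bucket closest to 1.3x the scaled width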
        width_scale = 32.0 / norm2.shape[0]
        width = norm2.shape[1] * width_scale
        best_diff = width
        bestb = 0
        for b in range(0, len(buckets)):
          if best_diff > abs(width * 1.3 - buckets[b]):
            best_diff = abs(width * 1.3 - buckets[b])
            bestb = b
        
        scaled = cv2.resize(norm2, (buckets[bestb], 32))  
        scaled = np.asarray(scaled, dtype=np.float)
        delta = scaled.max() - scaled.min()
        scaled = (scaled) / (delta / 2)
        scaled -= scaled.mean()
                
        if bestb not in bucket_images:
          bucket_images[bestb] = {}
          bucket_images[bestb]['img'] = []  
          bucket_images[bestb]['sizes'] = []    
          bucket_images[bestb]['txt'] = []
          bucket_images[bestb]['gt_enc'] = []
          dummy[bestb] = 1
        else:
          # cap the number of crops collected per bucket (smaller cap when debugging)
          if args.debug and len(bucket_images[bestb]['img']) > 4:
            continue
          elif len(bucket_images[bestb]['img']) > 32:
            continue
        
        gt_labels = []
        txt_enc = ''
        for k in range(txt.shape[1]):
          if txt[0, k] > 0:
            if txt[0, k] in codec_rev:
              gt_labels.append( codec_rev[txt[0, k]] )
            else:
              gt_labels.append( 3 )
                              
            txt_enc += unichr(txt[0, k])
          else:
            gt_labels.append( 0 )
        
        if scaled.ndim == 3:
          scaled = cv2.cvtColor(scaled, cv2.COLOR_BGR2GRAY)
        if args.debug:
          cv2.imshow('scaled', scaled)
        bucket_images[bestb]['sizes'].append(len(gt_labels))
        bucket_images[bestb]['gt_enc'].append(gt_labels)
        bucket_images[bestb]['txt'].append(txt_enc)
        bucket_images[bestb]['img'].append(scaled)
        matched_detections += 1   
      
  #and learn OCR
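  # one forward/backward pass of the CTC recognizer per width bucket: stack the crops, pad the labels, step the optimizer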
  for bucket in bucket_images.keys():
      
    imtf = np.asarray(bucket_images[bucket]['img'], dtype=np.float)
    imtf = np.reshape(imtf, (imtf.shape[0], -1, imtf.shape[1], imtf.shape[2]))    
    #imtf = imtf.reshape((imtf.shape[0], imtf.shape[1], imtf.shape[2], 1))
    #imtf = np.swapaxes(imtf,1,3)
    
    
    net_ctc.blobs['data'].reshape(imtf.shape[0],imtf.shape[1],imtf.shape[2], imtf.shape[3]) 
    net_ctc.blobs['data'].data[...] = imtf
    
    labels = bucket_images[bucket]['gt_enc']
    txt = bucket_images[bucket]['txt']
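    # pad every label sequence in the bucket to the same length with the blank label 0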
    
    max_len = 0
    for l in range(0, len(labels)):
      max_len = max(max_len, len(labels[l]))
    for l in range(0, len(labels)):
      while len(labels[l]) <  max_len:
        labels[l].append(0)
      
    
    labels = np.asarray(labels, np.float)
    
    net_ctc.blobs['label'].reshape(labels.shape[0], labels.shape[1])
    
    net_ctc.blobs['label'].data[...] = labels
    
    if args.debug:
        vis.vis_square(imtf[0])
        cv2.imshow('draw', draw[0])
        cv2.waitKey(5)
         
     
    optim.step(1)  
    sum_cost += net_ctc.blobs['loss'].data[...]
    if net_ctc.blobs['loss'].data[...] > 10:
      vis.vis_square(imtf[0])
      cv2.imshow('draw', draw[0])
      sf = net_ctc.blobs['transpose'].data[...]
      labels2 = sf.argmax(3)
      out = utils.print_seq(labels2[:, 0, :])
      print(u'{0} - {1}'.format(out, txt[0])  )
      cv2.waitKey(5)
          
          
    count += imtf.shape[0]
              
  correct_count = 0
  for i in range(len(labels_gt)):
    det_text = labels_det[i]
    gt_text = labels_gt[i]
    
    if it % 100 == 0:
      print( u"{0} - {1}".format(det_text, gt_text).encode('utf8') )
    if det_text == gt_text:
      correct_count += 1
      
  count = max(count, 1)    
  mean_loss = 0.99 * mean_loss + 0.01 * sum_cost / count
  mean_rec = mean_rec * 0.99 + 0.01 * correct_count / float(max(1, len(labels_gt)))
  
  #count detection ratio
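  # a GT word counts as detected (tp) when its best-matching detection reached IoU > 0.5;
  # note that gt_count below still holds the count from the last batch element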

  tp = 0
  for bid in range(im.shape[0]):
    word_gto = word_gtob[bid]
    for gt_no in range(len(word_gto)):
      gt = word_gto[gt_no]
      gtnum = 1000 * bid +  gt_no
      if gtnum in gt_to_detection:
        tupl = gt_to_detection[gtnum] 
        if tupl[0] > 0.5:
          tp += 1
          
                      
  loc_recall = tp / float(max(1, gt_count))             
  if args.debug:
    cv2.imshow('draw', draw[0])
    if im.shape[0] > 1:
        cv2.imshow('draw2', draw[1])
        
    cv2.waitKey(10)
  
  if it % 10 == 0:
    print('{0} - lr:{1:.3e} ctc:{2:.4f}/{3:.4f} wr:{4:.2f}/{5:.2f}, loc:{6:.2f} {7}'.format(it, 0.0001, sum_cost / count, mean_loss, correct_count / float(max(1, len(labels_gt))), mean_rec, loc_recall, matched_detections))
  
  if it % 1000 == 0:
    optim.snapshot()
    optim2.snapshot()