Exemplo n.º 1
0
    def transform_to_fit_masks(self, origin_mask, target_mask, starting_angle=0, origin_is_on_down=True):
        """Translate and rotate this shape so origin_mask best overlaps target_mask.

        Returns the (translation, angle) that was applied.
        """
        translation, angle = transformationutils.calculate_best_transformation_from_img(
            origin_mask, target_mask, starting_angle
        )

        # np.flip presumably converts the translation between (row, col) and
        # (x, y) ordering -- TODO confirm against transformationutils.
        shift = np.flip(translation)
        if origin_is_on_down:
            # Delta is relative to where the shape was when the finger went down.
            self.cnt = self.cnt_on_down + shift
            for key in self.keypoints:
                self.keypoints[key] = self.keypoints_on_down[key] + shift
        else:
            # Delta is relative to the last frame.
            self.cnt += shift
            for key in self.keypoints:
                self.keypoints[key] += shift

        # Rotate contour and keypoints around the contour centroid.
        points = self.cnt[:, 0]
        centroid = transformationutils.calculate_centroid(points)
        rotated = transformationutils.rotate_points(points, centroid, angle)
        for key in self.keypoints:
            self.keypoints[key] = transformationutils.rotate_points(self.keypoints[key], centroid, angle)
        self.cnt = rotated.reshape(-1, 1, 2)

        # Keep the bounding box and the most recent angle in sync.
        self.bbox = Bbox(*cv2.boundingRect(self.cnt))
        self.current_degs = -angle
        return translation, angle
def get_two_roads(node_id, lon_lat, location):
    """Find the two distinct ways (roads) closest to a (lon, lat) point.

    Queries the on-disk rtree index "index_ways_<location>" for way-nodes
    inside a bounding box around the point, groups them by way id, and
    returns the two ways with the smallest distance to the point.

    Args:
        node_id: unused here; kept for interface compatibility with callers.
        lon_lat: sequence of (longitude, latitude).
        location: suffix selecting which index file to open.

    Returns:
        (min_way_1, min_dist_1, min_way_2, min_dist_2). Entries are None
        when fewer than one (resp. two) ways are nearby.
    """
    lon, lat = lon_lat[0], lon_lat[1]
    bbox = Bbox()
    left_lat, left_lon, right_lat, right_lon = bbox.boundingBox(lat, lon, 0.7)

    index_file = "index_ways_" + str(location)
    idx = index.Index(index_file)
    nodes_around = list(
        idx.intersection((left_lat, left_lon, right_lat, right_lon),
                         objects="raw"))
    idx.close()

    # Group all the nodes such that all nodes on a way occur together.
    close_ways = {}
    for node in nodes_around:
        close_ways.setdefault(node['way_id'], []).append(node)

    # min_dist_1/min_way_1 track the closest way, min_dist_2/min_way_2 the runner-up.
    min_way_1 = min_way_2 = min_dist_1 = min_dist_2 = None
    # BUGFIX: the original condition was "> 2", which silently returned all
    # None when exactly two ways were nearby. ">= 2" handles that case.
    if len(close_ways) >= 2:
        for way_id, node_list in close_ways.items():
            d = dist_btw(lon_lat, node_list)
            if min_dist_1 is None or d < min_dist_1:
                # New best: demote the previous best to runner-up.
                min_dist_2, min_way_2 = min_dist_1, min_way_1
                min_dist_1, min_way_1 = d, way_id
            elif min_dist_2 is None or d < min_dist_2:
                min_dist_2, min_way_2 = d, way_id
    elif len(close_ways) == 1:
        # BUGFIX: keys()[0] raises TypeError on Python 3 (dict views are not
        # subscriptable); next(iter(...)) works on both Python 2 and 3.
        min_way_1 = next(iter(close_ways))
        min_dist_1 = dist_btw(lon_lat, close_ways[min_way_1])

    return min_way_1, min_dist_1, min_way_2, min_dist_2
Exemplo n.º 3
0
    def on_finger_pressing(self, _, data):
        """Watch for a fingertip held still on the shape and record it as a pick.

        A candidate point is kept while the finger stays inside a small
        jitter box around it. Once the candidate has been stable for at
        least one second it is appended to picked_regions and the user must
        lift the finger before the next pick.
        """
        if not self.active or self.must_lift_finger:
            return

        pos = data['fingertip_pos']
        if not self.shape.bbox.contains(*pos):
            # Finger is off the shape: abandon the current candidate.
            self.current_point = None
            return

        if self.current_point is None:
            # New candidate point: start the stability timer.
            self.current_point = pos
            self.stable_since = time.time()
        else:
            # Tolerate small jitter: a 15 px box around the latest position.
            jitter_box = Bbox(*pos, 0, 0)
            jitter_box.extend_by(15)
            if not jitter_box.contains(*self.current_point):
                # Finger drifted too far: restart the timer at the new spot.
                self.stable_since = time.time()
                self.current_point = pos

        if time.time() - self.stable_since >= 1:
            # Held still long enough: commit the pick.
            self.picked_regions.append(self.current_point)
            self.current_point = None
            self.must_lift_finger = True
            if self.amount_target_regions > 0 and len(self.picked_regions) >= self.amount_target_regions:
                self.on_hand_exit()  # Simulate hand exit in order to terminate the regions
            return
Exemplo n.º 4
0
 def get_window_info(self):
     """Locate the emulator window by its title and return its screen rect as a Bbox.

     Raises:
         AttributeError: when no window named self.window_name exists.
     """
     handle = win32gui.FindWindow(0, self.window_name)
     if handle == 0:
         raise AttributeError("Please open emulator")
     # GetWindowRect yields (left, top, right, bottom) in screen coordinates.
     rect = tuple(win32gui.GetWindowRect(handle))
     return Bbox(rect)
Exemplo n.º 5
0
    def get_gem_board_bbox(self):
        """Return the sub-area of the emulator window that holds the gem board.

        The board sits in the lower part of the window: 1% of the width is
        trimmed on each side, the top 55% of the height is cut off, and 0.7%
        of the height is trimmed at the bottom.
        """
        window = self.get_window_info()
        left, top, right, bottom = window.xyxy()
        width = window.w
        height = window.h

        return Bbox((left + width * 0.01,
                     top + height * 0.55,
                     right - width * 0.01,
                     bottom - height * 0.007))
Exemplo n.º 6
0
 def __mc_for_xy(self, xy):
     """Map a pixel position to its (column, row) cell in the 3x6 grid.

     Returns None when the position lies outside every cell (e.g. on the
     margins between cells).
     """
     cell_w = int(self.inner_w)
     cell_h = int(self.inner_h)
     for col in range(3):
         x0 = int(col * self.outter_w + self.margin)
         for row in range(6):
             y0 = int(row * self.outter_h + self.margin)
             if Bbox(x0, y0, cell_w, cell_h).contains(*xy):
                 return (col, row)
     return None
Exemplo n.º 7
0
    def __init__(self, cnt):
        """Build a shape from its contour: bounding box, isolated footprint,
        representative color, and all per-gesture tracking state.

        Args:
            cnt: OpenCV contour (N x 1 x 2 int array) of the shape.
        """
        super().__init__()

        # Calculate the bbox
        bbox = Bbox(*cv2.boundingRect(cnt))

        # Draw an isolated footprint of the shape: a binary mask in local
        # coordinates, padded by 15 px so matching has room around the edges.
        offset = tuple(- np.array(bbox.position()))
        isolated = np.zeros(bbox.size(True), np.uint8)
        cv2.drawContours(isolated, [cnt], 0, 255, -1, offset=offset)
        footprint = cv2.copyMakeBorder(isolated, 15, 15, 15, 15, cv2.BORDER_CONSTANT, 0)

        # Determine the color of the shape from the central third of its bbox.
        x, y, w, h = bbox.xywh()
        patch = realsensecam().bgr[y:y + h, x:x + w, :][int(h / 3):int(2 * h / 3), int(w / 3):int(2 * w / 3), :]
        # BUGFIX: the original tested "patch is None" AFTER blurring, but a
        # slice is never None -- for a tiny bbox it is an EMPTY array, which
        # makes GaussianBlur raise. Worse, color_hsv was only assigned on the
        # else path, so the fallback crashed with NameError below. Guard on
        # emptiness first and compute color_hsv on both paths.
        if patch.size == 0:
            color = (0, 0, 0)
        else:
            patch = cv2.GaussianBlur(patch, (51, 51), 0)
            ph, pw, _ = patch.shape
            color = patch[int(ph / 2), int(pw / 2)]
            color = tuple([int(c) for c in color])
        color_hsv = cv2.cvtColor(np.array([[color]], np.uint8), cv2.COLOR_BGR2HSV)[0][0]

        # Static appearance
        self.cnt = cnt
        self.bbox = bbox
        self.color = color
        self.color_hsv = color_hsv
        self.footprint = footprint
        self.angle = 0
        # Lifecycle state
        self.state = 'fresh'
        self.state_stable_since = shapetracker().epoch
        # Per-gesture tracking state (populated while a finger is down)
        self.pressed = False
        self.moving = False
        self.initial_swipe_xy = None
        self.current_swipe_xy = None
        self.initial_move_xy = None
        self.current_move_xy = None
        self.initial_degs = 0
        self.current_degs = None
        self.cnt_on_down = None
        self.needs_transform_to_fit_shape = False
        self.keypoints = {}
        self.keypoints_on_down = None
        self.action_name = ""
def reverse_geocode():
    """Reverse-geocode every cluster in the flickr.clusters MongoDB collection.

    For each cluster, search for the closest POI in progressively larger
    bounding boxes (0.05, 0.1, 0.2, 0.5) around the cluster coordinates and
    write the result back onto the cluster document.
    """
    try:
        client = MongoClient()
        db = client.flickr
        clustersCollection = db.clusters
        clusters = clustersCollection.find({})

        for cluster in clusters:
            lat = cluster['latitude']
            lon = cluster['longitude']
            bbox = Bbox()

            # Widen the search box until a POI is found (the original's
            # four-deep nested-if ladder, flattened into a loop).
            poi = poi_id = poi_lat = poi_lon = None
            for box_size in (0.05, 0.1, 0.2, 0.5):
                # boundingBox returns left_lat, left_lon, right_lat, right_lon
                bottom, left, top, right = bbox.boundingBox(lat, lon, box_size)
                poi, poi_id, poi_lat, poi_lon = get_closest_poi(
                    left, bottom, right, top)
                if poi is not None:
                    break

            # BUGFIX: this was a Python 2 print statement, a SyntaxError under
            # Python 3; the print() call produces the same space-separated output.
            print(cluster['cluster_id'], poi, poi_id, poi_lat, poi_lon)

            # NOTE(review): Collection.update is deprecated in modern PyMongo;
            # kept for behavioral compatibility (consider update_one).
            clustersCollection.update({"cluster_id": cluster["cluster_id"]}, {
                "$set": {
                    "address": poi,
                    "poi_id": poi_id,
                    "poi_lat": poi_lat,
                    "poi_lon": poi_lon
                }
            })

        client.close()

    except Exception as e:
        # Broad catch mirrors the original best-effort behavior; at least report it.
        print(e)
Exemplo n.º 9
0
from PIL import Image
from bbox import Bbox
from flask import request
from search import Search
from io import StringIO
from flask import send_file
from enums import *
from common import *
from common import str2bool

# Python 2/3 compatibility: urlparse moved into urllib.parse in Python 3.
try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

# Shared bounding-box helper used by the request handlers below.
bbox = Bbox()

# Filesystem layout, taken from the project config.
root_dir = config.ROOT_PATH
sample_dir = config.SAMPLE_PATH
upload_dir = config.UPLOAD_PATH
show_bbox = config.SHOW_BBOX  # presumably toggles drawing detection boxes -- TODO confirm

# HTTP header names/values and endpoint settings for the recognition API.
CONTENT_TYPE_KEY = config.CONTENT_TYPE_KEY
CONTENT_TYPE = config.CONTENT_TYPE
SUBSCRIPTION_KEY = config.SUBSCRIPTION_KEY
AUTHORIZATION_HEADER = config.AUTHORIZATION_HEADER
BASE_URL = config.BASE_URL
API_VERSION = config.API_VERSION

# URL template: {0}=base url, {1}=api version, {2}=topK, {3}=predict mode.
classify_format = '{0}/species-recognition/v{1}/predict?topK={2}&predictMode={3}'
Exemplo n.º 10
0
 def __init__(self, bbox: Bbox):
     """Cache a gem's image, average color, center position and classified type,
     all derived from the bounding box it was detected in.
     """
     self.bbox = bbox
     self.img = bbox.get_img()
     self.color = bbox.get_avg_color()
     self.center = bbox.get_center_pos()
     # Classification uses the attributes set above, so it must come last.
     self.gem_type = self.get_self_gem_type()
Exemplo n.º 11
0
class Shape(Publisher):
    def __init__(self, cnt):
        """Build a shape from its contour: bounding box, isolated footprint,
        representative color, and all per-gesture tracking state.

        Args:
            cnt: OpenCV contour (N x 1 x 2 int array) of the shape.
        """
        super().__init__()

        # Calculate the bbox
        bbox = Bbox(*cv2.boundingRect(cnt))

        # Draw an isolated footprint of the shape: a binary mask in local
        # coordinates, padded by 15 px so matching has room around the edges.
        offset = tuple(- np.array(bbox.position()))
        isolated = np.zeros(bbox.size(True), np.uint8)
        cv2.drawContours(isolated, [cnt], 0, 255, -1, offset=offset)
        footprint = cv2.copyMakeBorder(isolated, 15, 15, 15, 15, cv2.BORDER_CONSTANT, 0)

        # Determine the color of the shape from the central third of its bbox.
        x, y, w, h = bbox.xywh()
        patch = realsensecam().bgr[y:y + h, x:x + w, :][int(h / 3):int(2 * h / 3), int(w / 3):int(2 * w / 3), :]
        # BUGFIX: the original tested "patch is None" AFTER blurring, but a
        # slice is never None -- for a tiny bbox it is an EMPTY array, which
        # makes GaussianBlur raise. Worse, color_hsv was only assigned on the
        # else path, so the fallback crashed with NameError below. Guard on
        # emptiness first and compute color_hsv on both paths.
        if patch.size == 0:
            color = (0, 0, 0)
        else:
            patch = cv2.GaussianBlur(patch, (51, 51), 0)
            ph, pw, _ = patch.shape
            color = patch[int(ph / 2), int(pw / 2)]
            color = tuple([int(c) for c in color])
        color_hsv = cv2.cvtColor(np.array([[color]], np.uint8), cv2.COLOR_BGR2HSV)[0][0]

        # Static appearance
        self.cnt = cnt
        self.bbox = bbox
        self.color = color
        self.color_hsv = color_hsv
        self.footprint = footprint
        self.angle = 0
        # Lifecycle state
        self.state = 'fresh'
        self.state_stable_since = shapetracker().epoch
        # Per-gesture tracking state (populated while a finger is down)
        self.pressed = False
        self.moving = False
        self.initial_swipe_xy = None
        self.current_swipe_xy = None
        self.initial_move_xy = None
        self.current_move_xy = None
        self.initial_degs = 0
        self.current_degs = None
        self.cnt_on_down = None
        self.needs_transform_to_fit_shape = False
        self.keypoints = {}
        self.keypoints_on_down = None
        self.action_name = ""

    def update_from(self, other):
        """Copy the tracked geometry and appearance fields from another shape."""
        for attr in ('cnt', 'bbox', 'color', 'angle'):
            setattr(self, attr, getattr(other, attr))

    def set_state(self, new_state):
        """Switch the lifecycle state and restart its stability clock."""
        self.state = new_state
        self.state_stable_since = shapetracker().epoch

    # Recommended threshold for detecting another shape: 2%
    def position_difference(self, other_shape):
        """Distance between the two bbox centers, normalized by the camera diagonal."""
        delta = other_shape.bbox.center_nparr() - self.bbox.center_nparr()
        return np.linalg.norm(delta) / realsensecam().diagonal

    # Recommended threshold for detecting another shape: 1%
    def hue_difference(self, other_shape):
        """Circular hue distance between the two shapes, normalized to [0, 0.5]."""
        # OpenCV stores H in [0, 180) due to 8-bit resolution -> double to degrees.
        hue_self = 2 * self.color_hsv[0]
        hue_other = 2 * other_shape.color_hsv[0]
        # Wrap-around distance on the 360-degree hue circle, scaled by 360.
        return (180 - abs(abs(hue_self - hue_other) - 180)) / 360

    # Recommended threshold for detecting another shape: 10%
    def shape_difference(self, other_shape):
        """Contour dissimilarity via cv2.matchShapes, clamped to at most 1."""
        score = cv2.matchShapes(self.cnt, other_shape.cnt, 1, 0)
        return min(1, score)

    def on_finger_down(self, data, do_not_check_xy=False, initiated_by_shape=False):
        """Register a finger press on this shape and publish 'finger_down'.

        do_not_check_xy skips the bbox hit-test (for callers that already
        know the finger is on the shape).
        """
        xy = np.array(data['fingertip_pos'])
        degs = data['finger_deg_delta']
        if not (do_not_check_xy or self.bbox.contains(*xy)):
            return
        self.pressed = True
        # Swipe and move gestures both start at the touch point.
        self.initial_swipe_xy = self.current_swipe_xy = xy
        self.initial_move_xy = self.current_move_xy = xy
        self.initial_degs = self.current_degs = degs
        # Snapshot the geometry so later transforms can be applied relative to it.
        self.cnt_on_down = self.cnt.copy()
        self.keypoints_on_down = self.keypoints.copy()
        handtracker().touched_shape = self
        self.publish('finger_down', {
            **data,
            'shape': self,
            'shape_fingertip_pos': self.__offset_by_my_position(xy),
            'initiated_by_shape': initiated_by_shape
        })

    def on_finger_up(self, data, initiated_by_shape=False):
        """Release a press on this shape: publish 'finger_up' with the gesture
        deltas, then clear all per-gesture state.
        """
        if not self.pressed:
            return
        xy = np.array(data['fingertip_pos'])
        self.pressed = False
        self.needs_transform_to_fit_shape = True
        self.publish('finger_up', {
            **data,
            'shape': self,
            'shape_fingertip_pos': self.__offset_by_my_position(xy),
            'shape_was_moving': self.moving,
            'shape_move_delta': self.current_move_xy - self.initial_move_xy,
            'shape_swipe_delta': self.current_swipe_xy - self.initial_swipe_xy,
            'shape_degs': self.current_degs - self.initial_degs,
            'initiated_by_shape': initiated_by_shape
        })
        # Reset all per-gesture tracking state.
        self.moving = False
        self.initial_swipe_xy = self.current_swipe_xy = None
        self.initial_move_xy = self.current_move_xy = None
        self.initial_degs = self.current_degs = None
        self.cnt_on_down = None
        self.keypoints_on_down = None
        handtracker().touched_shape = None

    def on_finger_moved(self, data):
        """A moving finger is handled like a press with has_moved set."""
        self.on_finger_pressing(data, has_moved=True)

    def on_finger_pressing(self, data, has_moved=False):
        """Dispatch a finger-on-table event for this shape.

        While pressed and the finger is still on the shape, re-publishes the
        event and then emits either 'moved' (shape is being dragged) or
        'swiped' (finger slides over a stationary shape). If the finger has
        left the shape, a synthetic finger_up is issued unless the shape is
        moving. If the shape was not pressed but the finger is on it, a
        synthetic finger_down is issued instead.
        """
        xy = np.array(data['fingertip_pos'])
        degs = data['finger_deg_delta']  # NOTE(review): read but unused in this method
        if self.pressed:
            if self.intersects_with(xy):
                self.publish('finger_moved' if has_moved else 'finger_pressing', {
                    **data,
                    'shape': self,
                })
                if self.moving:
                    # Dragging: only the move delta advances.
                    self.current_move_xy = xy
                    self.initial_swipe_xy = xy  # Reset swipe vector because the shape has moved
                    self.current_swipe_xy = xy
                    self.publish('moved', {
                        **data,
                        'shape': self,
                        'shape_move_delta': self.current_move_xy - self.initial_move_xy,
                        'shape_degs': self.current_degs - self.initial_degs
                    })
                else:
                    # Stationary shape: the finger is swiping across it.
                    self.current_swipe_xy = xy
                    self.current_move_xy = xy
                    self.publish('swiped', {
                        **data,
                        'shape': self,
                        'shape_swipe_delta': self.current_swipe_xy - self.initial_swipe_xy,
                        'shape_degs': self.current_degs - self.initial_degs
                    })
            else:
                if not self.moving:  # Prevent finger_up while moving
                    self.on_finger_up(data, initiated_by_shape=True)
        elif self.intersects_with(xy):
            self.on_finger_down(data, True, initiated_by_shape=True)

    def start_moving(self, xy):
        """Mark the shape as being dragged, restarting move and swipe tracking at xy."""
        self.moving = True
        self.initial_move_xy = self.current_move_xy = xy
        # Reset swipe vector because the shape has moved
        self.initial_swipe_xy = self.current_swipe_xy = xy
        self.publish('start_moving', {'shape': self, 'fingertip_pos': xy})

    def stop_moving(self):
        """End a drag, notifying subscribers only if one was actually in progress."""
        # Publish before clearing the flag so subscribers still see moving=True.
        if self.moving:
            self.publish('stop_moving', {'shape': self})
        self.moving = False

    def intersects_with(self, xy):
        """True iff position xy lies on this shape."""
        # bbox.contains is the cheap test with no false negatives; the
        # hand-detector contour test then weeds out false positives.
        return bool(self.bbox.contains(*xy)
                    and handdetector().cnt_intersects_with_hand(self.cnt))

    def transform_to_fit_shape(self, other_shape):
        """Fit this shape onto other_shape's contour and publish the applied transform."""
        height, width = realsensecam().H, realsensecam().W
        source_mask = np.zeros((height, width), np.uint8)
        target_mask = np.zeros_like(source_mask)
        cv2.drawContours(source_mask, [self.cnt], 0, 255, -1)
        cv2.drawContours(target_mask, [other_shape.cnt], 0, 255, -1)
        translation, angle = self.transform_to_fit_masks(source_mask, target_mask, origin_is_on_down=False)
        # A large residual rotation means the fit must be refined again later.
        if abs(angle) < 8:
            self.needs_transform_to_fit_shape = False
        self.publish('transformation_adjusted', {'shape': self, 'shape_move_delta': translation, 'shape_degs': -angle})

    def transform_to_fit_masks(self, origin_mask, target_mask, starting_angle=0, origin_is_on_down=True):
        """Translate and rotate this shape so origin_mask best overlaps target_mask.

        Returns the (translation, angle) that was applied.
        """
        translation, angle = transformationutils.calculate_best_transformation_from_img(
            origin_mask, target_mask, starting_angle
        )

        # np.flip presumably converts the translation between (row, col) and
        # (x, y) ordering -- TODO confirm against transformationutils.
        shift = np.flip(translation)
        if origin_is_on_down:
            # Delta is relative to where the shape was when the finger went down.
            self.cnt = self.cnt_on_down + shift
            for key in self.keypoints:
                self.keypoints[key] = self.keypoints_on_down[key] + shift
        else:
            # Delta is relative to the last frame.
            self.cnt += shift
            for key in self.keypoints:
                self.keypoints[key] += shift

        # Rotate contour and keypoints around the contour centroid.
        points = self.cnt[:, 0]
        centroid = transformationutils.calculate_centroid(points)
        rotated = transformationutils.rotate_points(points, centroid, angle)
        for key in self.keypoints:
            self.keypoints[key] = transformationutils.rotate_points(self.keypoints[key], centroid, angle)
        self.cnt = rotated.reshape(-1, 1, 2)

        # Keep the bounding box and the most recent angle in sync.
        self.bbox = Bbox(*cv2.boundingRect(self.cnt))
        self.current_degs = -angle
        return translation, angle

    def __offset_by_my_position(self, xy):
        """Translate an absolute position into coordinates relative to the bbox center."""
        center = self.bbox.center()
        return xy[0] - center[0], xy[1] - center[1]