Esempio n. 1
0
def test_point_at_initial_point(fixture):
  """Pointing at a first target records the aim but takes no steps yet."""
  target = Coordinate(2, 1, 2)

  fixture.point_at(target)

  assert str(fixture.current_aim) == str(target)
  assert str(fixture.position_step) == str(Coordinate(0.0, 0.0, 0.0))
  assert fixture.steps_taken == 0
Esempio n. 2
0
def test_point_at_existing_point(fixture):
  """Re-aiming an already-aimed fixture computes a per-step position delta."""
  first_aim = Coordinate(2, 1, 2)
  fixture.point_at(first_aim)

  second_aim = Coordinate(3, 1, 3)
  fixture.point_at(second_aim)

  assert str(fixture.current_aim) == str(second_aim)
  assert str(fixture.position_step) == str(Coordinate(0.1, 0.0, 0.1))
  assert fixture.steps_taken == 0
Esempio n. 3
0
def apply_rotation(initial_position, rotation):
    """
    Rotate the unit X vector by the given rotation angles and displace the
    result by ``initial_position``.

    Arguments:
      initial_position {Coordinate} -- origin the rotated vector is
        displaced by
      rotation {iterable} -- (x, y, z) rotation angles forwarded to
        create_rotation_matrix

    Returns:
      Coordinate -- rotated unit vector displaced by initial_position
    """

    identity_position = [1, 0, 0]

    rotation_matrix = create_rotation_matrix(*rotation)

    # Apply the per-axis rotations in Y, X, Z order; the order matters.
    # (A commented-out duplicate of these three lines was removed.)
    rotated_position = rotation_matrix[1].dot(identity_position)
    rotated_position = rotation_matrix[0].dot(rotated_position)
    rotated_position = rotation_matrix[2].dot(rotated_position)

    rotated_position = Coordinate(*rotated_position)

    return rotated_position.displace_by(initial_position)
Esempio n. 4
0
def test_sub(coordinate):
  """Subtracting two coordinates yields the component-wise difference.

  The ``coordinate`` fixture is Coordinate(0.1, 0.2, 0.3).
  """
  import math

  secondary = Coordinate(0.2, 0.4, 0.6)

  diff = secondary - coordinate

  # Exact float equality on subtraction results is fragile — the original
  # asserts only passed because 0.2-0.1, 0.4-0.2 and 0.6-0.3 happen to be
  # exact in binary. Compare with a tolerance instead.
  assert math.isclose(diff.x, 0.1, abs_tol=1e-9)
  assert math.isclose(diff.y, 0.2, abs_tol=1e-9)
  assert math.isclose(diff.z, 0.3, abs_tol=1e-9)
Esempio n. 5
0
  def center(self):
    """
    Gets the centerpoint of the room

    Returns:
      Coordinate -- centerpoint of the room
    """

    half_width = self.width / 2.0
    half_height = self.height / 2.0
    half_depth = self.depth / 2.0
    return Coordinate(half_width, half_height, half_depth)
Esempio n. 6
0
    def __init__(self, config, fixture_id, stop_flags):
        """
        Create a fixture from the given config.

        Arguments:
          config {dict} -- fixture config with 'personality', 'mode', 'net',
            'subnet', 'universe', 'address', 'position' and 'rotation' keys
          fixture_id -- identifier for this fixture
          stop_flags {dict} -- shared flags used to signal shutdown

        Returns:
          Fixture
        """

        self.stop_flags = stop_flags
        self.fixture_id = fixture_id
        self.personality = find_personality_by_id(config['personality'],
                                                  config['mode'])
        # Art-Net addressing for this fixture
        self.address = {
            'net': config['net'],
            'subnet': config['subnet'],
            'universe': config['universe'],
            'address': config['address']
        }

        # One level per channel of the personality
        self.levels = np.zeros(self.personality.channels)

        # Seed each attribute's channel(s) with its default level; 'wide'
        # attributes span two consecutive channels (coarse + fine).
        for attribute in self.personality.attributes:
            channel = attribute.offset
            if attribute.multiplier_type == 'wide':
                self.levels[channel] = attribute.default  # Coarse channel
                self.levels[channel + 1] = attribute.default  # Fine channel
            else:
                self.levels[channel] = attribute.default

        pos = config['position']
        self.location = Coordinate(pos['x'], pos['y'], pos['z'])
        self.position = Coordinate(0, 0, 0)

        # Aiming state; populated once point_at() is called
        self.last_position = None
        self.current_aim = None
        self.position_step = None
        self.steps_taken = 0

        # Pan is offset by the fixture's Y rotation (degrees -> radians)
        self.pan_offset = math.radians(config['rotation']['y'])
        self.tilt_invert = False

        self.rotation_matrix = create_rotation_matrix(config['rotation']['x'],
                                                      config['rotation']['y'],
                                                      config['rotation']['z'])
Esempio n. 7
0
def test_update_position(point_of_interest):
    """Updating a POI records position, location and a direction vector."""
    new_position = Coordinate(11, 12, 13)
    new_location = (100, 50)
    point_of_interest.update_position(new_position, new_location)

    assert point_of_interest.position == new_position
    assert point_of_interest.location == new_location

    expected_vector = [10, 10, 10]
    for actual, expected in zip(point_of_interest.direction_vector,
                                expected_vector):
        assert actual == expected
Esempio n. 8
0
def test_init(fixture):
  """A freshly constructed fixture exposes the expected initial state."""
  assert fixture.stop_flags == {'fixture': False}
  assert fixture.fixture_id == 0
  assert fixture.personality == find_personality_by_id(0, 0)

  expected_address = {
    'net': 0,
    'subnet': 0,
    'universe': 0,
    'address': 1
  }
  assert fixture.address == expected_address
  assert len(fixture.levels) == 16

  assert str(fixture.location) == str(Coordinate(1, 2, 3))
  assert str(fixture.position) == str(Coordinate(0, 0, 0))

  # No aiming has happened yet
  for aim_state in (fixture.last_position, fixture.current_aim,
                    fixture.position_step):
    assert aim_state is None
  assert fixture.steps_taken == 0

  assert fixture.pan_offset == math.radians(180)
  assert fixture.tilt_invert is False
Esempio n. 9
0
    def calculate_real_world_coordinate(self, location):
        """
        Calculates a real world coordinate from the camera's physical
        properties. Takes into account viewing angle, camera rotation in
        space and position.

        Arguments:
          location (x, y) -- Relative pixel coordinates of a point

        Returns:
          Coordinate
        """

        center_x, center_y = location

        # Pixel offsets from the frame centre; vertical axis is negated so
        # positive means "up" in world terms.
        displacement_horizontal = center_x - self.horiz_midpoint
        displacement_vertical = -(center_y - self.vert_midpoint)

        # Map pixel offsets to angles within the camera's viewing angle
        angular_displacement_horizontal = math.radians(
            scale(displacement_horizontal, 0, self.horiz_midpoint, 0,
                  self.angular_horiz_midpoint))
        angular_displacement_vertical = math.radians(
            scale(displacement_vertical, 0, self.vert_midpoint, 0,
                  self.angular_vert_midpoint))

        # Unit ray along the camera's X axis, deflected by the two angles
        identity_x = 1.0
        identity_y = identity_x * math.tan(angular_displacement_vertical)
        identity_z = identity_x * math.tan(angular_displacement_horizontal)

        identity_position = [identity_x, identity_y, identity_z]

        # Rotate the ray into world space; applied in Z, Y, X order — the
        # order is significant.
        rotated_position = self.rotation_matrix[2].dot(identity_position)
        rotated_position = self.rotation_matrix[1].dot(rotated_position)
        rotated_position = self.rotation_matrix[0].dot(rotated_position)

        rotated_position = Coordinate(*rotated_position)

        # Translate by the camera's own world position
        return rotated_position + self.position
Esempio n. 10
0
    rotated_position = rotation_matrix[1].dot(identity_position)
    rotated_position = rotation_matrix[0].dot(rotated_position)
    rotated_position = rotation_matrix[2].dot(rotated_position)

    rotated_position = Coordinate(*rotated_position)

    return rotated_position.displace_by(initial_position)


# Alternative test data set:
# "position": { "x": 2.128, "y": 2.253, "z": 0.610 },
# initial_position = Coordinate(2.128, 2.253, 0.610)
# aim = Coordinate(2.14, 1.89, 1.42)
# rotation = [-22, 90, 0]

# "position": { "x": 0.15, "y": 2.284, "z": 6.903 },
initial_position = Coordinate(0.15, 2.284, 6.903)
aim = Coordinate(0.76, 1.88, 6.222)
rotation = [30, -120, 0]  # (x, y, z) rotation angles

print(np.cross(aim.as_vector(), initial_position.as_vector()))

thresh = 0.2  # acceptable per-axis error
step = 0.005  # rotation adjustment per iteration

# Observed effect of each rotation axis on the rotated point:
# increase in xrot increases y and z
# increase in yrot decreases x and increases z
# increase in zrot decreases x and y
# NOTE(review): made_update starts False, so the while loop below never
# executes as written — presumably it should start True; confirm intent.
made_update = False
while made_update:
    rotated = apply_rotation(initial_position, rotation)
    diff = aim.diff(rotated)
Esempio n. 11
0
    def __init__(self, json, camera_id, calibration, stop_flags):
        """
        Create Camera object.
          Also creates a camera capture object, so initialisation takes a
          second or so.

        Arguments:
          json {JSON} -- Config for camera
          camera_id -- identifier for this camera
          calibration {list} -- Calibration data for frame restore
          stop_flags {dict} -- shared flags used to signal shutdown

        Returns:
          Camera
        """

        self.stop_flags = stop_flags
        self.cam_id = camera_id
        self.url = json['url']

        # Physical placement and orientation of the camera in the room
        self.position = Coordinate(json['position']['x'],
                                   json['position']['y'],
                                   json['position']['z'])

        self.rotation = Coordinate(json['rotation']['x'],
                                   json['rotation']['y'],
                                   json['rotation']['z'])

        self.viewing_angle = {
            'vertical': json['viewing_angle']['vertical'],
            'horizontal': json['viewing_angle']['horizontal']
        }

        # Native capture resolution (from config)
        self.resolution = {
            'vertical': json['resolution']['vertical'],
            'horizontal': json['resolution']['horizontal'],
        }

        # Frames are processed at this fixed working resolution
        self.virtual_resolution = {'horizontal': 960, 'vertical': 720}

        # Precomputed midpoints used when mapping pixels <-> angles
        self.angular_horiz_midpoint = self.viewing_angle['horizontal'] / 2
        self.angular_vert_midpoint = self.viewing_angle['vertical'] / 2
        self.horiz_midpoint = self.virtual_resolution['horizontal'] / 2
        self.vert_midpoint = self.virtual_resolution['vertical'] / 2

        self.calibration = calibration

        # Opening the capture is what makes initialisation slow
        self.capture = cv.VideoCapture(self.url)

        # Working resolution in both (row, col) and (x, y) orderings
        self.resolution_yx = (self.virtual_resolution['vertical'],
                              self.virtual_resolution['horizontal'])
        self.resolution_xy = (self.virtual_resolution['horizontal'],
                              self.virtual_resolution['vertical'])

        self.current_background = np.zeros(self.resolution_yx, dtype=np.uint8)
        self.current_frame = None

        # Morphology / blur kernels used in frame processing
        self.kernel = np.ones((4, 4), np.uint8)
        self.big_kernel = np.ones((10, 10), np.uint8)
        self.blur_kernel = np.ones((4, 4), np.uint8) / (4**2)

        self.points_of_interest = []

        self.rotation_matrix = create_rotation_matrix(self.rotation.x,
                                                      self.rotation.y,
                                                      self.rotation.z)

        # Real-world coordinate corresponding to the frame centre
        midpoint = (self.horiz_midpoint, self.vert_midpoint)
        self.initial_point = self.calculate_real_world_coordinate(midpoint)
Esempio n. 12
0
def coordinate():
    """Fixture: a Coordinate at (0.1, 0.2, 0.3)."""
    x, y, z = 0.1, 0.2, 0.3
    return Coordinate(x, y, z)
Esempio n. 13
0
def test_diff_from_position(point_of_interest):
    """Distance from the POI at (2, 4, 6) to (1, 3, 5) is sqrt(3)."""
    other = Coordinate(1, 3, 5)
    expected = 1.7320508075688772  # sqrt(3)
    assert point_of_interest.diff_from_position(other) == expected
Esempio n. 14
0
    def combine_points(self):
        """
        Intersects all points from all cameras to find possible 3d world
        points of interest.

        Returns:
          list -- PointOfInterest instances whose camera rays intersect
          closely enough, one per consistent combination
        """

        # Collect the last known position of every fixture
        fixture_positions = []
        for universe in self.universes.universes:
            for fixture in universe.fixtures:
                if fixture.last_position is not None:
                    fixture_positions.append(fixture.last_position)

        # Pixel distance below which a POI is considered to collide with a
        # fixture's predicted on-camera position
        threshold = 25

        pois = []
        for camera in self.cameras:
            # NOTE(review): projecting fixture_positions into camera pixel
            # coordinates is currently disabled (previously a large
            # commented-out block here), so this list stays empty and no
            # POI is ever filtered out.
            fixture_camera_coordinates = []

            camera_pois = []
            for poi in camera.points_of_interest:
                collision = False
                for fixt_pos in fixture_camera_coordinates:
                    lx, ly = poi.location
                    fx, fy = fixt_pos

                    if abs(lx - fx) < threshold and abs(ly - fy) < threshold:
                        print('poi got too close')
                        # Deliberately log-only for now:
                        # collision = True

                if not collision:
                    camera_pois.append(poi)

            pois.append(camera_pois)

        if len(pois) == 0:
            return []

        # Every combination of one POI per camera
        pois = list(itertools.product(*pois))

        points_of_interest = []

        for poi in pois:
            close_enough = True

            points = []

            # Intersect every pair of camera POIs. Fix: this previously used
            # combinations(poi, len(poi)), which yields tuples of length
            # len(poi) and therefore only unpacks into (poi1, poi2) when
            # there are exactly two cameras — it raised ValueError for any
            # other camera count. combinations(poi, 2) is the pairwise form
            # and is identical for the two-camera case.
            for poi1, poi2 in itertools.combinations(poi, 2):
                close, points = self.calculate_intersection(
                    poi1, poi2, close_enough, points)
                if not close:
                    close_enough = False

            if close_enough:
                # Average all intersection points into a single 3d position
                poi = PointOfInterest(
                    Coordinate(*np.mean(list(zip(*points)), axis=1)))
                points_of_interest.append(poi)

        return points_of_interest
Esempio n. 15
0
                 daemon=daemon).start()
# Start the Art-Net reply and transmitter loops in background threads
threading.Thread(target=spotted.start_artnet_reply,
                 args=(transmit, ),
                 daemon=daemon).start()
threading.Thread(target=spotted.artnet_transmitter,
                 args=(transmit, ),
                 daemon=daemon).start()

# Each fixture runs its follow loop in its own thread
for universe in spotted.universes.universes:
    for fixture in universe.fixtures:
        threading.Thread(target=fixture.follow, daemon=daemon).start()

# Give the background threads a moment to come up
time.sleep(3)

# Repeatedly aim every fixture of the first universe at a fixed point
for _ in range(255):
    point = Coordinate(2.0, 0.0, 4.5)
    for fixture in spotted.universes.universes[0].fixtures:
        fixture.point_at(point)
        fixture.open()
        # self.current_state['maps'][fixture.fixture_id] = id(live_pois[index])
time.sleep(2)
# Stack the current frames of both cameras (when available) into one image.
# Fix: this line previously read `outframe = None` (missing underscore),
# leaving `out_frame` unbound — a NameError below whenever camera 0 had no
# frame yet.
out_frame = None
if spotted.cameras[0].current_frame is not None:
    out_frame = spotted.cameras[0].current_frame
if spotted.cameras[1].current_frame is not None:
    if out_frame is not None:
        out_frame = np.vstack((out_frame, spotted.cameras[1].current_frame))
    else:
        out_frame = spotted.cameras[1].current_frame
if out_frame is not None:
Esempio n. 16
0
def test_center(room):
    """The room centerpoint is half of each dimension."""
    expected = Coordinate(1.5, 2, 2.5)
    actual = room.center()
    assert actual.x == expected.x
    assert actual.y == expected.y
    assert actual.z == expected.z
Esempio n. 17
0
def point_of_interest():
    """Fixture: a PointOfInterest at (2, 4, 6) seen from a camera at (1, 2, 3)."""
    seen_from = Coordinate(1, 2, 3)
    world_position = Coordinate(2, 4, 6)
    pixel_location = (200, 100)
    return PointOfInterest(world_position, pixel_location, seen_from)