# Example #1
def process_event(event):
    """Pretty prints events.

    Prints all events that occur with two spaces between each new
    conversation and a single space between turns of a conversation.

    Args:
        event(event.Event): The current event to process.
    """
    if event.type == EventType.ON_CONVERSATION_TURN_STARTED:
        print()

    print(event)

    finished = event.type == EventType.ON_CONVERSATION_TURN_FINISHED
    if finished and event.args and not event.args['with_follow_on_turn']:
        print()

    if event.type != EventType.ON_DEVICE_ACTION:
        return

    for command, params in event.actions:
        print('Do command', command, 'with params', str(params))
        # On an OnOff command, wait for the second camera thread to finish.
        if command == 'action.devices.commands.OnOff':
            threads[1].join()
        # LookAtMe spawns a fresh camera thread on channel 0.
        if command == 'luxo.command.LookAtMe':
            cam_thread = vision.Camera(feed, 0)
            threads.append(cam_thread)
            cam_thread.start()
# Example #2
def look_at_object(subject):
    """Point a camera thread at the requested subject.

    Joins and discards the oldest running camera thread (if any), then
    starts a new ``vision.Camera`` thread for the requested subject.

    Args:
        subject (str): One of 'me', 'this' or 'my desk'.

    Returns:
        The spoken statement for the subject, or None when the subject
        is not recognised (preserving the original implicit None).
    """
    # (camera channel, spoken response) per recognised subject.
    # NOTE: 'this' and 'my desk' intentionally share channel 2, as in
    # the original branch-per-subject implementation.
    targets = {
        'me': (0, 'Looking at you'),
        'this': (2, 'Looking for it'),
        'my desk': (2, 'Looking at your desk'),
    }

    # Finish and drop the oldest camera thread before starting a new one.
    if threads:
        threads[0].join()
        del threads[0]

    if subject not in targets:
        return None

    channel, response = targets[subject]
    t = vision.Camera(feed, channel)
    threads.append(t)
    t.start()
    return statement(response)
# Example #3
    def __init__(self, sid, log):
        """Set up the camera and the servo rig for one stream.

        Args:
            sid: Stream/session identifier forwarded to both the camera
                and the servo helpers.
            log: Logger shared by the camera and servo helpers.
        """
        import vision  # local import kept as in the original
        self.log = log

        # Initialise camera
        self.cam = vision.Camera(sid=sid, log=log)

        # Initialise servos; the servo needs the camera resolution to
        # map image coordinates to servo angles.
        self.servo = vision.Servo(sid=sid,
                                  log=log,
                                  img_width=self.cam.cam.resolution[0],
                                  img_height=self.cam.cam.resolution[1])
        # Removed a stray `return data, modified_frame`: both names were
        # undefined here (NameError) and __init__ must return None
        # (returning a value raises TypeError). The line appears pasted
        # from a different method such as get_world_state.


# Test tracking objects of certain color
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("pitch", help="[0] Main pitch, [1] Secondary pitch")
    # parser.add_argument("color", help="The color to adjust")
    args = parser.parse_args()

    pitch_number = int(args.pitch)

    # Camera feed for the selected pitch.
    capture = vision.Camera(pitch=pitch_number)
    #capture = cv2.VideoCapture('output.avi')
    # Per-pitch colour calibration and crop regions.
    calibration = tools.get_colors(pitch=pitch_number)
    croppings = tools.get_croppings(pitch=pitch_number)
    tracker = MyTracker(calibration=calibration, pitch_number=pitch_number)

    while True:

        frame = capture.get_frame()
        #re,frame = capture.read()

        # NOTE(review): get_world_state presumably returns tracking data
        # plus an annotated frame -- confirm against MyTracker.
        data, modified_frame = tracker.get_world_state(frame)

        cv2.imshow('Tracker', modified_frame)

        # NOTE(review): the body of this press-'q'-to-quit check is
        # missing -- the snippet appears truncated here.
        if cv2.waitKey(1) & 0xFF == ord('q'):
# Example #5
viz.defaultAxisSetup(ax)

# Compose the camera orientation from two elementary rotations.
cam_rotation = rb.roty(math.pi / 2.0) @ rb.rotz(-math.pi / 2.0)
print(cam_rotation)


# Four coloured edges tracing a closed rectangle in space.
edge_a = vsn.Line(ax, [6, 3, 0], [6, 3, 2], color=np.array([255, 0, 0]))
edge_b = vsn.Line(ax, [6, 3, 2], [7, 4, 2], color=np.array([0, 255, 0]))
edge_c = vsn.Line(ax, [7, 4, 2], [7, 4, 0], color=np.array([0, 0, 255]))
edge_d = vsn.Line(ax, [7, 4, 0], [6, 3, 0], color=np.array([255, 0, 255]))


print(cam_rotation)

cam_translation = np.array([1, 3, 1])

# A homogeneous world point, plotted for reference.
world_point = np.array([6.0, 4.0, 2.0, 1.0])
viz.plotHomgPoint(ax, world_point)

# Pinhole intrinsics: focal length 1500 px, principal point (640, 512).
K = np.array([[1500, 0, 640], [0, 1500, 512], [0, 0, 1]])

camera = vsn.Camera(ax, cam_rotation, cam_translation, K, name="c")
camera.drawBody()
camera.takePicture([edge_a, edge_b, edge_c, edge_d])
camera.showNormalizedImagePlane(ax)

plt.show()

if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("pitch", help="[0] Main pitch, [1] Secondary pitch")
    args = parser.parse_args()

    pitch_number = int(args.pitch)

    # Initialize calibration window
    colors = ['red', 'blue', 'bright_green', 'pink', 'yellow']
    calibration = tools.get_colors(pitch=pitch_number)

    # Fixed: Python 2 print statement -> print() function, matching the
    # Python 3 syntax used elsewhere in this file.
    print(calibration['yellow'])

    # Initialize camera
    cam = vision.Camera(pitch=pitch_number)

    auto_brightness = True
    settings = calibration['camera']
    desired = 50

    while auto_brightness:
        print("Getting best brightness")
        best_brightness = 0
        best_distance = 100

        # Sweep candidate brightness values 0..99.
        for test_brightness in range(100):

            settings['brightness'] = test_brightness

            # Update camera settings
            # NOTE(review): the loop body (and whatever clears
            # auto_brightness) is missing -- the snippet is truncated here.
# Example #7
print(R2)
print("t1")
print(t1)
print("t2")
print(t2)

# NOTE(review): R is computed but never used in the visible span.
R = rb.roty(math.pi / 2)

# Homogeneous transform assembled from rotation R1 and translation t1.
T = rb.makeTrans(R1, t1)
print("T")
print(T)

print("Norm of t:")
print(np.linalg.norm(t1))

# Two cameras: one at the default pose, one posed by (R2, t1).
cam1 = vsn.Camera(ax)
cam2 = vsn.Camera(ax, R2, t1)

# Normalized image-plane points in homogeneous form (z = 1).
s1 = np.array([0.0098, 0.1004, 1.0])
s2 = np.array([0.1920, 0.1078, 1.0])

# Direction s1 with a zero moment part -- presumably a Plucker-style ray
# through the first camera centre; confirm vsn's line convention.
line1 = np.concatenate((s1, np.array([.0, .0, .0])))

# Rotate s2 back into the first camera's frame; l2_dash is the moment
# term built from the translation. NOTE(review): looks like epipolar
# line construction -- confirm the sign convention against rb/vsn docs.
l2 = R2.transpose() @ s2
l2_dash = -R2.transpose() @ (np.cross(t1, s2))
print("l2")
print(l2)
print("l2 dash")
print(l2_dash)

line2 = np.hstack((l2, l2_dash))