def test_random_operation():
    ''' Randomly connect and disconnect the client; this is very likely to fail. '''
    echo_server = MessageServer((localhost, echo_port))
    echo_server.start()
    client = unrealcv.Client((localhost, echo_port))

    num_random_trial = 10
    print('Try random operation %d times' % num_random_trial)
    for i in range(num_random_trial):
        msg = 'Trial %d' % i
        choice = random.randrange(2)
        if choice == 1:
            client.connect()
            time.sleep(0.1)
            assert client.isconnected() == True, msg
        elif choice == 0:
            client.disconnect()
            time.sleep(0.1)
            assert client.isconnected() == False, msg

    for i in range(10):
        client.connect()
        assert client.isconnected() == True
    for i in range(10):
        client.disconnect()
        assert client.isconnected() == False
    echo_server.shutdown()
def test_client_release():
    '''
    If the previous client releases the connection, further connections should be accepted. This also tests the server code.
    '''
    echo_server = MessageServer((localhost, echo_port))
    echo_server.start()
    client = unrealcv.Client((localhost, echo_port))

    num_release_trial = 10
    print('Try to release client %d times' % num_release_trial)
    for i in range(num_release_trial):
        msg = 'Trial %d' % i
        client.connect()
        assert client.isconnected() == True, msg

        # Do something
        req = 'ok'
        res = client.request(req)
        assert req == res, msg

        client.disconnect()  # Make sure the server can correctly handle the disconnection signal
        assert client.isconnected() == False, msg
        print('Trial %d is finished.' % i)
    echo_server.shutdown()
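
These tests rely on module-level fixtures that are not shown in the snippets: an echo MessageServer helper plus the localhost/echo_port constants and the usual imports. A minimal sketch of that setup, with names and the port value as assumptions rather than values from the original test module, could look like this:

import random
import time

import unrealcv
# MessageServer is a small TCP echo server used only by these tests; its real
# import path is not shown in the snippets, so it is left as a placeholder here.
# from test_helpers import MessageServer

localhost = '127.0.0.1'  # assumed host
echo_port = 9010         # assumed free port for the echo server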
Example No. 3
    def __init__(self, rc_x=0, rc_y=0):

        # ue4 is assumed to be an unrealcv client instance created elsewhere in this module
        ue4.connect(timeout=5)

        self.connected = False
        if not ue4.isconnected():
            print("UnrealCV server is not running.")
        else:
            self.connected = True
            print(ue4.request("vget /unrealcv/status"))

        # TODO: these are specific to the default maze
#         self.initial_x = 700
#         self.initial_y = -700
        self.initial_x = 1050 - (100 * rc_x)
        self.initial_y = -1050 + (100 * rc_y)
        self.initial_angle = 0

        self.x = self.initial_x
        self.y = self.initial_y
        self.angle = self.initial_angle

        self.turn_speed = 5
        self.walk_speed = 50

        if self.connected:
            self.set_pose()
    def begin_deux(self):
        # Connect to the game
        from unrealcv import client
        client.connect()
        if not client.isconnected():
            print('UnrealCV server is not running. Run the game downloaded '
                  'from http://unrealcv.github.io first.')
            sys.exit(-1)

        # Make sure the connection works well
        res = client.request('vget /unrealcv/status')
        # The image resolution and port are configured in the config file.
        print(res)

        scene_objects = client.request('vget /objects').split(' ')
        print('Number of objects in this scene:', len(scene_objects))

        # Create a JSON file that maps each object's ID to a class name.
        # We use this to group objects by class.
        obj_id_to_class = {}
        for obj_id in scene_objects:
            obj_id_parts = obj_id.split('_')
            class_name = obj_id_parts[1]
            obj_id_to_class[obj_id] = class_name

        # Write JSON file
        with open('../data/neighborhood_deux_object_ids.json', 'w') as outfile:
            json.dump(obj_id_to_class, outfile)
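
The comment above notes that the image resolution and port come from the UnrealCV config file. As a minimal, hedged sketch, assuming the conventional unrealcv.ini layout (a [UnrealCV.Core] section with Port/Width/Height keys; the file path depends on how the game is packaged), the values could be read with configparser:

import configparser

def read_unrealcv_config(ini_path='unrealcv.ini'):
    # Parse the UnrealCV config shipped next to the game binary.
    # Section and key names follow the common unrealcv.ini layout;
    # adjust them if your build differs.
    config = configparser.ConfigParser()
    config.read(ini_path)
    core = config['UnrealCV.Core']
    return {
        'port': core.getint('Port'),
        'width': core.getint('Width'),
        'height': core.getint('Height'),
    }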
Example No. 5
def main():
    loc = None
    rot = None

    fig, ax = plt.subplots()
    img = np.zeros((480, 640, 4))
    ax.imshow(img)

    def onpress(event):
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10 units for each key press

        # Up and Down in cam-plane
        if event.key == 'w': loc[0] += loc_offset
        if event.key == 's': loc[0] -= loc_offset
        # Left and Right movement in cam-plane
        if event.key == 'a': loc[1] -= loc_offset
        if event.key == 'd': loc[1] += loc_offset
        # In and Out movement into cam-plane
        if event.key == 'q': loc[2] += loc_offset
        if event.key == 'e': loc[2] -= loc_offset

        # cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        # client.request(cmd)
        cmd = 'vset /camera/0/moveto %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)

        print(client.request('vget /camera/0/location'))
        print(client.request('vget /camera/0/rotation'))
        res = client.request('vget /camera/2/lit png')
        img = read_png(res)

        ax.imshow(img)
        fig.canvas.draw()

    client.connect()
    if not client.isconnected():
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    else:
        print(help_message)

    print("-------------------------------------------------------------------")
    print(client.request('vget /objects'))
    print("-------------------------------------------------------------------")
    print(client.request('vget /cameras'))
    print("-------------------------------------------------------------------")
    # print(client.request('vget /cameras'))
    # print("-------------------------------------------------------------------")

    init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
    init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]

    loc = init_loc; rot = init_rot

    fig.canvas.mpl_connect('key_press_event', onpress)
    plt.title('Keep this window in focus, it will be used to receive key press event')
    plt.axis('off')
    plt.show() # Add event handler
    def __init__(self):

        client.connect()
        if not client.isconnected():
            print(
                'UnrealCV server is not running. Run the game downloaded from http://unrealcv.github.io first.'
            )
            sys.exit(-1)
def test_multi_connection():
    '''
    Only one client is allowed by the server.
    Make a second connection to the server while one connection already exists.
    '''
    echo_server = MessageServer((localhost, echo_port))
    echo_server.start()
    client = unrealcv.Client((localhost, echo_port))
    client.connect(timeout=0.1)
    assert client.isconnected() == True
    response = client.request('hi')
    assert response == 'hi'
    for i in range(10):
        client = unrealcv.Client((localhost, echo_port))
        client.connect(0.1)
        # print(client.connect())
        assert client.isconnected() == False
    client.disconnect()
    echo_server.shutdown()
Example No. 8
def main():
    loc = None
    rot = None

    fig, ax = plt.subplots()
    img = np.zeros((480, 640, 4))
    ax.imshow(img)

    def onpress(event):
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10 units for each key press

        if event.key == 'a': rot[1] -= rot_offset
        if event.key == 'd': rot[1] += rot_offset
        if event.key == 'q': loc[2] += loc_offset # Move up
        if event.key == 'e': loc[2] -= loc_offset # Move down

        if event.key == 'w': loc[1] -= loc_offset
        if event.key == 's': loc[1] += loc_offset
        if event.key == 'up': loc[1] -= loc_offset
        if event.key == 'down': loc[1] += loc_offset
        if event.key == 'left': loc[0] -= loc_offset
        if event.key == 'right': loc[0] += loc_offset

        cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        client.request(cmd)
        cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)

        res = client.request('vget /camera/0/lit png')
        img = read_png(res)

        # print(event.key)
        # print('Requested image %s' % str(img.shape))

        ax.imshow(img)
        fig.canvas.draw()

    client.connect()
    if not client.isconnected():
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    else:
        print(help_message)

    init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
    init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]

    loc = init_loc; rot = init_rot

    fig.canvas.mpl_connect('key_press_event', onpress)
    plt.title('Keep this window in focus, it will be used to receive key press event')
    plt.axis('off')
    plt.show() # Add event handler
def test_no_server():
    ''' What if server is not started yet? '''

    no_port = 9012
    client = unrealcv.Client((localhost, no_port), None)
    client.connect()
    assert client.isconnected() == False
    cmds = ['hi', 'hello']
    for cmd in cmds:
        res = client.request(cmd)
        assert res == None
Example No. 10
def test_request_timeout():
    ''' What if the server does not respond with a correct reply? '''

    null_port = 9011
    null_server = NullServer((localhost, null_port))
    null_server.start()

    client = unrealcv.Client((localhost, null_port))
    client.connect()
    assert client.isconnected() == True
    response = client.request('hi', timeout=1)
    assert response == None
Example No. 11
def test_request():
    ''' Simple test for basic functions '''
    server = MessageServer((localhost, echo_port))
    server.start()
    client = unrealcv.Client((localhost, echo_port))
    cmds = ['hi', 'hello', 'asdf' * 70]
    client.connect()
    assert client.isconnected() == True
    for cmd in cmds:
        res = client.request(cmd)
        assert res == cmd
    client.disconnect()  # TODO: What if forgot to disconnect
    server.shutdown()
Example No. 12
    def collect(self, count=400, preprocess=True):
        client.connect()
        if not client.isconnected():
            raise RuntimeError("Could not connect to client. ")

        # First we prepare the Unreal Engine environment by preprocessing it
        if preprocess:
            PreProcessor(self.environment_folder).preprocess()

        # Then we build our dataset
        Builder(self.environment_folder).build(count)

        client.disconnect()
Example No. 13
def main():
    client.connect()
    if not client.isconnected():
        print("LOG: UnrealCV server is not running")
    else:
        root = Tk()

        #size of the window
        root.geometry("1200x610")

        app = Window(root)
        
        root.mainloop()

        client.disconnect()
Example No. 14
def build():
    default_environment_folder = UnrealCollector(
        "testing_default").environment_folder

    client.connect()
    if not client.isconnected():
        raise RuntimeError("Could not connect to client. ")

    builder = Builder(default_environment_folder)
    points = builder.get_random_points(400)
    builder.build_from_points(points)

    new_environment_folder = UnrealCollector("testing_new").environment_folder
    builder = Builder(new_environment_folder)
    PreProcessor(new_environment_folder).preprocess()
    builder.build_from_points(points)

    client.disconnect()
Example No. 15
def main():
    client.connect()
    if not client.isconnected():
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    else:
        print(help_message)

    init_loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
    init_rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
    global rot, loc
    loc = init_loc; rot = init_rot
    image = np.zeros((300, 300))

    fig, ax = plt.subplots()
    fig.canvas.mpl_connect('key_press_event', onpress)
    ax.imshow(image)
    plt.title('Keep this window in focus, used to receive key press event')
    plt.axis('off')
    plt.show() # Add event handler
Example No. 16
def test_message_handler():
    ''' Check that the message handler can correctly handle events from the server,
    and that the thread is managed correctly so we can issue requests inside the message_handler. '''
    echo_server = MessageServer((localhost, echo_port))
    echo_server.start()
    client = unrealcv.Client((localhost, echo_port))

    def handle_message(msg):
        print('Got server message %s' % repr(msg))
        res = client.request('ok', 1)  # use the locally connected client
        assert res == 'ok'
        print('Server response %s' % res)

    client.connect()
    assert client.isconnected() == True
    client.message_handler = handle_message

    res = client.request('ok')
    assert res == 'ok'

    echo_server.send('Hello from server')
    time.sleep(5)
    echo_server.shutdown()
Example No. 17
    def __init__(self):
        client.connect()
        if not client.isconnected():
            raise IOError
Example No. 18
from unrealcv import client
client.connect()  # Connect to the game
if not client.isconnected():  # Check if the connection is successfully established
    print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
else:
    filename = client.request('vget /camera/0/lit')
    print('Image is saved to %s' % filename)
    for gt_type in ['normal', 'object_mask']:
        filename = client.request('vget /camera/0/%s' % gt_type)
        print('%s is saved to %s' % (gt_type, filename))
    filename = client.request('vget /camera/0/depth depth.exr')
    print('depth is saved to %s' % filename)
    # Depth needs to be saved to HDR image to ensure numerical accuracy
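
To actually load the saved depth.exr, one option is OpenCV's EXR reader. This is a minimal sketch, not part of the snippet above; it assumes opencv-python is installed and that its OpenEXR codec is enabled (recent builds require the OPENCV_IO_ENABLE_OPENEXR environment variable to be set before importing cv2):

import os
os.environ.setdefault('OPENCV_IO_ENABLE_OPENEXR', '1')  # must be set before importing cv2

import cv2
import numpy as np

depth = cv2.imread('depth.exr', cv2.IMREAD_UNCHANGED)  # float32 array, one depth value per pixel
assert depth is not None, 'depth.exr not found or EXR support disabled in this OpenCV build'
print(depth.dtype, depth.shape, np.nanmin(depth), np.nanmax(depth))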
Example No. 19
import json
import numpy as np
import cv2
import matplotlib.pyplot as plt
from unrealcv.util import read_npy, read_png
from unrealcv import client

while not client.isconnected():
    client.connect()
    if client.isconnected():
        print('Connected to Unreal Engine 4')
        break

img_file_id = '0'
output_folder = 'C:/Code/'
camera_id = '0'

target_name_list = ['Chair']
actor_name_list = client.request('vget /objects').split()

# Store rgba tuple as value
actor_color_dict = {}
target_actor_dict = {}
target_rect_dict = {}

for actor_name in actor_name_list:
    tmp_color = client.request('vget /object/' + actor_name +
                               '/color')[1:-1].split(',')
    rgba = tuple([int(color[2:]) for color in tmp_color])
    actor_color_dict[actor_name] = rgba
    for target_name in target_name_list:
import time

from unrealcv import client

trajectory = []

if __name__ == '__main__':

    client.connect()
    print("Connected. Camera trajectory recording starts in 3 s.")
    # Start after a three-second delay
    time.sleep(3)
    # client.message_handler = message_handler
    for i in range(1, 100):
        time.sleep(0.5)
        rot = [
            float(v)
            for v in client.request('vget /camera/0/rotation').split(' ')
        ]
        loc = [
            float(v)
            for v in client.request('vget /camera/0/location').split(' ')
        ]
        print("rot:", rot, "loc", loc)
        trajectory.append(dict(rotation=rot, location=loc))

    if not client.isconnected():
        print(
            'Cannot connect to the game; please run the game downloaded from http://unrealcv.github.io first'
        )
    else:
        time.sleep(5)
    def __init__(self):
        '''  Initialize ros node, unrealcv client and params '''
        rospy.on_shutdown(self.reset_client)
        self.should_terminate = False

        # Read in params
        self.mode = rospy.get_param(
            '~mode', "standard")  # Client mode (test, standard, fast, fast2)
        self.collision_on = rospy.get_param('~collision_on',
                                            True)  # Check for collision
        self.publish_tf = rospy.get_param(
            '~publish_tf',
            False)  # If true publish the camera transformation in tf
        self.slowdown = rospy.get_param(
            '~slowdown',
            0.0)  # Artificially slow down rate for UE to finish rendering
        self.camera_id = rospy.get_param(
            '~camera_id',
            0)  # CameraID for unrealcv compatibility (usually use 0)
        self.queue_size = rospy.get_param('~queue_size',
                                          1)  # How many requests are kept

        # Select client mode
        mode_types = {
            'standard': 'standard',
            'fast': 'fast',
            'test': 'test',
            'generate': 'generate'
        }
        selected = mode_types.get(self.mode, 'NotFound')
        if selected == 'NotFound':
            warning = "Unknown client mode '" + self.mode + "'. Implemented modes are: " + \
                      "".join(["'" + m + "', " for m in mode_types])
            rospy.logfatal(warning[:-2])

        # Setup unrealcv client
        client.connect()
        if not client.isconnected():
            rospy.logfatal(
                "No unreal game running to connect to. Please start a game before launching the node."
            )

        status = client.request('vget /unrealcv/status')
        if status is None:
            rospy.logfatal(
                "Error addressing the unrealcv client. Try restarting the game."
            )

        rospy.loginfo("Unrealcv client status:\n" + status)

        # Setup camera parameters from unrealcv config
        loc_width = status.find('Width:')
        loc_height = status.find('Height:')
        loc_fov = status.find('FOV:')
        loc_end = status.find('EnableInput:')
        width = int(status[loc_width + 7:loc_height])
        height = int(status[loc_height + 8:loc_fov])
        fov = float(status[loc_fov + 5:loc_end])
        f = width / 2 / np.tan(fov * math.pi / 180 / 2)
        rospy.set_param(
            '~camera_params', {
                'width': float(width),
                'height': float(height),
                'focal_length': float(f)
            })

        # Initialize relative coordinate system (so camera starts at [0, 0, 0] position and [0, 0, yaw]).
        location = client.request('vget /camera/%i/location' % self.camera_id)
        self.coord_origin = np.array(
            [float(x) for x in str(location).split(' ')])
        rot = client.request('vget /camera/%i/rotation' % self.camera_id)
        self.coord_yaw = float(str(rot).split(' ')[1])
        client.request("vset /camera/{0:d}/rotation 0 {1:f} 0".format(
            self.camera_id, self.coord_yaw))

        # tf broadcaster
        if self.mode == 'test' or self.publish_tf:
            self.tf_br = tf.TransformBroadcaster()  # Publish camera transforms in tf

        # Setup mode
        if self.mode == 'test':
            rospy.Timer(rospy.Duration(0.01),
                        self.test_callback)  # 100 Hz try capture frequency

        elif self.mode == 'generate':
            rospy.Timer(rospy.Duration(0.05),
                        self.generate_traj_callback)  # 20 Hz capture
            self.posepub = rospy.Publisher("~ue_sensor_pose",
                                           PoseStamped,
                                           queue_size=10)

        elif self.mode == 'standard':
            self.sub = rospy.Subscriber("odometry",
                                        Odometry,
                                        self.odom_callback,
                                        queue_size=self.queue_size,
                                        buff_size=(2**24) * self.queue_size)
            # The buffersize needs to be large enough to fit all messages, otherwise strange things happen
            self.previous_odom_msg = None  # Previously processed Odom message
            self.collision_tolerance = rospy.get_param(
                '~collision_tol', 10)  # Distance threshold in UE units

        elif self.mode == 'fast':
            self.sub = rospy.Subscriber("odometry",
                                        Odometry,
                                        self.fast_callback,
                                        queue_size=self.queue_size,
                                        buff_size=(2**24) * self.queue_size)
            self.previous_odom_msg = None  # Previously processed Odom message

        # Finish setup
        self.pub = rospy.Publisher("~ue_sensor_raw",
                                   UeSensorRaw,
                                   queue_size=10)
        rospy.Service('~terminate_with_reset', SetBool,
                      self.terminate_with_reset_srv)
        if self.collision_on:
            self.collision_pub = rospy.Publisher("~collision",
                                                 String,
                                                 queue_size=10)
        rospy.loginfo("unreal_ros_client is ready in %s mode." % self.mode)
Example No. 22
import sys, atexit, argparse, json, time
sys.path.append('..')
from unrealcv import client

trajectory = []

def message_handler(message):
    if message == 'clicked':
        rot = [float(v) for v in client.request('vget /camera/0/rotation').split(' ')]
        loc = [float(v) for v in client.request('vget /camera/0/location').split(' ')]
        trajectory.append(dict(rotation = rot, location = loc))

def save_to_file(filename):
    if len(trajectory) != 0:
        with open(filename, 'w') as f:
            json.dump(trajectory, f, indent = 4)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--filename', default='camera-trajectory.json')
    args = parser.parse_args()

    atexit.register(save_to_file, args.filename)
    client.connect()
    client.message_handler = message_handler
    if not client.isconnected():
        print('Cannot connect to the game; please run the game downloaded from http://unrealcv.github.io first')
    else:
        time.sleep(60 * 60 * 24)
def main():
    client.connect()  # Connect to the game
    if not client.isconnected():  # Check if the connection is successfully established
        print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
        return
    label = 'unrealPic_detail_'
    #label = 'unrealPic_front_'
    #label = 'unrealPic_oblique_'
    iterations = 1300
    displacementVal = 2

    camLocations = [  #'-955.886', '339', '286.650',
        #'-1235.886', '339', '286.650',
        # detailed view
        #['-900', '-200', '320'],
        #['-900', '-200', '250'],
        #['-900', '-200', '180'],
        #['-900', '-200', '90'],
        #['-900', '-200', '10']
        # front view
        #['-1100', '-200', '280'],
        #['-1100', '-200', '140']
        #['-1160', '-455', '280'] # side view test
        # oblique view
        #['-1300', '-200', '200']

        # test view
        ['-1000', '-200', '280'],
        ['-1000', '-200', '140']
        #'-1035.886', '339', '286.650']#,
        #'-1135.886', '339', '286.650',
        #'-955.886', '339', '386.650',
        #'-1135.886', '339', '16.650']
    ]

    # front view
    # rotation : 0 0 0
    #            340.871 80.879 360.000
    camRotations = [
        '0 0 0'
    ]  #, '353.586 10.463 360.000', '353.586 -10.463 360.000', '340.586 0.463 360.000','340.586 0.463 10.000', '353.586 10.463 -10.000']
    # Get a list of all objects in the scene
    ret = client.request('vget /objects')
    if ret is not None:
        scene_objects = ret.split(' ')
        print('There are %d objects in this scene' % len(scene_objects))
        color_mapping = get_color_mapping(client, scene_objects)
        startFileNum = 0
        for rot in range(len(camRotations)):
            client.request('vset /camera/0/rotation ' + camRotations[rot])
            for loc in range(len(camLocations)):
                loc_x = float(camLocations[loc][0])
                loc_y = float(camLocations[loc][1])
                loc_z = float(camLocations[loc][2])
                for i in range(iterations):
                    client.request('vset /camera/0/location ' + str(loc_x) +
                                   ' ' + str(loc_y) + ' ' + str(loc_z))
                    filename = client.request('vget /camera/0/object_mask')
                    img = io.imread(filename)
                    print(img.shape)

                    anno = ET.Element('annotation')
                    fileN = ET.SubElement(anno, 'filename')
                    FileId = startFileNum + (i + iterations *
                                             (rot * len(camLocations) + loc))
                    fileN.text = 'ScreenShot' + '{0:05d}'.format(
                        FileId) + '.png'
                    # fileN = ET.SubElement(anno, 'depth_filename')
                    # fileN.text = 'ScreenShot'+ '{0:05d}'.format(startFileNum + (loc + iterations*rot) * 2 + 1) + '.png'
                    folder = ET.SubElement(anno, 'folder')
                    source = ET.SubElement(anno, 'source')
                    source = ET.SubElement(source, 'submittedBy')
                    source.text = 'Unreal Engine'
                    imagesize = ET.SubElement(anno, 'imagesize')
                    nrows = ET.SubElement(imagesize, 'nrows')
                    nrows.text = str(img.shape[0])
                    ncols = ET.SubElement(imagesize, 'ncols')
                    ncols.text = str(img.shape[1])

                    for scene_obj in range(len(scene_objects)):
                        color = color_mapping[scene_objects[scene_obj]]
                        poly = getPoly(img, color.R, color.G, color.B)
                        nameParts = scene_objects[scene_obj].split("_")
                        if poly != [] and len(poly[0]) > 100 and \
                            ('chips' in scene_objects[scene_obj] or 'pop' in scene_objects[scene_obj]) and \
                            'shelf_chips' not in scene_objects[scene_obj]:#nameParts[0] == 'chips':
                            obj = ET.SubElement(anno, 'object')
                            name = ET.SubElement(obj, 'name')
                            if nameParts[0] != 'shelf':
                                name.text = nameParts[0]
                            else:
                                name.text = ''
                            if len(nameParts) > 1:
                                for k in range(1, len(nameParts) - 1):
                                    name.text += ' ' + nameParts[k]
                            deletion = ET.SubElement(obj, 'deleted')
                            verification = ET.SubElement(obj, 'verified')
                            occlusion = ET.SubElement(obj, 'occluded')
                            reverse = ET.SubElement(obj, 'reversed')
                            if 'BACK' in scene_objects[scene_obj]:
                                reverse.text = 'true'
                            else:
                                reverse.text = 'false'
                            parts = ET.SubElement(obj, 'parts')
                            polygon = ET.SubElement(obj, 'polygon')
                            username = ET.SubElement(polygon, 'username')
                            username.text = 'Unreal Engine'
                            fullPoly = np.concatenate((poly[0:]))
                            for ply in range(len(fullPoly)):
                                pt = ET.SubElement(polygon, 'pt')
                                ptx = ET.SubElement(pt, 'x')
                                ptx.text = str(fullPoly[ply, 1])
                                pty = ET.SubElement(pt, 'y')
                                pty.text = str(fullPoly[ply, 0])
                    loc_y += displacementVal
                    f = open(
                        label +
                        '{0:05d}'.format(i + iterations *
                                         (rot * len(camLocations) + loc)) +
                        '.xml', 'w')
                    f.write(ET.tostring(anno))
                    f.close()
                    # ScreenShot Location from vrun shot
                    # ${UnrealCV Project}/Saved/Screenshots/Linux/ScreenShot*.png
                    client.request('vset /viewmode lit')
                    print(FileId, client.request('vrun shot'))
                    # client.request('vset /viewmode depth')
                    # print(client.request('vrun shot'))
        return
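
The annotation loop above relies on helpers such as get_color_mapping and getPoly that are not shown. A plausible, hedged sketch of getPoly, based only on how its output is used (a list of contour arrays whose points are later read as y = pt[0], x = pt[1]), could use scikit-image; the exact color matching is an assumption:

import numpy as np
from skimage import measure

def getPoly(img, r, g, b):
    # Boolean mask of pixels whose object-mask color matches (r, g, b) exactly
    mask = (img[:, :, 0] == r) & (img[:, :, 1] == g) & (img[:, :, 2] == b)
    # find_contours returns a list of (N, 2) arrays of (row, col) points,
    # matching the fullPoly[:, 0] -> y, fullPoly[:, 1] -> x indexing above
    return measure.find_contours(mask.astype(np.uint8), 0.5)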
def main():
    client.connect()  # Connect to the game
    if not client.isconnected():  # Check if the connection is successfully established
        print('UnrealCV server is not running.')
        sys.exit(-1)
    else:
        res = client.request('vget /unrealcv/status')
        # The image resolution and port are configured in the config file.
        print(res)

        traj_file = '../../docs/tutorials_source/camera_traj.json'
        camera_trajectory = json.load(open(traj_file))

        idx = 1
        loc, rot = camera_trajectory[idx]
        # Set position of the first camera
        client.request('vset /camera/0/location {x} {y} {z}'.format(**loc))
        client.request('vset /camera/0/rotation {pitch} {yaw} {roll}'.format(**rot))

        # Get image
        res = client.request('vget /camera/0/lit lit.png')
        print('The image is saved to %s' % res)
        im = read_png(res)

        # Generate Ground Truth
        res = client.request('vget /camera/0/object_mask object_mask.png')
        print('The image is saved to %s' % res)
        object_mask = read_png(res)
        res = client.request('vget /camera/0/normal normal.png')
        print('The image is saved to %s' % res)
        normal = read_png(res)

        # Generate Depth
        res = client.request('vget /camera/0/depth depth.png')
        print('The image is saved to %s' % res)

        # Get objects from the scene
        scene_objects = client.request('vget /objects').split(' ')
        print('Number of objects in this scene: ', len(scene_objects))
        print('They are: ', scene_objects)

        # Map from object id to the labeling color
        id2color = {}
        for obj_id in scene_objects:
            color = Color(client.request('vget /object/%s/color' % obj_id))
            id2color[obj_id] = color
            # print('%s : %s' % (obj_id, str(color)))

        id2mask = {}
        for obj_id in scene_objects:
            color = id2color[obj_id]
            mask = match_color(object_mask, [color.R, color.G, color.B], tolerance=3)
            if mask is not None:
                id2mask[obj_id] = mask

        obj_file = '../../docs/tutorials_source/object_category.json'
        with open(obj_file) as f:
            id2category = json.load(f)
        categories = set(id2category.values())
        # Show statistics of this frame
        image_objects = id2mask.keys()
        print('Number of objects in this image:', len(image_objects))
        print('%20s : %s' % ('Category name', 'Object name'))
        for category in categories:
            objects = [v for v in image_objects if id2category.get(v) == category]
            if len(objects) > 6:  # Trim the list if too long
                objects[6:] = ['...']
            if len(objects) != 0:
                print('%20s : %s' % (category, objects))

        # Plot objs
        vase_instance = [v for v in image_objects if id2category.get(v) == 'Vase']
        mask = sum(id2mask[v] for v in vase_instance)
        plt.figure()
        plt.imshow(mask, cmap="gray")
        plt.show()

        client.disconnect()
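
Color and match_color above come from the UnrealCV tutorial helpers and are not defined in this snippet. A minimal sketch consistent with how they are used here (Color parses the '(R=..,G=..,B=..,A=..)' string; match_color returns a boolean mask or None) might look like the following; treat it as an illustration rather than the exact tutorial code:

import re
import numpy as np

class Color(object):
    ''' Parse a color string such as '(R=127,G=191,B=127,A=255)'. '''
    regexp = re.compile(r'\(R=(.*),G=(.*),B=(.*),A=(.*)\)')

    def __init__(self, color_str):
        self.color_str = color_str
        match = self.regexp.match(color_str)
        (self.R, self.G, self.B, self.A) = [int(match.group(i)) for i in range(1, 5)]

    def __repr__(self):
        return self.color_str

def match_color(object_mask, target_color, tolerance=3):
    ''' Return a boolean mask of pixels within tolerance of target_color, or None if no pixel matches. '''
    match_region = np.ones(object_mask.shape[:2], dtype=bool)
    for c in range(3):  # R, G, B channels
        min_val = target_color[c] - tolerance
        max_val = target_color[c] + tolerance
        channel = object_mask[:, :, c]
        match_region &= (channel >= min_val) & (channel <= max_val)
    return match_region if match_region.sum() != 0 else None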
Example No. 25
    def setUpClass(cls):
        client.connect()
        if not client.isconnected():
            raise Exception('Can not connect to a running game instance')
Example No. 26
def main():
    loc = None
    rot = None

    fig, ax = plt.subplots()
    img = np.zeros((480, 640, 4))
    ax.imshow(img)

    def onpress(event):
        rot_offset = 10  # Rotate 10 degrees for each key press
        loc_offset = 10  # Move 10 units for each key press

        if event.key == 'a':
            rot[1] -= rot_offset
        if event.key == 'd':
            rot[1] += rot_offset
        if event.key == 'q':
            loc[2] += loc_offset  # Move up
        if event.key == 'e':
            loc[2] -= loc_offset  # Move down

        if event.key == 'w':
            loc[1] -= loc_offset
        if event.key == 's':
            loc[1] += loc_offset
        if event.key == 'up':
            loc[1] -= loc_offset
        if event.key == 'down':
            loc[1] += loc_offset
        if event.key == 'left':
            loc[0] -= loc_offset
        if event.key == 'right':
            loc[0] += loc_offset

        cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        client.request(cmd)
        cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)

        res = client.request('vget /camera/0/lit png')
        img = read_png(res)

        # print(event.key)
        # print('Requested image %s' % str(img.shape))

        ax.imshow(img)
        fig.canvas.draw()

    client.connect()
    if not client.isconnected():
        print(
            'UnrealCV server is not running. Run the game from http://unrealcv.github.io first.'
        )
        return
    else:
        print(help_message)

    init_loc = [
        float(v) for v in client.request('vget /camera/0/location').split(' ')
    ]
    init_rot = [
        float(v) for v in client.request('vget /camera/0/rotation').split(' ')
    ]

    loc = init_loc
    rot = init_rot

    fig.canvas.mpl_connect('key_press_event', onpress)
    plt.title(
        'Keep this window in focus, it will be used to receive key press event'
    )
    plt.axis('off')
    plt.show()  # Add event handler
Example No. 27
# Get object information and divide objects into lists or coloring
scene_objects = client.request('vget /objects').split(' ')
floor_objects = [i for i in scene_objects if ('Floor' in i) or ('Path' in i)]
line_objects = [i for i in scene_objects if 'Line' in i]

# print("Assigning colors...")
# for obj_id in scene_objects:
#     client.request('vset /object/%s/color %s' % (obj_id, OBSTACLE_COLOR))
# for obj_id in floor_objects:
#     client.request('vset /object/%s/color %s' % (obj_id, FLOOR_COLOR))
# for obj_id in line_objects:
#     client.request('vset /object/%s/color %s' % (obj_id, LINE_COLOR))
# print("Colors assigned...")

run = True
while client.isconnected() & run:

    # for coord in camera_trajectory:
    for count, coord in enumerate(camera_trajectory[::8]):

        rot = coord['rotation']
        loc = coord['location']

        client.request('vset /camera/1/location %f %f %f' %
                       (loc[0], loc[1], loc[2]))
        client.request('vset /camera/1/rotation %f %f %f' %
                       (rot[0], rot[1], rot[2]))
        # Workaround: also move camera 0 so that the LODs load
        client.request('vset /camera/0/location %f %f %f' %
                       (loc[0], loc[1], loc[2] - 190))
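
camera_trajectory is not defined in this snippet; since each entry exposes 'rotation' and 'location' lists, it is presumably loaded from a JSON trajectory file such as the one recorded by the trajectory recorder in Example No. 22. A hedged sketch of that loading step (the filename is an assumption):

import json

# Trajectory stored as a list of {'rotation': [...], 'location': [...]} dicts
with open('camera-trajectory.json') as f:
    camera_trajectory = json.load(f)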
Example No. 28
        depth_dir = os.path.join(save_dir, 'depths')
        os.makedirs(depth_dir)
        if args.vis_depth:
            depth_vis_dir = os.path.join(depth_dir, 'vis')
            os.makedirs(depth_vis_dir)

    print(Fore.YELLOW + '- colmap poses: {}'.format(colmap_pose_fn))
    print(Fore.YELLOW + '- ue poses: {}'.format(ue_pose_fn))
    print(Fore.YELLOW + '- images: {}'.format(img_dir))
    print(Fore.YELLOW + '- intrinsics: {}'.format(cam_fn))

    cam_intri = uu.readCameraIntri(args.unrealcv_ini)
    print("Read camera intrinsics: {}".format(cam_intri))

    client.connect()
    assert client.isconnected()
    st = uu.getUnrealcvStatus(client)
    print(Fore.GREEN + st)

    times, poses_ue = uu.readUnrealPoses(args.ue_pose_txt)
    print('Read {} Unreal poses.'.format(len(poses_ue)))

    print(Fore.RED + "Step 1: check intri and calculate focal")
    st = uu.getUnrealcvStatus(client)
    print(Fore.GREEN + "Intrinsics from unrealcv command" + st)
    print(Fore.GREEN +
          "Intrisincs from the configuration file {}".format(cam_intri))
    focal = uu.focalLength(cam_intri['width'], cam_intri['horizontal_fov'])
    print('- The focal length is {}px.'.format(focal))

    img_names = []
Example No. 29
from unrealcv import client
client.connect() # Connect to the game
if not client.isconnected(): # Check if the connection is successfully established
    print('UnrealCV server is not running. Run the game from http://unrealcv.github.io first.')
else:
    filename = client.request('vget /camera/0/lit')
    print('Image is saved to %s' % filename)
    for gt_type in ['normal', 'object_mask']:
        filename = client.request('vget /camera/0/%s' % gt_type)
        print('%s is saved to %s' % (gt_type, filename))
    filename = client.request('vget /camera/0/depth depth.exr')
    print('depth is saved to %s' % filename)
    # Depth needs to be saved to HDR image to ensure numerical accuracy