    def parsing(self):

        with open(self.sampleFilePath, 'r') as f:

            frameCount = f.readline().split(' ')
            frameCount = int(frameCount[0])

            for frameIte in range(frameCount):

                currentFrame = Frame(frameIte)
                bodyCount = f.readline().split(' ')
                bodyCount = int(bodyCount[0])

                for _ in range(bodyCount):

                    metaData = f.readline().strip().split(' ')
                    jointCount = f.readline().strip().split(' ')
                    jointCount = int(jointCount[0])

                    currentSkeleton = Skeleton(jointCount, metaData)

                    for jointIte in range(jointCount):
                        jointInfoLine = f.readline()
                        jointInfo = np.array(jointInfoLine.strip().split(' '),
                                             dtype='float')

                        currentSkeleton.appendJoint(jointIte,
                                                    jointVec=jointInfo)
                        #jointList.append(Joint(jointInfo))

                    currentFrame.appendSkeleton(currentSkeleton)

                self.frameList.append(currentFrame)
                self.frameNum += 1
Example #3
def frame_to_skel(frame, _id):
    frame_num = [val[4] for val in frame.values()][0]
    skel = Skeleton(_id, frame_num)
    for key, val in frame.items():
        skel.addJoint(Joint(key, Position(
            val[CSVColumn['X_coord']], val[CSVColumn['Y_coord']]), 
            ConfidenceState[val[CSVColumn['JointConfidence']]]))
    return skel
Example #4
    def __init__(self, *args, **KWs):

        Skeleton.__init__(self, *args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)
Example #6
def build_reference_skeleton(pos_file):
    with open(pos_file, 'r') as f:
        reader = csv.reader(f)
        skel = Skeleton()
        for row in reader:
            skel.addJoint(Joint(row[CSVColumn['JointType']], Position(
                row[CSVColumn['X_coord']], row[CSVColumn['Y_coord']]), 
                ConfidenceState[row[CSVColumn['JointConfidence']]]))
        return skel
def skeleton_motion_data_collect(skeleton_file_list):
    train_data = []
    total_frame = 0
    fc_list = []  # frame count list
    index = 0
    while index < len(skeleton_file_list):
        # -----------------------
        # -----[ Data Load ]-----
        # -----------------------
        print('list number: ', index)
        print('skeleton: ', skeleton_file_list[index])

        file = open(skeleton_file_list[index])

        frameCount = np.fromstring(file.readline(), sep=' ', dtype='int32')[0]
        skel = Skeleton()
        for f in range(frameCount):  # frameCount
            bodyCount = np.fromstring(file.readline(), sep=' ',
                                      dtype='int32')[0]

            for b in range(bodyCount):
                body = Body_struct()  # one fresh body struct per tracked body
                bodyInfo = np.fromstring(file.readline(),
                                         sep=' ',
                                         dtype='float64')
                jointCount = np.fromstring(file.readline(),
                                           sep=' ',
                                           dtype='int32')[0]
                body.set_body_info(bodyInfo, jointCount, f * 1.0 / frameCount)

                for j in range(jointCount):
                    jointInfo = np.fromstring(file.readline(),
                                              sep=' ',
                                              dtype='float64')
                    joint = Joint_struct(jointInfo)
                    body.joints.append(joint)
                skel.append_body(f, b, body)
            # print('phase: ', body.phase)
        train_data.append(skel)
        file.close()
        # print(skel.print_skeleton_info())
        index += 1
        total_frame += len(skel.iBody)
        fc_list.append(len(skel.iBody))
    print('total number of frames: ', total_frame)
    print('train data len: ', len(train_data))

    return train_data, total_frame, fc_list
    def get_meta(self):
        s1 = Skeleton.get_meta(self)
        s2 = self.__parser.get('meta', '')
        if s1 and s2:
            return s1 + "\n" + s2
        else:
            return s1 or s2
    def setUp(self):
        im = np.eye(40, 40, 0)
        for k in range(1, 10):
            im += np.eye(40, 40, k)
        im = -100. * im
        im = 100. + im
        self.SkeletonInst = Skeleton(im)

    def setUp(self):
        im = np.zeros((10, 10))
        for k in range(4, 6):
            im += np.eye(10, 10, k)
        imT = np.transpose(im)
        im = im + imT
        im = -100. * im
        im = 100. + im
        self.SkeletonInst = Skeleton(im)
Example #13
def img_preprocessing_op(image_op):
    """
    Creates preprocessing operations that are going to be applied on a single frame.

    You can do any preprocessing (masking, normalization/scaling of inputs, augmentation, etc.) by using tensorflow
    operations. Some examples are provided as comments in the code, and a standalone sketch of these
    operations follows this function. More built-in image operations are listed at
    https://www.tensorflow.org/api_docs/python/tf/image .

    :param image_op:
    :return:
    """
    with tf.name_scope("img_preprocessing"):
        # Convert from RGB to greyscale.
        # image_op = tf.image.rgb_to_grayscale(image_op)

        # Crop
        #image_op = tf.image.resize_image_with_crop_or_pad(image_op, 60, 60)

        # Resize operation requires 4D tensors (i.e., batch of images).
        # Reshape the image so that it looks like a batch of one sample: [1,60,60,1]
        #image_op = tf.expand_dims(image_op, 0)
        # Resize
        #image_op = tf.image.resize_bilinear(image_op, np.asarray([32,32]))
        # Reshape the image: [32,32,1]
        #image_op = tf.squeeze(image_op, 0)

        # Normalize (zero-mean unit-variance) the image locally, i.e., by using statistics of the
        # image not the whole data or sequence.
        # image_op = tf.image.per_image_standardization(image_op)

        # Flatten image
        # y.set_shape(image_op.get_shape())
        # image_op_ = tf.TensorArray(
        #     dtype=tf.float32, size=0, dynamic_size=True)

        image_op_ = np.ones(shape=[image_op.shape[0], 80, 80, 3],
                            dtype=np.float32)

        for i in range(image_op.shape[0]):
            skeleton = Skeleton(image_op[i])
            skeleton.resizePixelCoordinates()
            image_op_[i] = skeleton.toImage(80, 80)
        # return the rendered skeleton images, pixel values in [0, 255]
        return image_op_.astype(np.float32)
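
The commented-out operations in the function above can be chained into a working TF 1.x pipeline. The following is a minimal sketch, not part of the original code, assuming a single [height, width, 3] frame tensor and the crop/resize sizes mentioned in the comments:

import numpy as np
import tensorflow as tf  # TF 1.x, as in the surrounding examples


def tf_img_preprocessing_sketch(image_op):
    """Sketch of the commented-out tf.image pipeline; sizes and the function name are assumptions."""
    with tf.name_scope("img_preprocessing_sketch"):
        image_op = tf.image.rgb_to_grayscale(image_op)                        # RGB -> greyscale
        image_op = tf.image.resize_image_with_crop_or_pad(image_op, 60, 60)   # centre crop/pad to 60x60
        image_op = tf.expand_dims(image_op, 0)                                # batch of one for resize
        image_op = tf.image.resize_bilinear(image_op, np.asarray([32, 32]))   # resize to 32x32
        image_op = tf.squeeze(image_op, 0)                                    # back to [32, 32, 1]
        image_op = tf.image.per_image_standardization(image_op)               # zero-mean, unit-variance
        return image_op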
    def get_style(self):
        s1 = Skeleton.get_style(self)
        s2 = self.__parser.get('local-css')
        if s1:
            if s2:
                return s1 + "\n" + s2
            else:
                return s1
        else:
            return s2
Example #16
def build_time_series(joints_file):
    with open(joints_file, 'r') as f:
        reader = csv.reader(f)
        bodies = {}
        prev_time = -1
        prev_id = -1
        skel = None
        for row in reader:
            body_id = row[CSVColumn['Skeleton_Id']]
            time = row[CSVColumn['Time']]
            frame = row[CSVColumn['Frame']]
            if time != prev_time or body_id != prev_id:
                skel = Skeleton(body_id, frame)
            skel.addJoint(Joint(row[CSVColumn['JointType']], Position(
                    row[CSVColumn['X_coord']], row[CSVColumn['Y_coord']]), 
                    ConfidenceState[row[CSVColumn['JointConfidence']]]))
            if body_id not in bodies:
                bodies[body_id] = BodyTimeSeries(body_id)
            bodies[body_id].addTimeInstance(time, skel)
            prev_id = body_id
            prev_time = time
        return list(bodies.values())
    def monsterGenerator(self):

        if self.Randomizer() <= 5:
            self.aliveMonsters.append(Troll())

        if self.Randomizer() <= 15:
            self.aliveMonsters.append(Skeleton())

        if self.Randomizer() <= 10:
            self.aliveMonsters.append(Orc())

        if self.Randomizer() <= 20:
            self.aliveMonsters.append(GiantSpider())
    def setUp(self):
        im = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                       [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                       [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                       [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                       [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                       [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                       [0, 1, 1, 0, 0, 0, 0, 1, 1, 0],
                       [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                       [0, 1, 1, 1, 1, 1, 1, 1, 1, 0],
                       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
        im = -100. * im
        im = 100. + im
        self.SkeletonInst = Skeleton(im)
Example #19
from Hero import Hero
from Skeleton import Skeleton
from Boss import Boss

# Assumed additional imports (not shown in the original excerpt):
from tkinter import Tk, Canvas
from Map import Map

IMG_SIZE = 52
WIDTH = 10 * IMG_SIZE
HEIGHT = (10 * IMG_SIZE) + IMG_SIZE

root = Tk()
root.title('Wanderer Game')
canvas = Canvas(root, width=WIDTH, height=HEIGHT)
canvas.pack()

hero = Hero()
game_map = Map()
skeleton1 = Skeleton(6, 1, True)
skeleton2 = Skeleton(2, 6)
skeleton3 = Skeleton(8, 1)
boss = Boss(9, 1)
enemies = [skeleton1, skeleton2, skeleton3, boss]


def draw_screen():
    canvas.delete("all")
    for i in range(len(game_map.tiles)):
        for j in range(len(game_map.tiles[i])):
            if game_map.tiles[i][j] == 0:
                image = root.floor
            else:
                image = root.wall
            canvas.create_image(j * IMG_SIZE, i * IMG_SIZE,
                                anchor='nw', image=image)  # assumed completion of the truncated call
class SkeletonWidget(QtWidgets.QWidget):
    def __init__(self, parent=None):
        list_port = ['/processing/xsens/JointAngles:o', '/AE/reconstruct:o']

        self.input_port = []
        self.port = []

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111, projection='3d')

        data = np.zeros((1, 66))

        color = ['b', 'r']
        self.lines = []
        self.skeleton = []

        self.skeleton = Skeleton('dhm66_ISB_Xsens.urdf')

        for i, name_port in enumerate(list_port):
            self.input_port.append(name_port)
            self.port.append(yarp.BufferedPortBottle())
            self.port[i].open('/plot_skeleton' + name_port)

            yarp.Network.connect(name_port, self.port[i].getName())

        QtWidgets.QWidget.__init__(self, parent)  # Inherit from QWidget

        self.fig = Figure()

        self.pltCanv = Canvas(self.fig)
        self.pltCanv.setSizePolicy(QSizePolicy.Expanding,
                                   QSizePolicy.Expanding)
        self.pltCanv.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.pltCanv.setFocus()
        self.pltCanv.updateGeometry()
        self.ax = self.fig.add_subplot(111, projection='3d')

        self.ax.mouse_init()

        #=============================================
        # Main plot widget layout
        #=============================================
        self.layVMainMpl = QVBoxLayout()
        self.layVMainMpl.addWidget(self.pltCanv)
        self.setLayout(self.layVMainMpl)

        self.lines = []
        self.skeleton = []

        self.skeleton = Skeleton('dhm66_ISB_Xsens.urdf')

        self.timer = self.pltCanv.new_timer(100,
                                            [(self.update_canvas, (), {})])
        self.timer.start()

    def update_canvas(self):
        color = ['b', 'r']

        data_joint = []
        for i, port in enumerate(self.port):
            b_in = self.port[i].read()
            data = b_in.toString().split(' ')

            if len(data) == 67:
                del data[0]

            data = list(map(float, data))
            data = np.asarray(data)

            data_joint.append(np.deg2rad(data))

        self.ax.clear()
        self.lines = self.skeleton.visualise_from_joints(self.ax,
                                                         data_joint,
                                                         color_list=color,
                                                         lines=[])
        self.pltCanv.figure.canvas.draw()
    def run(self):
        # -----[ For socket Communication, Server ] -----
        if socket_comm:
            conn = SocketCom('localhost', tcp_port)
            print('Port is Opened and Wait for the connection..')

            if retarget_mode == 'Analytic':  # direct connect to the NAO agent
                nao = NAO_AMR()
                conn.socket_connect()
                conn.write_socket({
                    'header': 'start Kinect!',
                    'data': []
                })  # start signal
            else:  # indirect connect to the NAO by C-3PO model
                demo = MR_Demo(target_robot=target_robot,
                               target_env=target_env,
                               tcp_port=tcp_port)
                if target_env == 'CHREO': conn.socket_connect()

        # -------- Main Program Loop -----------
        demo_body_idx = -1  # track only the first detected human
        socket_count = 0
        while not self._done:
            # --- Main event loop
            for event in pygame.event.get():  # User did something
                if event.type == pygame.QUIT:  # If user clicked close
                    self._done = True  # Flag that we are done so we exit this loop

                elif event.type == pygame.VIDEORESIZE:  # window resized
                    self._screen = pygame.display.set_mode(
                        event.dict['size'],
                        pygame.HWSURFACE | pygame.DOUBLEBUF | pygame.RESIZABLE,
                        32)

            # --- Game logic should go here

            # --- Getting frames and drawing
            # --- Woohoo! We've got a color frame! Let's fill out back buffer surface with frame's data
            if self._kinect.has_new_color_frame():
                frame = self._kinect.get_last_color_frame()
                self.draw_color_frame(frame, self._frame_surface)
                frame = None

            # --- Cool! We have a body frame, so can get skeletons
            if self._kinect.has_new_body_frame():
                self._bodies = self._kinect.get_last_body_frame()

            # --- draw skeletons to _frame_surface
            if self._bodies is not None:
                # Get the index of the first body only
                if demo_body_idx < 0:  # no body index
                    # Get new body index
                    for i in range(0, self._kinect.max_body_count):
                        body = self._bodies.bodies[i]
                        if body.is_tracked:
                            demo_body_idx = i
                            break
                else:  # already have a body index
                    body = self._bodies.bodies[demo_body_idx]
                    if not body.is_tracked:
                        demo_body_idx = -1

                if demo_body_idx >= 0:
                    # print('Demo body index: ', demo_body_idx)
                    body = self._bodies.bodies[demo_body_idx]
                    joints = body.joints
                    joint_points = self._kinect.body_joints_to_color_space(
                        joints)

                    # get a skeleton frame from Kinect
                    skel = Skeleton()
                    body_struct = Body_struct()
                    bodyInfo = np.array([
                        demo_body_idx, 0, 0, 0, 0, 0, 0, 0, 0, body.is_tracked
                    ])
                    jointCount = PyKinectV2.JointType_Count
                    body_struct.set_body_info(bodyInfo, jointCount, phase=0)

                    for j in range(jointCount):  # 25 body joints
                        jointInfo = np.array([
                            joints[j].Position.x, joints[j].Position.y,
                            joints[j].Position.z, 0, 0, 0, 0, 0, 0, 0, 0,
                            body.is_tracked
                        ])
                        joint = Joint_struct(jointInfo)
                        body_struct.joints.append(joint)
                    skel.append_body(0, demo_body_idx, body_struct)

                    if socket_comm:
                        socket_count += 1
                        if socket_count >= 1:  # use 6 here to compensate for socket delay
                            socket_count = 0
                            _from = _to = 'None'
                            skel_data = skeleton_coordinate_transform(
                                [skel], 1)  # numpy data

                            if retarget_mode == 'Analytic':
                                data = skel_data[0].tolist()[:-1]
                                lMotors = nao.left_arm_solve(data)
                                rMotors = nao.right_arm_solve(data)
                                motors = lMotors + rMotors
                                conn.write_socket({
                                    'header': 'Analytic',
                                    'data': motors,
                                    'from': _from,
                                    'to': _to
                                })
                            else:
                                if target_env == 'CHREO':
                                    retargeted_skel = demo.do_retargeting(
                                        skel_data[0].tolist()[:-1])
                                    conn.write_socket({
                                        'header': 'SetMotor',
                                        'data': retargeted_skel,
                                        'from': _from,
                                        'to': _to
                                    })
                                elif target_env == 'VREP':
                                    ret = demo.do_retargeting_vrep(
                                        skel_data[0].tolist()[:-1])
                        conn.flush()
                        print('Send Success')

                    self.draw_body(joints, joint_points,
                                   SKELETON_COLORS[demo_body_idx])

            # --- copy back buffer surface pixels to the screen, resize it if needed and keep aspect ratio
            # --- (screen size may be different from Kinect's color frame size)
            h_to_w = (float(self._frame_surface.get_height()) /
                      self._frame_surface.get_width())  # (w: 1920, h: 1080)
            target_height = int(h_to_w * self._screen.get_width())
            surface_to_draw = pygame.transform.scale(
                self._frame_surface, (self._screen.get_width(), target_height))
            self._screen.blit(surface_to_draw, (0, 0))
            surface_to_draw = None
            pygame.display.update()

            # --- Go ahead and update the screen with what we've drawn.
            pygame.display.flip()

            # --- Limit to 60 frames per second
            self._clock.tick(60)

        # Close our Kinect sensor, close the window and quit.
        self._kinect.close()
        pygame.quit()
Example #22
# -----[ Main Loop ]-----
while index < len(video_file_list):
    # -----------------------
    # -----[ Data Load ]-----
    # -----------------------
    print('list number: ', index, ' progress: {0:0.2f}'.format((index/len(video_file_list) * 100.0)),
          ' in ', len(video_file_list))
    print('video: ', video_file_list[index])
    print('skeleton: ', skeleton_file_list[index])

    cap = cv2.VideoCapture(video_file_list[index])
    file = open(skeleton_file_list[index])

    frameCount = np.fromstring(file.readline(), sep=' ', dtype='int32')[0]
    skel = Skeleton()
    for f in range(frameCount):  # frameCount
        bodyCount = np.fromstring(file.readline(), sep=' ', dtype='int32')[0]

        for b in range(bodyCount):
            body = Body_struct()
            bodyInfo = np.fromstring(file.readline(), sep=' ', dtype='float64')
            jointCount = np.fromstring(file.readline(), sep=' ', dtype='int32')[0]
            body.set_body_info(bodyInfo, jointCount, f*1.0/frameCount)

            for j in range(jointCount):
                jointInfo = np.fromstring(file.readline(), sep=' ', dtype='float64')
                joint = Joint_struct(jointInfo)
                body.joints.append(joint)
            try:
                skel.append_body(f, b, body)
            except Exception:
                pass  # assumed handling; the original excerpt is truncated inside this try block
Example #23
    def train_model(self, nbr_epoch=100, list_metric=['jointAngle']):
        loss_score = []

        skeleton = Skeleton('dhm66_ISB_Xsens.urdf')

        b_x = self.train_loader.view(-1, self.input_dim)
        b_y = self.data_loss.view(-1, self.dim_loss)

        input_data = np.copy(b_y.detach().numpy())
        input_data = tools.denormalization(input_data, self.loss_data_min,
                                           self.loss_data_max)

        list_loss = []
        for epoch in range(self.nbr_epoch):

            encoded, decoded = self.autoencoder(b_x)

            # if 'ergo_score' in self.list_metric:
            # 	data_eval = self.prepare_data('ergo_score', decoded)

            loss = self.loss_func(decoded, b_y)  # mean square error

            self.optimizer.zero_grad()  # clear gradients for this training step
            loss.backward()  # backpropagation, compute gradients
            self.optimizer.step()  # apply gradients

            output_data = np.copy(decoded.detach().numpy())

            if self.output_type == 'ergo':
                output_data = tools.denormalization(output_data,
                                                    self.loss_data_min,
                                                    self.loss_data_max)

            elif self.output_type == 'ergo_posture':
                output_data = tools.denormalization(output_data,
                                                    self.loss_data_min,
                                                    self.loss_data_max)

                score1 = self.evaluate_model(input_data[0:66],
                                             output_data[0:66], 'jointAngle')
                score2 = self.evaluate_model(input_data[66:], output_data[66:],
                                             '')

            else:
                output_data = tools.denormalization(output_data,
                                                    self.loss_data_min,
                                                    self.loss_data_max)

            score = self.evaluate_model(input_data, output_data,
                                        list_metric[0])

            # loss_score.append(np.sqrt(np.square(input_data - output_data).mean()))
            loss_score.append(score)

            list_loss.append(score)
            if epoch % 100 == 0:
                print('Epoch: ', epoch, '| train loss:', loss.data.numpy(),
                      score)

            if len(list_loss) > 500:
                del list_loss[0]
                if np.std(list_loss) < self.stop_criterion:
                    return loss_score

        return loss_score
Example #24
    def gameLoop(self):

        #Needed to increment the displacement of the grid
        i = 0

        #Needed to check if the grid is moving upwards or downwards
        state = "DECREASING"

        clock = pygame.time.Clock()

        running = True

        game = Skeleton()
        background_sound.play(-1)

        while running:

            #Draws the screen
            self.updateScreen(self.inactive_robot1, self.inactive_robot2, i)

            #Checks whether the screen is moving downwards
            if i == 0 or state == "DECREASING":
                i += 1
                #Checks to see if the grid reached the maximum downward displacement
                if i == const.FPS:
                    #Makes the grid move upwards
                    state = "INCREASING"
            else:
                i -= 1
                if i == 0:
                    state = "DECREASING"

            #Loops through all the events of the game
            for event in pygame.event.get():
                #Checks whether the user exited the window
                if event.type == pygame.QUIT:
                    running = False

                if self.game_over:
                    if event.type == pygame.MOUSEBUTTONDOWN:
                        pos = pygame.mouse.get_pos()
                        running = self.newGame(pos, game)
                else:
                    #Checks whether the user clicked anywhere on the screen
                    if event.type == pygame.MOUSEBUTTONDOWN:
                        #The position of the mouse click is stored
                        pos = pygame.mouse.get_pos()
                        #Checks whether both players made their first move
                        if (self.player1_first_move[-1]
                                and self.player2_first_move[-1]):
                            self.handle_click(pos, game)
                        else:
                            #Checks whether player 1 has made his first move
                            if not self.player1_first_move[-1]:
                                #waits for player 1 to activate his box in order to start playing the game
                                self.activateNameBox(
                                    pos, boxPlayer1, const.BOX_PLAYER1_X,
                                    const.BOX_PLAYER_Y, self.box1_active,
                                    self.inactive_robot1,
                                    self.player1_first_move, 1, game)
                            else:
                                self.activateNameBox(
                                    pos, boxPlayer2, const.BOX_PLAYER2_X,
                                    const.BOX_PLAYER_Y, self.box2_active,
                                    self.inactive_robot2,
                                    self.player2_first_move, 2, game)

                    #Checks whether player 1 currently clicked on his box
                    if self.box1_active[-1]:
                        #Updating of player's 1 name
                        self.updateName(event, self.player1Name, game)
                    if self.box2_active[-1]:
                        self.updateName(event, self.player2Name, game)

            self.gameOver(game)

            pygame.display.update()

            #Restricts the game to run at a specified number of FPS
            clock.tick(const.FPS)
def main():
    init = Initialize()
    skely = Skeleton()
    skely.body(init.server)
def main(unused_argv):
    # Create a list of TFRecord input files.
    filenames = [
        os.path.join(config['data_dir'], config['file_format'] % i)
        for i in config['file_ids']
    ]
    # Create data loading operators. This will be represented as a node in the computational graph.
    batch_rgb_op, batch_skeleton_op, batch_labels_op, batch_seq_len_op = input_pipeline(
        filenames, config)
    # TODO: your model can take batch_rgb, batch_labels and batch_seq_len ops as an input.

    # Create tensorflow session and initialize the variables (if any).
    sess = tf.Session()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    sess.run(init_op)
    # Create threads to prefetch the data.
    # https://www.tensorflow.org/programmers_guide/reading_data#creating_threads_to_prefetch_using_queuerunner_objects
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    """
    # Training Loop
    # The input pipeline creates input batches for config['num_epochs'] epochs,
    # You can iterate over the training data by using coord.should_stop() signal.
    try:
        while not coord.should_stop():
            # TODO: Model training

    except tf.errors.OutOfRangeError:
        print('Done training for %d epochs, %d steps.' % (FLAGS.num_epochs, step))
    finally:
        # When done, ask the threads to stop.
        coord.request_stop()

    # Wait for threads to finish.
    coord.join(threads)
    # Close session
    sess.close()
    """

    # Instead of running training, fetch a sample and save one frame.
    # Fetch a batch of samples.
    batch_rgb, batch_skeleton, batch_labels, batch_seq_len = sess.run(
        [batch_rgb_op, batch_skeleton_op, batch_labels_op, batch_seq_len_op])

    # Print
    print("# Samples: " + str(len(batch_rgb)))
    print("Sequence lengths: " + str(batch_seq_len))
    print("Sequence labels: " + str(batch_labels))

    # Note that the second dimension will give maximum-length in the batch, i.e., the padded sequence length.
    print("Sequence type: " + str(type(batch_rgb)))
    print("Sequence shape: " + str(batch_rgb.shape))

    # Fetch the first clip's 10th frame.
    img = batch_rgb[0][9]
    print("Image shape: " + str(img.shape))

    # Create a skeleton object.
    skeleton = Skeleton(batch_skeleton[0][10])
    # Resize the pixel coordinates.
    skeleton.resizePixelCoordinates()
    # Draw skeleton image.
    skeleton_img = skeleton.toImage(img.shape[0], img.shape[1])

    imsave(config['model_dir'] + 'rgb_test_img.png', img)
    imsave(config['model_dir'] + 'skeleton_test_img.png', skeleton_img)
Example #27
    # # Display legend
    # ax.legend(loc = 'best')


if __name__ == '__main__':
    list_metric = ['jointAngle']

    flag_ergo = True

    size_list = [2, 3, 5, 7, 10]
    # size_list = [10]
    loss = [[]]
    autoencoder = []

    # id_model = 2
    skeleton = Skeleton('dhm66_ISB_Xsens.urdf')

    all_score = []
    all_size = []
    type_data = []
    df_score = pd.DataFrame()

    for metric in list_metric:
        print(metric)
        for k, size in enumerate(size_list):
            print(size)
            path = "save/AE/" + metric + "/" + str(size) + '/'
            list_files = [
                f for f in os.listdir(path)
                if os.path.isfile(os.path.join(path, f))
            ]
class RealTimePlotModule():
    """
	This module plots a bar chart with the probability distribution on the states.
	Usage
	python plot_probabilities.py
	Input port: /processing/NamePort:o
	"""
    def __init__(self, list_port):
        self.input_port = []
        self.port = []

        plt.ion()

        self.fig = plt.figure()
        self.ax = self.fig.add_subplot(111, projection='3d')

        data = np.zeros((2, 66))

        color = ['b', 'r']
        self.lines = []
        self.skeleton = []

        self.skeleton = Skeleton('dhm66_ISB_Xsens.urdf')

        for i, name_port in enumerate(list_port):
            self.input_port.append(name_port)
            self.port.append(yarp.BufferedPortBottle())
            self.port[i].open('/plot_skeleton' + name_port)

            yarp.Network.connect(name_port, self.port[i].getName())

        self.lines = self.skeleton.visualise_from_joints(data,
                                                         color_list=color,
                                                         lines=[])
        # self.skeleton[i].visualise_from_joints(data, color=color[i])

    def update_plot(self):
        color = ['b', 'r']
        data_joint = []
        for i, port in enumerate(self.port):
            b_in = self.port[i].read()
            data = b_in.toString().split(' ')

            if len(data) == 67:
                del data[0]

            data = list(map(float, data))
            data = np.asarray(data)

            self.ax.cla()
            plt.sca(self.ax)
            data_joint.append(np.deg2rad(data))

            # self.lines[i] = self.skeleton[i].visualise_from_joints(data, color=color[i], lines=self.lines[i])
        self.skeleton.visualise_from_joints(data_joint,
                                            color_list=color,
                                            lines=self.lines)

        self.fig.canvas.draw()

        return

    def close(self):
        for i_port, port in zip(self.input_port, self.port):
            yarp.Network.disconnect(i_port, port.getName())
            port.close()
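
A minimal usage sketch for RealTimePlotModule, assuming a YARP name server is running and the listed output ports exist; the loop and port names below are illustrative, not from the original project:

import yarp

if __name__ == '__main__':
    yarp.Network.init()
    plotter = RealTimePlotModule(['/processing/xsens/JointAngles:o',
                                  '/AE/reconstruct:o'])
    try:
        while True:
            plotter.update_plot()  # blocks on port.read() until new joint angles arrive
    except KeyboardInterrupt:
        plotter.close()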
Example #29
from Skeleton import Skeleton

if __name__ == '__main__':

    Skeleton.run()
Example #30
    local_path = os.path.abspath(os.path.dirname(__file__))

    # Parameters configuration
    config = configparser.ConfigParser()
    config.read('config/' + config_file)

    path = config[config_type]["path_data"]

    tracks = ['details']

    list_features, data_np, real_labels, timestamps, list_states = tools.load_data(
        path, tracks, 'jointAngle_')
    list_states = list_states[0]

    timestamp_all = []
    time_init = 0
    labels_all = []

    # Compute ergo score
    posture = Skeleton('dhm66_ISB_Xsens.urdf')
    # posture.update_posture(data_np[0][5000])

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # posture.visualise_from_joints()

    posture.animate_skeleton([data_np[0][0::50]])
    # tools.animate_skeleton()

    plt.show()
	def __init__(self):
		self.app = pg.mkQApp()
		pg.setConfigOption('background', 'w')
		pg.setConfigOption('foreground', 'k')

		self.view = pg.PlotWidget()
		self.view.resize(800, 600)
		self.view.setWindowTitle('Ergonomic score in latent space')
		self.view.setAspectLocked(True)
		self.view.show()

		self.port = yarp.BufferedPortBottle()
		self.port.open('/plot_latentspace')

		metric = 'jointAngle'
		ergo_name = ['TABLE_REBA_C']

		size_latent = 2
		dx = 0.1

		loss = [[]]
		autoencoder = []

		all_score = []
		all_size = []
		type_data = []
		path_src = "/home/amalaise/Documents/These/code/ergo_prediction/ergonomic_assessment/src/"
		path = path_src + "save/AE/" + metric + "/" + str(size_latent) + '/'
		list_files = [f for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
		list_files.sort()
		file = list_files[0]

		with open(path + file, 'rb') as input:
			autoencoder = pickle.load(input)

		input_data = autoencoder.get_data_test()
		data_output, encoded_data, score = autoencoder.test_model(input_data)
		score = autoencoder.evaluate_model(input_data, data_output, metric)
		
		Max = np.max(encoded_data, axis = 0)
		Min = np.min(encoded_data, axis = 0)
		Mean = np.mean(encoded_data, axis = 0)

		# Compute ergo score
		ergo_assessment = ErgoAssessment(path_src + 'config/rula_config.json')
		list_ergo_score = ergo_assessment.get_list_score_name()
		list_ergo_score.sort()

		reduce_posture = HumanPosture(path_src + 'config/mapping_joints.json')
		posture = Skeleton('dhm66_ISB_Xsens.urdf')

		self.X = np.arange(0.0, 1.0+dx, dx)

		self.ergo_grid = np.zeros((len(self.X), len(self.X)))

		for i, data_x in enumerate(self.X):
			for j, data_y in enumerate(self.X):

				x = np.zeros((1,size_latent))
				x[0, 0] = data_x
				x[0, 1] = data_y

				decoded_data = autoencoder.decode_data(x)
				if metric == 'posture':
					whole_body = reduce_posture.reduce2complete(decoded_data[0])
					posture.update_posture(whole_body)
				else:
					posture.update_posture(decoded_data[0])

				ergo_score = tools.compute_sequence_ergo(decoded_data[0], 0, ergo_name, path_src)[0]
				if ergo_score == 1:
					ergo_score = 1
				elif 1 < ergo_score < 5:
					ergo_score = 2
				elif 4 < ergo_score < 6:
					ergo_score = 3
				else:
					ergo_score = 4

				self.ergo_grid[j,i] = ergo_score

		self.flag = 0

		self.plot_latent_space()
Example #33
	def create_skeleton(self, sName, recvData=False, tcp_ip = None, tcp_port = None, num_floats = None):
		new_skel = Skeleton(sName, recvData, tcp_ip, tcp_port, num_floats)
		self.skeleton = new_skel
		return new_skel
Example #34
    cnt = 0
    for batch in data_iterator:
        b_size = batch["rgb"].shape[0]
        num_samples += b_size

        for i in range(b_size):

            length = batch['length'][i]
            tmp = []

            print(length)

            for l in range(length):
                img_rgb = batch['rgb'][i][l]
                skeleton = Skeleton(batch['skeleton'][i][l])
                skeleton.resizePixelCoordinates()
                tmp.append(skeleton.toImage(img_rgb.shape[0], img_rgb.shape[1]))
        
            skeleton_img = np.array(tmp).astype(np.float32)

            print(skeleton_img.shape)
            cnt += 1
            print(cnt)

            sample = {
                "rgb"          : np.array(batch['rgb'][i][:length][:,:,:,::-1]),
                "depth"        : np.array(batch['depth'][i][:length]),
                "segmentation" : np.array(batch['segmentation'][i][:length]),
                "skeleton"     : np.array(batch['skeleton'][i][:length]),
                "skeleton_img" : skeleton_img,
    def get_meta(self):
        skel = Skeleton.get_meta(self)
        extra = '\n<LINK REV="made" HREF="mailto:[email protected]">'
        return skel + extra