Code example #1
    def set_visuomotor_dataset(self):
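        # For every trial folder, pair camera frames with joint configurations
        # and velocities, derive a binary gripper open/close signal from the
        # gripper velocity channel, and write one TFRecord per demonstration.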
        for i, trial_fold in enumerate(
                get_folders(DIR, key=lambda name: int(name[5:]))):
            print(i)
            IMG = pathname(trial_fold, 'IMG')
            # scipy.misc.imsave('/data/barry/IMITATION/dataset/THROW/TEST/img_%d.png'%i, np.load(pathname(IMG, 'img_1472.npy'))[:,:,:3])
            img_files = get_files(IMG, key=lambda order: int(order[4:-4]))
            img_array = np.concatenate([[np.load(img_file)[:, :, :3]]
                                        for img_file in img_files[1:DATA_NUM +
                                                                  1]])
            joint_conf_array = np.load(
                pathname(trial_fold, 'joints_configurations.npy'))[0:DATA_NUM]
            velocities = np.load(pathname(trial_fold, 'velocities.npy'))
            velocities_array = velocities[1:DATA_NUM + 1]
            gripper_array = np.zeros(DATA_NUM)

            first_gripper = velocities_array[:, 6]
            closing, opening = np.min(np.where(first_gripper > 0.9)), np.min(
                np.where(first_gripper < -0.9))
            gripper_array[closing:opening] = 1.0

            assert velocities_array.shape[0] == joint_conf_array.shape[
                0] == img_array.shape[0] == gripper_array.shape[
                    0]  # == velocities_conf.shape[0]

            write_tf_records_db(pathname(DIR, '..', 'VISUOMOTOR_GRP',
                                         'demo_%d.tfrecord' % i),
                                img_array,
                                joint_conf_array,
                                velocities_array,
                                gripper_array=gripper_array,
                                step_array=np.array([
                                    'step_%d_%d' % (i, j)
                                    for j in range(DATA_NUM)
                                ]))
Code example #2
 def restore(model_path=None, filename='model.ckpt'):
     if model_path is None:
         model_path = model_dirname
     if is_file(pathname(model_path, 'checkpoint')):
         saver.restore(sess, pathname(model_path, filename))
         print('Layers weights restored')
         return True
     else:
         print('Layers weights not restored')
         return False
Code example #3
    def evaluate2(self):
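        # Roll out the trained network on a recorded demo: load the demo scene
        # into PyBullet, feed the recorded images and the current joint
        # configuration to the network, apply the predicted velocities, and
        # save the rendered camera frames under a new EVAL_* folder.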
        # folder = DATA_PATH + 'Trial_950'

        indice = np.random.randint(1000, 1020)
        folder = pathname(EVALUATION_PATH, 'demo_%d' % indice)
        scene_objects = 'scene_objects.npy'
        with Interface(mode='direct') as I:
            robot = I.load_model(ROBOT_URDF_PATH)
            table = I.load_model(TABLE,
                                 Pose(Point(0.4, 0.0, 0.05),
                                      Euler(1.57, 0.0, 0.0)),
                                 fixed_base=True,
                                 scaling=1.0)
            I.set_camera_pose(0.6, 155, -50, [0.2, 0.0, 0.5])
            scene = np.load(os.path.join(folder, scene_objects))[2:, 0]
            block = I.load_model(CUBE_URDF,
                                 Pose(Point(*scene[0])),
                                 fixed_base=False)
            basket = I.load_model(BASKET_URDF,
                                  Pose(Point(*scene[1])),
                                  fixed_base=False)

            img = np.concatenate([[
                np.load(pathname(folder, 'IMG', 'img_%d.npy' % i))[:, :, :3]
            ] for i in range(1, STEPS + 1)])
            joints = I.movable_a_joints + I.movable_g_joints

            eval_path = mkdir(RES_PATH, 'EVAL_%d' % (self.eval_indice + 1))
            img_path = mkdir(eval_path, 'IMG')
            with open(pathname(eval_path, 'eval.txt'), 'w') as f:
                f.write('Evaluation number: %d' % indice)

            for i in range(STEPS):
                joints_configurations = [[
                    state.jointPosition
                    for state in I.get_joint_state(robot, joints)
                ]]
                # rgb = self.sess.run(tf.image.per_image_standardization(img[i]))
                rgb = img[i]
                vel = self.sess.run(
                    self.vel, {
                        self.rgb: rgb.reshape(-1, WIDTH, HEIGHT, CHANNELS),
                        self.conf: joints_configurations
                    })
                # vel = np.concatenate([vel, [[0.0, 0.0, 0.0]]], axis=1)
                # print vel
                # import sys
                # sys.exit(0)
                # print vel[0][6]
                I.step_with_gripper(robot, vel[0][:6], vel[0][-1])

                img_array = I.get_camera_image(WIDTH, HEIGHT)[:, :, :3]
                scipy.misc.imsave(pathname(img_path, 'img_%d.png' % i),
                                  img_array)
        self.eval_indice += 1
Code example #4
def model(sess, train_dir, model_name=None, vars=None):
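    # Closure-based model helper: creates (or reuses) <train_dir>/<model_folder>
    # with 'model' and 'summary' subdirectories and attaches save/restore/
    # summary/folder functions as attributes of `model`.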
    if model_name is None:
        model_folder = datetime.datetime.now().strftime("%Y_%b_%d_%H_%M")
        model_dirname = mkdir(train_dir, model_folder, 'model')
        log_dirname = mkdir(train_dir, model_folder, 'summary')
    else:
        model_folder = pathname(train_dir, model_name, **dict(flag=1))
        model_dirname = pathname(model_folder, 'model', **dict(flag=1))
        log_dirname = pathname(model_folder, 'summary', **dict(flag=1))

    saver = tf.train.Saver(vars)

    def save(filename='model.ckpt'):
        saver.save(sess, pathname(model_dirname, filename))

    def restore(model_path=None, filename='model.ckpt'):
        if model_path is None:
            model_path = model_dirname
        if is_file(pathname(model_path, 'checkpoint')):
            saver.restore(sess, pathname(model_path, filename))
            print('Layers weights restored')
            return True
        else:
            print('Layers weights not restored')
            return False

    def summary(graph=None, **kwargs):
        for key, values in kwargs.items():
            tf.summary.scalar(key, values)
        writer = tf.summary.FileWriter(log_dirname, graph=graph)
        merged = tf.summary.merge_all()

        def run():
            return merged

        def add(summaries, step=None):
            writer.add_summary(summaries, global_step=step)

        summary.run = run
        summary.add = add

        return summary

    def get_folder_name():
        return pathname(train_dir, model_folder)

    model.save = save
    model.restore = restore
    model.summary = summary
    model.folder = get_folder_name

    return model
Code example #5
def write_tjr_dataset():
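    # Build one TFRecord per trial holding (previous, next) image and joint
    # configuration pairs plus the goal and initial frames and the commanded
    # velocities, presumably for training a transition-prediction model.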
    for i, trial_fold in enumerate(
            get_folders(DIR, key=lambda name: int(name[5:]))):
        print(i)
        IMG = pathname(trial_fold, 'IMG')
        # scipy.misc.imsave('/data/barry/IMITATION/dataset/THROW/TEST/img_%d.png'%i, np.load(pathname(IMG, 'img_1472.npy'))[:,:,:3])
        img_files = get_files(IMG, key=lambda order: int(order[4:-4]))

        img_array = np.concatenate([[np.load(img_file)[:, :, :3]]
                                    for img_file in img_files[0:DATA_NUM + 1]])
        prev_img_array = img_array[0:DATA_NUM]
        next_img_array = img_array[1:DATA_NUM + 1]
        goal_array = img_array[-1]
        initial_img_array = img_array[0]

        joint_conf_array = np.load(
            pathname(trial_fold, 'joints_configurations.npy'))[0:DATA_NUM + 1]
        prev_joint_conf_array = joint_conf_array[0:DATA_NUM]
        next_joint_conf_array = joint_conf_array[1:DATA_NUM + 1]

        vel_conf = np.load(pathname(trial_fold, 'velocities.npy'))[0:DATA_NUM]
        assert prev_img_array.shape[0] == next_img_array.shape[0] == \
               prev_joint_conf_array.shape[0] == next_joint_conf_array.shape[0] \
               == vel_conf.shape[0]  # == velocities_conf.shape[0]

        step_array = np.array(['step_%d_%d' % (i, j) for j in range(DATA_NUM)])

        with tf.python_io.TFRecordWriter(
                pathname(DIR, '..', 'TJR', 'demo_%d.tfrecord' % i)) as writer:
            for k in range(step_array.shape[0]):
                example = tf.train.Example(features=tf.train.Features(
                    feature={
                        'prev_img':
                        _bytes_feature(prev_img_array[k].tostring()),
                        'next_img':
                        _bytes_feature(next_img_array[k].tostring()),
                        'prev_jnt_conf':
                        _bytes_feature(prev_joint_conf_array[k].tostring()),
                        'next_jnt_conf':
                        _bytes_feature(next_joint_conf_array[k].tostring()),
                        'goal_img':
                        _bytes_feature(goal_array.tostring()),
                        'initial_img':
                        _bytes_feature(initial_img_array.tostring()),
                        'vel_conf':
                        _bytes_feature(vel_conf[k].tostring()),
                        'step':
                        _bytes_feature(step_array[k].tostring())
                    }))
                writer.write(example.SerializeToString())
Code example #6
def check(indice=0):
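    # Spot-check a converted dataset: load the joint arrays dumped from the
    # simulator (SIM) and from the TFRecord (TF) sides and print a few random
    # pairs for manual comparison.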
    TEST_PATH = '/data/barry/IMITATION/dataset/THROW/TEST_%d' % indice

    joint_sim = np.concatenate(
        [[np.load(pathname(TEST_PATH, 'SIM', 'JOINTS', 'joints_%d.npy' % j))]
         for j in range(1472)])
    joint_tf = np.concatenate(
        [[np.load(pathname(TEST_PATH, 'TF', 'JOINTS', 'joints_%d.npy' % j))]
         for j in range(1472)])

    for i in range(5):
        k = np.random.randint(0, 1472)
        print('k: ', k)

    print(joint_sim[k])
    print(joint_tf[k])
Code example #7
    def __init__(self,
                 sess,
                 width=256,
                 height=256,
                 channels=3,
                 action_dim=9,
                 learning_rate=0.0001,
                 model_name=None,
                 graph=None,
                 export=False):
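        # Visuomotor policy head: maps an RGB image and the current joint
        # configuration to joint velocities; when export is False a
        # mean-squared-error loss and Adam optimizer are also built.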
        self.sess = sess
        self.width = width
        self.height = height
        self.channels = channels
        self.action_dim = action_dim
        self.learning_rate = learning_rate
        self.rgb, self.conf, self.vel = self.create_network('visuonet')

        if not export:
            self.velocities = tf.placeholder(tf.float32, [None, 9])
            self.loss = tf.reduce_mean(
                tf.square(self.velocities - self.vel)
            )  #+ 0.001 * tf.reduce_mean(tf.square(self.gripper - self.vel[:,6]))
            self.optimizer = tf.train.AdamOptimizer(
                self.learning_rate).minimize(self.loss)

            self.model = model(sess, MODELS_DIR, model_name=model_name)
            self.summary = self.model.summary(graph=graph,
                                              **dict(loss=self.loss))
            self.eval_indice = len(
                get_folders(pathname(RES_PATH, **dict(flag=1))))
Code example #8
def get_model_variables(model_name, tensors):
    graph = tf.Graph()
    restored_variables = {}
    with tf.Session(graph=graph) as sess:
        saver = tf.train.import_meta_graph(model_name)
        saver.restore(
            sess,
            pathname(MODELS_DIR,
                     "posenet_autoencoder_for_irlgan2/model/model.ckpt"))
        for name in tensors:
            t = graph.get_tensor_by_name(name)
            restored_variables[name] = sess.run(t)
    return restored_variables
Code example #9
def create_visuomotor_dataset():
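    # Experiment with a filename FIFOQueue: a QueueRunner enqueues the image
    # files of a single trial and `file_queued` holds the dequeue op.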
    # for datasets in  get_files(pathname(DIR, '..', 'RECORDS', 'demo_%d.tfrecord'),
    #                            key = lambda f : int(f[5:-9])):

    filename_list = get_files(
        pathname('/home/mtb/sim_ws/pybullet_ws/data/Trial_950/IMG'),
        key=lambda f: int(f[4:-4]))
    q = tf.FIFOQueue(capacity=5, dtypes=tf.string)
    enqueue_op = q.enqueue_many([filename_list])

    qr = tf.train.QueueRunner(q, [enqueue_op] * 1)
    tf.train.add_queue_runner(qr)

    data = q.dequeue()

    # data = tf.Print(data, data = [q.size(),data], message = 'Remaining file in q')
    file_queued = data
Code example #10
def create_queue():
    filename_list = get_files(
        pathname('/home/mtb/sim_ws/pybullet_ws/data/Trial_950/IMG'),
        key=lambda f: int(f[4:-4]))
    q = tf.FIFOQueue(capacity=2, dtypes=tf.string)
    enqueue_op = q.enqueue([filename_list])

    data = q.dequeue()

    # data = tf.Print(data, data = [data], message = 'Remaining file in q')
    file_queued = data

    with tf.Session() as sess:
        # coord = tf.train.Coordinator()
        # threads = tf.train.start_queue_runners(coord = coord)
        sess.run(enqueue_op)
        queue = sess.run(file_queued)
Code example #11
    def __init__(self,
                 sess,
                 width=256,
                 height=256,
                 channels=3,
                 learning_rate=0.0001,
                 z_shape=8,
                 j_shape=9,
                 model_name=None,
                 graph=None,
                 coeff=0.002):
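        # Generator that predicts the next configuration p_t_1, compared
        # against the placeholder p_t_1_l; training minimizes the Gaussian
        # negative log-likelihood lh_loss (using the model's log-stds), while
        # mse_loss is tracked only as a summary.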
        self.sess = sess
        self.width = width
        self.height = height
        self.channels = channels
        self.learning_rate = learning_rate
        self.z_shape = z_shape
        self.j_shape = j_shape

        self.img, self.p_t, self.p_t_1 = self.gen_network('generator')
        # self.p_t = tf.placeholder(tf.float32, shape = (None, self.j_shape))
        # self.std = tf.exp(self.logstds)
        # self.var = self.std ** 2

        # self.loss = 0.5 * tf.reduce_sum(tf.square((self.p_t - self.action_mean) / self.var)) \
        #             + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(self.p_t)[0]) \
        #             + tf.reduce_sum(self.logstds)

        self.std = tf.exp(self.logstds)
        # self.var = tf.square(self.std)
        self.p_t_1_l = tf.placeholder(tf.float32, (None, 9))

        self.mse_loss = tf.reduce_mean(tf.square(self.p_t_1 - self.p_t_1_l))
        lh_loss = 0.5 * tf.reduce_sum(tf.square((self.p_t_1_l - self.p_t_1) / self.std), axis=-1) \
               + 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(self.p_t_1)[-1]) + tf.reduce_sum(self.logstds, axis=-1)
        self.lh_loss = tf.reduce_mean(lh_loss)

        self.optimizer = tf.train.AdamOptimizer(learning_rate).minimize(
            self.lh_loss)

        self.model = model(sess, MODELS_DIR, model_name=model_name)
        self.summary = self.model.summary(graph=graph,
                                          **dict(mse_loss=self.mse_loss,
                                                 lh_loss=self.lh_loss))
        self.eval_indice = len(get_folders(pathname(RES_PATH, **dict(flag=1))))
Code example #12
def main():
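    # Data-collection loop: for each trial, plan a motion that drops the cube
    # into the basket, and keep the recording only if the planned execution,
    # the velocity replay, and the recorded replay all pass the inside_basket
    # check; failed trials are deleted.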
    with open(pathname(os.getcwd(), '..', '..', 'machines.json'), 'r') as f:
        js = json.load(f)
    DATA_PATH = js[IP]['DATA_PATH']
    trial = js[IP]['trial_start']
    while True:
        if trial == js[IP]['trial_stop']:
            break

        start = time.time()
        while time.time() - start <= 120.0:
            SAVE_PATH = DATA_PATH + 'Trial_%d/' % trial
            mkdir(SAVE_PATH)
            p_mode_velocity_file = SAVE_PATH + 'p_mode_velocities.csv'
            scene_object_file = SAVE_PATH + 'scene_objects'
            with Interface(mode='direct',
                           save=True,
                           filename=p_mode_velocity_file) as I:
                # LOAD ROBOT
                robot = I.load_model(ROBOT_URDF_PATH)

                # LOAD TABLE
                table = I.load_model(TABLE,
                                     Pose(Point(0.4, 0.0, 0.05),
                                          Euler(1.57, 0.0, 0.0)),
                                     fixed_base=True,
                                     scaling=1.0)

                # TODO set camera
                I.set_camera_pose(1.6, 60, -25, [0.2, 0.0, 0.7])

                # LOAD SCENE OBJECTS
                scene_object = load_objects(
                    I, **dict(zip(SCENE_OBJECT, [CUBE_URDF, BASKET_URDF])))

                if len(scene_object) != len(SCENE_OBJECT):
                    continue

                # Gripper ID
                gripper_link = I.get_link_from_name(robot, END_EFFECTOR_LINK)

                # Neutral Pose
                neutralPose = Pose(Point(0.35, 0.0, 0.5),
                                   Euler(0.0, 3.14, 1.57))

                I.save_body_conf(save=True, filename=scene_object_file)
                I.set_initial_state(robot)

                offset = 0.005
                x_offset = [
                    0.0, 0.0, 0.0, offset, offset, offset, -offset, -offset,
                    -offset
                ]
                y_offset = [
                    0.0, offset, -offset, 0.0, offset, -offset, 0.0, offset,
                    -offset
                ]

                for k in range(2):
                    #    Set initial state
                    I.restore_body_conf()
                    success = False

                    initialize = True
                    path = plan(I,
                                robot,
                                gripper_link,
                                scene_object,
                                neutralPose,
                                initialize=initialize,
                                x_offset=x_offset[k],
                                y_offset=y_offset[k])

                    if len(path) > 0:
                        I.reset_simulation(robot)
                        time.sleep(1.0)

                        I.command(robot, path[0])
                        I.command(robot, path[1])
                        I.command(robot, path[2])
                        I.gripper(robot)

                        I.command(robot, path[3])
                        I.command(robot, path[4])
                        I.command(robot, path[5])

                        I.gripper(robot, 'open')
                        I.command(robot, path[6])

                        if inside_basket(I, *scene_object):
                            success = True
                            I.keep_pose(body=robot)
                            I.vel.write_csv()

                            velocities = I.vel.array
                            I.restore_body_conf()
                            I.reset_simulation(robot)

                            I.replay_velocities(robot, velocities)

                            if inside_basket(I, *scene_object):
                                I.restore_body_conf()
                                I.reset_simulation(robot)
                                time.sleep(1.0)
                                I.replay_velocities(
                                    robot,
                                    velocities,
                                    path=os.path.abspath(SAVE_PATH))
                                if inside_basket(I, *scene_object):
                                    np.save(
                                        os.path.join(SAVE_PATH, 'velocities'),
                                        np.array(I.target_vel))
                                    np.save(
                                        os.path.join(SAVE_PATH,
                                                     'joints_configurations'),
                                        np.array(I.joint_val))
                                    trial += 1
                                else:
                                    shutil.rmtree(SAVE_PATH)

                            else:
                                shutil.rmtree(SAVE_PATH)

                            break
                else:
                    shutil.rmtree(SAVE_PATH)
            if success:
                break
Code example #13
        u'autoencoder/deconv3-0/deconv3-0/kernel:0',
        u'autoencoder/deconv3-0/deconv3-0/bias:0',
        u'autoencoder/deconv3-1/deconv3-1/kernel:0',
        u'autoencoder/deconv3-1/deconv3-1/bias:0',
        u'autoencoder/deconv3-1-0/deconv3-1-0/kernel:0',
        u'autoencoder/deconv3-1-0/deconv3-1-0/bias:0',
        u'autoencoder/deconv2-0/deconv2-0/kernel:0',
        u'autoencoder/deconv2-0/deconv2-0/bias:0',
        u'autoencoder/deconv2-1-0/deconv2-1-0/kernel:0',
        u'autoencoder/deconv2-1-0/deconv2-1-0/bias:0',
        u'autoencoder/deconv1-0/deconv1-0/kernel:0',
        u'autoencoder/deconv1-0/deconv1-0/bias:0',
        u'autoencoder/recons_img/recons_img/kernel:0',
        u'autoencoder/recons_img/recons_img/bias:0'
    ]
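    # Pull the pretrained autoencoder decoder weights listed above out of the
    # posenet_autoencoder_for_irlgan2 checkpoint before the new graph is built.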
    variables = get_model_variables(model_name=pathname(
        MODELS_DIR, "posenet_autoencoder_for_irlgan2/model/model.ckpt.meta"),
                                    tensors=VARS)
    tf.reset_default_graph()  # start from a fresh default graph before building the training pipeline
    graph = tf.get_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(graph=graph, config=config) as sess:
        filenames, iterator, next_features = data_func()
        sess.run(
            iterator.initializer, {
                filenames: [
                    pathname(DATASET, 'demo_%d.tfrecord' % i)
                    for i in range(DATASET_NUM)
                ]
            })
Code example #14
def read_tf_records(in_file, indice=0):
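    # Cross-check a TFRecord file against the raw trial data: decode every
    # record and dump image, joint and velocity arrays from both sources under
    # TEST_<indice>/TF and TEST_<indice>/SIM for comparison.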
    record_iterator = tf.python_io.tf_record_iterator(path=in_file)
    TRIAL_PATH = pathname(DIR, 'Trial_%d' % indice)
    img_files = get_files(pathname(TRIAL_PATH, 'IMG'),
                          key=lambda order: int(order[4:-4]))
    img_array = np.concatenate([[np.load(img_file)[:, :, :3]]
                                for img_file in img_files[1:1473]])
    vel_array = np.load(pathname(TRIAL_PATH, 'velocities.npy'))[1:1473]
    joint_array = np.load(pathname(TRIAL_PATH,
                                   'joints_configurations.npy'))[0:1472]
    RES_PATH = pathname(DIR, '..', 'TEST_%d' % indice)
    mkdir(pathname(RES_PATH, 'SIM'))
    mkdir(pathname(RES_PATH, 'TF'))
    for s in ['TF', 'SIM']:
        for elem in ['IMG', 'JOINTS', 'VELOCITIES']:
            mkdir(pathname(RES_PATH, s, elem))

    for i, string_record in enumerate(record_iterator):
        example = tf.train.Example()
        example.ParseFromString(string_record)
        img_tf = np.fromstring(
            example.features.feature['img'].bytes_list.value[0],
            dtype=np.uint8).reshape(256, 256, 3)

        scipy.misc.imsave(pathname(RES_PATH, 'TF', 'IMG', 'img_%d.png' % i),
                          img_tf)
        scipy.misc.imsave(pathname(RES_PATH, 'SIM', 'IMG', 'img_%d.png' % i),
                          img_array[i])

        pose_tf = np.fromstring(
            example.features.feature['joint_conf'].bytes_list.value[0],
            dtype=np.float64)
        np.save(pathname(RES_PATH, 'TF', 'JOINTS', 'joints_%d' % i), pose_tf)
        np.save(pathname(RES_PATH, 'SIM', 'JOINTS', 'joints_%d' % i),
                joint_array[i])

        vel_tf = np.fromstring(
            example.features.feature['velocities'].bytes_list.value[0],
            dtype=np.float64)
        np.save(pathname(RES_PATH, 'TF', 'VELOCITIES', 'velocities_%d' % i),
                vel_tf)
        np.save(pathname(RES_PATH, 'SIM', 'VELOCITIES', 'velocities_%d' % i),
                vel_array[i])
Code example #15
 def save(filename='model.ckpt'):
     saver.save(sess, pathname(model_dirname, filename))
Code example #16
    iterator = dataset.make_initializable_iterator()
    next_features = iterator.get_next()
    return filenames, iterator, next_features


if __name__ == '__main__':
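    # Training entry point: initialize the TFRecord iterator over the demo
    # files, restore VisuoNet from its checkpoint (or initialize variables if
    # none is found), then loop over training steps.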
    graph = tf.get_default_graph()
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(graph=graph, config=config) as sess:
        filenames, iterator, next_features = data_func()
        sess.run(
            iterator.initializer, {
                filenames: [
                    pathname(DATASET, 'demo_%d.tfrecord' % i)
                    for i in range(DATASET_NUM)
                ]
            })

        visuo_net = VisuoNet(sess,
                             WIDTH,
                             HEIGHT,
                             CHANNELS,
                             model_name='visuonet8',
                             graph=graph)

        if not visuo_net.model.restore():
            sess.run(tf.global_variables_initializer())

        for i in range(EPISODES * int(STEPS / BATCH_SIZE)):
Code example #17
# coding=utf-8

import cv2
import argparse
import os
from utils import get_files, pathname

SRC = '/home/mtb/RES4'
width = 256
height = 256

FOLDERS = ['EVAL_%d' % d for d in range(1, 4)]
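# Stitch the saved PNG frames of each EVAL_* run into an .mp4 video.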

for t in FOLDERS:
    i = int(t[5:])
    img_src = pathname(SRC, t)
    video_src = pathname(SRC, 'VIDEO_RES4', 'output_%d.mp4' % i)

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Be sure to use lower case
    out = cv2.VideoWriter(video_src, fourcc, 20.0, (width, height))
    for img in get_files(pathname(img_src, 'IMG'),
                         key=lambda f: int(f[4:f.find('.png')])):
        frame = cv2.imread(img)
        out.write(frame)

    out.release()
    cv2.destroyAllWindows()
Code example #18

folder = DATA_PATH + 'Trial_1110'
p_mode_velocity_file = 'p_mode_velocities.csv'
scene_objects = 'scene_objects.npy'
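# Reload a recorded trial's scene in the PyBullet GUI; the network-evaluation
# steps below are left commented out.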
with Interface(mode='gui') as I:
    robot = I.load_model(ROBOT_URDF_PATH)
    table = I.load_model(TABLE,
                         Pose(Point(0.4, 0.0, 0.05), Euler(1.57, 0.0, 0.0)),
                         fixed_base=True,
                         scaling=1.0)
    I.set_camera_pose(0.6, 155, -50, [0.2, 0.0, 0.5])
    scene = np.load(os.path.join(folder, scene_objects))[2:, 0]
    block = I.load_model(CUBE_URDF, Pose(Point(*scene[0])), fixed_base=False)
    basket = I.load_model(BASKET_URDF, Pose(Point(*scene[1])), fixed_base=False)


    velocity = np.load(pathname(folder, 'velocities.npy'))

    STEPS = velocity.shape[0]
    # img = np.concatenate([[np.load(pathname(folder,'IMG', 'img_%d.npy'%i))[:,:,:3] for i in range(STEPS)]])
    # joints = I.movable_a_joints + I.movable_g_joints
    # vels = []
    for i in range(STEPS):
        print(i)
    #     joints_configurations = [[state.jointPosition for state in I.get_joint_state(robot, joints)]]
    #     rgb = self.sess.run(tf.image.per_image_standardization(img[i]))
    #     vel = self.sess.run(self.vel,
    #         {
    #         self.rgb:rgb.reshape(-1,WIDTH,HEIGHT,CHANNELS),
    #         self.conf:joints_configurations
    #     }
    #     )
Code example #19
def screenshot_path():
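    # Build a timestamped screenshot path by formatting the current date/time
    # into the filename pattern appended to pathname().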
    path = datetime.datetime.now().strftime(pathname() + jpg)
    return path
Code example #20
def main():
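    # Block-stacking variant of the data-collection loop: a trial is kept only
    # if the planned motion and both velocity replays leave the first block on
    # top of the second (on_top check); otherwise the trial folder is removed.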
    with open(pathname(os.getcwd(), '..', '..', 'machines.json'), 'r') as f:
        js = json.load(f)
    DATA_PATH = js[IP]['DATA_PATH']
    trial = js[IP]['trial_start']
    while True:
        if trial == js[IP]['trial_stop']:
            break
        start = time.time()
        while time.time() - start <= 120.0:
            SAVE_PATH = DATA_PATH + 'Trial_%d/' % trial
            mkdir(SAVE_PATH)
            p_mode_velocity_file = SAVE_PATH + 'p_mode_velocities.csv'
            scene_object_file = SAVE_PATH + 'scene'

            with Interface(mode='direct',
                           save=True,
                           filename=p_mode_velocity_file) as I:
                # LOAD ROBOT
                robot = I.load_model(ROBOT_URDF_PATH)

                # LOAD TABLE
                table = I.load_model(TABLE,
                                     Pose(Point(0.4, 0.0, 0.05),
                                          Euler(1.57, 0.0, 0.0)),
                                     fixed_base=True,
                                     scaling=1.0)

                # TODO set camera
                I.set_camera_pose(1.6, 60, -25, [0.2, 0.0, 0.7])

                # LOAD_BLOCKS
                blocks = load_blocks(I)
                if len(blocks) != len(BLOCKS):
                    continue
                time.sleep(1.0)

                # Gripper ID
                gripper_link = I.get_link_from_name(robot, END_EFFECTOR_LINK)

                # Neutral Pose
                neutralPose = Pose(Point(0.35, 0.0, 0.5),
                                   Euler(0.0, 3.14, 1.57))

                I.save_body_conf(save=True, filename=scene_object_file)
                I.set_initial_state(robot)

                offset = 0.005
                x_offset = [0.0, offset, -offset, -offset]
                y_offset = [0.0, offset, -offset, 0.0]

                shuffle(blocks)

                color = ['red']

                for k in range(1):
                    # Set initial state
                    I.restore_body_conf()
                    success = False

                    # Plan motion
                    paths = []
                    velocities = OrderedDict()

                    for j in range(1):
                        initialize = True if j == 0 else False
                        path = plan(I,
                                    robot,
                                    gripper_link,
                                    blocks,
                                    neutralPose,
                                    initialize=initialize,
                                    x_offset=x_offset[k],
                                    y_offset=y_offset[k])
                        if path is not None:
                            paths.append(path)
                        else:
                            paths = []
                            break

                    if len(paths) > 0:
                        I.reset_simulation(robot)
                        # time.sleep(1.0)

                        I.command(robot, paths[0][0])
                        I.command(robot, paths[0][1])
                        I.command(robot, paths[0][2])
                        I.gripper(robot, 'close')
                        I.command(robot, paths[0][3])
                        I.command(robot, paths[0][4])
                        I.command(robot, paths[0][5])

                        I.gripper(robot, 'open')
                        I.command(robot, paths[0][6])

                        if on_top(I, *blocks[0:2]):
                            I.keep_pose(body=robot)
                            I.vel.write_csv()

                            velocities = I.vel.array
                            I.restore_body_conf()
                            I.reset_simulation(robot)

                            I.replay_velocities(robot, velocities)

                            if on_top(I, *blocks[0:2]):
                                I.restore_body_conf()
                                I.reset_simulation(robot)
                                time.sleep(1.0)
                                I.replay_velocities(
                                    robot,
                                    velocities,
                                    path=os.path.abspath(SAVE_PATH))

                                if on_top(I, *blocks[0:2]):
                                    success = True
                                    np.save(
                                        os.path.join(SAVE_PATH, 'velocities'),
                                        np.array(I.target_vel))
                                    np.save(
                                        os.path.join(SAVE_PATH,
                                                     'joints_configurations'),
                                        np.array(I.joint_val))
                                    trial += 1
                                    break

                else:
                    shutil.rmtree(SAVE_PATH)
            if success:
                break
            else:
                if folder_exists(SAVE_PATH):
                    shutil.rmtree(SAVE_PATH)
Code example #21
 def get_folder_name():
     return pathname(train_dir, model_folder)
Code example #22
# coding=utf-8

import scipy.misc
from imageio import imwrite
import numpy as np
import os
from utils import pathname, get_files

PATH = '/home/mtb/sim_ws/pybullet_ws/data/Trial_950/IMG'
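# Convert each saved .npy camera frame under PATH to a JPEG, keeping only the
# first three channels.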

# for i,p in enumerate(os.listdir(path)):
#     rgb = np.
# rgb = rgb.reshape((-1,128,128,4))
#
# print rgb.shape
# print np.load('/media/mtb/76f4874c-25bd-4dd1-b7a5-dc5732ba8511/mtb/THROW/Trial_0/velocities.npy').shape

for i in range(1500):
    rgb = np.load(pathname(PATH, 'img_%d.npy' % i))
    scipy.misc.imsave('/home/mtb/IMG/img_%d.jpg' % i, rgb[:, :, :3])
Code example #23
# Networking Project
# Ruben, Emalee, Jake
# This is the TCP Client
# This establishes a TCP socket and connection

# Libraries
import socket
from zlib import decompress
import sys
import os
import pygame
import datetime
from utils import pathname

direct_path = pathname()
jpg = '/screenshot_%Y-%m-%d_%H_%M_%S.jpg'


# Takes the screenshot, names the screenshot using the date and time
# returns the new path
def screenshot_path():
    path = datetime.datetime.now().strftime(pathname() + jpg)
    return path


def recvall(conn, length):
    buf = b''
    while len(buf) < length:
        data = conn.recv(length - len(buf))
        if not data:
            return data