dataset_t0 = database.get_dataset(path, tactile_map=True)
dataset_t0 = dataset_t0.prefetch(5 * batch_size)
# pair each record at time t with its successor at time t+1
dataset_t1 = dataset_t0.skip(1)
dataset = tf.data.Dataset.zip((dataset_t0, dataset_t1))
dataset = dataset.batch(batch_size)
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.repeat()

iterator = dataset.make_initializable_iterator()
batch_t0, batch_t1 = iterator.get_next()

tactile_map = batch_t0["tactile_map"]
target_tactile_map = batch_t1["tactile_map"]

# predict the tactile map at t+1 from the tactile map at t
tactile_map_predictor = predictor_maker(tactile_map_length, tactile_map_length)
out = tactile_map_predictor(tactile_map)
loss = mse(out, target_tactile_map)
op = tf.train.AdamOptimizer(5e-4).minimize(loss)

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run([iterator.initializer, tf.global_variables_initializer()])
    for i in range(args.n_batches):
        _, l = sess.run([op, loss])
        if i % 200 == 0:
            print(l)
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d_%H_%M_%S')
    path = saver.save(sess, args.output + "tactile_t0__tactile_t1_{}/".format(st))
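These snippets all rely on two helpers defined elsewhere in the repository, predictor_maker and mse, whose definitions are not shown on this page. A minimal sketch of what they might look like, assuming a single-hidden-layer MLP and a plain (optionally per-sample) mean squared error, is:

def predictor_maker(in_size, out_size, n_hidden=200):
    # hypothetical sketch: in_size is kept only for signature compatibility,
    # since tf.layers.dense infers the input dimension from its argument
    def predictor(inp):
        hidden = tf.layers.dense(inp, n_hidden, activation=tf.nn.relu)
        return tf.layers.dense(hidden, out_size)
    return predictor

def mse(out, target, axis=None):
    # mean squared error; axis=-1 yields one loss value per sample
    return tf.reduce_mean(tf.square(out - target), axis=axis)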
Example #2
# number of records, parsed from the name of the first file in the positions folder
n_records = int(re.match(regex, os.listdir(path + '/positions')[0]).group(1))

dataset_t0 = database.get_dataset(path, positions=True, actions=True, vision=True)
dataset_t0 = dataset_t0.map(database.discretize_dataset(N_DISCRETE))
dataset_t1 = dataset_t0.skip(1)
dataset = tf.data.Dataset.zip((dataset_t0, dataset_t1))
dataset = dataset.batch(batch_size)

iterator = dataset.make_initializable_iterator()
batch_t0, batch_t1 = iterator.get_next()

# split positions and actions into one tensor per joint (4 joints)
discrete_positions = [tf.squeeze(x, axis=1) for x in tf.split(batch_t0["positions"], 4, axis=1)]
discrete_actions = [tf.squeeze(x, axis=1) for x in tf.split(batch_t0["actions"], 4, axis=1)]
discrete_positions_target = [tf.squeeze(x, axis=1) for x in tf.split(batch_t1["positions"], 4, axis=1)]

# one predictor per joint: input is the concatenated discrete position and action at t,
# target is that joint's discrete position at t+1
joint_predictors = [predictor_maker(2 * N_DISCRETE, N_DISCRETE) for a in discrete_actions]
inps = [tf.concat([p, a], axis=1) for p, a in zip(discrete_positions, discrete_actions)]
outs = [joint_predictor(inp) for inp, joint_predictor in zip(inps, joint_predictors)]
losses = [mse(out, target, axis=-1) for out, target in zip(outs, discrete_positions_target)]

saver = tf.train.Saver()

# per-record, per-joint prediction error
losses_per_joint = np.zeros((n_records - 1, 4))

with tf.Session() as sess:
    sess.run(iterator.initializer)
    saver.restore(sess, args.network_path)
    try:
        while True:
            ind, l = sess.run([batch_t0["index"], losses])
            for j, ll in enumerate(l):
                # assumed completion: store each joint's per-sample loss at its record index
                losses_per_joint[ind, j] = ll
    except tf.errors.OutOfRangeError:
        pass
Example #3
iterator = dataset.make_initializable_iterator()
batch_t0, batch_t1 = iterator.get_next()

discrete_positions = [
    tf.squeeze(x, axis=1) for x in tf.split(batch_t0["positions"], 4, axis=1)
]
discrete_actions = [
    tf.squeeze(x, axis=1) for x in tf.split(batch_t0["actions"], 4, axis=1)
]
discrete_positions_target = [
    tf.squeeze(x, axis=1) for x in tf.split(batch_t1["positions"], 4, axis=1)
]

joint_predictors = [
    predictor_maker(2 * args.n_discrete, args.n_discrete)
    for a in discrete_actions
]
inps = [
    tf.concat([p, a], axis=1)
    for p, a in zip(discrete_positions, discrete_actions)
]
outs = [
    joint_predictor(inp)
    for inp, joint_predictor in zip(inps, joint_predictors)
]
losses = [
    mse(out, target) for out, target in zip(outs, discrete_positions_target)
]
ops = [tf.train.AdamOptimizer(5e-4).minimize(loss) for loss in losses]
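The snippet above stops right after building the per-joint optimizers. A training loop in the same style as the other examples on this page, assuming the same args.n_batches flag and the usual variable initialization, might look like:

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run([iterator.initializer, tf.global_variables_initializer()])
    for i in range(args.n_batches):
        _, l = sess.run([ops, losses])
        if i % 200 == 0:
            print(l)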
Example #4
dataset = dataset.map(database.vision_to_float32)
dataset = dataset.prefetch(5 * batch_size)
dataset = dataset.shuffle(buffer_size=buffer_size)
dataset = dataset.batch(batch_size)
dataset = dataset.repeat()

iterator = dataset.make_initializable_iterator()
batch = iterator.get_next()

# flatten each vision frame into a single feature vector
size = np.prod(batch["vision"].get_shape().as_list()[1:])
vision = tf.reshape(batch["vision"], [-1, size])
discrete_positions = [
    tf.squeeze(x, axis=1) for x in tf.split(batch["positions"], 4, axis=1)
]

# one predictor per joint, mapping the flattened vision input to that joint's discrete position
predictors = [predictor_maker(size, N_DISCRETE) for pos in discrete_positions]
outs = [pred(vision) for pred in predictors]
losses = [mse(out, pos) for out, pos in zip(outs, discrete_positions)]
ops = [tf.train.AdamOptimizer(5e-4).minimize(loss) for loss in losses]

saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run([iterator.initializer, tf.global_variables_initializer()])
    for i in range(args.n_batches):
        _, l = sess.run([ops, losses])
        if i % 200 == 0:
            print(l)
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y_%m_%d_%H_%M_%S')
    path = saver.save(
Example #5
dataset = dataset.batch(batch_size)

iterator = dataset.make_initializable_iterator()
batch_t0, batch_t1 = iterator.get_next()

size = 4 * N_DISCRETE
discrete_positions = tf.reshape(batch_t0["positions"], (-1, size))
discrete_actions = tf.reshape(batch_t0["actions"], (-1, size))
tactile_map = batch_t0["tactile_map"]
tactile_map_target = batch_t1["tactile_map"]

# input: current discrete positions, actions and tactile map, concatenated
inp = tf.concat([discrete_positions, discrete_actions, tactile_map], axis=-1)

in_size = tactile_map_length + 4 * 2 * N_DISCRETE
out_size = tactile_map_length
predictor = predictor_maker(in_size, out_size)
out = predictor(inp)
loss = mse(out, tactile_map_target, axis=-1)

saver = tf.train.Saver()

losses = np.zeros(n_records - 1)

with tf.Session() as sess:
    sess.run(iterator.initializer)
    saver.restore(sess, args.network_path)
    try:
        while True:
            ind, l = sess.run([batch_t0["index"], loss])
            losses[ind] = l
    except tf.errors.OutOfRangeError: