Example #1
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from time import time
# scenarios, PlanningNetworkMP, ExperimentHandler, plan_loss and _plot are
# project-local helpers; their import paths are not shown in the original.
def main(args):
    # 1. Get datasets
    train_ds = scenarios.planning(args.scenario_path)
    #val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(args.eta)  # tf.train.AdamOptimizer is TF1-only; use the Keras optimizer as in Example #2

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name, args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    i = -2  # index of the scenario taken from the training set

    # Dense grid of candidate start poses (x, y, theta).
    a = 60
    x = np.linspace(-12.5, -3., a)
    b = 30
    y = np.linspace(-21.0, -16., b)
    c = 76
    th = np.linspace(-10 * np.pi / 180, 65 * np.pi / 180, c)
    X, Y, TH = np.meshgrid(x, y, th)
    x, y = np.meshgrid(x, y)  # 2D (x, y) grid kept for the scatter plot below
    X = X.flatten()
    Y = Y.flatten()
    TH = TH.flatten()
    p0 = np.stack([X, Y, TH], 1).astype(np.float32)  # (n, 3) start poses
    n = a * b * c

    map = train_ds[i][2].numpy()[np.newaxis]  # note: shadows the builtin `map`
    map = np.tile(map, (n, 1, 1, 1))  # replicate the map for every candidate pose

    for w in [17]:  # one case; originally range(30)
        pk = train_ds[i][1][w].numpy()[np.newaxis]
        pk = np.tile(pk, (n, 1))

        p0a = train_ds[i][0][w].numpy()

        data = (p0, pk, map)
        # Run the model on the whole pose grid and compute the planning losses
        start = time()
        output, last_ddy = model(data, None, training=True)
        end = time()
        print("TIME:", end - start)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
            output, last_ddy, data)

        # A pose is feasible iff the summed losses are exactly zero.
        total_loss = invalid_loss + curvature_loss + overshoot_loss
        gidx = np.argwhere(total_loss.numpy() == 0)  # indices of feasible poses
        total_loss = tf.reshape(total_loss, (-1, c))  # one row per (x, y) cell
        # Color: number of feasible orientations per (x, y) cell.
        color = tf.reduce_sum(tf.cast(tf.equal(total_loss, 0.0), tf.float32), -1)

        # Brown background = occupied space; the free space is filled white below.
        plt.fill([-100., 100., 100., -100., -100.], [-100., -100., 100., 100., -100.], 'brown', zorder=1)
        plt.xlim(-13.25, 8.25)
        plt.ylim(-22., 3.)

        m = map[0]
        # Corner sequence tracing the free-space polygon through the three quadrangles.
        seq = [(1, 3), (1, 0), (0, 3), (0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (2, 0), (1, 3)]
        plt.fill([m[s][0] for s in seq], [m[s][1] for s in seq], 'w', zorder=2)

        plt.scatter(tf.reshape(x, [-1])[::-1],
                    tf.reshape(y, [-1])[::-1],
                    c=color[::-1],
                    s=1.5 * np.ones_like(color),
                    zorder=3,
                    cmap='hot_r')
        plt.colorbar()
        plt.arrow(pk[0, 0], pk[0, 1], np.cos(pk[0, 2]), np.sin(pk[0, 2]), width=0.1, zorder=10, color='r')  # goal pose (red)
        plt.arrow(p0a[0], p0a[1], np.cos(p0a[2]), np.sin(p0a[2]), width=0.2, zorder=11, color='b')  # dataset start pose (blue)
        print(w, "P0:", p0a[0], p0a[1])
        print(w, "PK:", pk[0, 0], pk[0, 1])
        _plot(x_path, y_path, th_path, gidx)
        plt.show()
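
What Example #1 does with meshgrid is worth isolating: sample x, y and theta ranges, take their Cartesian product with np.meshgrid, flatten, and stack into one (n, 3) batch of candidate start poses that is evaluated in a single forward pass. A minimal self-contained sketch of that pattern (the grid sizes here are illustrative, not the ones used above):

import numpy as np

# Build an (a*b*c, 3) array of candidate poses (x, y, theta).
a, b, c = 4, 3, 5
x = np.linspace(-2.0, 2.0, a)
y = np.linspace(-1.0, 1.0, b)
th = np.linspace(-np.pi / 4, np.pi / 4, c)

X, Y, TH = np.meshgrid(x, y, th)  # each of shape (b, a, c) with default 'xy' indexing
p0 = np.stack([X.ravel(), Y.ravel(), TH.ravel()], axis=1).astype(np.float32)

assert p0.shape == (a * b * c, 3)  # one row per (x, y, theta) combination
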
Example #2
import tensorflow as tf
# scenarios, PlanningNetworkMP, ExperimentHandler, plan_loss, _plot and _ds are
# project-local helpers; their import paths are not shown in the original.
def main(args):
    # 1. Get datasets
    train_ds, train_size = scenarios.planning_dataset(args.scenario_path)
    val_ds, val_size = scenarios.planning_dataset(
        args.scenario_path.replace("train", "val"))

    val_ds = val_ds \
        .batch(args.batch_size) \
        .prefetch(args.batch_size)

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))  # N = 6

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(args.eta)
    l2_reg = tf.keras.regularizers.l2(1e-5)  # NOTE: created but never applied below

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    # 5. Run everything
    train_step, val_step = 0, 0
    best_accuracy = 0.0
    for epoch in range(args.num_epochs):
        # workaround for TF dataset shuffling issues: reshuffle and rebatch every epoch
        dataset_epoch = train_ds.shuffle(train_size)
        dataset_epoch = dataset_epoch.batch(args.batch_size).prefetch(
            args.batch_size)

        # 5.1. Training Loop
        experiment_handler.log_training()
        acc = []
        for i, data in _ds('Train', dataset_epoch, train_size, epoch,
                           args.batch_size):
            # 5.1.1. Run the model, compute losses, and record gradients
            with tf.GradientTape() as tape:  # a persistent tape is unnecessary for a single gradient call
                output, last_ddy = model(data, None, training=True)
                model_loss, invalid_loss, overshoot_loss, curvature_loss, total_curvature_loss, _, x_path, y_path, th_path = plan_loss(
                    output, data, last_ddy)
            grads = tape.gradient(model_loss, model.trainable_variables)

            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            # 5.1.3 Calculate statistics
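            # t, s, u are the fractions logged below as good_paths,
            # really_good_paths and ideal_paths: zero invalid loss, additionally
            # zero curvature loss, additionally zero overshoot loss.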
            t = tf.reduce_mean(tf.cast(tf.equal(invalid_loss, 0.0),
                                       tf.float32))
            s = tf.reduce_mean(
                tf.cast(tf.equal(invalid_loss + curvature_loss, 0.0),
                        tf.float32))
            u = tf.reduce_mean(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))
            acc.append(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))

            # 5.1.4 Save logs for particular interval
            with tf.summary.record_if(train_step % args.log_interval == 0):
                tf.summary.scalar('metrics/model_loss',
                                  tf.reduce_mean(model_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/invalid_loss',
                                  tf.reduce_mean(invalid_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/overshoot_loss',
                                  tf.reduce_mean(overshoot_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/curvature_loss',
                                  tf.reduce_mean(curvature_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/total_curvature_loss',
                                  tf.reduce_mean(total_curvature_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/good_paths', t, step=train_step)
                tf.summary.scalar('metrics/really_good_paths',
                                  s,
                                  step=train_step)
                tf.summary.scalar('metrics/ideal_paths', u, step=train_step)

            # 5.1.5 Update meta variables
            train_step += 1
            if train_step % 100 == 0:
                _plot(x_path, y_path, th_path, data, train_step)
        epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))

        # 5.1.6 Take statistics over epoch
        with tf.summary.record_if(True):
            tf.summary.scalar('epoch/good_paths', epoch_accuracy, step=epoch)

        # 5.2. Validation Loop
        experiment_handler.log_validation()
        acc = []
        for i, data in _ds('Validation', val_ds, val_size, epoch,
                           args.batch_size):
            # 5.2.1 Run validation inference and compute losses
            output, last_ddy = model(data, None, training=True)
            model_loss, invalid_loss, overshoot_loss, curvature_loss, total_curvature_loss, _, x_path, y_path, th_path = plan_loss(
                output, data, last_ddy)

            t = tf.reduce_mean(tf.cast(tf.equal(invalid_loss, 0.0),
                                       tf.float32))
            s = tf.reduce_mean(
                tf.cast(tf.equal(invalid_loss + curvature_loss, 0.0),
                        tf.float32))
            u = tf.reduce_mean(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))
            acc.append(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))

            # 5.2.3 Print logs for particular interval
            with tf.summary.record_if(val_step % args.log_interval == 0):
                tf.summary.scalar('metrics/model_loss',
                                  tf.reduce_mean(model_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/invalid_loss',
                                  tf.reduce_mean(invalid_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/overshoot_loss',
                                  tf.reduce_mean(overshoot_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/curvature_loss',
                                  tf.reduce_mean(curvature_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/total_curvature_loss',
                                  tf.reduce_mean(total_curvature_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/good_paths', t, step=val_step)
                tf.summary.scalar('metrics/really_good_paths',
                                  s,
                                  step=val_step)
                tf.summary.scalar('metrics/ideal_paths', u, step=val_step)

            # 5.2.4 Update meta variables
            val_step += 1

        epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))

        # 5.2.5 Take statistics over epoch
        with tf.summary.record_if(True):
            tf.summary.scalar('epoch/good_paths', epoch_accuracy, step=epoch)

        # 5.3 Save last and best
        if epoch_accuracy > best_accuracy:
            experiment_handler.save_best()
            best_accuracy = epoch_accuracy
        #experiment_handler.save_last()

        experiment_handler.flush()
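
The training loop in Example #2 is the standard TF 2.x eager pattern: a forward pass under tf.GradientTape, tape.gradient over the trainable variables, then optimizer.apply_gradients. A minimal runnable sketch of one such step; the Dense model and MSE loss are placeholders standing in for PlanningNetworkMP and plan_loss:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
optimizer = tf.keras.optimizers.Adam(1e-3)

@tf.function
def train_step(x, y):
    with tf.GradientTape() as tape:
        pred = model(x, training=True)
        loss = tf.reduce_mean(tf.square(pred - y))  # MSE in place of plan_loss
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss

x = tf.random.normal((8, 4))
y = tf.random.normal((8, 1))
print(train_step(x, y).numpy())
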
Example #3
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from time import time
# scenarios, PlanningNetworkMP, ExperimentHandler, plan_loss and _plot are
# project-local helpers; their import paths are not shown in the original.
def main(args):
    # 1. Get datasets
    #train_ds = scenarios.planning(args.scenario_path)
    val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(args.eta)  # tf.train.AdamOptimizer is TF1-only; use the Keras optimizer as in Example #2

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    a = 60
    x = np.linspace(-32.0, -20., a)
    b = 40
    y = np.linspace(5.0, 0.5, b)
    c = 91
    d = 4
    th = np.linspace(-np.pi / d, np.pi / d, c)
    X, Y, TH = np.meshgrid(x, y, th)
    x, y = np.meshgrid(x, y)
    X = X.flatten()
    Y = Y.flatten()
    TH = TH.flatten()
    p0 = np.stack([X, Y, TH], 1).astype(np.float32)
    n = a * b * c

    r = 0.1
    w = 2.7  # gap width; note this `w` is shadowed by the loop variable below
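    # The block below rewrites the map: the middle quadrangle becomes a
    # rectangular gap of width w at height rng, and the two outer quadrangles
    # are squared off. (The (quadrangle, corner, coordinate) layout is
    # inferred from the indexing.)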
    map = val_ds[0][2].numpy()[np.newaxis]
    map = np.tile(map, (n, 1, 1, 1))
    rng = 0.8 + 2 * r
    map[:, 1, 0, 1] = rng
    map[:, 1, 1, 1] = rng
    map[:, 1, 2, 1] = rng + w
    map[:, 1, 3, 1] = rng + w
    map[:, 0, :2, 1] = 0.
    map[:, 0, 2:, 1] = 5.5
    map[:, 2, :2, 1] = 0.
    map[:, 2, 2:, 1] = 5.5

    map[:, 2, 1, 0] = map[:, 2, 2, 0]
    map[:, 1, 0, 0] = map[:, 2, 1, 0]
    map[:, 1, 3, 0] = map[:, 2, 1, 0]

    map[:, 0, 0, 0] = map[:, 0, 3, 0]
    map[:, 1, 1, 0] = map[:, 0, 0, 0]
    map[:, 1, 2, 0] = map[:, 0, 0, 0]

    for w in [17]:  # one case; originally range(30) (shadows the w defined above)
        pk = np.array([[-3., 1.5, 0.]], dtype=np.float32)
        pk = np.tile(pk, (n, 1))

        data = (p0, pk, map)
        # Plan one reference path from pp0 to ppk (overlaid on the heatmap below)
        pp0 = np.array([[-30., 1.5, 0.]], dtype=np.float32)
        ppk = np.array([[0., 1.5, 0.]], dtype=np.float32)
        dummy_data = (pp0, ppk, map[:1])
        output, last_ddy = model(dummy_data, None, training=True)
        _, _, _, _, _, px_path, py_path, pth_path = plan_loss(
            output, last_ddy, dummy_data)
        start = time()
        output, last_ddy = model(data, None, training=True)
        end = time()
        print("TIME:", end - start)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
            output, last_ddy, data)

        # A pose is feasible iff the summed losses are exactly zero.
        total_loss = invalid_loss + curvature_loss + overshoot_loss
        gidx = np.argwhere(total_loss.numpy() == 0)  # feasible pose indices (unused here)
        total_loss = tf.reshape(total_loss, (-1, c))  # one row per (x, y) cell
        # Color: number of feasible orientations per (x, y) cell.
        color = tf.reduce_sum(tf.cast(tf.equal(total_loss, 0.0), tf.float32), -1)

        plt.figure(num=None,
                   figsize=(9, 2),
                   dpi=300,
                   facecolor='w',
                   edgecolor='k')
        plt.fill([-100., 100., 100., -100., -100.],
                 [-100., -100., 100., 100., -100.],
                 'brown',
                 zorder=1)
        plt.xlim(-33., 4.5)
        plt.ylim(-0.25, 5.75)

        m = map[0]
        seq = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 2), (2, 3),
               (2, 0), (2, 1), (1, 0), (1, 1), (0, 0)]
        plt.fill([m[s][0] for s in seq], [m[s][1] for s in seq], 'w', zorder=2)

        plt.scatter(tf.reshape(x, [-1])[::-1],
                    tf.reshape(y, [-1])[::-1],
                    c=color[::-1],
                    s=1.5 * np.ones_like(color),
                    zorder=4,
                    cmap='hot_r')
        plt.colorbar(orientation="horizontal")
        print(w, "PK:", pk[0, 0], pk[0, 1])
        _plot(px_path, py_path, pth_path)
        plt.savefig("2.pdf")
Example #4
import numpy as np
import tensorflow as tf
# scenarios, PlanningNetworkMP, ExperimentHandler, plan_loss and _plot are
# project-local helpers; their import paths are not shown in the original.
def main(args):
    # 1. Get datasets
    #train_ds = scenarios.planning(args.scenario_path)
    val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(args.eta)  # tf.train.AdamOptimizer is TF1-only; use the Keras optimizer as in Example #2

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    n = 20   # number of sweep steps (n + 1 geometries in the batch)
    r = 0.1  # vertical step between consecutive gap offsets
    y = 0.3  # lowest gap offset
    w = 2.7  # gap width
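    # Tile the validation map n + 1 times and write a different gap offset into
    # each copy, so the whole geometry sweep runs in a single forward pass.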
    map = val_ds[0][2].numpy()[np.newaxis]
    map = np.tile(map, (n + 1, 1, 1, 1))
    rng = np.linspace(y, y + r * n, n + 1)
    map[:, 1, 0, 1] = rng
    map[:, 1, 1, 1] = rng
    map[:, 1, 2, 1] = rng + w
    map[:, 1, 3, 1] = rng + w
    map[:, 0, :2, 1] = 0.
    map[:, 0, 2:, 1] = 5.5
    map[:, 2, :2, 1] = 0.
    map[:, 2, 2:, 1] = 5.5

    map[:, 2, 1, 0] = map[:, 2, 2, 0]
    map[:, 1, 0, 0] = map[:, 2, 1, 0]
    map[:, 1, 3, 0] = map[:, 2, 1, 0]

    map[:, 0, 0, 0] = map[:, 0, 3, 0]
    map[:, 1, 1, 0] = map[:, 0, 0, 0]
    map[:, 1, 2, 0] = map[:, 0, 0, 0]
    p0 = np.array([[-27., 1.5, 0.]], dtype=np.float32)
    p0 = np.tile(p0, (n + 1, 1))
    pk = np.array([[-3., 1.5, 0.]], dtype=np.float32)
    pk = np.tile(pk, (n + 1, 1))
    data = (p0, pk, map)
    # Run the model on the whole geometry sweep and compute the losses
    output, last_ddy = model(data, None, training=True)
    model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
        output, last_ddy, data)
    print(invalid_loss, curvature_loss)

    _plot(x_path, y_path, th_path, data, 0, False)
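
The trick in Example #4 is evaluating a whole family of corridor geometries at once: the map is tiled along the batch axis, a different gap offset is written into each copy, and the start and goal poses are simply repeated. A minimal NumPy sketch of that batching; the (quadrangle, corner, coordinate) map layout is an assumption read off the indexing above:

import numpy as np

n, r, y0, w = 20, 0.1, 0.3, 2.7
base_map = np.zeros((3, 4, 2), dtype=np.float32)  # 3 quadrangles, 4 corners, (x, y)

maps = np.tile(base_map[np.newaxis], (n + 1, 1, 1, 1))
offsets = np.linspace(y0, y0 + r * n, n + 1)      # one gap offset per batch entry

maps[:, 1, 0, 1] = offsets       # lower edge of the middle quadrangle
maps[:, 1, 1, 1] = offsets
maps[:, 1, 2, 1] = offsets + w   # upper edge, w above the lower one
maps[:, 1, 3, 1] = offsets + w

p0 = np.tile(np.array([[-27., 1.5, 0.]], np.float32), (n + 1, 1))  # shared start pose
assert maps.shape[0] == p0.shape[0]  # batch dimensions line up for the model
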