Example 1
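All of the examples below assume a common preamble of imports and project-local helpers. A minimal sketch follows; the module paths for the project-local names (scenarios, PlanningNetworkMP, ExperimentHandler, plan_loss, _ds, _plot, read_map, the group-invariance models, poly_Z5) are assumptions, not taken from the original:

from time import time

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tqdm import tqdm

import scenarios                                  # hypothetical: dataset loaders
from models import PlanningNetworkMP              # hypothetical module path
from utils import ExperimentHandler, plan_loss, _ds, _plot  # hypothetical module path

Note that the snippets mix two TensorFlow generations: those using tf.keras.optimizers.Adam and tf.summary.record_if target TF 2.x, while those using tf.train.AdamOptimizer and tfc.* target TF 1.x eager mode and additionally need import tensorflow.contrib as tfc.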
def main():
    bs = 1
    model_path = "./trained_models/corl_N_6/best-28"
    #model_path = "./trained_models/corl_N_4/best-25"
    #model_path = "./trained_models/corl_N_2/best-30"
    #model_path = "./models/corl_N_6_notcurv/best-36"
    ds_path = "../data/test/all"
    # 1. Get datasets
    ds, ds_size = scenarios.planning_dataset(ds_path)

    ds = ds \
        .batch(bs) \
        .prefetch(bs)

    # 2. Define model
    model = PlanningNetworkMP(7, (bs, 6))  # N = 6
    # The commented variants below are identical to the line above; presumably
    # only the restored checkpoint (see model_path) changes between N values.
    #model = PlanningNetworkMP(7, (bs, 6)) # N = 4
    #model = PlanningNetworkMP(7, (bs, 6)) # N = 2

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(1e-4)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(".", "", 1, model, optimizer)
    experiment_handler.restore(model_path)

    # 5. Run everything
    acc = []
    times = []
    for i, data in _ds('Check', ds, ds_size, 0, bs):
        map, path, ddy0 = data  # note: `map` shadows the builtin, kept as in the original
        d = (map, path, ddy0)
        start = time()
        # training=True is kept from the original pipeline even at evaluation time
        output, last_ddy = model(d, None, training=True)
        end = time()
        times.append(end - start)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, _, x_path, y_path, th_path = plan_loss(
            output, d, last_ddy)

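        # A plan counts as valid only if it is simultaneously collision-free
        # (invalid_loss == 0), within the curvature limit, and free of overshoot.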
        valid = tf.cast(
            tf.equal(invalid_loss + curvature_loss + overshoot_loss, 0.0),
            tf.float32)
        acc.append(valid)

    epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))
    print("ACCURACY:", epoch_accuracy)
    print("MEAN PLANNING TIME:", np.mean(times[20:]))
    print("STD PLANNING TIME:", np.std(times[20:]))
Example 2
def main():
    # 1. Get datasets
    ds = scenarios.planning("../../TG_data/train/mix3/")

    # 2. Define model
    model = PlanningNetworkMP(7, (1, 6))

    # 3. Optimization

    # The optimizer is only needed to restore the checkpoint; its learning rate is unused here.
    optimizer = tf.train.AdamOptimizer(1)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(".", "", 1, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    acc = []
    for i in range(len(ds)):
        p0, pk, map = ds[i]
        map = tf.tile(map[tf.newaxis], (len(p0), 1, 1, 1))  # one map copy per start pose
        data = (p0, pk, map)
        # 5.2.1 Make inference of the model for validation and calculate losses
        output, last_ddy = model(data, None, training=True)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path, length = plan_loss(
            output, last_ddy, data)

        #_plot(x_path, y_path, th_path, data, i)  # optional: visualize each plan

        acc.append(
            tf.cast(
                tf.equal(invalid_loss + curvature_loss + overshoot_loss, 0.0),
                tf.float32))

    epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))
    print("ACCURACY:", epoch_accuracy)
Example 3
def run_and_plot(model_path, map_path, as_path, xd, yd, thd, ax):
    bs = 128
    map = read_map(map_path)[tf.newaxis]
    p0 = np.array([0.4, 0., 0., 0.], dtype=np.float32)[np.newaxis]
    pk = np.array([xd, yd, thd, 0.], dtype=np.float32)[np.newaxis]
    path = np.stack([p0, pk], axis=1)
    ddy0 = np.array([0.], dtype=np.float32)
    data = (map, path, ddy0)
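    # Shapes (inferred from the code): path is (1, 2, 4), stacking the start and
    # goal configurations [x, y, th, ddy]; ddy0 is (1,); map comes from read_map.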

    # 2. Define model
    model = PlanningNetworkMP(7, (bs, 6))

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(1e-4)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(".", "", 1, model, optimizer)
    experiment_handler.restore(model_path)

    output, last_ddy = model(data, None, training=True)
    model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, _, x_path, y_path, th_path = plan_loss(
        output, data, last_ddy)

    _plot(x_path, y_path, th_path, ax)

    ax.imshow(map[0, ..., 0], cmap='gray')
    if as_path is not None:
        uvc = np.loadtxt(as_path, delimiter="\t")
        u, v, c = np.split(uvc, 3, axis=-1)
        c = 181. * c  # rescaling constant kept from the original code
        return ax.scatter(u,
                          v,
                          c=c,
                          s=1.5 * np.ones_like(c),
                          zorder=3,
                          cmap='hot_r')
Example 4
def main(args):
    # 1. Get datasets
    train_ds = scenarios.planning(args.scenario_path)
    #val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.train.AdamOptimizer(args.eta)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name, args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    i = -2  # scenario index into train_ds

    a = 60
    x = np.linspace(-12.5, -3., a)
    b = 30
    y = np.linspace(-21.0, -16., b)
    c = 76
    #d = 4
    #th = np.linspace(-np.pi / d, np.pi / d,  c)
    th = np.linspace(-10 * np.pi / 180, 65 * np.pi / 180, c)
    X, Y, TH = np.meshgrid(x, y, th)
    x, y = np.meshgrid(x, y)
    X = X.flatten()
    Y = Y.flatten()
    TH = TH.flatten()
    p0 = np.stack([X, Y, TH], 1).astype(np.float32)
    n = a * b * c
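    # p0 enumerates all a * b * c = 136800 candidate start poses; the map is
    # tiled below to the same batch size so the whole grid of initial poses is
    # scored in a single forward pass.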

    map = train_ds[i][2].numpy()[np.newaxis]
    map = np.tile(map, (n, 1, 1, 1))

    for w in [17]:  # originally iterated over range(30)
        pk = train_ds[i][1][w].numpy()[np.newaxis]
        pk = np.tile(pk, (n, 1))

        p0a = train_ds[i][0][w].numpy()

        data = (p0, pk, map)
        # 5.2.1 Make inference of the model for validation and calculate losses
        #dummy_data = (p0[:1], pk[:1], map[:1])
        #output, last_ddy = model(dummy_data, None, training=True)
        start = time()
        output, last_ddy = model(data, None, training=True)
        end = time()
        print("TIME:", end - start)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
            output, last_ddy, data)
        #print(invalid_loss, curvature_loss)

        l = invalid_loss + curvature_loss + overshoot_loss
        gidx = np.argwhere(l.numpy() == 0)  # indices of fully feasible plans
        l = tf.reshape(l, (-1, c))          # group the c orientations per (x, y) cell
        color = tf.reduce_sum(tf.cast(tf.equal(l, 0.0), tf.float32), -1)  # feasible orientations per cell

        plt.fill([-100., 100., 100., -100., -100.], [-100., -100., 100., 100., -100.], 'brown', zorder=1)
        plt.xlim(-13.25, 8.25)
        plt.ylim(-22., 3.)

        m = map[0]
        seq = [(1, 3), (1, 0), (0, 3), (0, 0), (0, 1), (0, 2), (1, 1), (1, 2), (2, 1), (2, 2), (2, 3), (2, 0), (1, 3)]
        plt.fill([m[s][0] for s in seq], [m[s][1] for s in seq], 'w', zorder=2)

        plt.scatter(tf.reshape(x, [-1])[::-1], tf.reshape(y, [-1])[::-1], c=color[::-1], s=1.5*np.ones_like(color), zorder=3, cmap='hot_r')
        plt.colorbar()
        plt.arrow(pk[0, 0], pk[0, 1], np.cos(pk[0, 2]), np.sin(pk[0, 2]), width=0.1, zorder=10, color='r')
        plt.arrow(p0a[0], p0a[1], np.cos(p0a[2]), np.sin(p0a[2]), width=0.2, zorder=11, color='b')
        print(w, "P0:", p0a[0], p0a[1])
        print(w, "PK:", pk[0, 0], pk[0, 1])
        _plot(x_path, y_path, th_path, gidx)
        plt.show()
Example 5
def main(args):
    # 1. Get datasets
    ts = int(1e0)
    vs = int(3e1)
    train_size = int(args.batch_size * ts)
    val_size = int(args.batch_size * vs)

    d = 5
    train_ds = np.random.rand(ts, args.batch_size, d)
    val_ds = np.random.rand(vs, args.batch_size, d)
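    # Random inputs suffice here: the regression target is computed analytically
    # from each sample by poly_Z5 below, so no labeled dataset is required.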

    # 2. Define model
    model = None
    if args.model == "FC_G-inv":
        model = GroupInvariance(Z5, 64)
    elif args.model == "Conv1D_G-inv":
        model = GroupInvarianceConv(Z5, 116)
    elif args.model == "FC_G-avg":
        model = SimpleNet()
    elif args.model == "Conv1D_G-avg":
        model = Conv1d()
    elif args.model == "Maron":
        model = Maron()
    else:
        raise ValueError("Unknown model name: %s" % args.model)

    # 3. Optimization
    optimizer = tf.train.AdamOptimizer(args.eta)
    l2_reg = tf.keras.regularizers.l2(1e-5)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name, args.log_interval, model, optimizer)

    # 5. Run everything
    train_step, val_step = 0, 0
    best_accuracy = 1e10
    for epoch in range(args.num_epochs):
        # 5.1. Training Loop
        experiment_handler.log_training()
        acc = []
        for i in tqdm(range(ts), "Train"):
            # 5.1.1. Make inference of the model, calculate losses and record gradients
            with tf.GradientTape(persistent=True) as tape:
                pred = model(train_ds[i])
                L = 0.
                if isinstance(pred, tuple):  # some models also return an extra loss term L
                    pred, L = pred

                ## uncomment to check model size
                # nw = 0
                # for layer in model.layers:
                #     for l in layer.get_weights():
                #         a = 1
                #         for s in l.shape:
                #             a *= s
                #         nw += a
                # print(nw)

                y = poly_Z5(train_ds[i])
                model_loss = tf.keras.losses.mean_absolute_error(y[:, tf.newaxis], pred)
                # reg_loss is computed but, as in the original, never added to total_loss
                reg_loss = tfc.layers.apply_regularization(l2_reg, model.trainable_variables)
                total_loss = model_loss + L

            # 5.1.2 Take gradients (if necessary apply regularization like clipping),
            grads = tape.gradient(total_loss, model.trainable_variables)

            optimizer.apply_gradients(zip(grads, model.trainable_variables),
                                      global_step=tf.train.get_or_create_global_step())

            acc = acc + list(model_loss.numpy())

            # 5.1.4 Save logs for particular interval
            with tfc.summary.record_summaries_every_n_global_steps(args.log_interval, train_step):
                tfc.summary.scalar('metrics/model_loss', model_loss, step=train_step)

            # 5.1.5 Update meta variables
            train_step += 1

        # 5.1.6 Take statistics over epoch
        with tfc.summary.always_record_summaries():
            tfc.summary.scalar('epoch/accuracy', np.mean(acc), step=epoch)

        with open(args.working_path + "/" + args.out_name + "/" + model.name + ".csv", 'a') as fh:
            fh.write("TRAIN, %d, %.6f\n" % (epoch, np.mean(acc)))


        # 5.2. Validation Loop
        experiment_handler.log_validation()
        acc = []
        for i in tqdm(range(vs), "Val"):
            # 5.2.1 Make inference of the model for validation and calculate losses
            pred = model(val_ds[i])
            if isinstance(pred, tuple):
                pred, L = pred

            y = poly_Z5(val_ds[i])
            model_loss = tf.keras.losses.mean_absolute_error(y[:, tf.newaxis], pred)

            acc = acc + list(model_loss.numpy())

            # 5.2.3 Print logs for particular interval
            with tfc.summary.record_summaries_every_n_global_steps(args.log_interval, val_step):
                tfc.summary.scalar('metrics/model_loss', model_loss, step=val_step)

            # 5.2.4 Update meta variables
            val_step += 1

        epoch_accuracy = np.mean(acc)
        # 5.2.5 Take statistics over epoch
        with tfc.summary.always_record_summaries():
            tfc.summary.scalar('epoch/accuracy', epoch_accuracy, step=epoch)

        with open(args.working_path + "/" + args.out_name + "/" + model.name + ".csv", 'a') as fh:
            fh.write("VAL, %d, %.6f\n" % (epoch, epoch_accuracy))


        # 5.3 Save last and best
        if epoch_accuracy < best_accuracy:
            model.save_weights(args.working_path + "/" + args.out_name + "/checkpoints/best-" + str(epoch))
            best_accuracy = epoch_accuracy

        experiment_handler.flush()
Example 6
def main(args):
    # 1. Get datasets
    #train_ds = scenarios.planning(args.scenario_path)
    val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.train.AdamOptimizer(args.eta)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    i = -2

    a = 60
    x = np.linspace(-32.0, -20., a)
    b = 40
    y = np.linspace(5.0, 0.5, b)
    c = 91
    d = 4
    th = np.linspace(-np.pi / d, np.pi / d, c)
    X, Y, TH = np.meshgrid(x, y, th)
    x, y = np.meshgrid(x, y)
    X = X.flatten()
    Y = Y.flatten()
    TH = TH.flatten()
    p0 = np.stack([X, Y, TH], 1).astype(np.float32)
    n = a * b * c

    r = 0.1
    w = 2.7
    map = val_ds[0][2].numpy()[np.newaxis]
    map = np.tile(map, (n, 1, 1, 1))
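    # Rebuild the validation map into a corridor scenario: the middle quad
    # (index 1) becomes a block of height w whose lower edge sits at y = rng,
    # while quads 0 and 2 span y in [0, 5.5]; the x coordinates are aligned
    # below. Layout inferred from the indices.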
    rng = 0.8 + 2 * r
    map[:, 1, 0, 1] = rng
    map[:, 1, 1, 1] = rng
    map[:, 1, 2, 1] = rng + w
    map[:, 1, 3, 1] = rng + w
    map[:, 0, :2, 1] = 0.
    map[:, 0, 2:, 1] = 5.5
    map[:, 2, :2, 1] = 0.
    map[:, 2, 2:, 1] = 5.5

    map[:, 2, 1, 0] = map[:, 2, 2, 0]
    map[:, 1, 0, 0] = map[:, 2, 1, 0]
    map[:, 1, 3, 0] = map[:, 2, 1, 0]

    map[:, 0, 0, 0] = map[:, 0, 3, 0]
    map[:, 1, 1, 0] = map[:, 0, 0, 0]
    map[:, 1, 2, 0] = map[:, 0, 0, 0]

    for w in [17]:  # originally range(30); note: shadows the corridor width w = 2.7 above (already consumed)
        pk = np.array([[-3., 1.5, 0.]], dtype=np.float32)
        pk = np.tile(pk, (n, 1))

        data = (p0, pk, map)
        # 5.2.1 Make inference of the model for validation and calculate losses
        pp0 = np.array([[-30., 1.5, 0.]], dtype=np.float32)
        ppk = np.array([[0., 1.5, 0.]], dtype=np.float32)
        dummy_data = (pp0, ppk, map[:1])
        output, last_ddy = model(dummy_data, None, training=True)
        _, _, _, _, _, px_path, py_path, pth_path = plan_loss(
            output, last_ddy, dummy_data)
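        # The dummy pass above warms the model up before the timed run and also
        # yields a reference path (px_path, py_path, pth_path) plotted at the end.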
        start = time()
        output, last_ddy = model(data, None, training=True)
        end = time()
        print("TIME:", end - start)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
            output, last_ddy, data)
        #print(invalid_loss, curvature_loss)

        l = invalid_loss + curvature_loss + overshoot_loss
        gidx = np.argwhere(l.numpy() == 0)  # indices of fully feasible plans
        l = tf.reshape(l, (-1, c))          # group the c orientations per (x, y) cell
        color = tf.reduce_sum(tf.cast(tf.equal(l, 0.0), tf.float32), -1)  # feasible orientations per cell

        plt.figure(num=None,
                   figsize=(9, 2),
                   dpi=300,
                   facecolor='w',
                   edgecolor='k')
        plt.fill([-100., 100., 100., -100., -100.],
                 [-100., -100., 100., 100., -100.],
                 'brown',
                 zorder=1)
        plt.xlim(-33., 4.5)
        plt.ylim(-0.25, 5.75)

        m = map[0]
        seq = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 2), (2, 3),
               (2, 0), (2, 1), (1, 0), (1, 1), (0, 0)]
        plt.fill([m[s][0] for s in seq], [m[s][1] for s in seq], 'w', zorder=2)

        plt.scatter(tf.reshape(x, [-1])[::-1],
                    tf.reshape(y, [-1])[::-1],
                    c=color[::-1],
                    s=1.5 * np.ones_like(color),
                    zorder=4,
                    cmap='hot_r')
        plt.colorbar(orientation="horizontal")
        print(w, "PK:", pk[0, 0], pk[0, 1])
        _plot(px_path, py_path, pth_path)
        plt.savefig("2.pdf")
Example 7
def main(args):
    # 1. Get datasets
    train_ds, train_size = scenarios.planning_dataset(args.scenario_path)
    val_ds, val_size = scenarios.planning_dataset(
        args.scenario_path.replace("train", "val"))

    val_ds = val_ds \
        .batch(args.batch_size) \
        .prefetch(args.batch_size)

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))  # N = 6
    # The commented variants below are identical to the line above; presumably
    # only the trained checkpoint changes between N values.
    #model = PlanningNetworkMP(7, (args.batch_size, 6)) # N = 4
    #model = PlanningNetworkMP(7, (args.batch_size, 6)) # N = 2

    # 3. Optimization

    optimizer = tf.keras.optimizers.Adam(args.eta)
    l2_reg = tf.keras.regularizers.l2(1e-5)  # defined but unused in this script

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    # 5. Run everything
    train_step, val_step = 0, 0
    best_accuracy = 0.0
    for epoch in range(args.num_epochs):
        # workaround for tf problems with shuffling
        dataset_epoch = train_ds.shuffle(train_size)
        dataset_epoch = dataset_epoch.batch(args.batch_size).prefetch(
            args.batch_size)

        # 5.1. Training Loop
        experiment_handler.log_training()
        acc = []
        for i, data in _ds('Train', dataset_epoch, train_size, epoch,
                           args.batch_size):
            # 5.1.1. Make inference of the model, calculate losses and record gradients
            with tf.GradientTape(persistent=True) as tape:
                output, last_ddy = model(data, None, training=True)
                model_loss, invalid_loss, overshoot_loss, curvature_loss, total_curvature_loss, _, x_path, y_path, th_path = plan_loss(
                    output, data, last_ddy)
            grads = tape.gradient(model_loss, model.trainable_variables)

            optimizer.apply_gradients(zip(grads, model.trainable_variables))

            # 5.1.3 Calculate statistics
            t = tf.reduce_mean(tf.cast(tf.equal(invalid_loss, 0.0),
                                       tf.float32))
            s = tf.reduce_mean(
                tf.cast(tf.equal(invalid_loss + curvature_loss, 0.0),
                        tf.float32))
            u = tf.reduce_mean(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))
            acc.append(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))
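            # t: collision-free ("good_paths"); s: also within the curvature
            # limit ("really_good_paths"); u: additionally free of overshoot
            # ("ideal_paths") - names match the summary scalars logged below.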

            # 5.1.4 Save logs for particular interval
            with tf.summary.record_if(train_step % args.log_interval == 0):
                tf.summary.scalar('metrics/model_loss',
                                  tf.reduce_mean(model_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/invalid_loss',
                                  tf.reduce_mean(invalid_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/overshoot_loss',
                                  tf.reduce_mean(overshoot_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/curvature_loss',
                                  tf.reduce_mean(curvature_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/total_curvature_loss',
                                  tf.reduce_mean(total_curvature_loss),
                                  step=train_step)
                tf.summary.scalar('metrics/good_paths', t, step=train_step)
                tf.summary.scalar('metrics/really_good_paths',
                                  s,
                                  step=train_step)
                tf.summary.scalar('metrics/ideal_paths', u, step=train_step)

            # 5.1.5 Update meta variables
            train_step += 1
            if train_step % 100 == 0:
                _plot(x_path, y_path, th_path, data, train_step)
        epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))

        # 5.1.6 Take statistics over epoch
        with tf.summary.record_if(True):
            tf.summary.scalar('epoch/good_paths', epoch_accuracy, step=epoch)

        # 5.2. Validation Loop
        experiment_handler.log_validation()
        acc = []
        for i, data in _ds('Validation', val_ds, val_size, epoch,
                           args.batch_size):
            # 5.2.1 Make inference of the model for validation and calculate losses
            output, last_ddy = model(data, None, training=True)
            model_loss, invalid_loss, overshoot_loss, curvature_loss, total_curvature_loss, _, x_path, y_path, th_path = plan_loss(
                output, data, last_ddy)

            t = tf.reduce_mean(tf.cast(tf.equal(invalid_loss, 0.0),
                                       tf.float32))
            s = tf.reduce_mean(
                tf.cast(tf.equal(invalid_loss + curvature_loss, 0.0),
                        tf.float32))
            u = tf.reduce_mean(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))
            acc.append(
                tf.cast(
                    tf.equal(invalid_loss + curvature_loss + overshoot_loss,
                             0.0), tf.float32))

            # 5.2.3 Print logs for particular interval
            with tf.summary.record_if(val_step % args.log_interval == 0):
                tf.summary.scalar('metrics/model_loss',
                                  tf.reduce_mean(model_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/invalid_loss',
                                  tf.reduce_mean(invalid_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/overshoot_loss',
                                  tf.reduce_mean(overshoot_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/curvature_loss',
                                  tf.reduce_mean(curvature_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/total_curvature_loss',
                                  tf.reduce_mean(total_curvature_loss),
                                  step=val_step)
                tf.summary.scalar('metrics/good_paths', t, step=val_step)
                tf.summary.scalar('metrics/really_good_paths',
                                  s,
                                  step=val_step)
                tf.summary.scalar('metrics/ideal_paths', u, step=val_step)

            # 5.2.4 Update meta variables
            val_step += 1

        epoch_accuracy = tf.reduce_mean(tf.concat(acc, -1))

        # 5.2.5 Take statistics over epoch
        with tf.summary.record_if(True):
            tf.summary.scalar('epoch/good_paths', epoch_accuracy, step=epoch)

        # 5.3 Save last and best
        if epoch_accuracy > best_accuracy:
            experiment_handler.save_best()
            best_accuracy = epoch_accuracy
        #experiment_handler.save_last()

        experiment_handler.flush()
Example 8
def main(args):
    # 1. Get datasets
    train_ds, train_size = scenarios.quadrangle_area_dataset(
        args.scenario_path)
    val_ds, val_size = scenarios.quadrangle_area_dataset(
        args.scenario_path.replace("train", "val"))

    val_bs = args.batch_size
    val_ds = val_ds \
        .batch(val_bs) \
        .prefetch(val_bs)

    # 2. Define model

    model = None
    if args.model == "FC_G-inv":
        model = GroupInvariance(Z4, args.n)
    elif args.model == "Conv1D_G-inv":
        model = GroupInvarianceConv(Z4, args.n)
    elif args.model == "FC_G-avg":
        model = SimpleNet()
    elif args.model == "Conv1D_G-avg":
        model = Conv1d()
    elif args.model == "Maron":
        model = Maron()
    else:
        raise ValueError("Unknown model name: %s" % args.model)

    # 3. Optimization
    optimizer = tf.train.AdamOptimizer(args.eta)
    l2_reg = tf.keras.regularizers.l2(1e-5)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    # 5. Run everything
    train_step, val_step = 0, 0
    best_accuracy = 1e10
    for epoch in range(args.num_epochs):
        # workaround for tf problems with shuffling
        dataset_epoch = train_ds.shuffle(train_size)
        dataset_epoch = dataset_epoch.batch(args.batch_size).prefetch(
            args.batch_size)

        # 5.1. Training Loop
        experiment_handler.log_training()
        acc = []
        for i, quad, area, in _ds('Train', dataset_epoch, train_size, epoch,
                                  args.batch_size):
            # 5.1.1. Make inference of the model, calculate losses and record gradients
            with tf.GradientTape(persistent=True) as tape:
                L = 0.0
                pred = model(quad)
                if isinstance(pred, tuple):  # some models also return an extra loss term L
                    pred, L = pred

                ## uncomment to check model size
                # nw = 0
                # for layer in model.layers:
                #    for l in layer.get_weights():
                #        a = 1
                #        for s in l.shape:
                #            a *= s
                #        nw += a
                # print(nw)

                model_loss = tf.keras.losses.mean_absolute_error(
                    area[:, tf.newaxis], pred)
                # reg_loss is computed but, as in the original, never added to total_loss
                reg_loss = tfc.layers.apply_regularization(
                    l2_reg, model.trainable_variables)

                total_loss = model_loss + L

            # 5.1.2 Take gradients (if necessary apply regularization like clipping),
            grads = tape.gradient(total_loss, model.trainable_variables)

            optimizer.apply_gradients(
                zip(grads, model.trainable_variables),
                global_step=tf.train.get_or_create_global_step())

            acc = acc + list(model_loss.numpy())

            # 5.1.4 Save logs for particular interval
            with tfc.summary.record_summaries_every_n_global_steps(
                    args.log_interval, train_step):
                tfc.summary.scalar('metrics/model_loss',
                                   model_loss,
                                   step=train_step)

            # 5.1.5 Update meta variables
            train_step += 1

        # 5.1.6 Take statistics over epoch
        with tfc.summary.always_record_summaries():
            tfc.summary.scalar('epoch/accuracy', np.mean(acc), step=epoch)

        with open(
                args.working_path + "/" + args.out_name + "/" + model.name +
                ".csv", 'a') as fh:
            fh.write("TRAIN, %d, %.6f\n" % (epoch, np.mean(acc)))

        # 5.2. Validation Loop
        accuracy = tfc.eager.metrics.Accuracy('metrics/accuracy')  # created but never used
        experiment_handler.log_validation()
        acc = []
        for i, quad, area in _ds('Validation', val_ds, val_size, epoch,
                                 val_bs):
            # 5.2.1 Make inference of the model for validation and calculate losses
            L = 0.0
            pred = model(quad)
            if isinstance(pred, tuple):
                pred, L = pred

            model_loss = tf.keras.losses.mean_absolute_error(
                area[:, tf.newaxis], pred)

            acc = acc + list(model_loss.numpy())

            # 5.2.3 Print logs for particular interval
            with tfc.summary.record_summaries_every_n_global_steps(
                    args.log_interval, val_step):
                tfc.summary.scalar('metrics/model_loss',
                                   model_loss,
                                   step=val_step)

            val_step += 1

        epoch_accuracy = np.mean(acc)
        # 5.2.5 Take statistics over epoch
        with tfc.summary.always_record_summaries():
            tfc.summary.scalar('epoch/accuracy', epoch_accuracy, step=epoch)

        with open(
                args.working_path + "/" + args.out_name + "/" + model.name +
                ".csv", 'a') as fh:
            fh.write("VAL, %d, %.6f\n" % (epoch, epoch_accuracy))

        # 5.3 Save last and best
        if epoch_accuracy < best_accuracy:
            model.save_weights(args.working_path + "/" + args.out_name +
                               "/checkpoints/best-" + str(epoch))
            best_accuracy = epoch_accuracy
        model.save_weights(args.working_path + "/" + args.out_name +
                           "/checkpoints/last_n-" + str(epoch))

        experiment_handler.flush()
Example 9
def main():
    # 1. Get datasets
    ds = scenarios.planning_test("../data/test")

    # 2. Define model
    model = PlanningNetworkMP(7, (1, 6))

    # 3. Optimization

    # The optimizer is only needed to restore the checkpoint; its learning rate is unused here.
    optimizer = tf.train.AdamOptimizer(1)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(".", "", 1, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    acc = []
    t = []
    t_sl = []
    t_rrt = []
    l = []
    l_rrt = []
    for i in range(len(ds)):  # iterate over all test scenarios
        p0, pk, map, rrt, sl = ds[i]
        bs = p0.shape[0]
        # Evaluate one start/goal pair at a time (batch size 1)
        for j in range(bs):
            data = (p0[tf.newaxis, j], pk[tf.newaxis, j], map[tf.newaxis])
            # 5.2.1 Make inference of the model for validation and calculate losses
            start = time()
            output, last_ddy = model(data, None, training=True)
            end = time()
            rt = end - start
            model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path, length = plan_loss(
                output, last_ddy, data)

            # Path length relative to the SL baseline; the 1.5 factor on the RRT
            # length is a constant kept from the original code.
            dl = (tf.reduce_sum(length, -1) / sl[j, -1])[0]
            dl_rrt = (1.5 * rrt[j, -1] / sl[j, -1])
            _plot(x_path, y_path, th_path, data, i)

            l.append(dl)
            l_rrt.append(dl_rrt)
            t.append(rt)
            t_sl.append(sl[j, 0])
            t_rrt.append(rrt[j, 0])
            acc.append(tf.cast(tf.equal(invalid_loss + curvature_loss + overshoot_loss, 0.0), tf.float32)[0])

    # Non-positive entries apparently mark failed runs of the respective planner;
    # the baseline accuracies are the fraction of positive entries, and failed
    # runs are filtered out before computing the timing and length statistics.
    t = np.array(t)
    t = np.extract(t > 0, t)
    t_sl = np.array(t_sl)
    acc_sl = np.mean((t_sl > 0).astype(np.float32))
    t_sl = np.extract(t_sl > 0, t_sl)
    t_rrt = np.array(t_rrt)
    acc_rrt = np.mean((t_rrt > 0).astype(np.float32))
    t_rrt = np.extract(t_rrt > 0, t_rrt)
    l = np.array(l)
    l = np.extract(l > 0, l)
    l_rrt = np.array(l_rrt)
    l_rrt = np.extract(l_rrt > 0, l_rrt)
    epoch_accuracy = tf.reduce_mean(tf.stack(acc, -1))
    print("ACC:", epoch_accuracy)
    print("T:", np.mean(t[1:]), np.std(t[1:]))
    print("T_RRT:", np.mean(t_rrt[1:]), np.std(t_rrt[1:]))
    print("T_SL:", np.mean(t_sl[1:]), np.std(t_sl[1:]))
    print("L:", np.mean(l), np.std(l))
    print("L_RRT:", np.mean(l_rrt), np.std(l_rrt))
    print("ACC_RRT:", acc_rrt)
    print("ACC_SL:", acc_sl)
Example 10
def main(args):
    # 1. Get datasets
    #train_ds = scenarios.planning(args.scenario_path)
    val_ds = scenarios.planning(args.scenario_path.replace("train", "val"))

    # 2. Define model
    model = PlanningNetworkMP(7, (args.batch_size, 6))

    # 3. Optimization

    optimizer = tf.train.AdamOptimizer(args.eta)

    # 4. Restore, Log & Save
    experiment_handler = ExperimentHandler(args.working_path, args.out_name,
                                           args.log_interval, model, optimizer)

    experiment_handler.restore("./monster/last_mix/checkpoints/best-7274")

    # 5.2. Validation Loop
    #for i in range(len(val_ds)):
    #q1 = [[-32., -2.], [-19., -2.], [-19., 5.], [-32., 5.]]
    #q2 = [[-10., -2.], [4., -2.], [4., 5.], [-10., 5.]]
    #y = 0.
    #w = 3.0
    #q3 = [[-19., y], [-10., y], [-10., y+w], [-19., y+w]]
    #map = np.array([q1, q2, q3], dtype=np.float32)[np.newaxis]
    n = 20
    r = 0.1
    y = 0.3
    w = 2.7
    map = val_ds[0][2].numpy()[np.newaxis]
    map = np.tile(map, (n + 1, 1, 1, 1))
    rng = np.linspace(y, y + r * n, n + 1)
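    # Sweep n + 1 = 21 vertical positions of the middle obstacle in steps of r,
    # producing one map per position; start and goal poses are tiled to match.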
    map[:, 1, 0, 1] = rng
    map[:, 1, 1, 1] = rng
    map[:, 1, 2, 1] = rng + w
    map[:, 1, 3, 1] = rng + w
    map[:, 0, :2, 1] = 0.
    map[:, 0, 2:, 1] = 5.5
    map[:, 2, :2, 1] = 0.
    map[:, 2, 2:, 1] = 5.5

    map[:, 2, 1, 0] = map[:, 2, 2, 0]
    map[:, 1, 0, 0] = map[:, 2, 1, 0]
    map[:, 1, 3, 0] = map[:, 2, 1, 0]

    map[:, 0, 0, 0] = map[:, 0, 3, 0]
    map[:, 1, 1, 0] = map[:, 0, 0, 0]
    map[:, 1, 2, 0] = map[:, 0, 0, 0]
    p0 = np.array([[-27., 1.5, 0.]], dtype=np.float32)
    p0 = np.tile(p0, (n + 1, 1))
    pk = np.array([[-3., 1.5, 0.]], dtype=np.float32)
    pk = np.tile(pk, (n + 1, 1))
    for i in [1]:  # single iteration; loop structure kept from the original
        data = (p0, pk, map)
        # 5.2.1 Make inference of the model for validation and calculate losses
        output, last_ddy = model(data, None, training=True)
        model_loss, invalid_loss, overshoot_loss, curvature_loss, non_balanced_loss, x_path, y_path, th_path = plan_loss(
            output, last_ddy, data)
        print(invalid_loss, curvature_loss)

        _plot(x_path, y_path, th_path, data, 0, False)