Example #1
    def eef_velocity_1dof(self):
        plt.figure(figsize=(10, 6))
        load.init_load("pendulum_time.csv", force=True)
        _, labels = load.training_set()

        dof = 1
        state_f = labels[:, 2 * dof:]
        n = 1000
        knn = NearestNeighbors(n_neighbors=n).fit(state_f[:, :2 * dof])
        d, v_idx = knn.kneighbors([[0, 0]])

        idx = v_idx.reshape(-1)[np.flatnonzero(d.reshape(-1) < 0.15)]
        # idx= v_idx

        nn = state_f[idx.reshape(-1), :]

        plt.subplot(1, 2, 1)
        x1 = np.sin(nn[:, 0])  # + np.cos(nn[:, 1])
        y1 = 1.0 * (np.cos(nn[:, 0]))  # + np.sin(nn[:, 1]))
        omega = np.linalg.norm(nn[:, dof:], axis=1)
        plt.scatter(x1, y1, s=1, alpha=0.5, c=omega)
        plt.colorbar().set_label('Norm of omega')
        plt.axis('equal')
        plt.title('Neighbors within 0.15 to goal state')
        plt.xlabel("x")
        plt.ylabel("y")

        plt.subplot(1, 2, 2)
        plt.hist(d.reshape(-1), bins='auto')
        plt.title('Euclidean distance to goal')
        plt.xlabel('d')

        return None
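
This and the following examples slice the loader's labels into an initial and a final state. The indexing assumes each row holds 2*dof initial-state columns followed by the final state, with each block ordered [theta_1..theta_dof, omega_1..omega_dof]. A small sketch making that split explicit; the layout is inferred from the slicing in these snippets, not from the loader itself, and the function name is introduced here for illustration:

    def split_states(labels, dof):
        # Assumed layout per row: 2*dof initial-state columns, then the final
        # state, each block ordered [theta_1..theta_dof, omega_1..omega_dof].
        state_i = labels[:, :2 * dof]
        state_f = labels[:, 2 * dof:]
        return state_i, state_f

For the 1-dof pendulum data this yields two columns per block, (theta, omega).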
Example #2
    def phase_scatter(self):

        load.init_load()
        data, labels = load.training_set()

        k = 10000
        idx = np.random.randint(0, labels.shape[0], k)

        dof = self.cfg.simulation.dof
        state_i = labels[idx, :2 * dof]
        state_f = labels[idx, 2 * dof:]
        # Assumes dof == 2, so the last two final-state columns are the joint velocities
        omega_norm = np.linalg.norm(state_f[:, 2:], axis=1)

        # Plot first link
        plt.subplot(1, 2, 1)
        plt.scatter(state_f[:, 0], state_f[:, 2], s=1, alpha=0.5, c=omega_norm)
        plt.axis("equal")
        plt.xlabel("theta1_f")
        plt.ylabel("omega1_f")

        # Plot second link
        plt.subplot(1, 2, 2)
        plt.scatter(state_f[:, 1], state_f[:, 3], s=1, alpha=0.5, c=omega_norm)
        plt.axis("equal")
        plt.colorbar().set_label("Norm of final velocity")
        plt.xlabel("theta2_f")
        plt.ylabel("omega2_f")

        plt.show()
        return None
Example #3
    def eef_velocity_2dof(self):
        plt.figure(figsize=(10, 6))
        load.init_load("2dof_time.csv", force=True)
        _, labels = load.training_set()
        n = 8000
        dof = 2
        state_f = labels[:, 2 * dof:]
        knn = NearestNeighbors(n_neighbors=n).fit(state_f)
        d, v_idx = knn.kneighbors([[0.25 * np.pi, 0 * np.pi, 0.0, 0.0]])

        # idx = v_idx
        idx = v_idx.reshape(-1)[np.flatnonzero(d.reshape(-1) < 0.30)]

        nn = state_f[idx.reshape(-1), :]
        plt.subplot(1, 2, 1)
        x1 = np.cos(nn[:, 0]) + np.cos(nn[:, 1])
        y1 = 1.0 * (np.sin(nn[:, 0]) + np.sin(nn[:, 1]))
        omega = np.linalg.norm(nn[:, dof:], axis=1)
        plt.scatter(x1, y1, s=1, alpha=0.5, c=omega)
        plt.colorbar().set_label('Norm of omega')
        plt.axis('equal')
        plt.title('{} nearest neighbors to goal state'.format(idx.shape[0]))
        plt.xlabel("x")
        plt.ylabel("y")

        plt.subplot(1, 2, 2)
        plt.hist(d.reshape(-1), bins='auto')
        plt.title('Euclidean distance to goal')
        plt.xlabel('d')

        plt.show()
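
The cos/sin sums used for x1 and y1 above are the end-effector forward kinematics of a planar two-link arm with unit link lengths and absolute joint angles. A minimal sketch of that mapping; the helper name is introduced here for illustration and is not part of the source:

    import numpy as np

    def eef_position_2dof(theta1, theta2):
        # Planar two-link arm, unit link lengths, absolute joint angles:
        # the end effector is the vector sum of the two unit link directions.
        x = np.cos(theta1) + np.cos(theta2)
        y = np.sin(theta1) + np.sin(theta2)
        return x, y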
Example #4
    def torque_switch_factor(self):
        from data.loader import loader as load
        print("Calculating torque switching factor")

        data_set, labels_set = load.training_set()
        n = 10000  # labels_set.shape[0]
        idx = np.random.randint(0, labels_set.shape[0], n)
        labels = labels_set[idx, :]

        states = np.split(labels, 2, axis=1)
        costates = data_set[idx, :]

        switches = 0

        if self.sim.dof == 2:
            for i in tqdm(range(n)):
                mu0 = costates[i, -2:]
                _, _, _, mu1, _ = self.sim.simulate_steer_full(
                    states[0][i, :], costates[i, :])
                if not np.any(np.sign(mu0) + np.sign(mu1)):
                    switches += 1

        # Note: this could be done more simply by checking whether the sign
        # differs between mu0 and mu1, as sketched after this example.
        if self.sim.dof == 1:
            for i in tqdm(range(n)):
                _, _, s = self.sim.simulate_steer_full(states[0][i, :],
                                                       costates[i, :])
                if int(np.abs(np.sign(s[:, 3]).sum())) != s[:, 3].shape[0]:
                    switches += 1

        print("Switches: {} \t Data points: {} ".format(switches, n))
        logger["model"]["switch_factor"] = switches / n
        return None
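
As noted in the comment above, the per-trajectory scan in the 1-dof branch could be replaced by comparing only the endpoint costate signs. A minimal sketch of that endpoint check, with a function name introduced here for illustration; note that it would miss trajectories with an even number of switches, which the trajectory-wide scan does catch:

    import numpy as np

    def endpoint_sign_switch(mu0, mu1):
        # True if any costate component has a different sign at the start (mu0)
        # than at the end (mu1) of the trajectory.
        return bool(np.any(np.sign(np.asarray(mu0)) != np.sign(np.asarray(mu1))))

    # A component flipping from positive to negative counts as a switch:
    print(endpoint_sign_switch([0.3, -1.2], [-0.1, -0.9]))  # True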
Example #5
    def eef_pos_2dof(self):
        """
        Display k=5000 configurations of the end effector
        """
        plt.figure(figsize=(10, 6))
        load.init_load("2dof_time.csv", force=True)
        _, labels = load.training_set()

        # idx = np.random.randint(0, labels.shape[0], k)
        idx_a = np.arange(labels.shape[0])
        np.random.shuffle(idx_a)
        k = 5000
        idx = idx_a[:k]

        dof = 2
        state_i = labels[idx, :2 * dof]
        state_f = labels[idx, 2 * dof:]

        plt.subplot(1, 2, 1)
        # Initial positions of the arm
        x1 = np.cos(state_i[:, 0]) + np.cos(state_i[:, 1])
        y1 = 1.0 * (np.sin(state_i[:, 0]) + np.sin(state_i[:, 1]))
        omega = np.linalg.norm(state_i[:, 2:], axis=1)
        c = np.linalg.norm(np.tile([-0.25 * np.pi, 0, 0, 0],
                                   (state_i.shape[0], 1)) - state_i,
                           axis=1)
        plt.scatter(x1, y1, s=2, alpha=0.8, c=c)
        plt.colorbar().set_label('Euclidean distance to start pos')
        plt.axis('equal')
        plt.title('Initial position end effector')

        # Start position
        x, y = pol2cart(1, -0.25 * np.pi)
        plt.plot([0, x, x + 1], [0, y, y], '-r', linewidth=2)

        # Final positions of the arm
        plt.subplot(1, 2, 2)
        x2 = np.cos(state_f[:, 0]) + np.cos(state_f[:, 1])
        y2 = 1.0 * (np.sin(state_f[:, 0]) + np.sin(state_f[:, 1]))
        omega = np.linalg.norm(state_f[:, 2:], axis=1)
        c = np.linalg.norm(np.tile([0.25 * np.pi, 0, 0, 0],
                                   (state_f.shape[0], 1)) - state_f,
                           axis=1)
        plt.scatter(x2, y2, s=2, alpha=0.8, c=c)
        plt.colorbar().set_label('Euclidean distance to goal')
        plt.axis('equal')
        plt.title('Final position end effector')

        # up up position
        x, y = pol2cart(1, 0.25 * np.pi)
        plt.plot([0, x, x + 1], [0, y, y], '-r', linewidth=2)

        return None
Example #6
    def eef_pos_1dof(self):
        """
        Display k=10000 configurations of the end effector
        """
        plt.figure(figsize=(10, 6))
        load.init_load("pendulum_time.csv", force=True)
        _, labels = load.training_set()

        k = 10000
        idx = np.random.randint(0, labels.shape[0], k)

        state_i = labels[idx, :2]
        state_f = labels[idx, 2:]

        plt.subplot(1, 2, 1)
        # Initial positions of the arm
        x1, y1 = pol2cart(1, state_i[:, 0])
        omega = state_i[:, 1]
        plt.scatter(x1, y1, s=2, alpha=0.8, c=omega)
        plt.colorbar().set_label('Norm of omega')
        plt.axis('equal')
        plt.title('Initial position end effector')

        # Down position of the pendulum
        x, y = pol2cart(1, -0.5 * np.pi)
        plt.plot([0, x], [0, y], '-k', linewidth=2)

        # Final positions of the arm
        plt.subplot(1, 2, 2)
        x2, y2 = pol2cart(1, state_f[:, 0])
        omega = state_f[:, 1]
        plt.scatter(x2, y2, s=2, alpha=0.8, c=omega)
        plt.colorbar().set_label('Norm of omega')
        plt.axis('equal')
        plt.title('Final position end effector')

        # Up position of the pendulum
        x, y = pol2cart(1, 0.5 * np.pi)
        plt.plot([0, x], [0, y], '-k', linewidth=2)
        return None
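
Examples #5 and #6 call a pol2cart helper that is not shown in these snippets. A minimal sketch that is consistent with how it is used here (radius and angle in radians mapped to Cartesian x, y); this is an assumption, not the project's actual implementation:

    import numpy as np

    def pol2cart(rho, phi):
        # Polar (radius rho, angle phi in radians) to Cartesian (x, y);
        # works element-wise on NumPy arrays as well as on scalars.
        x = rho * np.cos(phi)
        y = rho * np.sin(phi)
        return x, y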
Example #7
        self._saver.restore(self._sess, path)
        shutil.rmtree(self.logdir)

    def close_session(self):
        self._sess.close()


if __name__ == "__main__":
    from data.loader import loader

    gan = CLSGAN(6, 8, 32, {"g": [256, 256], "d": [256, 256]})

    loader.init_load("2dof_time.csv")
    batch_size = 100
    train_dataset = tf.data.Dataset.from_tensor_slices(
        loader.training_set()).repeat().batch(batch_size)
    iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
    train_init_op = iterator.make_initializer(train_dataset)
    with gan.get_session() as sess:
        sess.run(train_init_op)
        training_data = iterator.get_next()
        for it in range(30000):
            X_mb, y_mb = sess.run(training_data)
            Z_mb = gan.sample_z(batch_size)
            # One discriminator update per generator update
            for _ in range(1):
                D_loss = gan.train_discriminator(Z_mb, X_mb, y_mb)

            # X_mb, y_mb = load.next_batch(batch_size)
            G_loss = gan.train_generator(Z_mb, y_mb)
            # gan.log(Z_mb, X_mb, y_mb, it)
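
Both this example and Example #8 draw latent noise with sample_z, which is not shown here. A minimal sketch under the assumption of a uniform latent prior; the default latent dimension of 32 merely mirrors the third CLSGAN constructor argument and is itself an assumption:

    import numpy as np

    def sample_z(batch_size, z_dim=32):
        # Latent noise for the generator; uniform in [-1, 1] is a common GAN
        # prior, though CLSGAN may well use a different distribution.
        return np.random.uniform(-1.0, 1.0,
                                 size=(batch_size, z_dim)).astype(np.float32)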
Example #8
    def train(self,
              data=None,
              labels=None,
              data_test=None,
              labels_test=None,
              epochs=10000,
              batch_size=32,
              d_steps=3,
              verbose=1):

        if data is not None:
            load.set_training_data(data)
        if labels is not None:
            load.set_training_labels(labels)

        print("Training CLSGAN")

        # Push training data into tf.data
        sess = self.network.get_session()
        train_dataset = tf.data.Dataset.from_tensor_slices(
            load.training_set()).repeat().batch(batch_size)
        iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                                   train_dataset.output_shapes)
        train_init_op = iterator.make_initializer(train_dataset)
        sess.run(train_init_op)
        training_data = iterator.get_next()

        # Start actual training loop
        start = timer()
        loss = np.zeros((epochs, 2))
        for it in tqdm(range(epochs)):
            Z_mb = self.network.sample_z(batch_size)
            for _ in range(d_steps):
                X_mb, y_mb = sess.run(training_data)
                loss[it, 0] = self.network.train_discriminator(
                    Z_mb, X_mb, y_mb)

            # X_mb, y_mb = sess.run(training_data)
            loss[it, 1] = self.network.train_generator(Z_mb, y_mb)
            self.network.log(Z_mb, X_mb, y_mb, it)

            # Periodic plotting is disabled; change the -1 to 0 to enable it
            if it % 1000 == -1:
                n = 1000
                samples = self.network.predict(y_mb)

                n_dof = int(y_mb.shape[1] / 4)

                if n_dof == 1:
                    self.plot_unit_circle(X_mb, samples)

                plt.figure(figsize=(10, 7))
                for dof in range(1, n_dof * 2 + 1):
                    self.plot_hist_costates(dof, n_dof, X_mb, samples)
                plt.savefig('./tmp/out/{}_costates.png'.format(
                    str(0).zfill(3)),
                            bbox_inches='tight')

                norm = np.linalg.norm(samples[:, :-2], axis=1)
                plt.figure()
                plt.hist(norm, bins='auto')
                plt.title("Hypersphere proximity of costates")
                plt.xlabel('Norm of points')
                plt.savefig('./tmp/out/{}_hypersphere.png'.format(
                    str(0).zfill(3)),
                            bbox_inches='tight')
                plt.close('all')

        # End training

        elapsed = timer() - start
        print("Training time for {} epochs: {} seconds".format(
            epochs, elapsed))

        self.plot_training_loss(loss, epochs)
        return None
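
The final call, self.plot_training_loss(loss, epochs), is not shown in these snippets. A minimal sketch of such a plot, assuming loss[:, 0] holds the discriminator loss and loss[:, 1] the generator loss as filled in the loop above; the figure styling is an assumption:

    import matplotlib.pyplot as plt

    def plot_training_loss(loss, epochs):
        # loss has shape (epochs, 2): column 0 = discriminator, column 1 = generator
        plt.figure(figsize=(10, 6))
        plt.plot(range(epochs), loss[:, 0], label='D loss')
        plt.plot(range(epochs), loss[:, 1], label='G loss')
        plt.xlabel('iteration')
        plt.ylabel('loss')
        plt.legend()
        plt.show()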
Example #9
    def cleaning(self):
        # Stub: loads the training set but does not apply any cleaning yet
        loader.init_load()
        train_data, train_labels = loader.training_set()

        pass