def test_jacobian(self):
        starting_pos = [0.0, 0.0]
        starting_theta = 0.0
        if self.use_autoencoder:
            q = self.encoder.predict(boxes.generate_samples(offsets=[starting_pos], thetas=[starting_theta]))[0]
        else:
            q = np.array([*starting_pos, starting_theta])

        print("Actual x: ", self.decode_q_to_x(q))

        # Check the analytic Jacobian against central finite differences for
        # every (output, input) pair: 8 corner coordinates x 3 generalized coords
        cum_error = 0.0
        for x_i in range(8):
            for q_i in range(3):
                def f_xi_qi(a):
                    # Vary only coordinate q_i, read off output coordinate x_i
                    new_q = q.copy()
                    new_q[q_i] = a
                    return self.decode_q_to_x(new_q)[x_i]

                numeric_d = numeric_derivative(f_xi_qi, q[q_i], dx=1e-6)
                actual_d = self.jac_x_wrt_q(q)[x_i][q_i]
                error = np.abs(numeric_d - actual_d)
                cum_error += error

                print("partial of x", x_i, " wrt q", q_i, sep = "")
                print("Numeric derivative:", numeric_d)
                print("Acutal derivative:", actual_d)
                print("Difference:", error)

                print()
        print("Cumulative error: ", cum_error)
    def __init__(self, use_autoencoder=False, model_path=None):
        self.use_autoencoder = use_autoencoder

        self.time_elapsed = 0.0
        # Gravity acting on each of the four box corners, as (x, y) pairs
        self.world_force = np.array([0.0, -9.8, 0.0, -9.8, 0.0, -9.8, 0.0, -9.8])
        # Extra upward force applied to the first corner only
        self.world_force += np.array([0.0, 40.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])

        starting_pos = [0.0, 0.0]
        starting_theta = 0.0

        self.dqdt2 = 0.0 # for reporting

        if use_autoencoder:
            self.autoencoder, self.encoder, self.decoder = load_autoencoder(model_path)
            # NOTE: jac_x_wrt_q is left disabled in this branch; only the
            # explicit decoder below gets a Jacobian assigned.
            #self.jac_x_wrt_q = jacobian_output_wrt_input(self.decoder)

            starting_box = boxes.generate_samples(offsets=[starting_pos], thetas=[starting_theta])
            gen_pos = self.encoder.predict(starting_box)[0]  # encoded starting position
        else:
            self.jac_x_wrt_q = jacobian(boxes.explicit_decode)
            gen_pos = np.array([*starting_pos, starting_theta])
        
        self.total_optim_its = 0
        self.total_optim_calls = 0

        gen_vel = np.array([0.0, 0.0, 0.0])
        self.state = np.array([*gen_pos, *gen_vel])
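# --- Sketch (assumed helper, not shown in this snippet): test_jacobian calls
# numeric_derivative(f, a, dx). A central-difference approximation with
# O(dx^2) truncation error would be:
def numeric_derivative(f, a, dx=1e-6):
    """Approximate df/da at a via central differences."""
    return (f(a + dx) - f(a - dx)) / (2.0 * dx)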
def animate(autoencoder, encoder, decoder):
    from mpl_toolkits.mplot3d import Axes3D
    from matplotlib import animation

    fig = plt.figure(figsize=(8,4))
    ax = fig.add_subplot(1,2,1)
    ax.set_xlim([-10, 10])
    ax.set_ylim([-10, 10])
    ax.set_aspect('equal')

    ax3d = fig.add_subplot(1,2,2, projection='3d')
    ax3d.set_xlim([0,1.0])
    ax3d.set_ylim([0,1.0])
    ax3d.set_zlim([0,1.0])

    n_samples = 100
    r = 6
    thetas = np.linspace(0.0, 8*math.pi, num=n_samples)
    offsets = np.array([[r * math.sin(theta), r * math.cos(theta)] for theta in np.linspace(0.0, 2*math.pi, num=n_samples)])

    real_boxes = boxes.generate_samples(offsets=offsets, thetas=thetas)
    encoded_boxes = encoder.predict(real_boxes)
    decoded_boxes = decoder.predict(encoded_boxes)

    line, = ax3d.plot(encoded_boxes[0:1,0], encoded_boxes[0:1,1], encoded_boxes[0:1,2])
    def update(i):  # renamed from "animate" to avoid shadowing the enclosing function
        # Redraw the real (red) and reconstructed (blue) box for frame i
        ax.clear()
        boxes.draw_from_samples(ax, [real_boxes[i]], 'r', linewidth=5)
        boxes.draw_from_samples(ax, [decoded_boxes[i]], 'b')

        # Extend the latent-space trajectory through frame i
        line.set_data(encoded_boxes[:i, 0], encoded_boxes[:i, 1])
        line.set_3d_properties(encoded_boxes[:i, 2])
    print("animating")

    anim = animation.FuncAnimation(fig, update, frames=n_samples, interval=1000/25, blit=False)
    print("loading video")
    #anim.to_html5_video()
    #anim.save(output_path, writer='imagemagick')
    print("done")
    return anim
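# --- Sketch (assumed helper, not shown in this snippet): main() below trains
# a denoising autoencoder via add_noise(train_data). A plausible minimal
# version, assuming zero-mean Gaussian corruption, is:
def add_noise(data, scale=0.1):
    """Return data corrupted with zero-mean Gaussian noise."""
    return data + np.random.normal(0.0, scale, size=np.shape(data))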
def main():
    start_time = time.time()
    # Setup matplotlib
    x_bounds = [-10, 10]
    y_bounds = [-10, 10]

    fig = plt.figure(figsize=(8, 8))
    ax = fig.add_subplot(111)
    ax.set_xlim(x_bounds)
    ax.set_ylim(y_bounds)
    ax.set_aspect('equal')

    # Setup and train the neural net
    training_sample_size = 1000000
    test_sample_size = 100

    print("Generating training data...")

    # TODO This could be more efficient
    train_data = boxes.generate_samples(training_sample_size)
    test_data = boxes.generate_samples(test_sample_size, x_bounds, y_bounds)
    # TODO test data should be from a different part of the plane to make sure we are generalizing
    print("Done. Runtime: ", time.time()-start_time)
    
    model_start_time = time.time()
    # Each box sample is 8 values: four 2D corner points
    box_dim = 8

    # Needed for relu but apparently not for elu.
    # For some reason I can't learn high-frequency functions with relu alone,
    # and the positive initializer seems to mess with elu.
    # initializer = 'glorot_uniform'
    # activation = 'elu'

    # initializer = keras.initializers.RandomUniform(minval=0.0, maxval=0.01, seed=5)
    # bias_initializer = initializer
    activation = 'relu'  # alternative tried: keras.layers.advanced_activations.LeakyReLU(alpha=0.3)

    # Single autoencoder: 8 -> 200 -> 100 -> 3 -> 100 -> 200 -> 8
    input_layer = Input(shape=(len(train_data[0]),))  # renamed from "input" to avoid shadowing the builtin
    output = Dense(200, activation=activation)(input_layer)
    output = Dense(100, activation=activation)(output)
    output = Dense(3, activation=activation, name="encoded")(output)
    output = Dense(100, activation=activation)(output)
    output = Dense(200, activation=activation)(output)
    output = Dense(len(train_data[0]), activation='linear')(output)  # first test showed no change on output with linear

    autoencoder = Model(input_layer, output)

    ## Double-stacked autoencoder (training seems to reset the weights; haven't figured out how to stop it)
    # layer1 = Dense(200, activation=activation)
    # layer2 = Dense(100, activation=activation)
    # layer3 = Dense(3, activation=activation, name="encoded")
    # layer4 = Dense(100, activation=activation)
    # layer5 = Dense(200, activation=activation)
    # layer6 = Dense(len(train_data[0]), activation='linear')

    # input = Input(shape=(len(train_data[0]),))
    # output = layer1(input)
    # output = layer2(output)
    # output = layer3(output)
    # output = layer4(output)
    # output = layer5(output)
    # output = layer6(output)
    
    # autoencoder = Model(input, output)

    # output = layer1(output)
    # output = layer2(output)
    # output = layer3(output)
    # output = layer4(output)
    # output = layer5(output)
    # output = layer6(output)

    # double_autoencoder = Model(input, output)

    optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0)
    autoencoder.compile(
        optimizer=optimizer,
        loss='mean_squared_error'  # alternative: custom_loss(autoencoder)
    )
   

    start = time.time()

    # class OnEpochEnd(keras.callbacks.Callback):
    #     def on_epoch_end(self, epoch, logs={}):
    #         decoded_boxes = autoencoder.predict(test_data)

    #         # Draw some output
    #         num_to_show = 15
    #         boxes.draw_from_samples(ax, test_data[:num_to_show], 'r', linewidth=5)
    #         boxes.draw_from_samples(ax, decoded_boxes[:num_to_show], 'b',linewidth=2)

    #         from IPython.display import display
    #         display(fig)
    #         ax.clear()
    #         # fig.show()
    print("updated")
    autoencoder.fit(
        add_noise(train_data), train_data,
        epochs=10,
        batch_size=8192,
        shuffle=True,
        #callbacks=[OnEpochEnd()],
        validation_data=(test_data, test_data)
    )

    output_path = 'models/' + datetime.datetime.now().strftime("%I %M%p %B %d %Y") + '.h5'
    autoencoder.save(output_path)

    # output_path = 'models/' + datetime.datetime.now().strftime("%I %M%p %B %d %Y") + '.h5'
    # layer_dims = [box_dim, 20,  7]
    # model = autoencoder.train_model(
    #     train_data,
    #     test_data,
    #     layer_dims=layer_dims,
    #     learning_rate=0.001,
    #     epochs=5,
    #     batch_size=4096,
    #     loss='mean_squared_error',
    #     saved_model_path=output_path
    # )
    print("Total model time: ", time.time() - model_start_time)

    # Encode and decode some boxes; note that we take them from the *test* set
    predict_start = time.time()
    test_data = add_noise(test_data)  # predict from noisy inputs to show denoising
    decoded_boxes = autoencoder.predict(test_data)
    print('Predict took: ', time.time() - predict_start)
    
    print("Total runtime: ", time.time() - start_time)

    # Draw some output
    num_to_show = 15
    boxes.draw_from_samples(ax, test_data[:num_to_show], 'r', linewidth=5)
    boxes.draw_from_samples(ax, decoded_boxes[:num_to_show], 'b', linewidth=2)

    plt.show()
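# Standard entry-point guard so the script can be run directly (assumed,
# not present in the original snippet):
if __name__ == '__main__':
    main()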