Example 1
	def simulate(self, T, V, state, base, decoder, plot=False):
		predicted_sequence = []
		if T == 0:
			return predicted_sequence, state, 0
		
		start = time.time()
		for t in range(T):
			input_state = np.array(state).reshape(1, 1, NUMBER_DIM)
			params = decoder.predict(input_state)
			sample_value = mdn.sample_from_output(params[0], NUMBER_DIM, NUMBER_MIXTURES)
			# convert the sampled (sign, magnitude) pair into an actual value change
			diff = np.sign(sample_value[0][0]) * sample_value[0][1]
			base += diff
			predicted_sequence.append(base)
			state = sample_value
			if base >= V:
				break
		end = time.time()

		if plot:
			print(predicted_sequence)
			plt.plot(range(0, len(self.data)), self.data, color='b')
			plt.plot(range(len(self.data), len(self.data) + len(predicted_sequence)), \
				predicted_sequence, color='black', label='ML-model')
			#plt.hlines(V, 0, T, color='r')
			plt.legend()
			plt.show()

		return predicted_sequence, state, end-start
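A minimal usage sketch for simulate, assuming the trained decoder, the module-level NUMBER_DIM and NUMBER_MIXTURES constants, and a hypothetical instance sim of the enclosing class (the seed values below are illustrative, not from the original source):

# Roll the model forward for up to 100 steps from a zero seed state,
# stopping early once the running value `base` crosses the threshold V.
seed_state = np.zeros((1, NUMBER_DIM))
seq, final_state, elapsed = sim.simulate(T=100, V=1.0, state=seed_state,
                                         base=0.0, decoder=decoder, plot=True)
print("predicted %d steps in %.3f s" % (len(seq), elapsed))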
Example 2
def generate_performance(model,
                         n_mixtures,
                         first_sample,
                         time_limit=None,
                         steps_limit=1000,
                         pi_temp=1.0,
                         sigma_temp=0.0,
                         out_dim=2):
    """Generates a performance of (dt, x) pairs, up to a step_limit.
    Time limit is not presently implemented.
    """
    time = 0
    steps = 0
    prev_sample = first_sample
    print(prev_sample)  # debug: show the seed sample
    performance = [prev_sample.reshape((out_dim, ))]
    while (steps < steps_limit):  # and time < time_limit
        params = model.predict(
            prev_sample.reshape(1, 1, out_dim) * SCALE_FACTOR)
        prev_sample = mdn.sample_from_output(params[0],
                                             out_dim,
                                             n_mixtures,
                                             temp=pi_temp,
                                             sigma_temp=sigma_temp)
        prev_sample = prev_sample / SCALE_FACTOR
        output_touch = prev_sample.reshape(out_dim, )
        output_touch = proc_generated_touch(output_touch)
        performance.append(output_touch.reshape((out_dim, )))
        steps += 1
        time += output_touch[0]
    return np.array(performance)
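A hedged usage sketch, assuming the trained model and the module-level SCALE_FACTOR; the zero seed and the mixture count of 5 are illustrative assumptions:

seed = np.zeros(2, dtype=np.float32)          # assumed (dt, x) seed sample
perf = generate_performance(model, n_mixtures=5, first_sample=seed,
                            steps_limit=500)
print(perf.shape)                             # (steps + 1, 2)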
Example 3
import numpy as np
import keras
from keras.layers import Input, LSTM
from keras.models import Model
import mdn

def generate_unconditionally(random_seed=1):
    np.random.seed(random_seed)

    # Set up the generator
    inputs = Input(shape=(1, 3))
    x = LSTM(256, return_sequences=True)(inputs)
    x = LSTM(256)(x)
    outputs = mdn.MDN(3, 10)(x)
    generator = Model(inputs=inputs, outputs=outputs)
    generator.compile(loss=mdn.get_mixture_loss_func(3, 10),
                      optimizer=keras.optimizers.Adam())
    generator.load_weights('model_weights.h5')
    
    predictions = []
    stroke_pt = np.asarray([1,0,0], dtype=np.float32) # start point
    predictions.append(stroke_pt)

    for i in range(400):
        stroke_pt = mdn.sample_from_output(generator.predict(stroke_pt.reshape(1,1,3))[0], 3, 10)
        predictions.append(stroke_pt.reshape((3,)))
        
    predictions = np.array(predictions, dtype=np.float32)
    for i in range(len(predictions)):
        predictions[i][0] = (predictions[i][0] > 0.5)*1
        predictions[i][1] = predictions[i][1] * std_x + x_mean
        predictions[i][2] = predictions[i][2] * std_y + y_mean
    return predictions

#stroke = generate_unconditionally()
#plot_stroke(stroke)
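The de-normalisation loop above depends on the globals std_x, x_mean, std_y and y_mean. The original statistics are not shown here; a plausible sketch of how they could be computed from a training array strokes of shape (N, 3) with columns (pen, dx, dy):

# Per-axis mean and standard deviation of the offset columns.
x_mean, y_mean = strokes[:, 1].mean(), strokes[:, 2].mean()
std_x, std_y = strokes[:, 1].std(), strokes[:, 2].std()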
Example 4
    def predict_sequence(self,
                         input_data_start=0,
                         num_preds=800,
                         plot_stats=True,
                         save_wav=True):
        """
        Predict new wav file, display and save it
        """
        #input_data_start = np.random.randint(self.data_processor.input_data.shape[0])
        new_sequence = np.array(
            [self.data_processor.input_data[input_data_start, :, :]])
        out_sequence = new_sequence
        out_sequence = np.squeeze(out_sequence)
        pred_tot = []
        for i in tqdm.tqdm(range(num_preds)):
            pred = self.model.predict(new_sequence)

            if (self.model_version == 1):
                new_elem = mdn.sample_from_output(pred[0], self.OUTPUT_DIMS,
                                                  self.n_mixes)
                pred_tot.append(np.copy(pred))
            else:
                new_elem = mdn.sample_from_output(pred[0][-1],
                                                  self.OUTPUT_DIMS,
                                                  self.n_mixes)
                pred_tot.append(np.copy(pred[0][-1]))

            new_sequence = np.concatenate((new_sequence[0, 1:, :], new_elem))
            out_sequence = np.concatenate((out_sequence, new_elem))
            new_sequence = new_sequence.reshape(1, new_sequence.shape[0],
                                                new_sequence.shape[1])

        pred_tot = np.array(pred_tot)
        if (save_wav):
            self._inverse_and_plot_sequence(out_sequence)
        if (plot_stats):
            self._mixture_components(pred_tot, num_plots=1)
            self._mixture_components(self.stats_cb.pred_tot, num_plots=2)

        return pred_tot
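A hedged usage sketch, assuming synth is an instance of the enclosing class with model, data_processor and stats_cb already initialised (the name synth is illustrative):

# Generate 800 frames from the first input window, plot the mixture
# statistics, and write the synthesised wav file.
mixture_params = synth.predict_sequence(input_data_start=0, num_preds=800,
                                        plot_stats=True, save_wav=True)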
Example 5
def generate_sample(model,
                    n_mixtures,
                    prev_sample,
                    pi_temp=1.0,
                    sigma_temp=0.0,
                    out_dim=2):
    """Generate one forward prediction from a previous sample in format
    (dt, x_1,...,x_n). Pi and Sigma temperature are adjustable."""
    params = model.predict(prev_sample.reshape(1, 1, out_dim) * SCALE_FACTOR)
    new_sample = mdn.sample_from_output(
        params[0], out_dim, n_mixtures, temp=pi_temp,
        sigma_temp=sigma_temp) / SCALE_FACTOR
    new_sample = new_sample.reshape(out_dim, )
    return new_sample
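A minimal autoregressive loop built on generate_sample, assuming the trained model and this module's SCALE_FACTOR; the zero seed and mixture count are illustrative assumptions:

sample = np.zeros(2, dtype=np.float32)   # assumed (dt, x) seed
trace = [sample]
for _ in range(100):
    # Each prediction is fed back as the next input.
    sample = generate_sample(model, n_mixtures=5, prev_sample=sample)
    trace.append(sample)
trace = np.array(trace)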
def condition_and_generate(model, perf, n_mixtures, time_limit=5.0, steps_limit=1000, temp=1.0, sigma_temp=0.0, predict_moving=False):
    """Conditions the network on an existing tiny performance, then generates a new one."""
    if predict_moving:
        out_dim = 4
    else:
        out_dim = 3
    time = 0
    steps = 0
    # condition the network on the existing performance; only the last
    # conditioned touch is kept, as the seed for generation
    for touch in perf:
        params = model.predict(touch.reshape(1, 1, out_dim) * SCALE_FACTOR)
        previous_touch = mdn.sample_from_output(params[0], out_dim, n_mixtures, temp=temp, sigma_temp=sigma_temp) / SCALE_FACTOR
        output = [previous_touch.reshape((out_dim,))]
    # generate
    while (steps < steps_limit and time < time_limit):
        params = model.predict(previous_touch.reshape(1, 1, out_dim) * SCALE_FACTOR)
        previous_touch = mdn.sample_from_output(params[0], out_dim, n_mixtures, temp=temp, sigma_temp=sigma_temp) / SCALE_FACTOR
        output_touch = previous_touch.reshape(out_dim,)
        output_touch = constrain_touch(output_touch, with_moving=predict_moving)
        output.append(output_touch.reshape((out_dim,)))
        steps += 1
        time += output_touch[2]
    net_output = np.array(output)
    return net_output
Example 7
    def predict_one_step(self,
                         action,
                         previous_z=[],
                         sigma_temp=1.0,
                         force_prediction_from_mixture=-1):
        """Predicts one step ahead from the previous state.

        If previous_z is given, we predict with that as input; otherwise,
        we dream from the previous output we generated.
        """

        #Scaling inputs
        if len(previous_z) > 0:
            previous_z = np.array(previous_z)
            previous_z = np.multiply(previous_z, self.ioscaling)

        self.frame_count += 1
        prev_z = np.zeros((1, 1, LATENT_SPACE_DIMENSIONALITY))
        if len(previous_z) > 0:
            prev_z[0][0] = previous_z
        else:
            prev_z[0][0] = self.z

        rnn_input = np.append(prev_z[0][0], action)

        #print("Inserting to RNN:")
        #print(rnn_input)
        mixture_params = self.rnn.model.predict(np.array([[rnn_input]]))

        #If requested, sample from one specific mixture
        if force_prediction_from_mixture != -1:
            predicted_latent = sample_from_one_specific_mixture(
                mdn, force_prediction_from_mixture, mixture_params[0],
                LATENT_SPACE_DIMENSIONALITY, self.num_mixtures, sigma_temp)
        else:
            predicted_latent = mdn.sample_from_output(
                mixture_params[0],
                LATENT_SPACE_DIMENSIONALITY,
                self.num_mixtures,
                temp=self.temperature,
                sigma_temp=sigma_temp)
        mixture_weights = softmax(mixture_params[0][-self.num_mixtures:],
                                  t=self.temperature)
        #print("Got out from RNN after sampling: ")
        #print(predicted_latent)
        #Downscaling to output size.
        #predicted_latent = np.divide(predicted_latent, self.ioscaling)
        self.z = predicted_latent

        return predicted_latent[0], mixture_weights
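predict_one_step applies a temperature-scaled softmax to the mixture-weight logits (the last num_mixtures entries of the MDN parameter vector). That helper is not included in this example; a minimal sketch consistent with the call softmax(logits, t=temperature):

def softmax(logits, t=1.0):
    """Temperature-scaled softmax; t < 1 sharpens, t > 1 flattens."""
    z = np.array(logits) / t
    z = z - z.max()          # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum()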
def generate_random_tiny_performance(model, n_mixtures, first_touch, time_limit=5.0, steps_limit=1000, temp=1.0, sigma_temp=0.0, predict_moving=False):
    """Generates a tiny performance up to 5 seconds in length."""
    if predict_moving:
        out_dim = 4
    else:
        out_dim = 3
    time = 0
    steps = 0
    previous_touch = first_touch
    performance = [previous_touch.reshape((out_dim,))]
    while (steps < steps_limit and time < time_limit):
        params = model.predict(previous_touch.reshape(1,1,out_dim) * SCALE_FACTOR)
        previous_touch = mdn.sample_from_output(params[0], out_dim, n_mixtures, temp=temp, sigma_temp=sigma_temp) / SCALE_FACTOR
        output_touch = previous_touch.reshape(out_dim,)
        output_touch = constrain_touch(output_touch, with_moving=predict_moving)
        performance.append(output_touch.reshape((out_dim,)))
        steps += 1
        time += output_touch[2]
    return np.array(performance)
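An illustrative call, assuming the trained model; the seed touch and mixture count are assumptions, with the (x, y, dt) layout implied by time += output_touch[2]:

first = np.array([0.5, 0.5, 0.1], dtype=np.float32)   # assumed (x, y, dt) seed
perf = generate_random_tiny_performance(model, n_mixtures=5, first_touch=first)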
Example 9
# Load weights

vae.load_weights(VAE_PATH)
model.load_weights(DANCENET_PATH)

# Generate Video

fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video = cv2.VideoWriter("out.mp4", fourcc, 30.0, (208, 120))
lv_in = data[0]

for i in range(500):
    lv_input = np.array(lv_in).reshape(1, 128)
    lv_out = model.predict(lv_input)
    shape = np.array(lv_out).shape[1]
    lv_out = np.array(lv_out).reshape(shape)
    lv_out = mdn.sample_from_output(lv_out, 128, numComponents, temp=0.01)
    lv_out = scaler.inverse_transform(lv_out)
    img = decoder.predict(np.array(lv_out).reshape(1, 128))
    img = np.array(img).reshape(120, 208, 1)
    img = img * 255
    img = np.array(img).astype("uint8")
    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
    lv_in = lv_out
    video.write(img)
video.release()
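The loop above assumes model maps a 128-dimensional latent vector to MDN parameters. The original architecture is not shown; a sketch of a compatible model using the mdn layer API from the other examples (the Dense widths are assumptions):

from keras.layers import Input, Dense
from keras.models import Model
import keras
import mdn

inputs = Input(shape=(128,))
x = Dense(512, activation='relu')(inputs)
x = Dense(512, activation='relu')(x)
outputs = mdn.MDN(128, numComponents)(x)
model = Model(inputs=inputs, outputs=outputs)
model.compile(loss=mdn.get_mixture_loss_func(128, numComponents),
              optimizer=keras.optimizers.Adam())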
Example 10
            if (local_windowpos > windowwidth * -0.5
                    and local_windowpos < windowwidth * 0.5):
                char_strength = math.cos(
                    ((local_windowpos / windowwidth) + 0.5) * 2.0 *
                    math.pi) * -0.5 + 0.5
                onehot[alphabet.find(textsample[lc]) + 1] = char_strength

        ## combine coords with softwindow
        q = np.concatenate(
            (r.reshape(3), onehot.reshape(64))).reshape(1, 1, 67)
        params = decoder(q, training=False)

        ## sample
        r = mdn.sample_from_output(params[0].numpy(),
                                   OUTPUT_DIMENSION,
                                   MDN_MIXTURES,
                                   temp=pi_temperature,
                                   sigma_temp=sigma_temp)

        ## add to the drawing array
        sketch.append(r.reshape((3, )))

sketch = np.array(sketch)
sketch[:, 0:2] *= scale_factor

# round off pen_down to 0 / 1
sketch.T[2] = cutoff_stroke(sketch.T[2])

print("generated in {eltime:.2f} seconds.".format(eltime=time.time() - start))

### create the drawing
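cutoff_stroke, used above to binarise the pen-down channel, is not defined in this excerpt; a one-line version consistent with the surrounding comment, assuming a 0.5 threshold:

def cutoff_stroke(x):
    # Round pen_down values to 0 / 1.
    return np.greater(x, 0.5) * 1.0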
Example 11
def visualize(model,
              x_val,
              PLOT_DIR,
              TIME_OF_RUN,
              args,
              ode_model=True,
              latent=False,
              epoch=0,
              is_mdn=False):
    """Visualize a tf.keras.Model for a single pendulum.
    # Arguments:
        model: A Keras model, that accepts t and x when called
        x_val: np.ndarray, shape=(1, samples_per_series, 2) or (samples_per_series, 2)
                The reference time series, against which the model will be compared
        PLOT_DIR: Directory to plot in
        TIME_OF_RUN: Time at which the run began
        ode_model: whether the model outputs the derivative of the current step (True),
                   or the value of the next step (False)
        args: input arguments from main script
    """
    x_val = x_val.reshape(2, -1, 2)
    dt = 0.01
    t = tf.linspace(0., 10., int(10. / dt) + 1)
    # Compute the predicted trajectories
    if ode_model:
        x0_extrap = tf.stack([x_val[0, 0]])
        x_t_extrap = odeint(model, x0_extrap, t, rtol=1e-5,
                            atol=1e-5).numpy()[:, 0]
        x0_interp = tf.stack([x_val[1, 0]])
        x_t_interp = odeint(model, x0_interp, t, rtol=1e-5,
                            atol=1e-5).numpy()[:, 0]
    else:  # LSTM model
        x_t_extrap = np.zeros_like(x_val[0])
        x_t_extrap[0] = x_val[0, 0]
        x_t_interp = np.zeros_like(x_val[1])
        x_t_interp[0] = x_val[1, 0]
        # Always inject the entire time series: Keras is slow with varying
        # series lengths, and future timesteps don't affect the predictions
        # before them anyway.
        if is_mdn:
            import mdn
            for i in range(1, len(t)):
                pred_extrap = model(0., np.expand_dims(x_t_extrap,
                                                       axis=0))[0, i - 1:i]
                x_t_extrap[i:i + 1] = mdn.sample_from_output(
                    pred_extrap.numpy()[0], 2, 5, temp=1.)
                pred_interp = model(0., np.expand_dims(x_t_interp,
                                                       axis=0))[0, i - 1:i]
                x_t_interp[i:i + 1] = mdn.sample_from_output(
                    pred_interp.numpy()[0], 2, 5, temp=1.)
        else:
            for i in range(1, len(t)):
                x_t_extrap[i:i + 1] = model(0.,
                                            np.expand_dims(x_t_extrap,
                                                           axis=0))[0, i - 1:i]
                x_t_interp[i:i + 1] = model(0.,
                                            np.expand_dims(x_t_interp,
                                                           axis=0))[0, i - 1:i]

    x_t = np.stack([x_t_extrap, x_t_interp], axis=0)
    # Plot the generated trajectories
    fig = plt.figure(figsize=(12, 8), facecolor='white')
    ax_traj = fig.add_subplot(231, frameon=False)
    ax_phase = fig.add_subplot(232, frameon=False)
    ax_vecfield = fig.add_subplot(233, frameon=False)
    ax_vec_error_abs = fig.add_subplot(234, frameon=False)
    ax_vec_error_rel = fig.add_subplot(235, frameon=False)
    ax_energy = fig.add_subplot(236, frameon=False)
    ax_traj.cla()
    ax_traj.set_title('Trajectories')
    ax_traj.set_xlabel('t')
    ax_traj.set_ylabel('x,y')
    ax_traj.plot(t.numpy(), x_val[0, :, 0], t.numpy(), x_val[0, :, 1], 'g-')
    ax_traj.plot(t.numpy(), x_t[0, :, 0], '--', t.numpy(), x_t[0, :, 1], 'b--')
    ax_traj.set_xlim(min(t.numpy()), max(t.numpy()))
    ax_traj.set_ylim(-6, 6)
    ax_traj.legend()

    ax_phase.cla()
    ax_phase.set_title('Phase Portrait')
    ax_phase.set_xlabel('x')
    ax_phase.set_ylabel('x_dt')
    ax_phase.plot(x_val[0, :, 0], x_val[0, :, 1], 'g--')
    ax_phase.plot(x_t[0, :, 0], x_t[0, :, 1], 'b--')
    ax_phase.plot(x_val[1, :, 0], x_val[1, :, 1], 'g--')
    ax_phase.plot(x_t[1, :, 0], x_t[1, :, 1], 'b--')
    ax_phase.set_xlim(-6, 6)
    ax_phase.set_ylim(-6, 6)

    ax_vecfield.cla()
    ax_vecfield.set_title('Learned Vector Field')
    ax_vecfield.set_xlabel('x')
    ax_vecfield.set_ylabel('x_dt')

    steps = 61
    y, x = np.mgrid[-6:6:complex(0, steps), -6:6:complex(0, steps)]
    ref_func = Lambda()
    dydt_ref = ref_func(0.,
                        np.stack([x, y], -1).reshape(steps * steps,
                                                     2)).numpy()
    mag_ref = 1e-8 + np.linalg.norm(dydt_ref, axis=-1).reshape(steps, steps)
    dydt_ref = dydt_ref.reshape(steps, steps, 2)

    if ode_model:  # is Dense-Net or NODE-Net or NODE-e2e
        dydt = model(0.,
                     np.stack([x, y], -1).reshape(steps * steps, 2)).numpy()
    else:  # is LSTM
        # Compute an artificial x_dot by numerically differentiating:
        # x_dot \approx (x_{t+1} - x_t) / dt
        yt_1 = model(0.,
                     np.stack([x, y], -1).reshape(steps * steps, 1, 2))[:, 0]
        if is_mdn:  # have to sample from output Gaussians
            yt_1 = np.apply_along_axis(mdn.sample_from_output,
                                       1,
                                       yt_1.numpy(),
                                       2,
                                       5,
                                       temp=.1)[:, 0]
        dydt = (np.array(yt_1) -
                np.stack([x, y], -1).reshape(steps * steps, 2)) / dt

    dydt_abs = dydt.reshape(steps, steps, 2)
    dydt_unit = dydt_abs / np.linalg.norm(dydt_abs, axis=-1,
                                          keepdims=True)  # make unit vector

    ax_vecfield.streamplot(x,
                           y,
                           dydt_unit[:, :, 0],
                           dydt_unit[:, :, 1],
                           color="black")
    ax_vecfield.set_xlim(-6, 6)
    ax_vecfield.set_ylim(-6, 6)

    ax_vec_error_abs.cla()
    ax_vec_error_abs.set_title('Abs. error of xdot')
    ax_vec_error_abs.set_xlabel('x')
    ax_vec_error_abs.set_ylabel('x_dt')

    abs_dif = np.clip(np.linalg.norm(dydt_abs - dydt_ref, axis=-1), 0., 3.)
    c1 = ax_vec_error_abs.contourf(x, y, abs_dif, 100)
    plt.colorbar(c1, ax=ax_vec_error_abs)

    ax_vec_error_abs.set_xlim(-6, 6)
    ax_vec_error_abs.set_ylim(-6, 6)

    ax_vec_error_rel.cla()
    ax_vec_error_rel.set_title('Rel. error of xdot')
    ax_vec_error_rel.set_xlabel('x')
    ax_vec_error_rel.set_ylabel('x_dt')

    rel_dif = np.clip(abs_dif / mag_ref, 0., 1.)
    c2 = ax_vec_error_rel.contourf(x, y, rel_dif, 100)
    plt.colorbar(c2, ax=ax_vec_error_rel)

    ax_vec_error_rel.set_xlim(-6, 6)
    ax_vec_error_rel.set_ylim(-6, 6)

    ax_energy.cla()
    ax_energy.set_title('Total Energy')
    ax_energy.set_xlabel('t')
    ax_energy.plot(
        np.arange(1001) / 100.1,
        np.array([total_energy(x_) for x_ in x_t_interp]))

    fig.tight_layout()
    plt.savefig(PLOT_DIR + '/{:03d}'.format(epoch))
    plt.close()

    # Compute Metrics
    energy_drift_extrap = relative_energy_drift(x_t[0], x_val[0])
    phase_error_extrap = relative_phase_error(x_t[0], x_val[0])
    traj_error_extrap = trajectory_error(x_t[0], x_val[0])

    energy_drift_interp = relative_energy_drift(x_t[1], x_val[1])
    phase_error_interp = relative_phase_error(x_t[1], x_val[1])
    traj_error_interp = trajectory_error(x_t[1], x_val[1])

    wall_time = (datetime.datetime.now() - datetime.datetime.strptime(
        TIME_OF_RUN, "%Y%m%d-%H%M%S")).total_seconds()
    string = "{},{},{},{},{},{},{},{}\n".format(
        wall_time, epoch, energy_drift_interp, energy_drift_extrap,
        phase_error_interp, phase_error_extrap, traj_error_interp,
        traj_error_extrap)
    file_path = (PLOT_DIR + TIME_OF_RUN + "results" + str(args.lr) +
                 str(args.dataset_size) + str(args.batch_size) + ".csv")
    if not os.path.isfile(file_path):
        title_string = ("wall_time,epoch,energy_drift_interp,energy_drift_extrap,"
                        "phase_error_interp,phase_error_extrap,"
                        "traj_err_interp,traj_err_extrap\n")
        with open(file_path, 'a') as fd:
            fd.write(title_string)
    with open(file_path, 'a') as fd:
        fd.write(string)

    # Print Jacobian
    if ode_model:
        np.set_printoptions(suppress=True, precision=4, linewidth=150)
        # The first Jacobian is averaged over 100 randomly sampled points from U(-1, 1)
        jac = tf.zeros((2, 2))
        for i in range(100):
            with tf.GradientTape(persistent=True) as g:
                x = (2 * tf.random.uniform((1, 2)) - 1)
                g.watch(x)
                y = model(0, x)
            jac = jac + g.jacobian(y, x)[0, :, 0]
        print(jac.numpy() / 100)

        with tf.GradientTape(persistent=True) as g:
            x = tf.zeros([1, 2])
            g.watch(x)
            y = model(0, x)
        print(g.jacobian(y, x)[0, :, 0])
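The is_mdn branches sample with out_dim=2 and 5 mixtures, so the expected model ends in an mdn.MDN(2, 5) layer. A sketch of a compatible LSTM-MDN model (the LSTM width is an assumption); since visualize calls the model as model(t, x), a thin wrapper discards the dummy time argument:

import tensorflow as tf
import mdn

seq_in = tf.keras.Input(shape=(None, 2))
h = tf.keras.layers.LSTM(64, return_sequences=True)(seq_in)
mdn_params = mdn.MDN(2, 5)(h)
lstm_mdn = tf.keras.Model(seq_in, mdn_params)
lstm_mdn.compile(loss=mdn.get_mixture_loss_func(2, 5), optimizer='adam')

# Wrapper matching the model(t, x) calling convention used in visualize().
model = lambda t, x: lstm_mdn(x)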