Example #1
import numpy as np

import helperfns

# relies on encoder, encoder_apply, create_omega_net, decoder, decoder_apply,
# omega_net_apply, and varying_multiply defined in the same module
def create_koopman_net(phase, keep_prob, params):
    """Create a Koopman network that encodes, advances in time, and decodes.

    Arguments:
        phase -- boolean placeholder: True during the training phase, False
            otherwise (controls training-only behavior such as dropout)
        keep_prob -- probability that each unit is kept during dropout
        params -- dictionary of parameters for experiment

    Returns:
        x -- placeholder for input
        y -- list, output of decoder applied to each shift: g_list[0], K*g_list[0], K^2*g_list[0], ..., length num_shifts + 1
        g_list -- list, output of encoder applied to each shift in input x, length num_shifts_middle + 1
        weights -- dictionary of weights
        biases -- dictionary of biases

    Side effects:
        Adds more entries to params dict: num_encoder_weights, num_omega_weights, num_decoder_weights

    Raises:
        ValueError -- if len(y) != len(params['shifts']) + 1
    """
    # e.g., params['d'] = 8 gives depth = 2 hidden layers in the encoder
    depth = int((params['d'] - 4) / 2)

    max_shifts_to_stack = helperfns.num_shifts_in_stack(params)

    encoder_widths = params['widths'][0:depth + 2]  # 2, w, w, k
    x, weights, biases = encoder(
        encoder_widths,
        dist_weights=params['dist_weights'][0:depth + 1],
        dist_biases=params['dist_biases'][0:depth + 1],
        scale=params['scale'],
        num_shifts_max=max_shifts_to_stack,
        first_guess=params['first_guess'])
    params['num_encoder_weights'] = len(weights)
    g_list = encoder_apply(x,
                           weights,
                           biases,
                           params['act_type'],
                           params['batch_flag'],
                           phase,
                           shifts_middle=params['shifts_middle'],
                           keep_prob=keep_prob,
                           num_encoder_weights=params['num_encoder_weights'])

    # g_list_omega is list of omegas, one entry for each middle_shift of x (like g_list)
    omegas, weights_omega, biases_omega = create_omega_net(
        phase, keep_prob, params, g_list[0])
    # params['num_omega_weights'] = len(weights_omega) already done inside create_omega_net
    weights.update(weights_omega)
    biases.update(biases_omega)

    num_widths = len(params['widths'])
    decoder_widths = params['widths'][depth + 2:num_widths]  # k ... n
    weights_decoder, biases_decoder = decoder(
        decoder_widths,
        dist_weights=params['dist_weights'][depth + 2:],
        dist_biases=params['dist_biases'][depth + 2:],
        scale=params['scale'])
    weights.update(weights_decoder)
    biases.update(biases_decoder)

    y = []
    # y[0] is x[0,:,:] encoded and then decoded (no stepping forward)
    encoded_layer = g_list[0]
    params['num_decoder_weights'] = depth + 1
    y.append(
        decoder_apply(encoded_layer, weights, biases, params['act_type'],
                      params['batch_flag'], phase, keep_prob,
                      params['num_decoder_weights']))

    # g_list_omega[0] is for x[0,:,:], pairs with g_list[0]=encoded_layer
    advanced_layer = varying_multiply(encoded_layer, omegas, params['delta_t'],
                                      params['num_real'],
                                      params['num_complex_pairs'])

    for j in range(max(params['shifts'])):
        # the prediction penalty may apply to only a subset of the advanced
        # steps y_{k+1}, y_{k+2}, ...
        if (j + 1) in params['shifts']:
            y.append(
                decoder_apply(advanced_layer, weights, biases,
                              params['act_type'], params['batch_flag'], phase,
                              keep_prob, params['num_decoder_weights']))

        omegas = omega_net_apply(phase, keep_prob, params, advanced_layer,
                                 weights, biases)
        advanced_layer = varying_multiply(advanced_layer, omegas,
                                          params['delta_t'],
                                          params['num_real'],
                                          params['num_complex_pairs'])

    if len(y) != (len(params['shifts']) + 1):
        raise ValueError(
            "len(y) is %d but should be len(params['shifts']) + 1 = %d: check "
            "create_koopman_net and how params['shifts'] is defined in the "
            "experiment" % (len(y), len(params['shifts']) + 1))

    return x, y, g_list, weights, biases
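
A minimal usage sketch (not part of the original code): every parameter value below is hypothetical, chosen only to satisfy the keys this function reads directly, and the extra keys consumed by create_omega_net are omitted. It assumes the function lives in a module importable as networkarch, as the net.create_koopman_net calls in the later examples suggest.

import tensorflow as tf

import networkarch as net  # assumed module name for the code above

params = {
    'widths': [2, 64, 64, 3, 3, 64, 64, 2],  # n, w, w, k | k, w, w, n
    'd': 8,                                   # len(widths); gives depth = 2
    'dist_weights': ['tn'] * 7,               # per-layer initializers (assumed values)
    'dist_biases': [0] * 7,
    'scale': 0.1,
    'first_guess': 0,
    'act_type': 'relu',
    'batch_flag': 0,
    'shifts': [1, 2, 3],                      # steps penalized in the prediction loss
    'shifts_middle': [1, 2, 3],               # steps penalized in the linearity loss
    'delta_t': 0.02,
    'num_real': 1,                            # k = 2 * num_complex_pairs + num_real = 3
    'num_complex_pairs': 1,
}

phase = tf.placeholder(tf.bool, name='phase')
keep_prob = tf.placeholder(tf.float64, shape=[], name='keep_prob')
x, y, g_list, weights, biases = net.create_koopman_net(phase, keep_prob, params)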
Example #2
import time

import numpy as np
import tensorflow as tf

import helperfns
import networkarch as net  # assumed name of the module providing create_koopman_net
def try_net(data_val, params):
    """Run a random experiment for particular params and data.

    Arguments:
        data_val -- array containing validation dataset
        params -- dictionary of parameters for experiment

    Returns:
        None

    Side effects:
        Changes params dict
        Saves files
        Builds TensorFlow graph (reset in main_exp)
    """
    # SET UP NETWORK
    # this version of create_koopman_net takes only params and creates the
    # input placeholder x internally (cf. Example #1, which takes phase and
    # keep_prob explicitly)
    x, y, g_list, weights, biases = net.create_koopman_net(params)

    max_shifts_to_stack = helperfns.num_shifts_in_stack(params)

    # DEFINE LOSS FUNCTION
    trainable_var = tf.trainable_variables()
    loss1, loss2, loss3, loss_Linf, loss = define_loss(x, y, g_list, weights,
                                                       biases, params)
    loss_L1, loss_L2, regularized_loss, regularized_loss1 = define_regularization(
        params, trainable_var, loss, loss1)

    # CHOOSE OPTIMIZATION ALGORITHM
    optimizer = helperfns.choose_optimizer(params, regularized_loss,
                                           trainable_var)
    optimizer_autoencoder = helperfns.choose_optimizer(params,
                                                       regularized_loss1,
                                                       trainable_var)

    # LAUNCH GRAPH AND INITIALIZE
    sess = tf.Session()
    saver = tf.train.Saver()

    # Before starting, initialize the variables.  We will 'run' this first.
    init = tf.global_variables_initializer()
    sess.run(init)

    csv_path = params['model_path'].replace('model', 'error')
    csv_path = csv_path.replace('ckpt', 'csv')
    print(csv_path)

    num_saved_per_file_pass = params['num_steps_per_file_pass'] / 20 + 1
    num_saved = np.floor(num_saved_per_file_pass * params['data_train_len'] *
                         params['num_passes_per_file']).astype(int)
    train_val_error = np.zeros([num_saved, 16])
    count = 0
    best_error = 10000

    data_val_tensor = helperfns.stack_data(data_val, max_shifts_to_stack,
                                           params['len_time'])

    start = time.time()
    finished = 0
    saver.save(sess, params['model_path'])

    # TRAINING
    # loop over training data files
    for f in range(params['data_train_len'] * params['num_passes_per_file']):
        if finished:
            break
        file_num = (f % params['data_train_len']) + 1  # 1...data_train_len

        if (params['data_train_len'] > 1) or (f == 0):
            # don't keep reloading data if always same
            data_train = np.loadtxt(
                ('./data/%s_train%d_x.csv' % (params['data_name'], file_num)),
                delimiter=',',
                dtype=np.float64)
            data_train_tensor = helperfns.stack_data(data_train,
                                                     max_shifts_to_stack,
                                                     params['len_time'])
            num_examples = data_train_tensor.shape[1]
            num_batches = int(np.floor(num_examples / params['batch_size']))

        ind = np.arange(num_examples)
        np.random.shuffle(ind)
        data_train_tensor = data_train_tensor[:, ind, :]

        # loop over batches in this file
        for step in range(params['num_steps_per_batch'] * num_batches):

            if params['batch_size'] < data_train_tensor.shape[1]:
                offset = (step * params['batch_size']) % (num_examples -
                                                          params['batch_size'])
            else:
                offset = 0

            batch_data_train = data_train_tensor[:, offset:(
                offset + params['batch_size']), :]

            feed_dict_train = {x: batch_data_train}
            feed_dict_train_loss = {x: batch_data_train}
            feed_dict_val = {x: data_val_tensor}

            if (not params['been5min']) and params['auto_first']:
                sess.run(optimizer_autoencoder, feed_dict=feed_dict_train)
            else:
                sess.run(optimizer, feed_dict=feed_dict_train)

            if step % 20 == 0:
                train_error = sess.run(loss, feed_dict=feed_dict_train_loss)
                val_error = sess.run(loss, feed_dict=feed_dict_val)

                if val_error < (best_error - best_error * (10**(-5))):
                    best_error = val_error.copy()
                    saver.save(sess, params['model_path'])
                    reg_train_err = sess.run(regularized_loss,
                                             feed_dict=feed_dict_train_loss)
                    reg_val_err = sess.run(regularized_loss,
                                           feed_dict=feed_dict_val)
                    print(
                        "New best val error %f (with reg. train err %f and reg. val err %f)"
                        % (best_error, reg_train_err, reg_val_err))

                train_val_error[count, 0] = train_error
                train_val_error[count, 1] = val_error
                # columns: even = train, odd = val; losses logged in order
                # regularized_loss, loss1, loss2, loss3, loss_Linf, loss_L1, loss_L2
                logged_losses = [regularized_loss, loss1, loss2, loss3, loss_Linf]
                train_val_error[count, 2:12:2] = sess.run(
                    logged_losses, feed_dict=feed_dict_train_loss)
                train_val_error[count, 3:12:2] = sess.run(
                    logged_losses, feed_dict=feed_dict_val)
                if np.isnan(train_val_error[count, 10]):
                    params['stop_condition'] = 'loss_Linf is nan'
                    finished = 1
                    break
                train_val_error[count, 12:16:2] = sess.run(
                    [loss_L1, loss_L2], feed_dict=feed_dict_train_loss)
                train_val_error[count, 13:16:2] = sess.run(
                    [loss_L1, loss_L2], feed_dict=feed_dict_val)

                np.savetxt(csv_path, train_val_error, delimiter=',')
                finished, save_now = helperfns.check_progress(
                    start, best_error, params)
                count += 1
                if save_now:
                    train_val_error_trunc = train_val_error[:count, :]
                    helperfns.save_files(sess, csv_path, train_val_error_trunc,
                                         params, weights, biases)
                if finished:
                    break

            if step > params['num_steps_per_file_pass']:
                params['stop_condition'] = 'reached num_steps_per_file_pass'
                break

    # SAVE RESULTS
    train_val_error = train_val_error[:count, :]
    print(train_val_error)
    params['time_exp'] = time.time() - start
    saver.restore(sess, params['model_path'])
    helperfns.save_files(sess, csv_path, train_val_error, params, weights,
                         biases)
    tf.reset_default_graph()
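
helperfns.stack_data is not shown in these examples. A rough sketch of the contract the loop above relies on (the real implementation may differ): the first axis indexes time shifts 0..num_shifts and the second axis indexes examples, so data_train_tensor[:, ind, :] shuffles examples while keeping each example's shifted copies aligned.

import numpy as np

def stack_data_sketch(data, num_shifts, len_time):
    """Illustrative stand-in for helperfns.stack_data (assumed contract only).

    data -- 2-D array, shape [num_traj * len_time, n]; each trajectory
        occupies len_time consecutive rows
    Returns an array of shape [num_shifts + 1, num_examples, n] in which
    slice j holds every state j steps ahead of the matching state in slice 0.
    """
    n = data.shape[1]
    num_traj = data.shape[0] // len_time
    per_traj = len_time - num_shifts  # examples contributed per trajectory
    stacked = np.zeros([num_shifts + 1, num_traj * per_traj, n])
    for j in range(num_shifts + 1):
        for t in range(num_traj):
            traj = data[t * len_time:(t + 1) * len_time, :]
            stacked[j, t * per_traj:(t + 1) * per_traj, :] = traj[j:j + per_traj, :]
    return stacked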
Example #3
import time

import numpy as np
import tensorflow as tf

import helperfns
import networkarch as net  # assumed name of the module providing create_koopman_net
def try_net(data_val, params):
    """Run an experiment for particular params and data (older variant of
    Example #2 with explicit phase/keep_prob placeholders for dropout).

    Arguments:
        data_val -- array containing validation dataset
        params -- dictionary of parameters for experiment
    """
    # SET UP NETWORK
    phase = tf.placeholder(tf.bool, name='phase')
    keep_prob = tf.placeholder(tf.float64, shape=[], name='keep_prob')
    x, y, g_list, weights, biases, g_list_omega = net.create_koopman_net(
        phase, keep_prob, params)

    max_shifts_to_stack = helperfns.num_shifts_in_stack(params)

    # DEFINE LOSS FUNCTION
    trainable_var = tf.trainable_variables()
    loss1, loss2, loss3, loss_Linf, loss = define_loss(x, y, g_list,
                                                       g_list_omega, params)
    loss_L1, loss_L2, regularized_loss = define_regularization(
        params, trainable_var, loss)

    # CHOOSE OPTIMIZATION ALGORITHM
    optimizer = helperfns.choose_optimizer(params, regularized_loss,
                                           trainable_var)

    # LAUNCH GRAPH AND INITIALIZE
    sess = tf.Session()
    saver = tf.train.Saver()

    init = tf.global_variables_initializer()
    sess.run(init)

    csv_path = params['model_path'].replace('model', 'error')
    csv_path = csv_path.replace('ckpt', 'csv')
    print(csv_path)

    num_saved_per_file_pass = params['num_steps_per_file_pass'] / 20 + 1
    num_saved = np.floor(num_saved_per_file_pass * params['data_train_len'] *
                         params['num_passes_per_file']).astype(int)
    train_val_error = np.zeros([num_saved, 16])
    count = 0
    best_error = 10000

    data_val_tensor = helperfns.stack_data(data_val, max_shifts_to_stack,
                                           params['len_time'])

    start = time.time()
    finished = 0
    saver.save(sess, params['model_path'])

    # TRAINING
    # loop over training data files
    for f in range(params['data_train_len'] * params['num_passes_per_file']):
        if finished:
            break
        file_num = (f % params['data_train_len']) + 1  # 1...data_train_len

        if (params['data_train_len'] > 1) or (f == 0):
            # don't keep reloading data if always same
            data_train = np.loadtxt(
                ('./data/%s_train%d_x.csv' % (params['data_name'], file_num)),
                delimiter=',')
            data_train_tensor = helperfns.stack_data(data_train,
                                                     max_shifts_to_stack,
                                                     params['len_time'])
            num_examples = data_train_tensor.shape[1]
            num_batches = int(np.floor(num_examples / params['batch_size']))

        ind = np.arange(num_examples)
        np.random.shuffle(ind)
        data_train_tensor = data_train_tensor[:, ind, :]

        # loop over batches in this file
        for step in range(params['num_steps_per_batch'] * num_batches):

            if params['batch_size'] < data_train_tensor.shape[1]:
                offset = (step * params['batch_size']) % (num_examples -
                                                          params['batch_size'])
            else:
                offset = 0

            batch_data_train = data_train_tensor[:, offset:(
                offset + params['batch_size']), :]

            feed_dict_train = {
                x: batch_data_train,
                phase: True,
                keep_prob: params['dropout_rate']
            }
            feed_dict_train_loss = {
                x: batch_data_train,
                phase: True,
                keep_prob: 1.0
            }
            feed_dict_val = {x: data_val_tensor, phase: False, keep_prob: 1.0}

            sess.run(optimizer, feed_dict=feed_dict_train)

            if step % 20 == 0:
                train_error = sess.run(loss, feed_dict=feed_dict_train_loss)
                val_error = sess.run(loss, feed_dict=feed_dict_val)

                if val_error < (best_error - best_error * (10**(-5))):
                    best_error = val_error.copy()
                    saver.save(sess, params['model_path'])
                    print("New best val error %f" % best_error)

                train_val_error[count, 0] = train_error
                train_val_error[count, 1] = val_error
                # columns: even = train, odd = val; losses logged in order
                # regularized_loss, loss1, loss2, loss3, loss_Linf, loss_L1, loss_L2
                logged_losses = [regularized_loss, loss1, loss2, loss3,
                                 loss_Linf, loss_L1, loss_L2]
                train_val_error[count, 2:16:2] = sess.run(
                    logged_losses, feed_dict=feed_dict_train_loss)
                train_val_error[count, 3:16:2] = sess.run(
                    logged_losses, feed_dict=feed_dict_val)

                np.savetxt(csv_path, train_val_error, delimiter=',')
                finished, save_now = helperfns.check_progress(
                    start, best_error, params)
                count += 1  # count the row just logged before truncating/saving
                if save_now:
                    train_val_error_trunc = train_val_error[:count, :]
                    helperfns.save_files(sess, saver, csv_path,
                                         train_val_error_trunc, params,
                                         weights, biases)
                if finished:
                    break

            if step > params['num_steps_per_file_pass']:
                params['stop_condition'] = 'reached num_steps_per_file_pass'
                break

    # SAVE RESULTS
    train_val_error = train_val_error[:count, :]
    print(train_val_error)
    params['time_exp'] = time.time() - start
    saver.restore(sess, params['model_path'])
    helperfns.save_files(sess, saver, csv_path, train_val_error, params,
                         weights, biases)
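
One detail shared by both versions: a checkpoint is taken only when the validation error beats the best error seen so far by a relative margin of 10^-5. Factored out as a standalone predicate (a sketch; the originals inline the test):

def improved(val_error, best_error, rel_tol=1e-5):
    # algebraically identical to: val_error < best_error - best_error * rel_tol
    return val_error < best_error * (1.0 - rel_tol)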