Example #1
    def update(t):

        # Get eta for this timestep (integer divide isn't too slow)
        eta_i = min(t // steps_per_eta_val, eta_i_max)
        eta = eta_vals[eta_i]
        eta_text.set_text(r"$\eta=$%g" % eta)

        # All particles take one step
        model.step(state, L, N, v0, R, eta, t)
        t = t + 1

        # Update animation
        particles.set_data(state[:, 0], state[:, 1])
        leaders.set_data(state[:N_ldr, 0], state[:N_ldr, 1])

        return particles, leaders, box, eta_text
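The update callback above only drives the Matplotlib animation; the physics itself is delegated to model.step. For orientation, here is a minimal sketch of a Vicsek-style step with the same call signature, assuming state is an N×3 array of x, y positions plus a heading angle and that eta is the width of the uniform angular noise; the actual model module may organise this differently.

    import numpy as np

    def step(state, L, N, v0, R, eta, t):
        """One Vicsek update: align each heading with its neighbours within R, add noise, move."""
        x, y, theta = state[:, 0], state[:, 1], state[:, 2]
        new_theta = np.empty(N)
        for i in range(N):
            # Neighbours within the interaction radius R (periodic images ignored for brevity)
            near = (x - x[i]) ** 2 + (y - y[i]) ** 2 < R ** 2
            # Circular mean of neighbour headings plus uniform noise in [-eta/2, eta/2]
            mean_angle = np.arctan2(np.sin(theta[near]).mean(), np.cos(theta[near]).mean())
            new_theta[i] = mean_angle + eta * (np.random.rand() - 0.5)
        # Advance positions at constant speed v0, wrapping into the periodic box [0, L); t is unused here
        state[:, 2] = new_theta
        state[:, 0] = (x + v0 * np.cos(new_theta)) % L
        state[:, 1] = (y + v0 * np.sin(new_theta)) % L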
Example #2
    def update(t):

        # Get eta for this timestep
        eta_i = min(t // steps_per_eta_val, eta_i_max)
        eta = eta_vals[eta_i]
        eta_text.set_text(r"$\eta=$%g" % eta)

        # All particles take one step
        model.step(state, L, N, v0, R, eta, t)
        V = model.order_parameter(state, V_coeff)
        V_series.append(V)
        t = t + 1

        # Update particle animation
        particles.set_data(state[:, 0], state[:, 1])
        leaders.set_data(state[:N_ldr, 0], state[:N_ldr, 1])

        # Take average of 'window' data points every 'plot_step' steps
        if t > window and t % plot_step == 0:
            # Do averages
            V_window = np.array(V_series[-window:])
            V_mean = np.mean(V_window)
            Vsq_mean = np.mean(V_window * V_window)
            chi = chi_coeff * (Vsq_mean - V_mean * V_mean)

            # Update data lists
            V_mean_series.append(V_mean)
            chi_series.append(chi)
            t_series.append(t * dt)

            # Update animated plots
            V_plot.set_data(t_series, V_mean_series)
            chi_plot.set_data(t_series, chi_series)

            # Update limits if necessary
            tmax = ax2.get_xlim()[1]
            chimax = np.max(chi_series)
            if t * dt > tmax or chi > ax3.get_ylim()[1]:
                ax2.set_xlim(0, 2 * tmax)
                ax3.set_xlim(0, 2 * tmax)
                ax3.set_ylim(0, 1.1 * chimax)
                ax2.figure.canvas.draw()
                ax3.figure.canvas.draw()

        return particles, leaders, box, eta_text, V_plot, chi_plot
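The second example additionally tracks the order parameter V and a susceptibility-like quantity chi = chi_coeff * (<V^2> - <V>^2) over a sliding window. In Vicsek-type models the order parameter is usually the magnitude of the mean heading vector; a hedged sketch of such an order_parameter, assuming the same state layout as in the sketch above and V_coeff = 1 / N:

    import numpy as np

    def order_parameter(state, V_coeff):
        """Polarisation in [0, 1]: V_coeff-scaled magnitude of the summed heading unit vectors."""
        vx = np.cos(state[:, 2]).sum()
        vy = np.sin(state[:, 2]).sum()
        return V_coeff * np.hypot(vx, vy)

With V_coeff = 1 / N this returns 1 for perfectly aligned particles and values near 0 for random headings, so the chi computed over the window is just the scaled variance of V.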
Example #3
def decode():
    with tf.Session() as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = batch_size  # We decode one sentence at a time.

        # Load vocabularies.
        vocab_path = os.path.join(vocab_dir, "vocab")
        vocab_list, vocab_dict = data_util.initialize_vocabulary(vocab_path)

        # Tokenize the input document and decode it.
        document = data_util.basic_tokenizer(input_dir)
        if document:
            # Get token-ids for the input sentence.
            token_ids = data_util.sentence_to_token_ids(document, vocab_dict)
            # Which bucket does it belong to?
            bucket_id = len(buckets) - 1
            for i, bucket in enumerate(buckets):
                if bucket[0] >= len(token_ids):
                    bucket_id = i
                    break
            else:
                logging.warning("Document truncated: %s", document)

            # Get a 1-element batch to feed the sentence to the model.
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)

            # Get output logits for the sentence.
            _, _, output_logits = model.step(sess, encoder_inputs,
                                             decoder_inputs, target_weights,
                                             bucket_id, True)

            # This is a greedy decoder - outputs are just argmaxes of output_logits.
            outputs = [
                int(np.argmax(logit, axis=1)) for logit in output_logits
            ]

            # If there is an EOS symbol in outputs, cut them at that point.
            if data_util.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_util.EOS_ID)]

            summary = data_util.token_ids_to_sentence(outputs, vocab_list)
            print(' '.join(summary))
Example #4
def nlu_interface(nlu_inputs, sess, model):
    # nlu_output = {'nlu_result':{'intent':'', 'slots':[]}}
    # CONFIRM_LIST, REJECT_LIST = get_y_n()

    nlu_inputs = nlu_inputs.strip().replace(' ', '')
    assert isinstance(nlu_inputs, str)
    inputs = cut_sentence(nlu_inputs)
    id_inputs = data_utils.nlu_input_to_token_ids(inputs, input_vocab_path,
                                                  data_utils.tab_tokenizer)
    _inputs, _labels, _sequence_length = model.get_one([[id_inputs, [0]]], 0)
    # pdb.set_trace()
    _, step_loss, logits, summary = model.step(sess, _inputs, _labels,
                                               _sequence_length, True)
    label_logit = logits['label'][0]
    predict_label_ids = np.argmax(label_logit, 1)[:_sequence_length[0]]
    predict_label = [rev_label_vocab[x] for x in predict_label_ids]
    nlu_output = '\t'.join(predict_label)

    return nlu_output
Example #5
# ------------------ #
#  Initialise model  #
# ------------------ #
box1, box2, glob = model.initialise()


# --------------------------- #
#  Integrate forward in time  #
# --------------------------- #
for n in range(nt):
    
    # Update fluxes, moisture, circulation using current temperatures
    model.update(n, box1, box2, glob)

    # Step temperatures forward
    model.step(n, box1, box2, glob)

    # Print every 'n_print' steps
    if n % n_print == 0:
        years, months, days = model.simulation_time(n)
        print("Simulation time: %dy, %dm, %dd" % (years, months, days))

    # Save every 'n_save' steps (excluding step 0)
    if (n+1) % n_save == 0:
        years, months, days = model.simulation_time(n)
        print("Saving time series at simulation time: %d years, %d months, %d days" % (years, months, days))
        
        model.save(n+1, box1, box2, glob)

# Update fluxes, moisture, circulation for final timestep
model.update(nt, box1, box2, glob)
Example #6
import model
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import colors
import matplotlib.animation as animation
from agent import House

model = model.BurglaryModel(0, 128, 128, 5, .06, 5.6, 0.1, 0.019, 1, 5, 5, 0.3)

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    for i in range(1000):
        model.step()
        print(i)

    crime_counts = np.zeros((model.grid.width, model.grid.height))
    for cell in model.grid.coord_iter():
        content, x, y = cell
        crimes = 0
        for row in content:
            if isinstance(row, House):
                crimes = row.att_t
                crime_counts[x][y] = crimes

    norm = colors.Normalize(vmin=0.1, vmax=(model.theta * 0.75))

    plt.imshow(crime_counts,
               interpolation='nearest',
               cmap=plt.get_cmap('seismic'),
               norm=norm)
    plt.colorbar()
    plt.show()
Example #7
        # Print current repeat number
        print("            %d/%d" % (rep + 1, repeats))

        # Create a brand new initial state
        state = model.init(L, N, v0, R, eta)

        # Initialise V, t and set V_coeff
        V = 0
        t = 0

        # Loop over steps until order parameter hits threshold
        while V < 0.98:

            # All particles take one step
            model.step(state, L, N, v0, R, eta, t)
            V = model.order_parameter(state, V_coeff)
            t += 1

        # Save steps to burn in
        nt_burn_repeats[rep] = t

    # End loop over repeats

    # Compute mean and standard error of burn-in time
    nt_burn_mean[iN] = np.mean(nt_burn_repeats)
    nt_burn_stderr[iN] = np.std(nt_burn_repeats) / np.sqrt(repeats)

    iN += 1

# End loop over particle numbers
Example #8
def train():
    from_train = None
    to_train = None
    from_dev = None
    to_dev = None

    with tf.Session() as sess:
        # Create model.
        print("Creating %d layers of %d units." % (num_layers, size))
        model = create_model(sess, False)

        # Read data into buckets and compute their sizes.
        print("Reading development and training data (limit: %d)." %
              max_train_data_size)
        dev_set = read_data(from_dev, to_dev)
        train_set = read_data(from_train, to_train, max_train_data_size)
        train_bucket_sizes = [len(train_set[b]) for b in range(len(buckets))]
        train_total_size = float(sum(train_bucket_sizes))

        # A bucket scale is a list of increasing numbers from 0 to 1 that we'll use
        # to select a bucket. Length of [scale[i], scale[i+1]] is proportional to
        # the size of the i-th training bucket, as used later.
        train_buckets_scale = [
            sum(train_bucket_sizes[:i + 1]) / train_total_size
            for i in range(len(train_bucket_sizes))
        ]

        # This is the training loop.
        step_time, loss = 0.0, 0.0
        current_step = 0
        previous_losses = []
        while True:
            # Choose a bucket according to data distribution. We pick a random number
            # in [0, 1] and use the corresponding interval in train_buckets_scale.
            random_number_01 = np.random.random_sample()
            bucket_id = min([
                i for i in range(len(train_buckets_scale))
                if train_buckets_scale[i] > random_number_01
            ])

            # Get a batch and make a step.
            start_time = time.time()
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                train_set, bucket_id)
            _, step_loss, _ = model.step(sess, encoder_inputs, decoder_inputs,
                                         target_weights, bucket_id, False)
            step_time += (time.time() - start_time) / steps_per_checkpoint
            loss += step_loss / steps_per_checkpoint
            current_step += 1

            # Once in a while, we save checkpoint, print statistics, and run evals.
            if current_step % steps_per_checkpoint == 0:
                # Print statistics for the previous epoch.
                perplexity = math.exp(
                    float(loss)) if loss < 300 else float("inf")
                print(
                    "global step %d learning rate %.4f step-time %.2f perplexity "
                    "%.2f" %
                    (model.global_step.eval(), model.learning_rate.eval(),
                     step_time, perplexity))
                # Decrease learning rate if no improvement was seen over last 3 times.
                if len(previous_losses) > 2 and loss > max(
                        previous_losses[-3:]):
                    sess.run(model.learning_rate_decay_op)
                previous_losses.append(loss)
                # Save checkpoint and zero timer and loss.
                checkpoint_path = os.path.join(train_dir, "fyp.ckpt")
                model.saver.save(sess,
                                 checkpoint_path,
                                 global_step=model.global_step)
                step_time, loss = 0.0, 0.0
                # Run evals on development set and print their perplexity.
                for bucket_id in range(len(buckets)):
                    if len(dev_set[bucket_id]) == 0:
                        print("  eval: empty bucket %d" % (bucket_id))
                        continue
                    encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                        dev_set, bucket_id)
                    _, eval_loss, _ = model.step(sess, encoder_inputs,
                                                 decoder_inputs,
                                                 target_weights, bucket_id,
                                                 True)
                    eval_ppx = math.exp(
                        float(eval_loss)) if eval_loss < 300 else float("inf")
                    print("  eval: bucket %d perplexity %.2f" %
                          (bucket_id, eval_ppx))
                sys.stdout.flush()
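The bucket choice near the top of the training loop is plain inverse-transform sampling over cumulative bucket sizes: train_buckets_scale holds cumulative size fractions, and the first entry exceeding a uniform random number selects the bucket. A small standalone illustration with made-up bucket sizes:

    import numpy as np

    train_bucket_sizes = [100, 300, 600]   # hypothetical sizes of three buckets
    total = float(sum(train_bucket_sizes))
    train_buckets_scale = [sum(train_bucket_sizes[:i + 1]) / total
                           for i in range(len(train_bucket_sizes))]   # [0.1, 0.4, 1.0]

    random_number_01 = np.random.random_sample()
    bucket_id = min(i for i in range(len(train_buckets_scale))
                    if train_buckets_scale[i] > random_number_01)
    # bucket 0 is chosen with probability 0.1, bucket 1 with 0.3, bucket 2 with 0.6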
Example #9
def main(_):
    pp.pprint(FLAGS.__flags)
    emb = None

    try:
        # pre-trained chars embedding
        emb = np.load("./data/emb.npy")
        chars = cPickle.load(open("./data/vocab.pkl", 'rb'))
        vocab_size, emb_size = np.shape(emb)
        data_loader = TextLoader('./data', FLAGS.batch_size, chars)
    except Exception:
        data_loader = TextLoader('./data', FLAGS.batch_size)
        emb_size = FLAGS.emb_size
        vocab_size = data_loader.vocab_size

    model = DialogueModel(batch_size=FLAGS.batch_size,
                          max_seq_length=data_loader.seq_length,
                          vocab_size=vocab_size,
                          pad_token_id=0,
                          unk_token_id=UNK_ID,
                          emb_size=emb_size,
                          memory_size=FLAGS.memory_size,
                          keep_prob=FLAGS.keep_prob,
                          learning_rate=FLAGS.learning_rate,
                          grad_clip=FLAGS.grad_clip,
                          temperature=FLAGS.temperature,
                          infer=False)

    summaries = tf.summary.merge_all()

    init = tf.global_variables_initializer()

    # save hyper-parameters
    cPickle.dump(FLAGS.__flags, open(FLAGS.logdir + "/hyperparams.pkl", 'wb'))

    checkpoint = FLAGS.checkpoint + '/model.ckpt'
    count = 0

    saver = tf.train.Saver()

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)

        sess.run(init)

        if len(glob(checkpoint + "*")) > 0:
            saver.restore(sess, checkpoint)
            print("Model restored!")
        else:
            # load embedding
            if emb is not None:
                sess.run([], {model.embedding: emb})
            print("Fresh variables!")

        current_step = 0
        count = 0

        for e in range(FLAGS.num_epochs):
            data_loader.reset_batch_pointer()
            state = None

            # iterate by batch
            for _ in range(data_loader.num_batches):
                x, y, input_lengths, output_lengths = data_loader.next_batch()

                if (current_step + 1) % 10 != 0:
                    res = model.step(sess, x, y, input_lengths, output_lengths,
                                     state)
                else:
                    res = model.step(sess, x, y, input_lengths, output_lengths,
                                     state, summaries)
                    summary_writer.add_summary(res["summary_out"],
                                               current_step)
                    loss = res["loss"]
                    perplexity = np.exp(loss)
                    count += 1
                    print("{0}/{1}({2}), perplexity {3}".format(
                        current_step + 1,
                        FLAGS.num_epochs * data_loader.num_batches, e,
                        perplexity))
                state = res["final_state"]

                if (current_step + 1) % 2000 == 0:
                    count = 0
                    summary_writer.flush()
                    save_path = saver.save(sess, checkpoint)
                    print("Model saved in file:", save_path)

                current_step = tf.train.global_step(sess, model.global_step)

        summary_writer.close()
        save_path = saver.save(sess, checkpoint)
        print("Model saved in file:", save_path)