Code example #1
def get_session(batch_size):
    """
      Returns an open tf session from last valid checkpoint. Useful to use a saved trained model.
      ie. use this function in a Jupyter notebook to create a session.
    """

    # Model
    input_real, input_z, input_d_y, input_g_y = model.model_inputs(
        DIM_X, DIM_Y, DIM_Z, Z_NOISE_DIM, Y_DIM)
    dcgan.generator(input_z, input_g_y)  # build the generator graph so the saver can restore its variables
    global_step = tf.Variable(0, trainable=False, name='global_step')
    saver = tf.train.Saver(max_to_keep=10, keep_checkpoint_every_n_hours=5)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    try:
        saver.restore(sess,
                      tf.train.latest_checkpoint(BASE_PATH + CHKPTS_PATH))
        last_global_step = sess.run(global_step)
        print(last_global_step)
    except ValueError:
        print("Error loading checkpoint, no valid checkpoint found")

    return sess, input_real, input_z, input_g_y
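
The restore-then-fall-back pattern above (initialize variables, then try to load the newest checkpoint) also appears in code example #5 below. As a minimal, self-contained TF 1.x sketch, with the helper name restore_or_init and the ckpt_dir argument being illustrative rather than part of the original project, it boils down to:

import tensorflow as tf  # TF 1.x API, as in the examples on this page

def restore_or_init(sess, saver, ckpt_dir, global_step):
    """Restore the newest checkpoint in ckpt_dir if one exists, else keep the freshly initialized variables."""
    sess.run(tf.global_variables_initializer())
    latest = tf.train.latest_checkpoint(ckpt_dir)  # None when the directory holds no checkpoint
    if latest is None:
        print("No valid checkpoint found in", ckpt_dir)
        return 0
    saver.restore(sess, latest)
    return sess.run(global_step)  # resume from the stored step counter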
Code example #2
from datetime import datetime

import tensorflow as tf
from tensorflow.contrib.seq2seq import sequence_loss

import data  # assumed project-local module; the code below calls data.get_word_dicts() and data.dparams
from model import model_inputs, seq2seq_model

print('Inference preparation started @ {}'.format(str(datetime.now())))
print('Initialize vocabulary...')
vocab_to_int, int_to_vocab = data.get_word_dicts()

print('Re-initialize TensorFlow graph...')
train_graph = tf.Graph()

with train_graph.as_default():

    print('Initialize model...')
    # Create placeholders for inputs to the model, which are initially empty
    input_data, targets, lr, keep_prob = model_inputs()

    # Sequence length will be the max line length for each batch
    sequence_length = \
        tf.placeholder_with_default(data.dparams['max_line_length'],
                                    None,
                                    name='sequence_length')
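    # Note: placeholder_with_default evaluates to the given default
    # (data.dparams['max_line_length']) unless a value is fed via feed_dict,
    # so callers can override the maximum sequence length per batch.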

    # Find the shape of the input data for sequence_loss
    input_shape = tf.shape(input_data)

    print('Create logits using the model...')
    # Create training and inference logits
    train_logits, inference_logits = \
        seq2seq_model(tf.reverse(input_data, [-1]),
                      targets,
Code example #3
    data.fetch_tparams()

print('Initialize vocabulary...')
vocab_to_int, int_to_vocab = data.get_word_dicts()

print('Initialize training set...')
sorted_questions, sorted_answers = data.get_sorted_qa()

print('Initialize graph...')
train_graph = tf.Graph()

with train_graph.as_default():

    print('Initialize model...')
    # Create placeholders for inputs to the model, which are initially empty
    input_data, targets, learn_rate, keep_prob = model_inputs()

    # Sequence length will be the max line length for each batch
    sequence_length = \
        tf.placeholder_with_default(max_length,
                                    None,
                                    name='sequence_length')

    # Find the shape of the input data for sequence_loss
    input_shape = tf.shape(input_data)

    # Create training and inference logits
    train_logits, inference_logits = \
        seq2seq_model(tf.reverse(input_data, [-1]),
                      targets,
                      keep_prob,
Code example #4
min_learning_rate = 0.0001
keep_probability = 0.5

# Getting the question-word-to-integer and answer-word-to-integer dictionaries
questionwords2int, answerwords2int = preprocessing.creating_dictionaries()

# Getting sorted clean questions and answers
sorted_clean_questions, sorted_clean_answers = preprocessing.sorted_clean_ques_ans()

# Defining a session
tf.reset_default_graph()
session = tf.InteractiveSession()

# Loading the model inputs
inputs, targets, lr, keep_prob = model.model_inputs()

# Setting the sequence length
sequence_length = tf.placeholder_with_default(25, None, name='sequence_length')

# Getting the shape of the input tensor
input_shape = tf.shape(inputs)

# Getting the training and test predictions
training_predictions, test_predictions = model.seq2seq_model(
    tf.reverse(inputs, [-1]), targets, keep_prob, batch_size, sequence_length,
    len(answerwords2int), len(questionwords2int), encoding_embedding_size,
    decoding_embedding_size, rnn_size, num_layers, questionwords2int)
# Setting up the loss error, the optimizer and gradient clipping
with tf.name_scope("optimization"):
    loss_error = tf.contrib.seq2seq.sequence_loss(
Code example #5
def run():
    """
    run the program
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--generate",
                        nargs='?',
                        const=1,
                        type=int,
                        help="num of examples to generate")
    parser.add_argument(
        "--labels",
        nargs='?',
        const='r',
        help="p - positive labels, n - negative labels, no argument - random")
    parser.add_argument("--save_tensors", help="Save also tensors")
    parser.add_argument("--out_prefix", help="prefix for output filenames")

    parser.add_argument("--image", help="image_file")

    args = parser.parse_args()
    if args.out_prefix and (args.generate is None):
        parser.error("--out_prefix requires --generate argument")
    if args.save_tensors and (args.generate is None):
        parser.error("--save_tensors requires --generate argument")
    if args.labels and (args.generate is None):
        parser.error("--labels requires --generate argument")
    if args.out_prefix is None:
        args.out_prefix = "output"
    if args.save_tensors is None:
        args.save_tensors = False
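    # np.eye(2)[ids] indexes rows of the 2x2 identity matrix, turning a vector of
    # 0/1 class ids into one-hot rows (0 -> [1, 0], 1 -> [0, 1]).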
    if args.labels == 'p':
        labels = [1] * BATCH_SIZE
        labels = np.eye(2)[labels]
    elif args.labels == 'n':
        labels = [0] * BATCH_SIZE
        labels = np.eye(2)[labels]
    else:
        labels = np.random.randint(0, 2, BATCH_SIZE)
        labels = np.eye(2)[labels]

    # Seed for reproducibility
    tf.set_random_seed(SEED)

    # Dataset
    dataset, dataset_len = data.create_dataset(BASE_PATH + DATA_PATH,
                                               BATCH_SIZE, NUM_EPOCHS)
    iterator = dataset.make_initializable_iterator()

    global_step = tf.Variable(0, trainable=False, name='global_step')
    increment_global_step = tf.assign_add(global_step,
                                          1,
                                          name='increment_global_step')

    # Model
    input_real, input_z, input_d_y, input_g_y = model.model_inputs(
        DIM_X, DIM_Y, DIM_Z, Z_NOISE_DIM, Y_DIM)
    total_steps = (dataset_len / BATCH_SIZE) * NUM_EPOCHS
    starter_stdev = 0.1

    # exponential_decay: decayed_stdev = starter_stdev * decay_rate ^ (global_step / decay_steps)
    decaying_stdev = tf.train.exponential_decay(starter_stdev, global_step,
                                                total_steps * 10, 0.0001)
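    # With decay_steps = total_steps * 10 and decay_rate = 1e-4, the stddev only
    # falls to about 0.1 * 1e-4 ** 0.1 ~= 0.04 by the end of training, so the
    # injected noise shrinks gradually rather than vanishing.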

    decaying_noise = tf.random_normal(shape=tf.shape(input_real),
                                      mean=0.0,
                                      stddev=decaying_stdev,
                                      dtype=tf.float32)
    tf.summary.scalar("stdev",
                      tf.keras.backend.std(decaying_noise),
                      collections=["d_summ"])
    d_loss, g_loss = model.model_loss(input_real,
                                      input_z,
                                      input_d_y,
                                      input_g_y,
                                      decaying_noise=decaying_noise)
    d_train_opt, g_train_opt = model.model_opt(d_loss, g_loss, D_LEARNING_RATE,
                                               G_LEARNING_RATE, BETA1)

    z_batch_tensor = tf.random.uniform((BATCH_SIZE, Z_NOISE_DIM),
                                       dtype=tf.float32,
                                       minval=-1,
                                       maxval=1)

    saver = tf.train.Saver(max_to_keep=10, keep_checkpoint_every_n_hours=1)

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        sess.run(iterator.initializer)
        try:
            saver.restore(sess,
                          tf.train.latest_checkpoint(BASE_PATH + CHKPTS_PATH))
            last_global_step = sess.run(global_step)
            print(last_global_step)
        except ValueError:
            print("Error loading checkpoint, no valid checkpoint found")

        if args.generate:
            output.generate_samples(sess,
                                    z_batch_tensor,
                                    input_z,
                                    input_g_y,
                                    labels,
                                    args.generate,
                                    save_tensor=args.save_tensors,
                                    name_prefix=args.out_prefix)

        elif args.image:
            start_img = Image.open(args.image)
            img, lat_vector = latent_space.search_image(sess, start_img, 10000)
            img.save(BASE_PATH + OUTPUT_PATH + 'rec_output.png')
            np.save(BASE_PATH + OUTPUT_PATH + "zp_rec", lat_vector)

        else:
            train(sess, saver, z_batch_tensor, increment_global_step,
                  dataset_len, iterator, input_real, input_z, input_d_y,
                  input_g_y, d_loss, g_loss, d_train_opt, g_train_opt)
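
Given the argument parser defined at the top of run(), the three branches correspond to invocations along these lines (the script filename is not shown in the excerpt, so run.py and photo.png below are placeholders):

python run.py --generate 4 --labels p --out_prefix samples    # write generated samples with positive labels
python run.py --image photo.png                               # reconstruct an image via latent-space search
python run.py                                                 # neither flag given: fall through to train()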
Code example #6
File: train.py  Project: SupunIsharaWK/interviewbot
# ==============================================================================
epochs = 100
batch_size = 20
rnn_size = 400
num_layers = 2
learning_rate = 0.005
keep_probability = 0.8
beam_width = 20

print('Building graph')
# Build the graph
train_graph = tf.Graph()
# Set the graph to default to ensure that it is ready for training
with train_graph.as_default():
    # Load the model inputs
    input_data, targets, lr, keep_prob, target_length, max_target_length, input_length = model.model_inputs()

    # Create the training and inference logits
    training_logits, inference_logits = model.seq2seq_model(
        tf.reverse(input_data, [-1]), targets, keep_prob, input_length,
        target_length, max_target_length,
        len(vocab2int) + 1, rnn_size, num_layers, vocab2int,
        word_embedding_matrix, batch_size, beam_width)

    # Create tensors for the training logits and inference logits
    training_logits = tf.identity(training_logits.rnn_output, 'logits')
    inference_logits = tf.identity(inference_logits.predicted_ids,
                                   name='predictions')

    # Create the weights for sequence_loss
    masks = tf.sequence_mask(target_length,
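
The listing is cut off inside the tf.sequence_mask call. As a general TF 1.x illustration, not the interviewbot project's exact code, sequence_mask and sequence_loss are typically combined as follows (the vocabulary size of 1000 and the placeholder shapes are arbitrary choices for the sketch):

import tensorflow as tf
from tensorflow.contrib.seq2seq import sequence_loss

# Shapes: logits [batch, time, vocab], targets [batch, time], target_length [batch]
logits = tf.placeholder(tf.float32, [None, None, 1000], name='logits')
targets = tf.placeholder(tf.int32, [None, None], name='targets')
target_length = tf.placeholder(tf.int32, [None], name='target_length')

# 1.0 for real tokens and 0.0 for padding, so padded positions add nothing to the loss
masks = tf.sequence_mask(target_length,
                         maxlen=tf.reduce_max(target_length),
                         dtype=tf.float32,
                         name='masks')
cost = sequence_loss(logits, targets, masks)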