Code example #1
def main(sample_str=None):
    """Predict a title for a recipe."""
    # load model parameters used for training
    with open(path.join(path_models, 'model_params.json'), 'r') as f:
        model_params = json.load(f)

    # create placeholder model
    model = create_model(**model_params)

    # load weights from training run
    load_weights(model, path.join(path_models, '{}.hdf5'.format(FN1)))

    # load recipe titles and descriptions
    with open(path.join(path_data, 'vocabulary-embedding.data.pkl'), 'rb') as fp:
        X_data, Y_data = pickle.load(fp)

    # load vocabulary
    with open(path.join(path_data, '{}.pkl'.format(FN0)), 'rb') as fp:
        embedding, idx2word, word2idx, glove_idx2idx = pickle.load(fp)
    vocab_size, embedding_size = embedding.shape
    oov0 = vocab_size - nb_unknown_words

    if sample_str is None:
        # load random recipe description if none provided
        i = np.random.randint(len(X_data))
        sample_str = ' '.join(idx2word[w] for w in X_data[i])
        sample_title = ' '.join(idx2word[w] for w in Y_data[i])
        y = Y_data[i]
        print('Randomly sampled recipe:')
        print(sample_title)
        print(sample_str)
    else:
        sample_title = ''
        y = [eos]

    x = [word2idx[w.rstrip('^')] for w in sample_str.split()]

    samples = gensamples(
        skips=2,
        k=1,
        batch_size=2,
        short=False,
        temperature=1.,
        use_unk=True,
        model=model,
        data=(x, y),
        idx2word=idx2word,
        oov0=oov0,
        glove_idx2idx=glove_idx2idx,
        vocab_size=vocab_size,
        nb_unknown_words=nb_unknown_words,
    )

    # each returned sample starts with the echoed input tokens; drop that
    # prefix to keep only the generated title, then print it
    headline = samples[0][0][len(samples[0][1]):]
    print(' '.join(idx2word[w] for w in headline))
Code example #2
File: comp_models.py  Project: BinbinBian/genie-kb
    def __init__(self, models, kb, size, batch_size, is_train=True, num_neg=200, learning_rate=1e-2, l2_lambda=0.0,
                 is_batch_training=False, composition=None, share_vars=False):
        self._models = []
        self.__name = '_'.join(models)
        if composition:
            self.__name = composition + "__" + self.__name
        with vs.variable_scope(self.name()):
            for m in models:
                self._models.append(model.create_model(kb, size, batch_size, False, num_neg, learning_rate,
                                                       l2_lambda, False, composition=composition, type=m))

        # initialize the shared base-class state after the sub-models have
        # been created inside this model's variable scope
        AbstractKBScoringModel.__init__(self, kb, size, batch_size, is_train, num_neg, learning_rate,
                                        l2_lambda, is_batch_training)
Code example #3
File: task.py  Project: zhang01GA/cloudml-samples
def train_and_evaluate(args):
  model = model_lib.create_model(args)
  env = json.loads(os.environ.get('TF_CONFIG', '{}'))

  # Print the job data as provided by the service.
  logging.info('Original job data: %s', env.get('job', {}))

  # First find out if there's a task value on the environment variable.
  # If there is none or it is empty define a default one.
  task_data = env.get('task', None) or {'type': 'master', 'index': 0}
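  # Build a throwaway class whose attributes mirror the task dict, so the
  # fields can be read below as task.type and task.index.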
  task = type('TaskSpec', (object,), task_data)
  trial = task_data.get('trial')
  if trial is not None:
    args.output_path = os.path.join(args.output_path, trial)
  if args.write_to_tmp and args.output_path.startswith('gs://'):
    output_path = args.output_path
    args.output_path = os.path.join('/tmp/', str(uuid.uuid4()))
    os.makedirs(args.output_path)
  else:
    output_path = None

  if args.copy_train_data_to_tmp:
    args.train_data_paths = copy_data_to_tmp(args.train_data_paths)
  if args.copy_eval_data_to_tmp:
    args.eval_data_paths = copy_data_to_tmp(args.eval_data_paths)

  if not args.eval_batch_size:
    # If eval_batch_size not set, use min of batch_size and eval_set_size
    args.eval_batch_size = min(args.batch_size, args.eval_set_size)
    logging.info("setting eval batch size to %s", args.eval_batch_size)

  cluster_data = env.get('cluster', None)
  cluster = tf.train.ClusterSpec(cluster_data) if cluster_data else None
  if args.write_predictions:
    write_predictions(args, model, cluster, task)
  else:
    dispatch(args, model, cluster, task)

  if output_path and (not cluster or not task or task.type == 'master'):
    subprocess.check_call([
        'gsutil', '-m', '-q', 'cp', '-r', args.output_path + '/*', output_path
    ])
    shutil.rmtree(args.output_path, ignore_errors=True)
Code example #4
# save model initialization parameters
model_params = dict(
    vocab_size=vocab_size,
    embedding_size=embedding_size,
    LR=args.lr,
    rnn_layers=args.rnn_layers,
    rnn_size=args.rnn_size,
)
with open(os.path.join(config.path_models, 'model_params.json'), 'w') as f:
    json.dump(model_params, f)


model = create_model(
    vocab_size=vocab_size,
    embedding_size=embedding_size,
    LR=args.lr,
    embedding=embedding,
    rnn_layers=args.rnn_layers,
    rnn_size=args.rnn_size,
)
inspect_model(model)

# load pre-trained model weights
FN1_filename = os.path.join(config.path_models, '{}.hdf5'.format(FN1))
if args.warm_start and FN1 and os.path.exists(FN1_filename):
    model.load_weights(FN1_filename)
    print('Model weights loaded from {}'.format(FN1_filename))

# print samples before training
gensamples(
    skips=2,
    k=10,
Code example #5
File: save.py  Project: CosmosShadow/MLPythonLib
# coding: utf-8
import os
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist
import model

def save_model(saver, sess, step):
	task_dir = 'checkpoint'
	file_name = 'linear'
	if not os.path.exists(task_dir):
		os.makedirs(task_dir)
	saver.save(sess, os.path.join(task_dir, file_name), global_step = step)

x, y = model.create_model()
saver = tf.train.Saver()

with tf.Session() as sess:
	sess.run(tf.initialize_all_variables())
	x_val = np.ones([2, 10])
	y_val = sess.run(y, feed_dict={x: x_val})
	print(y_val)

	save_model(saver, sess, 10)
	save_model(saver, sess, 20)

"""output
[[-0.30422074]
 [-0.30422074]]
"""
Code example #6
File: task.py  Project: cottrell/notebooks
def main(_):
  model, argv = model_lib.create_model()
  run(model, argv)
Code example #7
    # early stopping
    early_stopping = EarlyStopping(monitor='val_loss', patience=5)

    kf = KFold(n_splits=n_splits, shuffle=False, random_state=None)

    # Stacking
    for i, (train_index, test_index) in enumerate(kf.split(X_train), start=1):
        print('Start Fold {}'.format(i))
        t0_fold = time.time()

        X_train_prime, X_test_prime = X_train[train_index], X_train[test_index]
        y_train_prime, y_test_prime = y_train[train_index], y_train[test_index]

        # create model
        input_dim = X_train_prime.shape[1]
        model = create_model(input_dim)

        # model training
        history = model.fit_generator(
            generator=fit_batch_generator(X_train_prime, y_train_prime, batch_size),
            nb_epoch=nb_epoch,
            samples_per_epoch=X_train_prime.shape[0],
            verbose=1,
            validation_data=(X_test_prime, y_test_prime),
            callbacks=[early_stopping])

        # prediction
        preds = \
            np.exp(
                model.predict_generator(
                    generator=predict_batch_generator(X_test_prime, batch_size),
Code example #8
File: save.py  Project: CosmosShadow/MLPythonLib
import os
import tensorflow as tf
import prettytensor as pt
import numpy as np
import cmtf.data.data_mnist as data_mnist
import model

def save_model(saver, sess, step):
	task_dir = 'checkpoint'
	file_name = 'linear'
	if not os.path.exists(task_dir):
		os.makedirs(task_dir)
	saver.save(sess, os.path.join(task_dir, file_name), global_step = step)

timesteps = 10
x, y = model.create_model(timesteps)
saver = tf.train.Saver()

with tf.Session() as sess:
	sess.run(tf.initialize_all_variables())
	x_val = np.ones([2, timesteps, 5])
	y_val = sess.run(y, feed_dict={x: x_val})
	print(y_val)

	save_model(saver, sess, 10)
	save_model(saver, sess, 20)

"""output
[[ 0.18480827]
 [ 0.18480827]
 [ 0.42967057]
Code example #9
File: test.py  Project: intangere/cIon
import tensorflow as tf
import numpy as np
from model import create_model, buildSentence, respond
from config.config import FLAGS, _buckets, name
import data_utils
import os.path

sess = tf.Session()
# Create model and load parameters.
model = create_model(sess, True)
model.batch_size = 1  # We decode one sentence at a time.

# Load vocabularies.
vocab_path = os.path.join(FLAGS.data_dir, "vocab%d.in" % FLAGS.vocab_size)
vocab, vocab_rev = data_utils.initialize_vocabulary(vocab_path)

print('%s: %s' % (name, respond('hi.', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond('hello.', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond('hey.', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond('how are you?', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond('what is the meaning of life?', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond('you are a machine.', sess, model, vocab, vocab_rev)))

print('%s: %s' % (name, respond("you're a machine.", sess, model, vocab, vocab_rev)))
Code example #10
optimizer = get_optimizer(args)

###############################################################################################################################
## Building model

from model import create_model
import keras.backend as K

logger.info('  Building model')


def max_margin_loss(y_true, y_pred):
    # y_true is unused: the model's output is assumed to already carry the
    # per-example max-margin loss, so the Keras loss simply averages it
    return K.mean(y_pred)


model = create_model(args, overall_maxlen, vocab)
# freeze the word embedding layer
model.get_layer('word_emb').trainable = False
model.compile(optimizer=optimizer,
              loss=max_margin_loss,
              metrics=[max_margin_loss])

###############################################################################################################################
## Training
#
from keras.models import load_model
from tqdm import tqdm

logger.info(
    '--------------------------------------------------------------------------------------------------------------------------'
)
Code example #11
    mc = ModelCheckpoint(output_path, monitor=monitor, save_best_only=True)

    model.fit_generator(train_gen,
                        samples_per_epoch=nb_train_samples,
                        nb_epoch=nb_epoch,
                        callbacks=[tb, es, mc],
                        validation_data=val_gen,
                        nb_val_samples=nb_val_samples)

    return model


if __name__ == "__main__":
    import os
    from datetime import datetime
    import numpy as np
    from model import create_model

    np.random.seed(sum(map(ord, 'keras-finetuning')))

    now = datetime.now().strftime('%y%m%d-%H%M')
    model_path = '../models/' + now + '.h5'
    log_dir = '../logs/' + now
    os.mkdir(log_dir)

    model = create_model()
    model = train_top(model)
    # weights_path = sys.argv[1]
    # model.load_weights(weights_path)
    finetune(model, model_path, log_dir)
Code example #12
import os
import matplotlib.pyplot as plt
import cv2
from imageio import imread
from scipy.spatial import distance
from keras.models import load_model
import pandas as pd
from tqdm import tqdm
import dlib
from model import create_model
from align import AlignDlib
import glob
import imutils

# INITIALIZE MODELS
nn4_small2 = create_model()

nn4_small2.summary()

nn4_small2.load_weights('weights/nn4.small2.v1.h5')

alignment = AlignDlib('shape_predictor_68_face_landmarks.dat')

#LOAD TRAINING INFORMATION
train_paths = glob.glob("image/*")
print(train_paths)

nb_classes = len(train_paths)

df_train = pd.DataFrame(columns=['image', 'label', 'name'])
Code example #13
File: main.py  Project: tanmayaggarwal/CycleGAN
img = images[0]

print('Min: ', img.min())
print('Max: ', img.max())

# scaled range
scaled_img = scale(img)

print('Scaled min: ', scaled_img.min())
print('Scaled max: ', scaled_img.max())

# define the CycleGAN model
from model import create_model, print_models

G_XtoY, G_YtoX, D_X, D_Y = create_model(g_conv_dim=64,
                                        d_conv_dim=64,
                                        n_res_blocks=6)

# print all of the models
print_models(G_XtoY, G_YtoX, D_X, D_Y)

# computing the discriminator and generator losses
from loss_functions import real_mse_loss, fake_mse_loss, cycle_consistency_loss

# set hyperparameters for the Adam optimizer
lr = 0.0002
beta1 = 0.5
beta2 = 0.999

g_params = list(G_XtoY.parameters()) + list(
    G_YtoX.parameters())  # Get generator parameters
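
The excerpt ends before the optimizers are constructed. A typical continuation under the usual CycleGAN setup, assuming PyTorch's torch.optim (the optimizer code is not part of the original snippet), would be:

import torch.optim as optim

# hypothetical continuation: one Adam optimizer for both generators and one
# for each discriminator, reusing the hyperparameters defined above
g_optimizer = optim.Adam(g_params, lr, [beta1, beta2])
d_x_optimizer = optim.Adam(D_X.parameters(), lr, [beta1, beta2])
d_y_optimizer = optim.Adam(D_Y.parameters(), lr, [beta1, beta2])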
Code example #14
import os
import numpy as np
import cv2
from align import AlignDlib
from model import create_model
import pickle

alignment = AlignDlib('models/landmarks.dat')
nn4_small2_pretrained = create_model()
nn4_small2_pretrained.load_weights('weights/nn4.small2.v1.h5')

with open('knn_model.sav', 'rb') as f:
    knn = pickle.load(f)
with open('svc_model.sav', 'rb') as f:
    svc = pickle.load(f)
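
The pickled knn and svc classifiers are loaded alongside the embedding network. A minimal usage sketch (hypothetical, not from the original file), assuming both classifiers were trained on this network's 128-d face embeddings:

import numpy as np

# hypothetical example: embed a single aligned 96x96 RGB face crop scaled to
# [0, 1], then classify the resulting 128-d embedding vector
face = np.random.rand(1, 96, 96, 3).astype('float32')
embedding = nn4_small2_pretrained.predict(face)  # shape (1, 128)
print(knn.predict(embedding), svc.predict(embedding))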