Example #1
def main(argv=None):
    d = ImageDataset()
    d.npy_maker(globalconf.get_root() + 'transfer/small')
    d.combine(globalconf.get_root() + 'transfer/small', 5, 0.1, 0.1, 0.5)
    print(d.training_images.shape, d.training_labels.shape,
          d.validation_images.shape, d.validation_labels.shape,
          d.test_images.shape, d.test_labels.shape)
Example #2
def train():
    train_file = globalconf.get_root(
    ) + "rnn/ptb/simple-examples/data/id.train.txt"
    model_path = globalconf.get_root() + "rnn/ptb/model/model.ckpt"
    train_id_list = file_to_id_list(train_file)
    batch_size = 20
    timestep = 35
    hidden_size = 300
    vocab_size = 10000
    data = make_batch(train_id_list, batch_size, timestep)
    model = Model(batch_size, timestep, hidden_size, vocab_size, 0.9)
    # write the graph definition out for TensorBoard, then close the writer
    writer = tf.summary.FileWriter(logdir=globalconf.get_root() +
                                   "rnn/ptb/log",
                                   graph=tf.get_default_graph())
    writer.close()
    with tf.Session() as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        # create the Saver once; constructing it inside the loop would add
        # duplicate save ops to the graph on every epoch
        saver = tf.train.Saver()
        for epoch in range(10):
            print("epoch ", epoch)
            model.train_epoch(data, sess)
            saver.save(sess, save_path=model_path)
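file_to_id_list and make_batch are defined elsewhere in the project; a minimal sketch of helpers consistent with how they are called here (assumptions, not the project's originals):

import numpy as np

def file_to_id_list(path):
    # Concatenate every line of space-separated word ids into one long stream.
    with open(path) as f:
        return [int(w) for w in f.read().split()]

def make_batch(id_list, batch_size, timestep):
    # Standard PTB batching: trim the id stream to a whole number of batches,
    # reshape to [batch_size, num_batches * timestep], then slice into
    # (input, label) pairs where labels are the inputs shifted left by one.
    num_batches = (len(id_list) - 1) // (batch_size * timestep)
    data = np.array(id_list[:num_batches * batch_size * timestep])
    data = data.reshape([batch_size, num_batches * timestep])
    label = np.array(id_list[1:num_batches * batch_size * timestep + 1])
    label = label.reshape([batch_size, num_batches * timestep])
    data_batches = np.split(data, num_batches, axis=1)
    label_batches = np.split(label, num_batches, axis=1)
    return list(zip(data_batches, label_batches))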
Example #3
def valid():
    valid_file = globalconf.get_root(
    ) + "rnn/ptb/simple-examples/data/id.valid.txt"
    model_path = globalconf.get_root() + "rnn/ptb/model/model.ckpt"
    valid_id_list = file_to_id_list(valid_file)
    batch_size = 20
    timestep = 35
    hidden_size = 300
    vocab_size = 10000
    valid_data = make_batch(valid_id_list, batch_size, timestep)
    model = Model(batch_size, timestep, hidden_size, vocab_size, 1.0)
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        # the restore immediately overwrites the freshly initialized variables
        saver.restore(sess, model_path)
        model.validate(valid_data, sess)
Example #4
                         self.trg_path: ""
                     })
            saver.restore(sess, model_path)
            zh = sess.run(self.dec_ids,
                          feed_dict={
                              self.src_seq: [input],
                              self.src_size: [input_len],
                              self.keep_prob: 1.0
                          })
            output_zh_sentence = ''.join(
                [zh_id_to_word.get(w, "?") for w in zh])
            print(zh)
            print(output_zh_sentence)


if __name__ == "__main__":
    final_en_file = globalconf.get_root(
    ) + "rnn/ted/en-zh/train.tags.en-zh.en.final"
    final_zh_file = globalconf.get_root(
    ) + "rnn/ted/en-zh/train.tags.en-zh.zh.final"
    en_vocab_file = globalconf.get_root(
    ) + "rnn/ted/en-zh/train.tags.en-zh.en.vocab"
    zh_vocab_file = globalconf.get_root(
    ) + "rnn/ted/en-zh/train.tags.en-zh.zh.vocab"

    model = Model(100, 0, 1, 10000, 10000, 1024, 2, 100)
    model_path = globalconf.get_root() + "rnn/ted/seq2seq.model"
    model.train(model_path, final_en_file, final_zh_file)
    # model_path = globalconf.get_root() + "rnn/ted/seq2seq.model-140"
    # model.eval(model_path, en_vocab_file, zh_vocab_file)
Example #5
with tf.variable_scope("layer5", initializer=initializer):
    w = tf.get_variable("w", shape=[128, 10], dtype=tf.float32)
    b = tf.get_variable("b", shape=[10], dtype=tf.float32)
    logits = tf.matmul(a, w) + b

with tf.variable_scope("optimize", initializer=initializer):
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y,
                                                       logits=logits))
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.argmax(logits, axis=1, output_type=tf.int32), y),
                tf.float32))

    train_op = tf.train.AdamOptimizer(0.001).minimize(loss)

log_dir = globalconf.get_root() + "one_cnn/straight_foeward"
writer = tf.summary.FileWriter(logdir=log_dir, graph=tf.get_default_graph())
writer.close()

with tf.Session() as s:
    tf.global_variables_initializer().run()
    for epoch in range(10):
        start = 0
        batch_size = 64
        while start < x_train.shape[0]:
            end = min(x_train.shape[0], start + batch_size)
            lo, acc, _ = s.run([loss, accuracy, train_op],
                               feed_dict={
                                   x: x_train[start:end],
                                   y: y_train[start:end]
                               })
            start = end  # advance to the next mini-batch; without this the loop never terminates
Example #6
File: model.py Project: toddyan/tfa
        while start < x_valid.shape[0]:
            end = min(start + batch_size, x_valid.shape[0])
            # accumulate the count of correct predictions ("sum" shadows the builtin)
            sum += sess.run(correct_predict,
                            feed_dict={
                                self.x: x_valid[start:end],
                                self.y: y_valid[start:end]
                            })
            start = end
        return sum / x_valid.shape[0]


from keras.datasets import cifar10
import numpy as np
import globalconf

# presumably: each conv layer is [kernel_size, out_channels, pool_size]
conv_layers = [[[5, 5], [16], [2, 2]], [[5, 5], [32], [2, 2]]]
dense_layers = [512, 128, 10]

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train = x_train.reshape([-1, 32, 32, 3]) / 255.0
x_test = x_test.reshape([-1, 32, 32, 3]) / 255.0
y_train = y_train.reshape(-1).astype(np.int32)
y_test = y_test.reshape(-1).astype(np.int32)

model = Model(32, 32, 3, conv_layers, dense_layers)
model_dir = globalconf.get_root() + "/one_cnn/model/"
model_name = "model.ckpt"
with tf.Session() as s:
    tf.global_variables_initializer().run()
    model.train(s, x_train, y_train, 64, 100, model_dir + model_name)

with tf.Session() as s:
    tf.global_variables_initializer().run()
    print(model.eval(s, x_test, y_test, 128, model_dir + model_name))
Example #7
import sys
sys.path.append("..")
import globalconf
import tensorflow as tf
# a = tf.constant([1.0,2.0,3.0], name="a")
# b = tf.Variable(tf.random_normal([3]), name="b")
with tf.variable_scope("input1"):
    a = tf.constant([1.0, 2.0, 3.0], name="a")
with tf.variable_scope("input2"):
    b = tf.Variable(tf.random_normal([3]), name="b")
c = tf.add_n([a, b], name="c")
writer = tf.summary.FileWriter(globalconf.get_root() + "tensorboard/demo1",
                               tf.get_default_graph())
writer.close()
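The dumped graph can then be browsed by pointing TensorBoard at that directory, i.e. running tensorboard --logdir against <root>/tensorboard/demo1.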
Example #8
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import globalconf
import tensorflow as tf
import numpy as np
from keras.datasets import mnist
batch_size = 64
training_epochs = 10
log_savepath = globalconf.get_root() + "tensorboard/monitor/log"


def add_variable_to_monitor(var, name):
    with tf.variable_scope("summaries"):
        tf.summary.histogram(name=name, values=var)
        mean = tf.reduce_mean(var)
        stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar("mean/" + name, mean)
        tf.summary.scalar("stddev/" + name, stddev)


def dense(input_tensor, input_dim, output_dim, layer_name, act):
    with tf.variable_scope(layer_name):
        with tf.variable_scope("weights"):
            weights = tf.get_variable(
                "weights",
                shape=[input_dim, output_dim],
                initializer=tf.truncated_normal_initializer(stddev=0.1,
                                                            dtype=tf.float32))
            add_variable_to_monitor(weights, layer_name + "/weights")
        with tf.variable_scope("biases"):
Example #9
                record,
                features={
                    "image": tf.FixedLenFeature([], tf.string),
                    "label": tf.FixedLenFeature([], tf.int64)
                })
            image = tf.reshape(tf.decode_raw(features["image"], tf.uint8),
                               [self.height, self.width, self.channel])
            image = tf.image.convert_image_dtype(image, tf.float32)
            label = features["label"]
            return image, label

        return parser


if __name__ == "__main__1":
    src_root = globalconf.get_root() + "transfer/small/"
    dst_root = globalconf.get_root() + "transfer/tfrecord/"
    flower_id_map = {
        "daisy": 0,
        "dandelion": 1,
        "roses": 2,
        "sunflowers": 3,
        "tulips": 4
    }
    # res_a = tf.reshape(tf.decode_raw(a.tostring(),tf.uint8), [h,w,c])
    builder = ImageTFRecordBuilder(299, 299, 3, 128, src_root, dst_root,
                                   flower_id_map, 0.1, 0.1)
    builder.make_tfrecord()

if __name__ == "__main__":
    dst_root = globalconf.get_root() + "transfer/tfrecord/"
Example #10
X_test = X_test.astype(np.float32) / 255.0
Y_train = Y_train.astype(np.int32)
Y_test = Y_test.astype(np.int32)


def dataset_to_sprite(images):
    if isinstance(images, list):
        images = np.array(images)
    h = images.shape[1]
    w = images.shape[2]
    m = int(np.ceil(np.sqrt(images.shape[0])))
    sprite = np.ones(shape=[h * m, w * m], dtype=np.float32)
    for i in range(m):
        for j in range(m):
            index = i * m + j
            if index < images.shape[0]:
                sprite[i * h:(i + 1) * h, j * w:(j + 1) * w] = images[index]
    return sprite


sprite_image = dataset_to_sprite(1.0 - X_test)
sprite_path = globalconf.get_root() + "tensorboard/visual/sprite.png"
meta_path = globalconf.get_root() + "tensorboard/visual/meta.tsv"
plt.imsave(sprite_path, sprite_image, cmap='gray')
# plt.imshow(sprite_image, cmap='gray')
# plt.show()
with open(meta_path, 'w') as f:
    f.write("Index\tLabel\n")
    for index in range(Y_test.shape[0]):
        f.write("%d\t%d\n" % (index, Y_test[index]))
Example #11
    return word_id_dict


def word_to_id(in_file, out_file, vocab):
    unk_id = vocab['<unk>']
    with codecs.open(in_file, mode='r', encoding='utf8') as f_in,\
            codecs.open(out_file, mode='w', encoding='utf8') as f_out:
        for line in f_in:
            words = line.strip().split(' ') + ['<eos>']
            # vocabulary ids are ints, so convert to str before joining
            ids = [str(vocab.get(w, unk_id)) for w in words]
            f_out.write(' '.join(ids) + "\n")


# http://www.fit.vutbr.cz/~imikolov/rnnlm/simple-examples.tgz
in_train_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/ptb.train.txt"
in_valid_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/ptb.valid.txt"
in_test_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/ptb.test.txt"

out_train_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/id.train.txt"
out_valid_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/id.valid.txt"
out_test_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/id.test.txt"
out_vocab_file = globalconf.get_root() + "rnn/ptb/simple-examples/data/vocab.txt"

vocab = gen_vocab(in_train_file)
with codecs.open(out_vocab_file, mode='w', encoding='utf8') as f_vocab:
    for w, wid in vocab.items():
        f_vocab.write(str(wid) + "\t" + w + "\n")
word_to_id(in_train_file, out_train_file, vocab)
word_to_id(in_valid_file, out_valid_file, vocab)
word_to_id(in_test_file, out_test_file, vocab)
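gen_vocab is called above but not shown in this excerpt; a minimal sketch of a frequency-sorted vocabulary builder consistent with that call (an assumption, not the project's original):

import collections

def gen_vocab(train_file, vocab_size=10000):
    # count word frequencies over the training corpus
    counter = collections.Counter()
    with codecs.open(train_file, mode='r', encoding='utf8') as f:
        for line in f:
            counter.update(line.strip().split(' '))
    # keep the most frequent words; '<eos>' goes up front so every id file
    # can terminate sentences with a known token
    words = ['<eos>'] + [w for w, _ in counter.most_common(vocab_size - 1)]
    return {w: i for i, w in enumerate(words)}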
Example #12
import sys
sys.path.append("..")
sys.path.append("../mnist-demo/")
import globalconf
import tensorflow as tf
import numpy as np
import mnist_infer
import os
from keras.datasets import mnist
batch_size = 64
learning_rate_base = 0.8
learning_rate_decay = 0.99
regularization_rate = 0.0001
training_epochs = 10
moving_average_decay = 0.99
model_savepath = globalconf.get_root() + "tensorboard/mnist/model"
log_savepath = globalconf.get_root() + "tensorboard/mnist/log"
model_savepath_prv = globalconf.get_root() + "tensorboard/mnistprv/model"
log_savepath_prv = globalconf.get_root() + "tensorboard/mnistprv/log"
model_name = "mnist.ckpt"


def train(x_train, y_train):
    x = tf.placeholder(tf.float32,
                       shape=(None, mnist_infer.N[0]),
                       name="input-x")
    y = tf.placeholder(tf.float32,
                       shape=(None, mnist_infer.N[-1]),
                       name="input-y")
    regularizer = tf.contrib.layers.l2_regularizer(regularization_rate)
    z = mnist_infer.infer(x, regularizer)
Example #13
import globalconf
from inception_resnet_v2 import inception_resnet_v2_arg_scope, inception_resnet_v2
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
import scipy.misc as misc
import PIL.Image as Image

# http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz
checkpoint_path = globalconf.get_root(
) + "transfer/resnet_v2/inception_resnet_v2_2016_08_30.ckpt"
logdir = globalconf.get_root() + "transfer/resnet_v2/logdir"
height, width, channels = 299, 299, 3
x = tf.placeholder(tf.float32, shape=[None, height, width, channels])
arg_scope = inception_resnet_v2_arg_scope()
for k, v in arg_scope.items():
    print(k, ":")
    for k2, v2 in v.items():
        print("\t", k2, ":", v2)
with slim.arg_scope(arg_scope):
    logits, end_points = inception_resnet_v2(x,
                                             is_training=False,
                                             num_classes=1001)
    features = end_points['PreLogitsFlatten']
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, checkpoint_path)

#writer = tf.summary.FileWriter(logdir=logdir, graph=tf.get_default_graph())
#writer.close()
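With the checkpoint restored, end_points['PreLogitsFlatten'] can serve as a fixed feature extractor; a short usage sketch with stand-in input (the batch here is random data, and the 1536-dim output is specific to inception_resnet_v2):

batch = np.random.rand(4, height, width, channels).astype(np.float32)
feats = sess.run(features, feed_dict={x: batch})
print(feats.shape)  # (4, 1536)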
Example #14
import globalconf
import PIL.Image as Image
import os


def image_resize(src, dst):
    img = Image.open(src).convert('RGB').resize((299, 299), Image.ANTIALIAS)
    img.save(dst)


def image_dir_resize(src, dst):
    if os.path.isfile(src) and src.endswith(".jpg"):
        Image.open(src).convert('RGB').resize((299, 299),
                                              Image.ANTIALIAS).save(dst)
    if os.path.isdir(src):
        if not os.path.exists(dst): os.mkdir(dst)
        for f in os.listdir(src):
            image_dir_resize(os.path.join(src, f), os.path.join(dst, f))


if __name__ == "__main__":
    src = globalconf.get_root() + "transfer/flower_photos"
    dst = globalconf.get_root() + "transfer/small"
    image_dir_resize(src, dst)
Example #15
import globalconf
import mnist_infer
import numpy as np
import tensorflow as tf
from keras.datasets import mnist
from tensorflow.contrib.tensorboard.plugins import projector

REGULARIZATION_RATE = 0.0001
EMA_DECAY = 0.99
learning_rate_base = 0.1
learning_rate_decay = 0.99
batch_size = 64
TRAINING_STEP = 10000
decay_steps = 1000

log_dir = globalconf.get_root() + "tensorboard/project/"
sprite_image = globalconf.get_root() + "tensorboard/visual/sprite.png"
meta_path = globalconf.get_root() + "tensorboard/visual/meta.tsv"


def train_definer():
    with tf.variable_scope("input"):
        x = tf.placeholder(dtype=tf.float32, shape=[None, 784], name="input-x")
        y = tf.placeholder(dtype=tf.int64, shape=[None], name="input-y")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    logits = mnist_infer.infer(x, regularizer)
    global_step = tf.get_variable("global_step",
                                  shape=[],
                                  initializer=tf.zeros_initializer(),
                                  trainable=False)
    #global_step = tf.Variable(0, trainable=False)
Example #16
File: mnist.py Project: toddyan/tf
import sys
sys.path.append("../../")
import globalconf
import numpy as np
import tensorflow as tf

from keras.datasets import mnist
tf.logging.set_verbosity(tf.logging.INFO)
root = globalconf.get_root()
(X_train, Y_train), (X_test, Y_test) = mnist.load_data()
X_train = X_train.reshape((X_train.shape[0], -1)).astype(np.float32) / 255.0
X_test = X_test.reshape((X_test.shape[0], -1)).astype(np.float32) / 255.0
Y_train = Y_train.astype(np.int32)
Y_test = Y_test.astype(np.int32)

image_shape = X_train.shape[1:]
print(X_train.shape, Y_train.shape, X_test.shape, Y_test.shape, image_shape)

model_dir = root + "estimator/mnist"
feature_columns = [
    tf.feature_column.numeric_column("image", shape=image_shape)
]
estimator = tf.estimator.DNNClassifier(hidden_units=[500],
                                       feature_columns=feature_columns,
                                       model_dir=model_dir,
                                       n_classes=10,
                                       optimizer=tf.train.AdamOptimizer())

train_fn = tf.estimator.inputs.numpy_input_fn(x={"image": X_train},
                                              y=Y_train,
                                              batch_size=64,
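The excerpt cuts off inside the numpy_input_fn call; a hedged sketch of how such input functions are typically completed and consumed (the remaining argument values and step count are assumptions, not the project's originals):

train_fn = tf.estimator.inputs.numpy_input_fn(x={"image": X_train},
                                              y=Y_train,
                                              batch_size=64,
                                              num_epochs=None,
                                              shuffle=True)
estimator.train(input_fn=train_fn, steps=10000)

test_fn = tf.estimator.inputs.numpy_input_fn(x={"image": X_test},
                                             y=Y_test,
                                             batch_size=64,
                                             num_epochs=1,
                                             shuffle=False)
print(estimator.evaluate(input_fn=test_fn))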
Example #17
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver()
        saver.restore(sess, model_path)
        start = 0
        while start < x_valid.shape[0]:
            end = min(start + batch_size, x_valid.shape[0])
            acc = sess.run(self.accuracy,
                           feed_dict={
                               self.images: x_valid[start:end],
                               self.labels: y_valid[start:end]
                           })
            print("valid:", acc)
            start = end


in_ckpt_path = globalconf.get_root(
) + "transfer/inception_v3/inception_v3.ckpt"
out_ckpt_path = globalconf.get_root(
) + "transfer/inception_v3/tuned_inception_v3.ckpt"
logdir = globalconf.get_root() + "transfer/inception_v3/logdir"

m = Model(5)
data = ImageDataset()
data.combine(globalconf.get_root() + 'transfer/small', 5, 0.1, 0.1, 0.5)
with tf.Session() as s:
    m.train(s, data.training_images, data.training_labels, 16, 10,
            in_ckpt_path, out_ckpt_path, logdir)

with tf.Session() as s:
    m.eval(s, data.validation_images, data.validation_labels, 64,
           out_ckpt_path)
Example #18
        sess = tf.Session()
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ],
                 feed_dict={self.tfrecord_paths: ''})
        saver = tf.train.Saver()
        saver.restore(sess, model_path)

        def predictor(images):
            return sess.run(self.predict, feed_dict={self.images: images})

        return predictor


in_ckpt_path = globalconf.get_root(
) + "transfer/inception_v3/inception_v3.ckpt"
out_ckpt_path = globalconf.get_root(
) + "transfer/inception_v3/tuned_inception_v3.ckpt"
logdir = globalconf.get_root() + "transfer/inception_v3/logdir"
tfrecord_root = globalconf.get_root() + "transfer/tfrecord/"

m = Model(5)

# with tf.Session() as s:
#     m.train(s, in_ckpt_path, out_ckpt_path, logdir,tfrecord_root+"train-*")

# with tf.Session() as s:
#     m.eval(s, out_ckpt_path, tfrecord_root+"valid-*")
#     exit()

predictor = m.get_predictor(out_ckpt_path)
Example #19
import globalconf
import codecs
from nltk.tokenize import word_tokenize
import jieba

# https://wit3.fbk.eu/mt.php?release=2015-01
in_en_file = globalconf.get_root() + "rnn/ted/en-zh/train.tags.en-zh.en"
in_zh_file = globalconf.get_root() + "rnn/ted/en-zh/train.tags.en-zh.zh"

token_en_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.en.token"
token_zh_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.zh.token"

vocab_en_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.en.vocab"
vocab_zh_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.zh.vocab"

id_en_file = globalconf.get_root() + "rnn/ted/en-zh/train.tags.en-zh.en.id"
id_zh_file = globalconf.get_root() + "rnn/ted/en-zh/train.tags.en-zh.zh.id"

final_en_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.en.final"
final_zh_file = globalconf.get_root(
) + "rnn/ted/en-zh/train.tags.en-zh.zh.final"


def tokenize():
    stop_word = [' ']
    with codecs.open(in_en_file, mode='r', encoding='utf8') as f_in,\