def main():
	# Step 1 - download Google's pre-trained neural network
	url = 'https://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip'
	data_dir = '../data'
	model_name = os.path.split(url)[-1]
	local_zip_file = os.path.join(data_dir, model_name)
	if not os.path.exists(local_zip_file):
		#Download
		model_url = urllib.request.urlopen(url)
		with open(local_zip_file, 'wb') as output:
			output.write(model_url.read())

		#extract
		with zipfile.ZipFile(local_zip_file, 'r') as zip_ref:
			zip_ref.extractall(data_dir)

	model_fn = 'tensorflow_inception_graph.pb'

	# Step 2 - create a TensorFlow session and load the model
	graph = tf.Graph()
	sess = tf.InteractiveSession(graph=graph)
	with tf.gfile.FastGFile(os.path.join(data_dir, model_fn), 'rb') as f:
		graph_def = tf.GraphDef()
		graph_def.ParseFromString(f.read())
	t_input = tf.placeholder(np.float32, name='input') #define input tensor
	imagenet_mean = 117.0
	t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
	tf.import_graph_def(graph_def, {'input':t_preprocessed})

	layers = [op.name for op in graph.get_operations() if op.type=='Conv2D' and 'import/' in op.name]
	feature_nums = [int(graph.get_tensor_by_name(name+':0').get_shape()[-1]) for name in layers]


	def render_deepdream(t_obj, img0=img_noise, iter_n = 10, step = 1.5, octave_n=4, octave_scale=1.4):
		t_score = tf.reduce_mean(t_obj)  # define the optimisation objective
		t_grad = tf.gradients(t_score, t_input)[0]

		#split the image into a number of octaves
		img = img0
		octaves = []
		for _ in range(octave_n-1):
			hw = img.shape[:2]
			lo = resize(img, np.int32(np.float32(hw)/octave_scale))
			hi = img-resize(lo, hw)
			img = lo
			octaves.append(hi)

		# generate details octave by octave
		for octave in range(octave_n):
			if octave>0:
				hi = octaves[-octave]
				img = resize(img, hi.shape[:2])+hi
			for _ in range(iter_n):
				g = calc_grad_tiled(img, t_grad)
				img += g*(step / (np.abs(g).mean()+1e-1))
			#Step 5 - Output deep dreamed image
			showarray(img/255.0)
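
	# The loop above relies on helpers (resize, showarray, calc_grad_tiled) that
	# this snippet does not define. A minimal sketch of calc_grad_tiled, assuming
	# the sess/t_input created in Step 2 (the tile size is an assumption):
	def calc_grad_tiled(img, t_grad, tile_size=512):
		# compute the gradient over tiles of the image, with a random roll so
		# that tile seams do not show up in the result
		sz = tile_size
		h, w = img.shape[:2]
		sx, sy = np.random.randint(sz, size=2)
		img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
		grad = np.zeros_like(img)
		for y in range(0, max(h - sz // 2, sz), sz):
			for x in range(0, max(w - sz // 2, sz), sz):
				sub = img_shift[y:y + sz, x:x + sz]
				g = sess.run(t_grad, {t_input: sub})
				grad[y:y + sz, x:x + sz] = g
		return np.roll(np.roll(grad, -sx, 1), -sy, 0)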
Example #2
def train_vctk():
    """Summary

    Returns
    -------
    TYPE
        Description
    """
    batch_size = 24
    filter_length = 2
    n_stages = 7
    n_layers_per_stage = 9
    n_hidden = 48
    n_skip = 384
    dataset = vctk.get_dataset()
    it_i = 0
    n_epochs = 1000
    sequence_length = get_sequence_length(n_stages, n_layers_per_stage)
    ckpt_path = 'vctk-wavenet/wavenet_filterlen{}_batchsize{}_sequencelen{}_stages{}_layers{}_hidden{}_skips{}'.format(
        filter_length, batch_size, sequence_length, n_stages,
        n_layers_per_stage, n_hidden, n_skip)
    with tf.Graph().as_default(), tf.Session() as sess:
        net = create_wavenet(batch_size=batch_size,
                             filter_length=filter_length,
                             n_hidden=n_hidden,
                             n_skip=n_skip,
                             n_stages=n_stages,
                             n_layers_per_stage=n_layers_per_stage)
        saver = tf.train.Saver()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        if tf.train.latest_checkpoint(ckpt_path) is not None:
            saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
        batch = vctk.batch_generator
        with tf.variable_scope('optimizer'):
            opt = tf.train.AdamOptimizer(learning_rate=0.0002).minimize(
                net['loss'])
        var_list = [
            v for v in tf.global_variables() if v.name.startswith('optimizer')
        ]
        sess.run(tf.variables_initializer(var_list))
        writer = tf.summary.FileWriter(ckpt_path)
        for epoch_i in range(n_epochs):
            for batch_xs in batch(dataset, batch_size, sequence_length):
                loss, quantized, _ = sess.run(
                    [net['loss'], net['quantized'], opt],
                    feed_dict={net['x']: batch_xs})
                print(loss)
                if it_i % 100 == 0:
                    summary = sess.run(net['summaries'],
                                       feed_dict={net['x']: batch_xs})
                    writer.add_summary(summary, it_i)
                    # save
                    saver.save(sess,
                               os.path.join(ckpt_path, 'model.ckpt'),
                               global_step=it_i)
                it_i += 1

    return loss
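
# train_vctk calls a get_sequence_length helper that is not shown here. A
# hedged sketch that sizes the training window to the receptive field of the
# stacked dilated convolutions; this formula is an assumption, not necessarily
# the project's actual helper, and it assumes filter_length == 2:
def get_sequence_length(n_stages, n_layers_per_stage):
    # dilations within a stage double per layer: 1, 2, 4, ..., 2**(L-1);
    # the receptive field of n_stages such stacks is n_stages*(2**L - 1) + 1
    return n_stages * (2 ** n_layers_per_stage - 1) + 1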
Example #3
def load_graph(model_file):
    graph = tf.Graph()
    graph_def = tf.GraphDef()

    with open(model_file, "rb") as f:
        graph_def.ParseFromString(f.read())
    with graph.as_default():
        tf.import_graph_def(graph_def)

    return graph
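
# A minimal usage sketch for load_graph; the model path and tensor names below
# are placeholders/assumptions (tf.import_graph_def adds the default 'import'
# name prefix):
graph = load_graph('frozen_model.pb')
x = graph.get_tensor_by_name('import/input:0')
y = graph.get_tensor_by_name('import/output:0')
with tf.Session(graph=graph) as sess:
    result = sess.run(y, feed_dict={x: input_batch})  # input_batch is assumed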
    def _init_model(self):
        self.graph = tf.Graph()
        with self.graph.as_default():
            tf.set_random_seed(self.random_seed)
             
            #1. embedding layer
            self.embeddings = tf.reshape(tf.nn.embedding_lookup(self.vr["single_second_embedding"], self.ph['single_index']),  shape=[-1, conf.embedding_size * self.single_size])

            #2. deep network
            self.y_deep = tf.reshape(self.embeddings, shape=[-1, self.field_size * self.embedding_size])
            self.y_deep = tf.nn.dropout(self.y_deep, self.dropout_keep_deep[0])
            for i, layer in enumerate(self.dnn_wides):
                # truncated in the original; assumed dense layer using self.vr weights
                self.y_deep = tf.add(tf.matmul(self.y_deep, self.vr["layer_%d" % i]), self.vr["bias_%d" % i])
Example #5
def rnn_test():

    data_path = pjoin(data_dir, 'test.ids.test')
    with open(data_path, 'r') as fdata:
        raw_data = [list(map(int, d.strip().split(' '))) for d in fdata.readlines()]
    lengths = [len(x) for x in raw_data]
    lengths = np.array(lengths)
    print(lengths)
    test_data = [mask_input(rd, 25) for rd in raw_data]
    # test_data = np.load(test_file_path)
    print(test_data)
    # print('shape of test data is : {}'.format(test_data.shape))

    embed_path = pjoin(data_dir, "glove.trimmed.100.npz")
    embedding = np.load(embed_path)['glove']

    inputs = [x[0] for x in test_data]

    inputs = np.array(inputs)
    print('shape of inputs {}'.format(inputs.shape))
    masks = [x[1] for x in test_data]
    masks = np.array(masks)
    print('shape of masks {}'.format(masks.shape))

    with tf.Graph().as_default():
        # embedding_tf = tf.Variable(embedding)
        x = tf.placeholder(tf.int32, (None, 25))
        x_m = tf.placeholder(tf.bool, (None, 25))
        l_x = tf.placeholder(tf.int32, (None,))
        print(x)
        print(x_m)
        print(l_x)

        embed = tf.nn.embedding_lookup(embedding, x)
        # x_in = tf.boolean_mask(embed, x_m)
        print('shape of embed {}'.format(embed.shape))
        # print('shape of x_in {}'.format(x_in.shape))

        num_hidden = 5
        lstm_fw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
        lstm_bw_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)
        outputs, outputs_states = tf.nn.bidirectional_dynamic_rnn(
            lstm_fw_cell, lstm_bw_cell, embed,
            sequence_length=sequence_length(x_m), dtype=tf.float64)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            outp, outps = sess.run([outputs, outputs_states],
                                   feed_dict={x: inputs, x_m: masks})
            # print('shape of input embeddings is : {}'.format(xin.shape))
            print("shape of output is :{}".format(np.array(outp).shape))
            print(outp)
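
# rnn_test above calls a sequence_length helper that is not shown. A minimal
# sketch, assuming the lengths are recovered from the boolean mask placeholder:
def sequence_length(mask):
    # count the True entries in each row of the [batch, time] boolean mask
    return tf.reduce_sum(tf.cast(mask, tf.int32), axis=1)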
Example #6
def build_network(self, db):
    with tf.Graph().as_default() as g:
        # build the graph
        self.images = tf.placeholder(tf.float32, shape=[None, 224, 224, 3])
        self.labels = tf.placeholder(tf.int32, shape=[None])
        self.predictions['cls_score'], self.predictions['cls_prob'] = \
            vgg16(self.images, batch_size)
        self.loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=tf.reshape(self.predictions['cls_score'],
                                  [-1, db.num_classes]),
                labels=self.labels))
        self.global_variables = \
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)

for i, word in enumerate(words):
    word2int[word] = i
    int2word[i] = word

vocabulary_size = 500000  # Parameter indicating the number of words we'll put in the dictionary
validation_size = 1000  # Size of the validation set
epochs = 20  # Number of epochs we usually start to train with
batch_size = 512  # Size of the batches used in the mini-batch gradient descent
time_steps = 8
lstm_size = 512
max_length = 12
graph = tf.Graph()


def convolution(x):
    # incomplete stub in the original: merely aliases the 1-D convolution op and is unused below
    conv = tf.nn.conv1d


with graph.as_default():
    tf_train_dataset = tf.placeholder(tf.float32,
                                      shape=(batch_size, time_steps,
                                             max_length))
    tf_train_labels = tf.placeholder(tf.float32,
                                     shape=(batch_size, label_number))
    tf_test_dataset = tf.constant(test_dataset)

    initer = tf.truncated_normal_initializer(stddev=0.01)
import numpy as np
import tensorflow as tf

batch_size = 128

graph = tf.Graph()

with graph.as_default():

	tf_train_dataset = tf.placeholder(tf.float32, shape=(batch_size, image_size * image_size))
	tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
	tf_valid_dataset = tf.constant(valid_dataset)
	tf_test_dataset = tf.constant(test_dataset)

	weights = tf.Variable(
		tf.truncated_normal([image_size * image_size, num_labels]))

	biases = tf.Variable(tf.zeros([num_labels]))

	logits = tf.matmul(tf_train_dataset, weights) + biases

	loss = tf.reduce_mean(
		tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits=logits))

	optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

	train_prediction = tf.nn.softmax(logits)

	valid_prediction = tf.nn.softmax(
		tf.matmul(tf_valid_dataset, weights) + biases)
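
# A minimal training-loop sketch for the graph above, assuming flattened
# train_dataset / train_labels arrays are available in memory (num_steps is an
# assumption):
num_steps = 3001
with tf.Session(graph=graph) as session:
	session.run(tf.global_variables_initializer())
	for step in range(num_steps):
		offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
		batch_data = train_dataset[offset:offset + batch_size]
		batch_labels = train_labels[offset:offset + batch_size]
		_, l, predictions = session.run(
			[optimizer, loss, train_prediction],
			feed_dict={tf_train_dataset: batch_data, tf_train_labels: batch_labels})
		if step % 500 == 0:
			print('Minibatch loss at step {}: {}'.format(step, l))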
def _gen(dataset):
    gen = dataGenerator(80, dataset)  # assumed: the def and generator lines are missing in the original
    while True:
        yield np.expand_dims(next(gen), axis=3)


t_gen = _gen(train_dataset)
v_gen = _gen(valid_dataset)

# testing the generators
# gen = dataGenerator(80,train_dataset)
# array = next(gen)
# array = next(gen)
# array = next(gen)+.5
# showMultipleArraysHorizontally(array, max_per_row=4)

# net = ae.ConvolutionalAutoencoderSingle()
# net.model()
# net.optimizer()
# net.train(100000, t_gen, 100, v_gen)

with tf.Graph().as_default():
    data_input = tf.placeholder(tf.float32,
                                shape=(batch_size, image_size, image_size,
                                       num_channels),
                                name="data_input_placeholder")

    network = ae.Convolutional()
    flow = network.addLayer(data_input, [3, 3, 1, 8], 2, relu=True)
    flow = network.addLayer(flow, [3, 3, 8, 16], 2, relu=True)
    reconstructed = ae.Deconvolutional(network)
    loss = tf.reduce_mean(tf.abs(reconstructed - data_input))
    optimizer = tf.train.AdamOptimizer(.001).minimize(loss)
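
    # A minimal training sketch for the autoencoder above, assuming t_gen yields
    # batches shaped (batch_size, image_size, image_size, num_channels)
    # (n_steps is an assumption):
    n_steps = 1000
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(n_steps):
            batch = next(t_gen)
            _, l = sess.run([optimizer, loss], feed_dict={data_input: batch})
            if step % 100 == 0:
                print('step {}: reconstruction loss {:.4f}'.format(step, l))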
import tensorflow as tf
import numpy as np
"""
# 显式的定义计算图
# 以计算图为执行基本单元
"""
g = tf.Graph()
with g.as_default():
    a1 = tf.constant(np.ones([4, 4]))
    a2 = tf.constant(np.ones([4, 4]))
    # matrix multiplication
    a1_dot_a2 = tf.matmul(a1, a2)
"""
# 画出计算图,保存在graph目录中
# 运行完后,在控制台输入tensorboard --logdir=graph
# 在弹出的url中查看 
"""
tf.summary.FileWriter("graph", graph=g)
''' Explicitly specify which graph is passed to the session '''
sess = tf.Session(graph=g)
print(sess.run(a1_dot_a2))
Example #12
def preprocess(img):
    # the original def line is missing; assumed simple zero-centring before the cast
    norm_img = img - np.mean(img)
    return (norm_img).astype(np.float32)

def deprocess(img):
    return np.clip(img * 255, 0, 255).astype(np.uint8)
    # return ((img / np.max(np.abs(img))) * 127.5 +
    #         127.5).astype(np.uint8)

net = get_vgg_model()

labels = net['labels']

# g = tf.Graph()

# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.Session(config=config, ...)
g = tf.Graph()

with tf.Session(graph=g) as sess, g.device('/cpu:0'):
    tf.import_graph_def(net['graph_def'], name='vgg')
    names = [op.name for op in g.get_operations()]

# for i in names:
#     print i


#

# for i in range(401,501):
#     og = plt.imread("images/"+str(i)+".png")
#     og = preprocess(og)
#     img.append(og)