コード例 #1
0
def train():
    """Train and evaluate the model for 3 epochs, printing metrics per epoch."""
    with tf.Session() as sess:
        # BUG FIX: the initializer op must actually be executed via sess.run,
        # and the correct API name is global_variables_initializer (plural).
        sess.run(tf.global_variables_initializer())
        for e in range(3):
            lasttime = time.time()
            train_acc, train_loss, test_acc, test_loss = 0, 0, 0, 0
            # ---- training minibatches ----
            pbar = tqdm(range(0, len(train_X), batch_size),
                        desc="train minibatch loop")
            for i in pbar:
                batch_x = train_X[i:min(i + batch_size, train_X.shape[0])]
                batch_char = train_char[i:min(i +
                                              batch_size, train_X.shape[0])]
                # BUG FIX: was `batchsize` (undefined name).
                batch_y = train_Y[i:min(i + batch_size, train_X.shape[0])]
                acc, cost, _ = sess.run(
                    [model.accuracy, model.cost, model.optimizer],
                    feed_dict={
                        model.word_ids: batch_x,
                        model.char_ids: batch_char,
                        model.labels: batch_y
                    })
                assert not np.isnan(cost)
                train_loss += cost
                train_acc += acc
                pbar.set_postfix(cost=cost, accuracy=acc)

            # ---- test minibatches (no optimizer op: evaluation only) ----
            pbar = tqdm(range(0, len(test_X), batch_size),
                        desc='test minibatch loop')
            for i in pbar:
                batch_x = test_X[i:min(i + batch_size, test_X.shape[0])]
                batch_char = test_char[i:min(i + batch_size, test_X.shape[0])]
                batch_y = test_Y[i:min(i + batch_size, test_X.shape[0])]
                acc, cost = sess.run(
                    [model.accuracy, model.cost],
                    feed_dict={
                        model.word_ids: batch_x,
                        model.char_ids: batch_char,
                        model.labels: batch_y
                    },
                )
                assert not np.isnan(cost)
                test_loss += cost
                test_acc += acc
                pbar.set_postfix(cost=cost, accuracy=acc)

            # Turn the accumulated sums into per-minibatch averages.
            train_loss /= len(train_X) / batch_size
            train_acc /= len(train_X) / batch_size
            test_loss /= len(test_X) / batch_size
            test_acc /= len(test_X) / batch_size
            print('time taken:', time.time() - lasttime)
            print(
                'epoch: %d, training loss: %f, training acc: %f, valid loss: %f, valid acc: %f\n'
                % (e, train_loss, train_acc, test_loss, test_acc))
コード例 #2
0
def run_benckmark():
    """Build AlexNet on flattened CIFAR-style input and run a training benchmark."""
    # Input placeholders (3072 = 32x32x3 flattened image, 10 classes).
    image_test_x = tf.placeholder(tf.float32, [None, 3072], name='x_input')
    image_test_y = tf.placeholder(tf.float32, [None, 10], name='y_input')

    image_train_x = tf.placeholder(tf.float32, [None, 3072], name='x_input')
    image_train_y = tf.placeholder(tf.float32, [None, 10], name='y_input')
    # Build the model.
    pred, parameter = AlexNet(image_train_x)
    print('START:')
    # Loss and training step.
    # BUG FIX: softmax_cross_entropy_with_logits takes keyword arguments;
    # positional (pred, labels) is rejected by TF 1.x.
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=pred,
                                                labels=image_train_y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)

    # Accuracy op over the test placeholders.
    accuracy = predict(image_test_x, image_test_y)
    with tf.Session() as sess:
        # BUG FIX: correct API name is global_variables_initializer.
        init = tf.global_variables_initializer()
        sess.run(init)
        for training_round in range(num_batches):
            # Pick one batch_size-sized sample slice per round.
            train_batch_x = train_x[training_round]
            train_batch_y = train_y[training_round]
            # Train step.
            sess.run(optimizer, feed_dict={image_train_x: train_batch_x,
                                           image_train_y: train_batch_y})
            # BUG FIX: the print call was missing its closing parenthesis.
            print(sess.run(accuracy,
                           feed_dict={image_test_x: test_x,
                                      image_test_y: test_y}))
コード例 #3
0
ファイル: nst.py プロジェクト: kbhartiya/NeuralPrisma
def model_nn(sess, input_image, num_iterations):
    """Run the style-transfer optimization loop and save generated images.

    Saves an intermediate image every 20 iterations and the final image at
    the end; returns the final generated image array.
    """
    # BUG FIX: correct API name is global_variables_initializer.
    sess.run(tf.global_variables_initializer())

    # Feed the randomly generated image into the VGG19 model.
    sess.run(model['input'].assign(input_image))

    for i in range(num_iterations):
        sess.run(train)
        # Compute the generated image by running it in the model.
        generated_image = sess.run(model['input'])

        # BUG FIX: `if i % 20:` fired on every step EXCEPT multiples of 20.
        if i % 20 == 0:
            # BUG FIX: do not rebind the graph tensors J/J_content/J_style to
            # their numeric values, or the next sess.run on them would fail.
            cost, content_cost, style_cost = sess.run(
                [J, J_content, J_style])
            print("Total Cost: {}".format(cost))
            print("Total Content Cost: {}".format(content_cost))
            print("Total Style Cost: {}".format(style_cost))

            save_image('./output/generated_image_no' + str(i) + '.png',
                       generated_image)

    save_image("./output/final_generated_image.jpg", generated_image)

    # BUG FIX: was `generated_images` (undefined name).
    return generated_image
コード例 #4
0
 def __init__(
     self,
     lr,
     gamma,
     n_actions=4,
     l1_size=64,
     l2_size=64,
     input_dims=8,
     chkpt_dir='tmp'
 ):
     """Policy-gradient agent: stores hyperparameters, builds the network,
     owns a TF session and a saver; chkpt_dir is where checkpoints go.
     """
     self.lr = lr
     self.gamma = gamma
     self.n_actions = n_actions
     self.action_space = [i for i in range(n_actions)]
     self.state_memory = []
     self.action_memory = []
     self.reward_memory = []
     self.input_dims = input_dims
     self.l1_size = l1_size
     self.l2_size = l2_size
     self.sess = tf.Session()  # TF session owned by this agent
     self.build_net()
     # BUG FIX: correct API name is global_variables_initializer.
     self.sess.run(tf.global_variables_initializer())
     self.saver = tf.train.Saver()  # to save and restore the model later
     self.chkpt_file = os.path.join(chkpt_dir, 'policy.ckpt')
コード例 #5
0
    def __init__(self, sess, config, data_loader=None, *, logger, model):
        """Base trainer: stores collaborators and initializes all variables.

        BUG FIX: the original signature placed non-default parameters
        (logger, model) after a defaulted one, which is a SyntaxError.
        They are now required keyword-only arguments, preserving keyword
        call sites.
        """
        self.config = config
        self.sess = sess
        self.logger = logger
        self.model = model

        if data_loader is not None:
            self.data_loader = data_loader

        # BUG FIX: correct API name is global_variables_initializer.
        self.train_init = tf.global_variables_initializer()
        self.sess.run(self.train_init)
コード例 #6
0
    def __init__(self, sess, model, data, config, logger):
        """Store collaborators and run global + local variable initializers."""
        self.sess = sess
        self.data = data
        self.config = config
        self.model = model
        self.logger = logger

        # BUG FIX: correct API name is global_variables_initializer.
        self.init = tf.group(tf.global_variables_initializer(),
                             tf.local_variables_initializer())
        self.sess.run(self.init)
コード例 #7
0
def save():
    """Train a tiny regression net, save its weights, and plot the fit."""
    print('saving process started')
    # BUG FIX: .shape is an attribute, not a method.
    tf_x = tf.placeholder(tf.float32, x.shape)
    tf_y = tf.placeholder(tf.float32, y.shape)
    l = tf.layers.dense(tf_x, 10, tf.nn.relu)
    # BUG FIX: the output layer was assigned to the literal `0` (SyntaxError);
    # the rest of the function reads it as `o`.
    o = tf.layers.dense(l, 1)
    loss = tf.losses.mean_squared_error(tf_y, o)
    train_op = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss)

    sess = tf.Session()
    # BUG FIX: correct API name is global_variables_initializer; the stray
    # `saver = tf.Session()` and the duplicated initializer run were removed.
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()
    for step in range(100):
        sess.run(train_op, {tf_x: x, tf_y: y})
    saver.save(sess, 'params', write_meta_graph=False)
    pred, l = sess.run([o, loss], {tf_x: x, tf_y: y})
    plt.figure(1, figsize=(10, 5))
    plt.subplot(121)
    plt.scatter(x, y)
    plt.plot(x, pred, 'r--', lw=5)
    plt.text(-1, 1.2, 'Save Loss=%.4f' % l,
             fontdict={'size': 15, 'color': 'red'})
コード例 #8
0
    def train(self, train_data, vocab_list, retrain=False):
        """Train the PTB language model for this category.

        Restores an existing checkpoint when one exists (and returns early
        unless retrain=True); saves a checkpoint every 5 epochs.
        """
        initializer = tf.random_uniform_initializer(-self._config.init_scale,
                                                    self._config.init_scale)

        # BUG FIX: use the freshly built local `initializer`; the original
        # passed `self._initializer` and left the local entirely unused.
        with tf.variable_scope(
                self._category, reuse=None,
                initializer=initializer), tf.Session() as session:
            m = PTBModel(is_training=True,
                         config=self._config,
                         category=self._category)

            # BUG FIX: correct API name is global_variables_initializer.
            tf.global_variables_initializer().run()

            ckpt = tf.train.get_checkpoint_state(self._checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                tf.train.Saver().restore(session, ckpt.model_checkpoint_path)
                if not retrain:
                    return

            for i in range(self._config.max_max_epoch):
                # Decay the learning rate only after max_epoch epochs.
                lr_decay = self._config.lr_decay**max(
                    i - self._config.max_epoch, 0.0)
                m.assign_lr(session, self._config.learning_rate * lr_decay)

                print("Epoch: %d Learning rate: %.3f" %
                      (i + 1, session.run(m.lr)))
                train_perplexity = self.__run_epoch(session,
                                                    m,
                                                    train_data,
                                                    m.train_op,
                                                    verbose=True)
                print("Epoch: %d Train Perplexity: %.3f" %
                      (i + 1, train_perplexity))
                if i % 5 == 0:
                    tf.train.Saver().save(session,
                                          self._checkpoint_dir + "model.ckpt")
コード例 #9
0
	def __init__(self, dimensions, lr=1e-3):
		"""Linear model y ~ x @ params, trained with plain gradient descent."""
		# BUG FIX: the `dimensions` argument was ignored in favor of a global D.
		D = dimensions
		self.params = tf.Variable(
			tf.random_normal(shape=[D, 1], mean=0.5, stddev=0.5), name="params")
		# BUG FIX: the factory is tf.placeholder (lowercase), and the second
		# placeholder was misnamed "x".
		self.x = tf.placeholder("float32", shape=(None, D), name="x")
		self.y = tf.placeholder("float32", shape=(None, 1), name="y")
		# BUG FIX: refer to the instance attributes, not undefined locals.
		linear_comb = tf.matmul(self.x, self.params)
		residual = self.y - linear_comb
		loss = tf.reduce_sum(residual * residual)  # sum of squared errors

		self.trainOp = tf.train.GradientDescentOptimizer(lr).minimize(loss)
		self.predictOp = linear_comb

		# BUG FIX: the initializer was bound to `i` but `init` was run
		# (NameError), and the API name is global_variables_initializer.
		init = tf.global_variables_initializer()
		self.session = tf.InteractiveSession()
		self.session.run(init)
コード例 #10
0
ファイル: ssnmf.py プロジェクト: nukui-s/sscomdetection
    def fit_and_transform(self, edge_list, const_pairs=None, weights=None,
                          const_weights=None, steps=2000, log_dir="log",
                          threshold=0.001):
        """Fit semi-supervised NMF on the graph and return (H, cost_list).

        edge_list: iterable of (i, j) node pairs; const_pairs: optional
        must-link constraint pairs. Iterates alternating W/H updates until
        the cost change drops below `threshold` or `steps` is exhausted.
        """
        self.n_nodes = n_nodes = max(chain.from_iterable(edge_list)) + 1
        self.n_edges = n_edges = len(edge_list)
        if weights is None:
            weights = np.ones(n_edges).astype(np.float32)
        # Symmetrize the weighted edge list (undirected graph).
        edge_list = [(e[0], e[1], w) for e, w in zip(edge_list, weights)]
        edge_list = edge_list + [(j, i, w) for i, j, w in edge_list]
        self.edge_list = sorted(edge_list, key=lambda x: (x[0], x[1]))
        if const_pairs is None:
            const_pairs = []
        if const_weights is None:
            const_weights = np.ones(len(const_pairs)).astype(np.float32)
        # Symmetrize the constraint pairs the same way.
        const_pairs = [(c[0], c[1], w) for c, w
                            in zip(const_pairs, const_weights)]
        const_pairs = const_pairs + [(j, i, w) for i, j, w in const_pairs]

        self.const_pairs = sorted(const_pairs, key=lambda x: (x[0], x[1]))
        self.A = A = self.convert_edge_list_into_dense(edge_list, n_nodes)
        self.O = O = self.convert_edge_list_into_dense(const_pairs, n_nodes)
        D = np.diag(O.sum(axis=1))  # degree matrix of the constraint graph
        self.updater = updater = UpdateElem()
        graph = tf.Graph()
        with graph.as_default():
            updater.add_semi_supervised_rule(A, O, D, self.K, self.mlambda)
            # BUG FIX: correct API name is global_variables_initializer.
            init_op = tf.global_variables_initializer()
            self.sess = tf.Session()
            self.sess.run(init_op)
        pre_cost = -1
        cost_list = []
        for s in range(steps):
            # Alternating multiplicative updates for W and H.
            self.sess.run(updater.update_W_node())
            self.sess.run(updater.assign_W_node())
            self.sess.run(updater.update_H_node())
            self.sess.run(updater.assign_H_node())
            cost = self.sess.run(updater.cost)
            if abs(cost - pre_cost) < threshold:
                break
            pre_cost = cost
            cost_list.append(cost)
            print(cost)
        print("Steps: " + str(s + 1))
        H = self.get_H()
        W = self.get_W()
        return H, cost_list
コード例 #11
0
def train(conf, data):
    X = tf.placeholder(
        tf.float32,
        shape=[None, conf.img_height, conf.img_width, conf.channel])
    model = PixelCNN(X, conf)

    trainer = tf.train.RMSPropOptimizer(1e-3)
    gradients = trainer.compute_gradients(model.loss)

    clipped_gradients = [(tf.clip_by_value(_[0], -conf.grad_clip,
                                           conf.grad_clip), _[1])
                         for _ in gradients]
    optimizer = trainer.apply_gradients(clipped_gradients)

    saver = tf.train.Saver(tf.trainable_variables())

    with tf.Session() as sess:
        sess.run(tf.global_variable_initializer())
        if os.path.exists(conf.ckpt_file):
            saver.restore(sess, conf.ckpt_file)
            print "Model Restored"

        if conf.epochs > 0:
            print "Started Model Training..."
        pointer = 0
        for i in range(conf.epochs):
            for j in range(conf.num_batches):
                if conf.data == "mnist":
                    batch_X, batch_y = data.train.next_batch(conf.batch_size)
                    batch_X = binarize(batch_X.reshape([conf.batch_size, \
                            conf.img_height, conf.img_width, conf.channel]))
                    batch_y = one_hot(batch_y, conf.num_classes)
                else:
                    batch_X, pointer = get_batch(data, pointer,
                                                 conf.batch_size)
                data_dict = {X: batch_X}
                if conf.conditional is True:
                    data_dict[model.h] = batch_y
                _, cost = sess.run([optimizer, model.loss],
                                   feed_dict=data_dict)
            print "Epoch: %d, Cost: %f" % (i, cost)
            if (i + 1) % 10 == 0:
                saver.save(sess, conf.ckpt_file)
                generate_samples(sess, X, model.h, model.pred, conf, "")

        generate_samples(sess, X, model.h, model.pred, conf, "")
コード例 #12
0
def train(train_data, num_layers, num_epochs, batch_size, model_save_name):
    """Train the LSTM model, saving a checkpoint whenever the cost improves."""
    best_loss = 999
    print_step = 50
    global_step = 0
    hidden_size = 5700
    element_size = 57
    training_input = Input(batch_size, 30, train_data)
    model = Model(training_input,
                  is_training=True,
                  hidden_size=hidden_size,
                  num_layers=num_layers,
                  element_size=element_size)
    # BUG FIX: correct API name is global_variables_initializer.
    init_op = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run([init_op])
        # BUG FIX: tf.train.Coordinator was misspelled `Coordiantor`.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        saver = tf.train.Saver()
        # start training.
        for epoch in range(num_epochs):
            # BUG FIX: was `curent_state`; the loop below reads `current_state`.
            current_state = np.zeros([num_layers, 2, batch_size, hidden_size])
            curr_time = dt.datetime.now()
            for step in range(training_input.epoch_pieces):
                # run a training step, carrying the RNN state forward.
                cost, _, current_state = sess.run(
                    [model.cost, model.optimizer, model.state],
                    feed_dict={model.init_state: current_state})
                # print cost and seconds per step every print_step steps
                if step % print_step == 0:
                    # BUG FIX: was `print_iter` (undefined name).
                    seconds = (float(
                        (dt.datetime.now() - curr_time).seconds) / print_step)
                    curr_time = dt.datetime.now()
                    print(
                        "epoch {}, step {}, cost: {:.3f}, seconds per step: {:.3f}"
                        .format(epoch, step, cost, seconds))
                # save model checkpoint if cost is improved
                if cost < best_loss:
                    saver.save(sess, model_save_name, global_step=global_step)
                    global_step += 1
                    best_loss = cost
                    print('model saved.')
        # close threads
        coord.request_stop()
        coord.join(threads)
コード例 #13
0
def run():
    """Fit a linear regression by gradient descent and plot its predictions."""
    x_batch, y_batch = generate_dataset()
    x, y, y_pred, loss = linear_regression()
    # BUG FIX: the optimizer method is `minimize`, not `mimimizer`.
    optimizer = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
    # BUG FIX: correct API name is global_variables_initializer.
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        feed_dict = {x: x_batch, y: y_batch}
        for _ in range(30):
            loss_val, _ = sess.run([loss, optimizer], feed_dict)
            print('loss:', loss_val.mean())

        # Fetch predictions only (no optimizer op, so no parameter update).
        # BUG FIX: the session variable is `sess`, not `session`.
        y_pred_batch = sess.run(y_pred, {x: x_batch})
        plt.figure(1)
        plt.scatter(x_batch, y_batch)
        plt.plot(x_batch, y_pred_batch)
コード例 #14
0
    def testAtrousFullyConvolutionValue(self):
        """Verify dense feature extraction with atrous convolution.

        With atrous (dilated) convolution the per-pixel values at matching
        spatial positions stay the same; only the output resolution changes.
        """
        nominal_stride = 32
        for output_stride in [4, 8, 16, 32, None]:
            with slim.arg_scope(resnet_utils.resnet_arg_scope()):
                with tf.Graph().as_default():
                    with self.test_session() as sess:
                        tf.set_random_seed(0)
                        inputs = create_test_input(2, 81, 81, 3)
                        # Dense feature extraction followed by subsampling.
                        output, _ = self._resnet_small(
                            inputs,
                            None,
                            is_training=False,
                            global_pool=False,
                            output_stride=output_stride)
                        if output_stride is None:
                            factor = 1
                        else:
                            # Atrous convolution yields a denser output than
                            # normal convolution; subsample it back down to
                            # the nominal rate for comparison.
                            factor = nominal_stride // output_stride
                        output = resnet_utils.subsample(output, factor)
                        # Make the two networks use the same weights.
                        tf.get_variable_scope().reuse_variables()
                        # Feature extraction at the nominal network rate.
                        expected, _ = self._resnet_small(inputs,
                                                         None,
                                                         is_training=False,
                                                         global_pool=False)
                        # BUG FIX: correct API name is
                        # global_variables_initializer.
                        sess.run(tf.global_variables_initializer())
                        # Note: t.eval() is equivalent to
                        # tf.get_default_session().run(t); it requires the
                        # graph to be launched in a (default) session.
                        self.assertAllClose(output.eval(),
                                            expected.eval(),
                                            atol=1e-4,
                                            rtol=1e-4)
コード例 #15
0
 def testUnknownBatchSize(self):
     """Logits built with batch=None keep static shape [None,1,1,C] and
     resolve the batch dimension at run time."""
     batch = 2
     height, width = 65, 65
     global_pool = True
     num_classes = 10
     inputs = create_test_input(None, height, width, 3)
     with slim.arg_scope(resnet_utils.resnet_arg_scope()):
         logits, _ = self._resnet_small(inputs,
                                        num_classes,
                                        global_pool=global_pool,
                                        scope='resnet')
         self.assertTrue(logits.op.name.startswith('resnet/logits'))
         self.assertListEqual(logits.get_shape().as_list(),
                              [None, 1, 1, num_classes])
         images = create_test_input(batch, height, width, 3)
         with self.test_session() as sess:
             # BUG FIX: correct API name is global_variables_initializer.
             sess.run(tf.global_variables_initializer())
             # BUG FIX: feed the `inputs` placeholder built above; the
             # original keyed the feed dict on the builtin `input`.
             out_put = sess.run(logits, {inputs: images.eval()})
             # assertEquals is a deprecated alias of assertEqual.
             self.assertEqual(out_put.shape, (batch, 1, 1, num_classes))
コード例 #16
0
def train_neural_network(x):
    """Train the feed-forward MNIST classifier and print its test accuracy."""
    # prediction of the model
    prediction = neural_network_model(x)
    # BUG FIX: softmax_cross_entropy_with_logits takes keyword arguments.
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    # Adam with its default learning rate
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    # one epoch = one full feed-forward + backprop cycle
    hm_epoch = 50

    # BUG FIX: tf.Session was misspelled `tf.Sesson`.
    with tf.Session() as sess:
        # BUG FIX: correct API name is global_variables_initializer.
        sess.run(tf.global_variables_initializer())
        # training loop over the training data
        for epoch in range(hm_epoch):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                # BUG FIX: was `_, x - sess.run(...)` — a subtraction, not an
                # assignment; the batch cost must be bound to `c`, which the
                # next line accumulates.
                _, c = sess.run([optimizer, cost],
                                feed_dict={
                                    x: epoch_x,
                                    y: epoch_y
                                })
                epoch_loss += c
            print('Epoch', epoch, 'completed out of', hm_epoch, 'loss:',
                  epoch_loss)
        writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        # BUG FIX: was `mnist.text.labels`.
        print('Accuracy:',
              accuracy.eval({
                  x: mnist.test.images,
                  y: mnist.test.labels
              }))
コード例 #17
0
# BUG FIX: __file__ was written `_file_` (NameError at import time).
project_dir = path(__file__).resolve().parent.parent
USE_dir = project_dir.joinpath('Sentenca Universal do encoder')
os.environ["TFHUB_CACHE_DIR"] = str(USE_dir)

# download the Universal Sentence Encoder module
encoder = hub.Module(
    "https://tfhub.dev/google/universal-sentence-encoder-large/3")

# smoke-test the encoder
# BUG FIX: the two sentences were missing a separating comma, so Python
# concatenated them into ONE string and the (2, 512) assert below failed.
embeddings = encoder([
    "the quick brown fox jump pver the lazy dog.",
    "I am a sentence for wich I would like to get its embedding"
])

with tf.Session() as sess:
    # BUG FIX: correct API name is global_variables_initializer.
    sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
    embs = sess.run(embeddings)
    assert (embs.shape == (2, 512))

# move the encoder files out of the temporary hash subdirectory into USE_dir
temp_dir = [
    USE_dir.joinpath(dir) for dir in os.listdir(USE_dir)
    if os.path.isdir(USE_dir.joinpath(dir))
][0]
for f in os.listdir(temp_dir):
    shutil.move(str(temp_dir.joinpath(f)), USE_dir)

print('#' * 10)
print('#' * 10)
print('Done installing Universal Sentence Encoder')
print('#' * 10)
コード例 #18
0
ファイル: lstm.py プロジェクト: MrH2S/py
    )
    sample_output,sample_state = lstm_cell(
        sample_input,saved_sample_output,saved_sample_state
    )
    with tf.control_dependencies(
        [saved_sample_output.assign(sample_output),
        saved_sample_state.assign(sample_state)
        ]
    ):
        sample_prediction = tf.nn.softmax(tf.nn.xw_plus_b(sample_output,w,b))

NUM_STEPS = 7001
SUMMARY_FREQUENCY = 100

with tf.Session(graph=graph) as s:
    tf.global_variable_initializer().run()
    print('Initialized!')
    mean_loss = 0
    for step in range(NUM_STEPS):
        batches = train_batches.next()
        feed_dict = dict()
        for i in range(NUM_UNROLLINGS+1):
            feed_dict[train_data[i]] = batches[i]
        _,l,predictions,lr = s.run(
            [optimizer,loss,train_prediction,learning_rate],feed_dict=feed_dict
        )
        mean_loss += 1
        if step % SUMMARY_FREQUENCY == 0:
            if step > 0:
                mean_loss = mean_loss/SUMMARY_FREQUENCY
            #The mean loss is an estimate of the loss over the last few batches
コード例 #19
0
ファイル: new_train.py プロジェクト: hlang8160/char_rnn_hl
def train(args):
    """Train the char-RNN, optionally restoring from args.init_from.

    Validates any existing checkpoint for compatibility, dumps the config
    and vocab, then runs the epoch/batch loop, saving every
    args.save_every steps and at the very last batch.
    """
    data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
    args.vocab_size = data_loader.vocab_size
    if args.init_from is not None:
        # check if all necessary files exist
        assert os.path.isdir(
            args.init_from), " %s must be a a path" % args.init_from
        assert os.path.isfile(os.path.join(args.init_from, "config.pkl")
                              ), "config.pkl file does not exist in path %s" % args.init_from
        assert os.path.isfile(os.path.join(args.init_from, "chars_vocab.pkl")
                              ), "chars_vocab.pkl.pkl file does not exist in path %s" % args.init_from
        ckpt = tf.train.get_checkpoint_state(args.init_from)
        assert ckpt, "No checkpoint found"
        assert ckpt.model_checkpoint_path, "No model path found in checkpoint"

        # open old config and check if models are compatible
        with open(os.path.join(args.init_from, 'config.pkl'), 'rb') as f:
            saved_model_args = cPickle.load(f)
        need_be_same = ["model", "rnn_size", "num_layers", "seq_length"]
        for checkme in need_be_same:
            assert vars(saved_model_args)[checkme] == vars(args)[
                checkme], "Command line argument and saved model disagree on '%s' " % checkme

        # open saved vocab/dict and check if vocabs/dicts are compatible
        with open(os.path.join(args.init_from, 'chars_vocab.pkl'), 'rb') as f:
            saved_chars, saved_vocab = cPickle.load(f)
        assert saved_chars == data_loader.chars, "Data and loaded model disagree on character set!"
        assert saved_vocab == data_loader.vocab, "Data and loaded model disagree on dictionary mappings!"
    # create the save directory when it does not exist yet
    if not os.path.isdir(args.save_dir):
        os.makedirs(args.save_dir)
    with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
        cPickle.dump((data_loader.chars, data_loader.vocab), f)

    model = Model(args)

    with tf.Session() as sess:
        summaries = tf.summary.merge_all()
        writer = tf.summary.FileWriter(os.path.join(
            args.log_dir, time.strftime('%Y-%m-%d-%H-%M-%S')))
        writer.add_graph(sess.graph)

        # BUG FIX: correct API name is global_variables_initializer, and
        # tf.global_variables is a function (was referenced without parens).
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # restore model
        # BUG FIX: only the restore is conditional; the entire training loop
        # was nested inside this `if`, so training without init_from did
        # nothing at all.
        if args.init_from is not None:
            saver.restore(sess, ckpt.model_checkpoint_path)

        # training loop
        for e in range(args.num_epochs):
            # assign the decayed learning rate to model.lr
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay**e)))
            data_loader.reset_batch_pointer()

            # run the initial-state op to obtain a fresh RNN state
            state = sess.run(model.initial_state)

            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {model.input_data: x, model.targets: y}
                # carry the RNN state across batches
                for i, (c, h) in enumerate(model.initial_state):
                    feed[c] = state[i].c
                    feed[h] = state[i].h

                # BUG FIX: the original ran the train op twice per batch —
                # the first time without a feed dict, which fails on the
                # input placeholders. A single fed run suffices.
                summ, train_loss, state, _ = sess.run(
                    [summaries, model.cost, model.final_state, model.train_op], feed)
                writer.add_summary(summ, e * data_loader.num_batches + b)
                end = time.time()
                # BUG FIX: the step counter multiplied by num_epochs instead
                # of num_batches.
                print(
                    '{}/{} (epoch {}),train_loss={:.3f},time/batch={:.3f}'
                    .format(e * data_loader.num_batches + b,
                            args.num_epochs * data_loader.num_batches,
                            e, train_loss, end - start))

                # BUG FIX: the save condition was a SyntaxError (bare `or` on
                # a continuation line) and the save itself ran on EVERY batch
                # regardless of the condition.
                if (e * data_loader.num_batches + b) % args.save_every == 0 \
                        or (e == args.num_epochs - 1
                            and b == data_loader.num_batches - 1):
                    checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path,
                               global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
コード例 #20
0
def word2vec(batch_gen):
    """Build the graph for a skip-gram word2vec model and train it with NCE loss."""
    # Step 1: placeholders for input and output.
    # center_words must be int for the embedding lookup.
    with tf.name_scope('data'):
        center_words = tf.placeholder(tf.int32, [BATCH_SIZE],
                                      name='center_words')
        target_words = tf.placeholder(tf.int32, [BATCH_SIZE, 1],
                                      name='target_words')

    # Step 2: the embedding matrix (vocab size x embed size) — in word2vec
    # these weights ARE the model; initialized uniform in [-1, 1].
    with tf.name_scope('embedding_matrix'):
        embed_matrix = tf.Variable(tf.random_uniform([VOCAB_SIZE, EMBED_SIZE],
                                                     -1.0, 1.0),
                                   name='embed_matrix')

    # Step 3: inference — look up the embeddings of the center words.
    with tf.name_scope('loss'):
        embed = tf.nn.embedding_lookup(embed_matrix,
                                       center_words,
                                       name='embed')

        # Step 4: variables for NCE loss.
        # nce_weights: (vocab size x embed size), truncated normal with
        # stddev = 1 / sqrt(EMBED_SIZE); biases start at zero.
        nce_weights = tf.Variable(tf.truncated_normal([VOCAB_SIZE, EMBED_SIZE],
                                                      stddev=1.0 /
                                                      (EMBED_SIZE**0.5)),
                                  name='nce_weights')
        nce_biases = tf.Variable(tf.zeros(VOCAB_SIZE), name='nce_biases')

        # NCE loss averaged across the batch; note the inputs are the
        # embeddings of the center words, not the raw word ids.
        nce_loss = tf.nn.nce_loss(weights=nce_weights,
                                  biases=nce_biases,
                                  labels=target_words,
                                  inputs=embed,
                                  num_sampled=NUM_SAMPLED,
                                  num_classes=VOCAB_SIZE,
                                  name='loss')
        loss = tf.reduce_mean(nce_loss)

    # Step 5: optimizer.
    # BUG FIX: GradientDescentOptimizer lives in tf.train, not tf.
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(loss)

    with tf.Session() as sess:
        # BUG FIX: correct API name is global_variables_initializer.
        sess.run(tf.global_variables_initializer())

        total_loss = 0.0  # running loss over the last SKIP_STEP steps
        writer = tf.summary.FileWriter('./graphs/no_frills/', sess.graph)
        for index in range(NUM_TRAIN_STEPS):
            centers, targets = next(batch_gen)
            _, loss_batch = sess.run([optimizer, loss],
                                     feed_dict={
                                         center_words: centers,
                                         target_words: targets
                                     })

            total_loss += loss_batch
            if (index + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    index, total_loss / SKIP_STEP))
                total_loss = 0.0
        writer.close()
コード例 #21
0
ファイル: TF_mnist.py プロジェクト: Aaayue/learn_TF
def train(mnist):
    """Train a two-layer MNIST classifier with weight moving averages,
    L2 regularization and exponential learning-rate decay, then report
    validation accuracy every 1000 steps and test accuracy at the end.

    Args:
        mnist: tf.contrib.learn-style Datasets object with .train,
            .validation and .test splits (assumed — confirm at the caller).
    """
    # BUG FIX: tf.palceholder -> tf.placeholder (typo crashed at graph build).
    x = tf.placeholder(tf.float32, shape=[None, IUPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, shape=[None, OUTPUT_NODE], name='y-input')

    # Initial values for the two fully-connected layers.
    weight1 = tf.Variable(tf.truncated_normal([IUPUT_NODE, LAYER1_NODE], stddev=0.1))
    bias1 = tf.Variable(tf.constant(0.1, shape=[LAYER1_NODE]))

    weight2 = tf.Variable(tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev=0.1))
    bias2 = tf.Variable(tf.constant(0.1, shape=[OUTPUT_NODE]))

    # Forward pass without the moving average (used for the training loss).
    y = inference(x, None, weight1, bias1, weight2, bias2)

    # Exponential moving average over all trainable variables.
    global_step = tf.Variable(0, trainable=False)
    variable_avg = tf.train.ExponentialMovingAverage(
        MOVING_AVG_DECAY, global_step
    )

    # Op that updates the shadow (averaged) copies of the variables.
    variable_avg_op = variable_avg.apply(tf.trainable_variables())

    # Forward pass that reads the averaged weights.
    # BUG FIX: pass the EMA object, not the apply() op, to inference.
    avg_y_ = inference(x, variable_avg, weight1, bias1, weight2, bias2)

    # Cross-entropy loss on the non-averaged logits; labels come from the
    # one-hot placeholder via argmax.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1)
    )
    # Mean cross-entropy over the batch.
    cross_entropy_mean = tf.reduce_mean(cross_entropy, 0)

    # L2 regularization on the weight matrices only (not the biases).
    regulizer = tf.contrib.layers.l2_regularizer(LAMBDA)
    regulize_loss = regulizer(weight1) + regulizer(weight2)
    total_loss = cross_entropy_mean + regulize_loss

    # Learning-rate decay.
    # BUG FIX: exponential_dacay -> exponential_decay, mnist.trian -> mnist.train.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_ORIGIN,
        global_step,
        mnist.train.num_examples / BANCH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True
    )

    # One optimization step; global_step is incremented by minimize().
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(total_loss, global_step=global_step)

    # Group the gradient step with the moving-average update.
    with tf.control_dependencies([train_step, variable_avg_op]):
        train_op = tf.no_op(name='train')

    # Accuracy of the averaged model.
    # BUG FIX: use avg_y_ (previously computed but unused) for evaluation.
    correct_predict = tf.equal(tf.argmax(y_, 1), tf.argmax(avg_y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

    with tf.Session() as sess:
        # BUG FIX: global_variable_initializer -> global_variables_initializer.
        tf.global_variables_initializer().run()

        # Validation set.
        validate_feed = {x: mnist.validation.images, y_: mnist.validation.labels}

        # Test set.
        # BUG FIX: was a set literal {x, images, ...}; must be a feed dict.
        test_feed = {x: mnist.test.images, y_: mnist.test.labels}

        # Training loop.
        for i in range(TRAINING_STEP):

            if i % 1000 == 0:
                validate_acc = sess.run(accuracy, validate_feed)
                print('After %d training steps, validation accuracy using average model is %g' %(i, validate_acc))

            # BUG FIX: next_banch -> next_batch.
            xs, ys = mnist.train.next_batch(BANCH_SIZE)
            sess.run(train_op, feed_dict={x: xs, y_:ys})

        test_acc = sess.run(accuracy, test_feed)
        print('After %d training steps, test accuracy using average model is %g' %(i, test_acc))
コード例 #22
0
    features = tf.transpose(tf.stack([sepal_length, sepal_width, petal_length, petal_width]))
    return features, label_number


def train(total_loss):
    """Return a gradient-descent op that minimizes ``total_loss``."""
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    return optimizer.minimize(total_loss)

def evaluate(sess, x, y):
    """Print mean accuracy of inference(x) against integer labels y."""
    # BUG FIX: tf.arg_max is deprecated/removed; tf.argmax is the supported name.
    predicted = tf.cast(tf.argmax(inference(x), 1), tf.int32)
    print(sess.run(tf.reduce_mean(tf.cast(tf.equal(predicted, y), tf.float32))))

with tf.Session() as sess:

    # Build the graph first — variables created by inputs()/loss()/train()
    # must exist before the initializer runs.  (The original ran the
    # initializer before any variables were created.)
    x, y = inputs()

    total_loss = loss(x, y)
    train_op = train(total_loss)

    # BUG FIX: global_variable_initializer -> global_variables_initializer.
    sess.run(tf.global_variables_initializer())

    # Start the input-pipeline queue runners.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)

    training_steps = 1000
    for step in range(training_steps):
        sess.run([train_op])

        if step % 10 == 0:
            print('loss: ', sess.run([total_loss]))

    # Shut the queue runners down cleanly instead of leaking threads.
    coord.request_stop()
    coord.join(threads)
コード例 #23
0
  def train(self):
    """Train a 2-layer dropout MLP on MNIST and print per-epoch cost,
    validation accuracy, final test accuracy and a few sample predictions.

    Hyperparameters --learning_rate and --dropout_rate are read from the
    command line.
    """
    # BUG FIX: the module is argparse, not argparser.
    parser = argparse.ArgumentParser()
    parser.add_argument('--learning_rate', required=False, type=float, default=0.01)
    parser.add_argument('--dropout_rate', required=False, type=float, default=0.2)
    args = parser.parse_args()

    # BUG FIX: the reader helper is read_data_sets (plural).
    mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

    learning_rate = args.learning_rate
    print("### learning_rate: ", learning_rate)
    training_epochs = 5
    batch_size = 100

    X = tf.placeholder(tf.float32, [None, 784])
    Y = tf.placeholder(tf.float32, [None, 10])

    # BUG FIX: was named keep_probe but referenced as keep_prob everywhere below.
    keep_prob = tf.placeholder(tf.float32)

    # BUG FIX: xavier_initializer must be CALLED to produce an initializer.
    W1 = tf.get_variable("W1", shape=[784, 512],
                        initializer=tf.contrib.layers.xavier_initializer())
    # BUG FIX: tf.get_variable takes a name first; a literal initial value
    # belongs in tf.Variable.
    b1 = tf.Variable(tf.random_normal([512]))
    L1 = tf.nn.relu(tf.matmul(X, W1) + b1)
    L1 = tf.nn.dropout(L1, keep_prob=keep_prob)

    W2 = tf.get_variable("W2", shape=[512, 10],
                        initializer=tf.contrib.layers.xavier_initializer())
    b2 = tf.Variable(tf.random_normal([10]))

    # BUG FIX: was tf.matmul((L1, W2) + b2) — matmul takes two tensor args.
    hypothesis = tf.matmul(L1, W2) + b2

    # BUG FIX: the keyword is labels=, not label=.
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=hypothesis, labels=Y))
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

    sess = tf.Session()
    # BUG FIX: global_variables_initializer (plural).
    sess.run(tf.global_variables_initializer())

    # train the model
    for epoch in range(training_epochs) :
        avg_cost = 0
        total_batch = int(mnist.train.num_examples / batch_size)

        for i in range(total_batch) :
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # keep_prob is the fraction of units KEPT during training
            # (dropout_rate here feeds keep_prob directly — NOTE(review):
            # a rate of 0.2 keeps only 20% of units; confirm the intent).
            feed_dict = { X: batch_xs, Y: batch_ys, keep_prob: args.dropout_rate}
            C, _ = sess.run([cost, optimizer], feed_dict=feed_dict)
            avg_cost += C /total_batch

        print('Epoch: ', '%04d' % (epoch+1), 'cost=', '{:.9}'.format(avg_cost))

        correct_predition =tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_predition, tf.float32))

        # validation accuracy (dropout disabled: keep_prob = 1)
        print('Validation-accuracy=' + str(sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1 })))

    print('Learning Finished!')


    correct_prediction = tf.equal(tf.argmax(hypothesis, 1), tf.argmax(Y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    print('Accuracy: ', sess.run(accuracy, feed_dict={X: mnist.test.images, Y: mnist.test.labels, keep_prob: 1}))

    for epoch in range(5) :
        r = random.randint(0, mnist.test.num_examples - 1)
        # BUG FIX: labels[r:r] is an empty slice; r:r+1 selects one sample.
        print("\nTest Image -> ", sess.run(tf.argmax(mnist.test.labels[r:r+1], 1)))

        # plt.imshow(mnist.test.images[r:r+1].reshape(28, 28), cmap='Grey', interpolation='nearest')
        # plt.show()
        print("Prediction: ", sess.run(tf.argmax(hypothesis, 1), feed_dict={X: mnist.test.images[r:r+1], keep_prob: 1}))
コード例 #24
0
# Stacked autoencoder: three hidden ELU layers with He init and L2
# regularization, trained with Adam on the reconstruction error.
# BUG FIX: variance_scalling_initializer -> variance_scaling_initializer.
he_init = tf.contrib.layers.variance_scaling_initializer()
l2_regularizer = tf.contrib.layers.l2_regularizer(l2_reg)
# BUG FIX: the tf.layers.dense keyword is kernel_initializer, not ker_initializer.
my_dense_layer = partial(tf.layers.dense,
                         activation=tf.nn.elu,
                         kernel_initializer=he_init,
                         kernel_regularizer=l2_regularizer)

hidden1 = my_dense_layer(X, n_hidden1)
# BUG FIX: layer sizes are the n_hidden* constants; the originals referenced
# hidden2/hidden3 before assignment (NameError).
hidden2 = my_dense_layer(hidden1, n_hidden2)
hidden3 = my_dense_layer(hidden2, n_hidden3)
outputs = my_dense_layer(hidden3, n_outputs, activation=None)

# MSE reconstruction loss plus the collected L2 penalties.
# BUG FIX: the output tensor is named outputs, not output.
reconstruction_loss = tf.reduce_mean(tf.square(outputs - X))
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
loss = tf.add_n([reconstruction_loss] + reg_losses)

optimizer = tf.train.AdamOptimizer(learning_rate)
training_op = optimizer.minimize(loss)
# BUG FIX: global_variables_initializer (plural).
init = tf.global_variables_initializer()

n_epochs = 5
batch_size = 150

with tf.Session() as sess:
    init.run()
    for epoch in range(n_epochs):
        n_batches = mnist.train.num_examples // batch_size
        for iteration in range(n_batches):
            # labels are unused: the autoencoder reconstructs X itself.
            X_batch, y_batch = mnist.train.next_batch(batch_size)
            sess.run(training_op, feed_dict={X: X_batch})
コード例 #25
0
    def train(self, config):
        """Run the DCGAN adversarial training loop for config.epoch epochs.

        Alternates one discriminator update with two generator updates per
        batch (twice so d_loss does not collapse to zero), writes summaries,
        periodically renders sample images, and checkpoints every 500 steps.
        """
        d_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
            .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1)\
            .minimize(self.g_loss, var_list=self.g_vars)

        # BUG FIX: correct initializer names (global_variables_initializer /
        # initialize_all_variables) and catch only the AttributeError raised
        # on old TF versions instead of a bare except.
        try:
            tf.global_variables_initializer().run()
        except AttributeError:
            tf.initialize_all_variables().run()

        self.g_sum = merge_summary([
            self.z_sum, self.d__sum, self.G_sum, self.d_loss_face_sum,
            self.g_loss_sum
        ])
        self.d_sum = merge_summary(
            [self.z_sum, self.d_sum, self.d_loss_real_sum, self.d_loss_sum])
        self.writer = SummaryWriter('./logs', self.sess.graph)

        # Fixed noise vectors reused for all periodic samples.
        # BUG FIX: np.random_uniform -> np.random.uniform.
        sample_z = np.random.uniform(-1, 1, size=(self.sample_num, self.z_dim))

        if config.dataset == 'mnist':
            sample_inputs = self.data_X[0:self.sample_num]
            sample_labels = self.data_y[0:self.sample_num]
        else:
            sample_files = self.data[0:self.sample_num]
            sample = [
                get_image(sample_file,
                          input_height=self.input_height,
                          input_width=self.input_width,
                          resize_height=self.output_height,
                          resize_width=self.output_width,
                          crop=self.crop,
                          grayscale=self.grayscale)
                for sample_file in sample_files
            ]
            if (self.grayscale):
                # Add a trailing channel axis for single-channel images.
                sample_inputs = np.array(sample).astype(np.float32)[:, :, :,
                                                                    None]
            else:
                sample_inputs = np.array(sample).astype(np.float32)

        counter = 1
        start_time = time.time()
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)

        if could_load:
            counter = checkpoint_counter
            print(' [*] Load SUCCESS')
        else:
            print(' [!] Load failed..')

        for epoch in xrange(config.epoch):
            # BUG FIX: the config attribute is dataset, not data.
            if config.dataset == 'mnist':
                batch_idxs = min(len(self.data_X),
                                 config.train_size) // config.batch_size
            else:
                # BUG FIX: set.input_fname_pattern -> self.input_fname_pattern.
                self.data = glob(
                    os.path.join('./data', config.dataset,
                                 self.input_fname_pattern))
                batch_idxs = min(len(self.data),
                                 config.train_size) // config.batch_size

            for idx in xrange(0, batch_idxs):
                if config.dataset == 'mnist':
                    batch_images = self.data_X[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                    batch_labels = self.data_y[idx *
                                               config.batch_size:(idx + 1) *
                                               config.batch_size]
                else:
                    batch_files = self.data[idx * config.batch_size:(idx + 1) *
                                            config.batch_size]
                    batch = [
                        get_image(batch_file,
                                  input_height=self.input_height,
                                  input_width=self.input_width,
                                  resize_height=self.output_height,
                                  resize_width=self.output_width,
                                  crop=self.crop,
                                  grayscale=self.grayscale)
                        for batch_file in batch_files
                    ]
                    if self.grayscale:
                        batch_images = np.array(batch).astype(
                            np.float32)[:, :, :, None]
                    else:
                        batch_images = np.array(batch).astype(np.float32)

                # Fresh noise for this batch.
                batch_z = np.random.uniform(
                    -1, 1, [config.batch_size, self.z_dim]).astype(np.float32)

                if config.dataset == 'mnist':
                    # Update D network
                    _, summary_str = self.sess.run(
                        [d_optim, self.d_sum],
                        feed_dict={
                            self.inputs: batch_images,
                            self.z: batch_z,
                            self.y: batch_labels,
                        })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels,
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={
                                                       self.z: batch_z,
                                                       self.y: batch_labels
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    errD_fake = self.d_loss_fake.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                    errD_real = self.d_loss_real.eval({
                        self.inputs: batch_images,
                        self.y: batch_labels
                    })
                    errG = self.g_loss.eval({
                        self.z: batch_z,
                        self.y: batch_labels
                    })
                else:
                    # Update D network
                    # BUG FIX: was assigned to summary_srt but summary_str was
                    # logged below (NameError / stale summary).
                    _, summary_str = self.sess.run([d_optim, self.d_sum],
                                                   feed_dict={
                                                       self.inputs:
                                                       batch_images,
                                                       self.z: batch_z
                                                   })
                    self.writer.add_summary(summary_str, counter)

                    # Update G network
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})
                    self.writer.add_summary(summary_str, counter)

                    # Run g_optim twice to make sure that d_loss does not go to zero (different from paper)
                    _, summary_str = self.sess.run([g_optim, self.g_sum],
                                                   feed_dict={self.z: batch_z})

                    errD_fake = self.d_loss_fake.eval({self.z: batch_z})
                    errD_real = self.d_loss_real.eval(
                        {self.inputs: batch_images})
                    errG = self.g_loss.eval({self.z: batch_z})

                counter += 1
                print('Epoch: [%2d] [%4d/%4d] time: %4.4f, d_loss: %.8f' %
                      (epoch, idx, batch_idxs, time.time() - start_time,
                       errD_fake + errD_real + errG))

                if np.mod(counter, 100) == 1:
                    if config.dataset == 'mnist':
                        samples, d_loss, g_loss = self.sess.run(
                            [self.sampler, self.d_loss, self.g_loss],
                            feed_dict={
                                self.z: sample_z,
                                self.inputs: sample_inputs,
                                self.y: sample_labels
                            })
                        # BUG FIX: config.smaple_dir -> config.sample_dir.
                        save_images(
                            samples, image_manifold_size(samples.shape[0]),
                            './{}/train_{:02d}_{:04d}.png'.format(
                                config.sample_dir, epoch, idx))
                        print(
                            '[Sample] d_loss: {:0.8f}, g_loss: {:0.8f}'.format(
                                d_loss, g_loss))
                    else:
                        try:
                            samples, d_loss, g_loss = self.sess.run(
                                [self.sampler, self.d_loss, self.g_loss],
                                feed_dict={
                                    self.z: sample_z,
                                    self.inputs: sample_inputs
                                })
                            # BUG FIX: sample.shape -> samples.shape (the batch
                            # just sampled, not the file-loading temp list).
                            save_images(
                                samples, image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(
                                    config.sample_dir, epoch, idx))
                            print('[Sample] d_loss: {:0.8f}, g_loss: {:0.8f}'.
                                  format(d_loss, g_loss))
                        except Exception:
                            # Best-effort sampling: keep training even if one
                            # sample render fails.
                            print('one pic error!...')

                if np.mod(counter, 500) == 2:
                    self.save(config.checkpoint_dir, counter)