Example #1
def run_generator(fig_name, cval):

    with tf.Session() as sess:

        # init session
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

        feed_dic = {}
        for t, c in zip(target_cval, cval):
            feed_dic[t] = c

        # run generator
        imgs = sess.run(gen, feed_dic)

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].plot(imgs[i * 10 + j])
                ax[i][j].set_axis_off()
        plt.savefig('asset/train/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/%s"' % fig_name)
        plt.close()
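A minimal invocation sketch for the function above, assuming the module-level `gen` output tensor and `target_cval` placeholder list that it closes over; the random code values are purely illustrative:

import numpy as np

cval = [np.random.uniform(-1., 1., size=(100,)) for _ in target_cval]
run_generator('sample.png', cval)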
Example #2
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:
        tf.sg_init(sess)
        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        imgs = sess.run(gen, {target_num: num,
                              target_cval_1: x1,
                              target_cval_2: x2})

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].plot(imgs[i * 10 + j, :, 0], color='b', linewidth=0.25)
                # Turn off tick labels only
                # ax[i][j].set_axis_off()
                ax[i][j].set_xticks([])
                ax[i][j].set_yticks([])

        plt.savefig('asset/train/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/%s"' % fig_name)
        plt.close()
Example #3
def main():  
    g = ModelGraph()
        
    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))
        
        hits = 0
        num_imgs = 0
        
        with tf.sg_queue_context(sess):
            # loop end-of-queue
            while True:
                try:
                    logits, y = sess.run([g.logits, g.y]) # (16, 28) 
                    preds = np.squeeze(np.argmax(logits, -1)) # (16,)
                     
                    hits += np.equal(preds, y).astype(np.int32).sum()
                    num_imgs += len(y)
                    print "%d/%d = %.02f" % (hits, num_imgs, float(hits) / num_imgs)
                except:
                    break
                
        print "\nFinal result is\n%d/%d = %.02f" % (hits, num_imgs, float(hits) / num_imgs)
Example #4
	def train(self):
		predict = self.forward(Mnist.train.image)

		#######GP
		sess = tf.Session()
		with tf.sg_queue_context(sess):
			tf.sg_init(sess)
			trainf = sess.run([Mnist.train.image])[0]
			n, w, h, c = trainf.shape
			print(trainf.shape)
			np.savetxt('./image.txt', trainf[1, :, :, 0])
			#print trainf[1, :, :, 0]
			#plt.imshow(trainf[1, :, :, 0])
			#plt.axis('off')
			#plt.show()
			#print type(trainf[1, :, :, 0])

			transfer = np.zeros((n, w, h, c))
			for i in range(n):
				candi = random.randint(0, n - 1)
				#print GP(trainf[i, :, :, 0], trainf[candi, :, :, 0])

				#transfer[i, :, :, :] = GP(trainf[i, :, :, :], trainf[candi, :, :, :])
				#print trainsfer[i, :, :, :]
				t = tf.convert_to_tensor(transfer, dtype=tf.float32)
				gp_predict = predict.sg_reuse(input=t)
				#print trainf.shape
		sess.close()                    
Example #5
def generate(sample_image):
    start_time = time.time()

    g = ModelGraph()

    with tf.Session() as sess:
        # We need to initialize variables in this case because the Variable `generator/x` will not be restored.
        tf.sg_init(sess)

        var_list = [v for v in tf.global_variables() if "generator" not in v.name]
        saver = tf.train.Saver(var_list)
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        i = 0
        while True:
            mse, _ = sess.run([g.mse, g.train_gen],
                              {g.y: transform_image(sample_image)})  # (16, 28)

            if time.time() - start_time > 60:  # Save every 60 seconds
                gen_image = sess.run(g.x)
                gen_image = np.squeeze(gen_image)
                misc.imsave('gen_images/%s/gen_%.2f.jpg' % (label, mse),
                            gen_image)

                start_time = time.time()
                i += 1
                if i == 60: break  # Finish after 1 hour
Example #6
def run_generator(num, x1, x2, fig_name='sample.png'):
    with tf.Session() as sess:

        tf.sg_init(sess)

        # restore parameters
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/infogan'),
                      category='generator')

        # run generator
        imgs = sess.run(gen, {
            target_num: num,
            target_cval_1: x1,
            target_cval_2: x2
        })

        # plot result
        _, ax = plt.subplots(10, 10, sharex=True, sharey=True)
        for i in range(10):
            for j in range(10):
                ax[i][j].imshow(imgs[i * 10 + j], 'gray')
                ax[i][j].set_axis_off()
        plt.savefig('asset/train/infogan/' + fig_name, dpi=600)
        tf.sg_info('Sample image saved to "asset/train/infogan/%s"' % fig_name)
        plt.close()
Example #7
    def __init__(self):
        # set log level to debug
        tf.sg_verbosity(10)

        # batch size
        self.batch_size = 1

        # vocabulary size
        self.voca_size = sttwdata.voca_size

        # mfcc feature of audio
        self.x = tf.placeholder(dtype=tf.sg_floatx,
                                shape=(self.batch_size, None, 20))

        # encode audio feature
        self.logit = get_logit(self.x, voca_size=self.voca_size)

        # sequence length except zero-padding
        self.seq_len = tf.not_equal(self.x.sg_sum(axis=2),
                                    0.).sg_int().sg_sum(axis=1)

        # run network
        self.session = tf.Session()
        tf.sg_init(self.session)
        self.saver = tf.train.Saver()
        self.saver.restore(self.session,
                           tf.train.latest_checkpoint('asset/train'))
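A hedged usage sketch for the recognizer above; the class name `SpeechToText` is hypothetical, and the MFCC preprocessing mirrors Example #28 further down:

import librosa
import numpy as np

model = SpeechToText()
wav, sr = librosa.load('sample.wav', mono=True, sr=16000)
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, 16000), axis=0),
                    [0, 2, 1])
logits = model.session.run(model.logit, {model.x: mfcc})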
Example #8
    def train(self):  # train baseline model
        input_ph = tf.placeholder(shape=[batch_size, 28, 28, 1],
                                  dtype=tf.float32)
        label_ph = tf.placeholder(shape=[
            batch_size,
        ], dtype=tf.int32)

        predict = self.forward(input_ph)

        loss_tensor = tf.reduce_mean(predict.sg_ce(target=label_ph))

        # use to update network parameters
        optim = tf.sg_optim(loss_tensor, optim='Adam', lr=1e-3)

        # use saver to save a new model
        saver = tf.train.Saver()

        sess = tf.Session()
        with tf.sg_queue_context(sess):
            # initialize variables
            tf.sg_init(sess)

        # validation
        acc = (predict.sg_reuse(
            input=Mnist.valid.image).sg_softmax().sg_accuracy(
                target=Mnist.valid.label, name='validation'))

        tf.sg_train(loss=loss_tensor,
                    eval_metric=[acc],
                    max_ep=max_ep,
                    save_dir=save_dir,
                    ep_size=Mnist.train.num_batch,
                    log_interval=10)
Example #9
def sg_print(tensor_list):
    r"""Simple tensor printing function for debugging.
    Prints the value, shape, and data type of each tensor in the list.
    
    Args:
      tensor_list: A list/tuple of tensors or a single tensor.
      
    Returns:
      The value of the tensors.
      
    For example,
    
    ```python
    import sugartensor as tf
    a = tf.constant([1.])
    b = tf.constant([2.])
    out = tf.sg_print([a, b])
    # Should print [ 1.] (1,) float32
    #              [ 2.] (1,) float32
    print(out)
    # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
    ``` 
    """
    # to list
    if not isinstance(tensor_list, (list, tuple)):
        tensor_list = [tensor_list]

    # evaluate tensor list with queue runner
    with tf.Session() as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor_list)
            for r in res:
                print(r, r.shape, r.dtype)
    return res
Example #10
def eval():
    # Load graph
    g = Graph(mode="inference")
    print("Graph Loaded")

    with tf.Session() as sess:
        # Initialize variables
        tf.sg_init(sess)

        # Restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train'))
        print("Restored!")
        mname = open('asset/train/checkpoint',
                     'r').read().split('"')[1]  # model name

        # Load data
        X, Sources, Targets = load_test_data(input_reverse=Hp.reverse_inputs)
        char2idx, idx2char = load_vocab()

        with codecs.open(mname, "w", "utf-8") as fout:
            list_of_refs, hypotheses = [], []
            for i in range(len(X) // Hp.batch_size):
                # Get mini-batches
                x = X[i * Hp.batch_size:(i + 1) * Hp.batch_size]  # mini-batch
                sources = Sources[i * Hp.batch_size:(i + 1) * Hp.batch_size]
                targets = Targets[i * Hp.batch_size:(i + 1) * Hp.batch_size]

                preds_prev = np.zeros((Hp.batch_size, Hp.maxlen), np.int32)
                preds = np.zeros((Hp.batch_size, Hp.maxlen), np.int32)
                for j in range(Hp.maxlen):
                    # predict next character
                    outs = sess.run(g.preds, {g.x: x, g.y_src: preds_prev})
                    # update character sequence
                    if j < Hp.maxlen - 1:
                        preds_prev[:, j + 1] = outs[:, j]
                    preds[:, j] = outs[:, j]

                # Write to file
                for source, target, pred in zip(sources, targets,
                                                preds):  # sentence-wise
                    got = "".join(idx2char[idx] for idx in pred).split(u"␃")[0]
                    fout.write("- source: " + source + "\n")
                    fout.write("- expected: " + target + "\n")
                    fout.write("- got: " + got + "\n\n")
                    fout.flush()

                    # For bleu score
                    ref = target.split()
                    hypothesis = got.split()
                    if len(ref) > 2:
                        list_of_refs.append([ref])
                        hypotheses.append(hypothesis)

            # Get bleu score
            score = corpus_bleu(list_of_refs, hypotheses)
            fout.write("Bleu Score = " + str(100 * score))
Example #11
    def generate(self, prev_midi):
        with tf.Session() as sess:
            tf.sg_init(sess)
            # saver = tf.train.Saver()
            # saver.restore(sess, tf.train.latest_checkpoint('save/train/small'))
            # KDK: choose self.next_token or self.preds
            # out = sess.run(self.next_token, {self.x: prev_midi})
            tf.sg_restore(sess, tf.train.latest_checkpoint('save/train/small'))
            out = sess.run(self.next_token, {self.x: prev_midi})

            return out
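A hedged call sketch; `model` is an instance of the surrounding class and `seed_midi` is a hypothetical array shaped to match the `self.x` placeholder:

next_token = model.generate(seed_midi)

Note that every call opens a fresh tf.Session and restores the checkpoint, so repeated generation is expensive; keeping one session alive (as Example #7 does) avoids that overhead.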
Example #12
    def test(self):
        # predict = self.forward(Mnist.test.image)

        # acc = (predict.sg_softmax()
        #        .sg_accuracy(target=Mnist.test.label, name='test'))

        sess = tf.Session()
        with tf.sg_queue_context(sess):
            tf.sg_init(sess)
            testf = sess.run([Mnist.test.image])[0]
            # print(testf.shape)
            n, w, h, c = testf.shape
            tmp0 = np.zeros((n * w, h))
            tmp02 = np.zeros((n * w, h))
            tmp05 = np.zeros((n * w, h))
            tmp08 = np.zeros((n * w, h))
            tmp90 = np.zeros((n * w, h))
            tmp_90 = np.zeros((n * w, h))
            for i in range(n):
                tmp0[i * w:(i + 1) * w, 0:h] = testf[i, :, :, 0]
                tmp02[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.2)
                tmp05[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.5)
                tmp08[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.8)
                tmp90[i * w:(i + 1) * w, 0:h] = rotate90(testf[i, :, :, 0])
                tmp_90[i * w:(i + 1) * w, 0:h] = rotate_90(testf[i, :, :, 0])
                # testf[i, :, :, 0] = addnoisy(testf[i, :, :, 0], 0.0)
                # testf[i, :, :, 0] = rotate90(testf[i, :, :, 0])
                # testf[i, :, :, 0] = rotate_90(testf[i, :, :, 0])
                # print(testf[i, :, :, 0])
            np.savetxt('./image0.txt', tmp0)
            np.savetxt('./image02.txt', tmp02)
            np.savetxt('./image05.txt', tmp05)
            np.savetxt('./image08.txt', tmp08)
            np.savetxt('./image90.txt', tmp90)
            np.savetxt('./image_90.txt', tmp_90)

            testf_tensor = tf.convert_to_tensor(testf, dtype=tf.float32)
            predict = self.forward(testf_tensor)

            acc = (predict.sg_softmax()
                   .sg_accuracy(target=Mnist.test.label, name='test'))

            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(save_dir))

            total_accuracy = 0
            for i in range(Mnist.test.num_batch):
                total_accuracy += np.sum(sess.run([acc])[0])

            print('Evaluation accuracy: {}'.format(
                float(total_accuracy) / (Mnist.test.num_batch * batch_size)))

        # close session
        sess.close()
Example #13
def sg_print(tensor_list):
    # to list
    if not isinstance(tensor_list, (list, tuple)):
        tensor_list = [tensor_list]

    # evaluate tensor list with queue runner
    with tf.Session() as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor_list)
            for r in res:
                print(r, r.shape, r.dtype)
    return res
Example #14
def main():
    g = ModelGraph(mode="test")

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        if Hyperparams.isqwerty:
            save_path = "qwerty/asset/train/ckpt"
        else:
            save_path = "nine/asset/train/ckpt"
        saver.restore(sess, tf.train.latest_checkpoint(save_path))
        mname = open(save_path + "/checkpoint", 'r').read().split('"')[1]

        nums, X, expected_list = load_test_data()
        pnyn2idx, idx2pnyn, hanzi2idx, idx2hanzi = load_vocab()

        with codecs.open('data/output_{}.txt'.format(mname), 'w',
                         'utf-8') as fout:
            cum_score = 0
            full_score = 0
            for step in range(len(X) // 64 + 1):
                n = nums[step * 64:(step + 1) * 64]  #number batch
                x = X[step * 64:(step + 1) * 64]  # input batch
                e = expected_list[step * 64:(step + 1) *
                                  64]  # batch of ground truth strings

                # predict characters
                logits = sess.run(g.logits, {g.x: x})
                preds = np.squeeze(np.argmax(logits, -1))

                for nn, xx, pp, ee in zip(n, x, preds, e):  # sentence-wise
                    got = ''
                    for xxx, ppp in zip(xx, pp):  # character-wise
                        if xxx == 0: break
                        if xxx == 1 or ppp == 1:
                            got += "*"
                        else:
                            got += idx2hanzi.get(ppp, "*")
                    got = got.replace("_", "")  # Remove blanks

                    error = distance.levenshtein(ee, got)
                    score = len(ee) - error
                    cum_score += score
                    full_score += len(ee)

                    fout.write(u"{}\t{}\t{}\t{}\n".format(nn, ee, got, score))
            fout.write(u"Total acc.: {}/{}={}\n".format(
                cum_score, full_score, round(float(cum_score) / full_score,
                                             2)))
Example #15
def load_model():
    # load the pre-trained Keras model (here we are using a model
    # pre-trained on ImageNet and provided by Keras, but you can
    # substitute in your own networks just as easily)
    global model, sess
    init_model()
    # run network
    sess = tf.Session()
    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    # Saver.restore returns None, so don't overwrite the global `model`
    # (init_model() is assumed to have built it)
    saver.restore(sess, tf.train.latest_checkpoint('wavenet_train'))
Example #16
def predict_wavenet(mfcc):
    with tf.Session().as_default() as sess:

        # init variables
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, train_model)
        # run session
        label = sess.run(y, feed_dict={x: mfcc})

        # print label
        return data.print_index(label)
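A hedged usage sketch; `x`, `y`, and `train_model` are assumed to be module-level globals, and the MFCC preprocessing mirrors Example #28 below:

import librosa
import numpy as np

wav, sr = librosa.load('speech.wav', mono=True, sr=16000)
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, 16000), axis=0),
                    [0, 2, 1])
print(predict_wavenet(mfcc))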
Example #17
def genIt(name='bird'):
    z = tf.random_normal((batch_size, rand_dim))
    gen = generator(z)
    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.sg_phase().assign(False)))
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])
        fake_features = []
        for i in range(100):
            fake_features.append(sess.run(gen))
    np.save('../data/fake_' + name + '_negative.npy',
            np.array(fake_features).reshape((-1, 4096)))
Example #18
def main():
    graph = ModelGraph("test")

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        X, Y = load_data("test")
        idx2chr = load_charmaps()[0]

        with codecs.open('results.txt', 'w', 'utf-8') as fout:
            results = []
            for step in range(len(X) // Hyperparams.batch_size - 1):
                X_batch = X[step * Hyperparams.batch_size:(step + 1) *
                            Hyperparams.batch_size]
                Y_batch = Y[step * Hyperparams.batch_size:(step + 1) *
                            Hyperparams.batch_size]

                # predict characters
                logits = sess.run(graph.logits, {graph.X_batch: X_batch})
                preds = np.squeeze(np.argmax(logits, -1))

                for x, y, p in zip(X_batch, Y_batch, preds):  # sentence-wise
                    ground_truth = ''
                    predicted = ''
                    for xx, yy, pp in zip(x, y, p):  # character-wise
                        if xx == 0: break
                        else:
                            predicted += idx2chr.get(xx, "*")
                            ground_truth += idx2chr.get(xx, "*")
                        if pp == 1: predicted += " "
                        if yy == 1: ground_truth += " "

                        if pp == yy: results.append(1)
                        else: results.append(0)

                    fout.write(u"▌Expected: " + ground_truth + "\n")
                    fout.write(u"▌Got: " + predicted + "\n\n")
            fout.write(u"Final Accuracy = %d/%d=%.2f" %
                       (sum(results), len(results),
                        float(sum(results)) / len(results)))
Example #19
    def test(self):
        print('Testing model {}: addnoise={}, rotate={}, var={}'.format(
            save_dir, addnoise, rotate, var))
        input_ph = tf.placeholder(shape=[batch_size, 28, 28, 1],
                                  dtype=tf.float32)
        label_ph = tf.placeholder(shape=[
            batch_size,
        ], dtype=tf.int32)

        predict = self.forward(input_ph)

        acc = (predict.sg_softmax().sg_accuracy(target=label_ph, name='test'))

        sess = tf.Session()
        with tf.sg_queue_context(sess):
            tf.sg_init(sess)

            saver = tf.train.Saver()
            saver.restore(sess, tf.train.latest_checkpoint(save_dir))

            total_accuracy = 0
            for i in range(Mnist.test.num_batch):
                [image_array,
                 label_array] = sess.run([Mnist.test.image, Mnist.test.label])

                if addnoise:
                    image_array[0, :, :, 0] = addnoisy(image_array[0, :, :, 0],
                                                       var)
                if rotate:
                    image_array[0, :, :, 0] = rotate_90(image_array[0, :, :,
                                                                    0])

                acc_value = sess.run([acc],
                                     feed_dict={
                                         input_ph: image_array,
                                         label_ph: label_array
                                     })[0]
                total_accuracy += np.sum(acc_value)

            print('Evaluation accuracy: {}'.format(
                float(total_accuracy) / (Mnist.test.num_batch * batch_size)))

        # close session
        sess.close()
Example #20
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[1070, 3], [1070, 3]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    print(x.shape, y.shape)
    x = np.loadtxt('EM.txt', dtype='float32') / 1500
    y = np.loadtxt('FM.txt', dtype='float32')[:, :100] / 1500
    x = tf.convert_to_tensor(np.expand_dims(np.rollaxis(x, axis=0), axis=0))
    y = tf.convert_to_tensor(np.expand_dims(np.rollaxis(y, axis=0), axis=0))

    print(x.shape, y.shape)

    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
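A hedged invocation sketch for the test routine above; the tfrecords file and weight directory are placeholder paths:

Sour, Targ, Trans_S = test('data/points.tfrecords', 'asset/train/',
                           steps=100000, lll=2000)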
Example #21
def main():
    g = ModelGraph(mode="test")

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train'))
        print("Restored!")
        mname = open('asset/train/checkpoint',
                     'r').read().split('"')[1]  # model name

        char2idx, idx2char = load_char_vocab()
        word2idx, idx2word = load_word_vocab()

        previous = [0] * 50  # a stack for previous words
        para = "EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE"
        ctx = [0] * 50

        while True:
            key = readchar.readkey().lower()

            if key == readchar.key.BACKSPACE:
                ctx.insert(0, previous.pop())
                ctx.pop()
                previous.insert(0, 0)

            elif key == readchar.key.ESC:
                break

            else:
                key_idx = char2idx[key]
                ctx.append(key_idx)
                ctx.pop(0)

            logits = sess.run(g.logits, {g.x: np.expand_dims(ctx, 0)})
            preds = logits.argsort()[0][-3:]
            # pred = np.argmax(logits, -1)[0]
            predword1, predword2, predword3 = [
                idx2word.get(pred) for pred in preds
            ]
            print(predword1, ' ', predword2, ' ', predword3)
Example #22
def testIt():
    data = raw
    positive = np.array(data.label_train) > 0
    x = tf.placeholder(tf.float32, [None, 4096])
    y = tf.placeholder(tf.float32)
    disc_real = discriminator(x)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.cast(disc_real > 0.5, "float"), y), tf.float32))
    np.set_printoptions(precision=3, suppress=True)
    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.sg_phase().assign(False)))
        # restore parameters
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])
        ans = sess.run(disc_real, feed_dict={x: np.array(data.test)})
        print(np.sum(ans > 0.5))
        np.save('dm_bird.npy', ans)
Example #23
def test1():
    '''
    Predicts all at once.
    '''
    X, Y = preprocess()
    g = Graph(is_train=False)

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        total_blanks, total_hits = 0, 0
        for x_3d, y_2d in zip(X, Y):  # problem-wise x: (9, 9, 1), y: (9, 9)
            x_2d = np.squeeze(x_3d, -1)  #(9, 9)
            x_4d = np.expand_dims(x_3d, 0)  # (1, 9, 9, 1)
            while 1:
                logits = sess.run(g.logits,
                                  {g.X: x_4d})  # (1, 9, 9, 10) float32
                preds = np.squeeze(np.argmax(logits, axis=-1),
                                   0)  # (9, 9) # most probable numbers

                expected = y_2d[x_2d == 0]
                got = preds[x_2d == 0]
                hits = np.equal(expected, got).sum()

                result = np.where(x_2d == 0, preds, y_2d).astype(int)

                print(result)
                print("Acc.=%d/%d=%.2f" % (hits, len(expected),
                                           float(hits) / len(expected)))

                total_blanks += len(expected)
                total_hits += hits
                break

        print "Total Accuracy = %d/%d=%.2f" % (
            total_hits, total_blanks, float(total_hits) / total_blanks)
Example #24
def main():
    g = ModelGraph(is_train=False)

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # Or you could use pretrained model which can be downloaded from here
        # https://drive.google.com/open?id=0B5M-ed49qMsDQ1dEYXF3cTVNM1E
        #         saver.restore(sess, 'model-019-1239684')

        sents, X = vectorize_input()
        idx2hanja = load_charmaps()[-1]

        with codecs.open('data/output.txt', 'w', 'utf-8') as fout:
            for step in range(len(X) // Hyperparams.batch_size + 1):
                inputs = sents[step * Hyperparams.batch_size:(step + 1) *
                               Hyperparams.batch_size]  # batch
                x = X[step * Hyperparams.batch_size:(step + 1) *
                      Hyperparams.batch_size]  # batch

                # predict characters
                logits = sess.run(g.logits, {g.x: x})
                preds = np.squeeze(np.argmax(logits, -1))
                for ii, xx, pp in zip(inputs, x, preds):  # sentence-wise
                    got = ''
                    for ii, xxx, ppp in zip(ii, xx, pp):  # character-wise
                        if xxx == 0: break
                        elif xxx == 1 or ppp == 1:
                            got += ii
                        else:
                            got += idx2hanja.get(ppp, "*")

                    fout.write(got + "\n")
Example #25
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[91, 2], [91, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Example #26
#
# Testing Graph
#

# encode audio feature
logit = get_logit(x, voca_size=voca_size)

# CTC loss
loss = logit.sg_ctc(target=y, seq_len=seq_len)

#
# run network
#

with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

    # logging
    tf.sg_info('Testing started on %s set at global step[%08d].' %
            (tf.sg_arg().set.upper(), sess.run(tf.sg_global_step())))

    with tf.sg_queue_context():

        # create progress bar
Example #27
def generate():
    dev = '/cpu:0'
    with tf.device(dev):
        mydir = 'tfrc150char_wrd0704'
        files = [f for f in listdir(mydir) if isfile(join(mydir, f))]
        tfrecords_filename = [join(mydir, 'short_infer3.tfrecords')]
        tfrecords_filename_inf = [join(mydir, '11_3.tfrecords')]

        print(tfrecords_filename)
        filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                        num_epochs=num_epochs,
                                                        shuffle=True,
                                                        capacity=1)
        infer_queue = tf.train.string_input_producer(tfrecords_filename_inf,
                                                     num_epochs=num_epochs,
                                                     shuffle=True,
                                                     capacity=1)

        optim = tf.train.AdamOptimizer(learning_rate=0.0001,
                                       beta1=0.9,
                                       beta2=0.99)

        # Calculate the gradients for each model tower.
        tower_grads = []
        reuse_vars = False
        with tf.variable_scope("dec_lstm") as scp:
            dec_cell = BasicLSTMCell2(Hp.w_emb_size,
                                      Hp.rnn_hd,
                                      state_is_tuple=True)

        with tf.variable_scope("contx_lstm") as scp:
            cell = BasicLSTMCell2(Hp.hd, Hp.rnn_hd, state_is_tuple=True)
            rnn_cell = tf.contrib.rnn.DropoutWrapper(
                cell,
                input_keep_prob=Hp.keep_prob,
                output_keep_prob=Hp.keep_prob)

        (words, chars) = read_and_decode(filename_queue,
                                         Hp.batch_size * Hp.num_gpus)

        words_splits = tf.split(axis=0,
                                num_or_size_splits=Hp.num_gpus,
                                value=words)
        chars_splits = tf.split(axis=0,
                                num_or_size_splits=Hp.num_gpus,
                                value=chars)

        word_emb = np.loadtxt("glove300d_0704.txt")
        Hp.word_vs = word_emb.shape[0]

        # --------------------------------------------------------------------------------
        with tf.name_scope('%s_%d' % ("tower", 0)) as scope:
            rnn_state = tower_infer_enc(chars_splits[0],
                                        scope,
                                        rnn_cell,
                                        dec_cell,
                                        word_emb,
                                        out_reuse_vars=False,
                                        dev='/cpu:0')

            chars_pl = tf.placeholder(tf.int32, shape=(None, Hp.c_maxlen))
            rnn_state_pl1 = [
                tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd)),
                tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd))
            ]
            rnn_state_pl = tf.contrib.rnn.LSTMStateTuple(
                rnn_state_pl1[0], rnn_state_pl1[1])

            final_ids, rnn_state_dec = tower_infer_dec(chars_pl,
                                                       scope,
                                                       rnn_cell,
                                                       dec_cell,
                                                       word_emb,
                                                       rnn_state_pl,
                                                       out_reuse_vars=False,
                                                       dev='/cpu:0')

        # --------------------------------------------------------------------------------

        saver = tf.train.Saver(tf.trainable_variables())
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        session_config.gpu_options.per_process_gpu_memory_fraction = 0.94

        session_config.gpu_options.allow_growth = False

        restore_dir = 'tnsrbrd/hin17d08m_1313g2'  #   lec30d07m_1634g2   lec04d07m_2006g2     lec28d07m_1221g2    lec31d07m_1548g2
        csv_file = join(restore_dir, time.strftime("hin%dd%mm_%H%M.csv"))
        csv_f = open(csv_file, 'a')
        csv_writer = csv.writer(csv_f)

        with tf.Session(config=session_config) as sess:
            sess.run(
                tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer()))

            tf.train.start_queue_runners(sess=sess)
            saver.restore(sess,
                          tf.train.latest_checkpoint(
                              join(restore_dir,
                                   'last_chpt')))  #    lec04d07m_2006g2

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for ep in range(num_epochs):

                tf.sg_set_infer(sess)
                rnn_state_val, w_txt, ch_txt = sess.run(
                    [rnn_state, words_splits[0], chars_splits[0]],
                    feed_dict={Hp.keep_prob: 1.0})

                predictions = []  #[w_txt[:,2,:]]
                for idx in range(3):
                    char_inpt = word2char_ids(
                        ids_val) if idx != 0 else ch_txt[:, 2, :]
                    ids_val, rnn_state_val = sess.run(
                        [final_ids, rnn_state_dec],
                        feed_dict={
                            Hp.keep_prob: 1.0,
                            rnn_state_pl1[0]: rnn_state_val[0],
                            rnn_state_pl1[1]: rnn_state_val[1],
                            chars_pl: char_inpt
                        })
                    temp = np.zeros((Hp.batch_size, Hp.w_maxlen))
                    for b in range(Hp.batch_size):
                        stop_ind = np.where(ids_val[b] == 2)[0]
                        if stop_ind.size > 0:
                            stop_ind = stop_ind[0]
                            ids_val[b, stop_ind +
                                    1:] = ids_val[b, stop_ind + 1:] * 0
                    temp[:, :ids_val.shape[1]] = ids_val
                    predictions.append(temp)

                # predictions are decode_sent x b x w_maxlen
                predictions = np.array(predictions)
                in_batches = [w_txt[b, :, :] for b in range(Hp.batch_size)]
                res_batches = [
                    predictions[:, b, :] for b in range(Hp.batch_size)
                ]

                for b in range(Hp.batch_size):
                    in_paragraph = idxword2txt(in_batches[b])
                    print("\n INPUT SAMPLE \n")
                    print(in_paragraph)

                    res_paragraph = idxword2txt(res_batches[b])
                    print("\n RESULTS \n")
                    print(res_paragraph)

                    csv_writer.writerow([
                        " ".join(in_paragraph[:3]), " ".join(in_paragraph[3:]),
                        " ".join(res_paragraph)
                    ])

            csv_f.close()
Example #28
# convert the CTC decode result to a dense tensor
y = tf.sparse_to_dense(decoded[0].indices, decoded[0].dense_shape,
                       decoded[0].values) + 1

# recognize audio file

# command-line option for the path of the audio file to recognize
tf.sg_arg_def(file=('', 'speech wave file to recognize.'))

# load audio file
file = sys.argv[1]
wav, sr = librosa.load(file, mono=True, sr=16000)

# extract mfcc feature
mfcc = np.transpose(np.expand_dims(librosa.feature.mfcc(wav, 16000), axis=0),
                    [0, 2, 1])

# run network
with tf.Session() as sess:

    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

    # run session
    label = sess.run(y, feed_dict={x: mfcc})

    # print label
    data.print_index(label)
Example #29
def test2():
    '''
    Predicts sequentially.
    '''
    X, Y = preprocess()
    g = Graph(is_train=False)

    with tf.Session() as sess:
        tf.sg_init(sess)

        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        total_blanks, total_hits = 0, 0
        for x_3d, y_2d in zip(X, Y):  # problem-wise x: (9, 9, 1), y: (9, 9)
            x_2d = np.squeeze(x_3d, -1)  #(9, 9)
            x_4d = np.expand_dims(x_3d, 0)  # (1, 9, 9, 1)
            _x_2d = np.copy(x_2d)  # (9, 9)
            while 1:
                logits = sess.run(g.logits,
                                  {g.X: x_4d})  # (1, 9, 9, 10) float32

                def softmax(x):
                    """Compute softmax values for each sets of scores in x."""
                    e_x = np.exp(x - np.max(x, -1, keepdims=True))
                    return e_x / e_x.sum(axis=-1, keepdims=True)

                activated = softmax(logits)  # (1, 9, 9, 10) float32
                preds = np.squeeze(np.argmax(activated, axis=-1),
                                   0)  # (9, 9) # most probable numbers
                preds_prob = np.squeeze(
                    np.max(activated, axis=-1),
                    0)  # (9, 9) # highest probabilities for blanks
                preds_prob = np.where(x_2d == 0, preds_prob, 0)  # (9, 9)

                top1 = np.argmax(
                    preds_prob
                )  # the index of the most confident number amongst all predictions
                ind = np.unravel_index(top1, (9, 9))
                got = preds[ind]  # the most confident number
                x_2d[ind] = got  # result

                x_4d = np.expand_dims(np.expand_dims(x_2d, 0), -1)

                if len(x_2d[x_2d == 0]) == 0:
                    expected = y_2d[_x_2d == 0]
                    got = x_2d[_x_2d == 0]
                    hits = np.equal(expected, got).sum()

                    result = np.where(_x_2d == 0, x_2d, y_2d).astype(int)

                    print(result)
                    print("Acc.=%d/%d=%.2f" % (hits, len(expected),
                                               float(hits) / len(expected)))

                    total_blanks += len(expected)
                    total_hits += hits
                    break

        print "Total Accuracy = %d/%d=%.2f" % (
            total_hits, total_blanks, float(total_hits) / total_blanks)
Example #30
    def wrapper(**kwargs):
        opt = tf.sg_opt(kwargs)

        # default training options
        opt += tf.sg_opt(lr=0.001,
                         save_dir='asset/train',
                         max_ep=1000,
                         ep_size=100000,
                         save_interval=600,
                         log_interval=60,
                         early_stop=True,
                         lr_reset=False,
                         eval_metric=[],
                         max_keep=5,
                         keep_interval=1,
                         tqdm=True,
                         console_log=False)

        # make directory if not exist
        if not os.path.exists(opt.save_dir + '/log'):
            os.makedirs(opt.save_dir + '/log')
        if not os.path.exists(opt.save_dir + '/ckpt'):
            os.makedirs(opt.save_dir + '/ckpt')

        # find last checkpoint
        last_file = tf.train.latest_checkpoint(opt.save_dir + '/ckpt')
        if last_file:
            ep = start_ep = int(last_file.split('-')[1]) + 1
            start_step = int(last_file.split('-')[2])
        else:
            ep = start_ep = 1
            start_step = 0

        # checkpoint saver
        saver = tf.train.Saver(max_to_keep=opt.max_keep,
                               keep_checkpoint_every_n_hours=opt.keep_interval)

        # summary writer
        summary_writer = tf.train.SummaryWriter(opt.save_dir + '/log',
                                                graph=tf.get_default_graph())

        # add learning rate summary
        with tf.name_scope('summary'):
            tf.scalar_summary('60. learning_rate/learning_rate',
                              _learning_rate)

        # add evaluation metric summary
        for m in opt.eval_metric:
            tf.sg_summary_metric(m)

        # summary op
        summary_op = tf.merge_all_summaries()

        # create session
        if opt.sess:
            sess = opt.sess
        else:
            # session with multiple GPU support
            sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
            # initialize variables
            sg_init(sess)

        # restore last checkpoint
        if last_file:
            saver.restore(sess, last_file)

        # set learning rate
        if start_ep == 1 or opt.lr_reset:
            sess.run(_learning_rate.assign(opt.lr))

        # logging
        tf.sg_info('Training started from epoch[%03d]-step[%d].' %
                   (start_ep, start_step))

        try:
            # start data queue runner
            with tf.sg_queue_context(sess):

                # set session mode to train
                tf.sg_set_train(sess)

                # loss history for learning rate decay
                loss, loss_prev, early_stopped = None, None, False

                # time stamp for saving and logging
                last_saved = last_logged = time.time()

                # epoch loop
                for ep in range(start_ep, opt.max_ep + 1):

                    # show progressbar
                    if opt.tqdm:
                        iterator = tqdm(range(opt.ep_size),
                                        desc='train',
                                        ncols=70,
                                        unit='b',
                                        leave=False)
                    else:
                        iterator = range(opt.ep_size)

                    # batch loop
                    for _ in iterator:

                        # call train function
                        batch_loss = func(sess, opt)

                        # loss history update
                        if batch_loss is not None:
                            if loss is None:
                                loss = np.mean(batch_loss)
                            else:
                                loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                        # saving
                        if time.time() - last_saved > opt.save_interval:
                            last_saved = time.time()
                            saver.save(sess,
                                       opt.save_dir + '/ckpt/model-%03d' % ep,
                                       write_meta_graph=False,
                                       global_step=sess.run(
                                           tf.sg_global_step()))

                        # logging
                        if time.time() - last_logged > opt.log_interval:
                            last_logged = time.time()

                            # set session mode to infer
                            tf.sg_set_infer(sess)

                            # run evaluation op
                            if len(opt.eval_metric) > 0:
                                sess.run(opt.eval_metric)

                            if opt.console_log:  # console logging
                                # log epoch information
                                tf.sg_info(
                                    '\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s'
                                    % (ep, sess.run(_learning_rate),
                                       sess.run(tf.sg_global_step()),
                                       ('NA' if loss is None else '%8.6f' %
                                        loss)))
                            else:  # tensorboard logging
                                # run logging op
                                summary_writer.add_summary(
                                    sess.run(summary_op),
                                    global_step=sess.run(tf.sg_global_step()))

                            # learning rate decay
                            if opt.early_stop and loss_prev:
                                # if loss stalling
                                if loss >= 0.95 * loss_prev:
                                    # early stopping
                                    current_lr = sess.run(_learning_rate)
                                    if current_lr < 5e-6:
                                        early_stopped = True
                                        break
                                    else:
                                        # decrease learning rate by half
                                        sess.run(
                                            _learning_rate.assign(current_lr /
                                                                  2.))

                            # update loss history
                            loss_prev = loss

                            # revert session mode to train
                            tf.sg_set_train(sess)

                    # log epoch information
                    if not opt.console_log:
                        tf.sg_info(
                            '\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                            (ep, sess.run(_learning_rate),
                             sess.run(tf.sg_global_step()),
                             ('NA' if loss is None else '%8.6f' % loss)))

                    if early_stopped:
                        tf.sg_info('\tEarly stopped ( no loss progress ).')
                        break
        finally:
            # save last epoch
            saver.save(sess,
                       opt.save_dir + '/ckpt/model-%03d' % ep,
                       write_meta_graph=False,
                       global_step=sess.run(tf.sg_global_step()))

            # set session mode to infer
            tf.sg_set_infer(sess)

            # logging
            tf.sg_info('Training finished at epoch[%d]-step[%d].' %
                       (ep, sess.run(tf.sg_global_step())))

            # close session
            if opt.sess is None:
                sess.close()
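This wrapper appears to be the training-loop body behind sugartensor's high-level trainer. A minimal sketch of driving it through `tf.sg_train`, mirroring Example #8; `loss_tensor` is assumed to be a scalar loss tensor already built in the graph:

tf.sg_train(loss=loss_tensor,
            max_ep=25,
            save_dir='asset/train',
            ep_size=Mnist.train.num_batch,
            log_interval=10)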