Example #1
import tensorflow as tf
import numpy as np
import tqdm


def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.reset_default_graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[1070, 3], [1070, 3]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    print(x.shape, y.shape)
    # The queue tensors above are overridden by fixed point sets loaded from disk.
    x = np.loadtxt('EM.txt', dtype='float32') / 1500
    y = np.loadtxt('FM.txt', dtype='float32')[:, :100] / 1500
    # Add a leading batch dimension of 1.
    x = tf.convert_to_tensor(np.expand_dims(x, axis=0))
    y = tf.convert_to_tensor(np.expand_dims(y, axis=0))

    print(x.shape, y.shape)

    # Predicted points: network output plus the source points (residual form).
    yp = Net(x, x, y) + x
    # Collect the variables to restore; strip the trailing ':0' from each
    # name so it can serve as a key in the tf.train.Saver dictionary.
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
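
The function restores only the variables whose names begin with one of the prefixes in Var, then runs the graph lll times and collects the source, target, and transformed-source arrays. A minimal, hypothetical call might look like this (the tfrecords path and checkpoint directory are placeholders, not taken from the source):

# Hypothetical usage; paths and step count are placeholders.
Sour, Targ, Trans_S = test("data/test.tfrecords", "weights/", steps=100000)
print(len(Sour))  # lll entries, one batch of source points per sess.run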
Example #2
import tensorflow as tf
import tqdm


def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.reset_default_graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[91, 2], [91, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Example #3
import tensorflow as tf

__author__ = '*****@*****.**'

#
# default float, int precision
#

sg_floatx = tf.float32
sg_intx = tf.int32
sg_eps = 1e-8

#
# global step
#

_global_step = tf.Variable(0, name='global_step', trainable=False)


def sg_global_step():
    r"""Gets global step count

    Returns:
      A 0-D `Tensor`.

    """
    global _global_step
    return _global_step


#
# global phase (train or infer) flag
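
Because _global_step lives at module level, every caller of sg_global_step() shares the same counter. A minimal sketch of reading it, assuming TF 1.x session semantics and that the module above is on the path:

# Minimal sketch, assuming TF 1.x session semantics.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(sg_global_step()))  # prints 0 until an optimizer increments it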
Example #4
# -*- coding: utf-8 -*-
import sugartensor as tf
import numpy as np
import os
import time
from tqdm import tqdm
from functools import wraps

__author__ = '*****@*****.**'

# global learning rate
_learning_rate = tf.Variable(0.001,
                             dtype=tf.sg_floatx,
                             name='learning_rate',
                             trainable=False)


def sg_train(**kwargs):
    opt = tf.sg_opt(kwargs)
    assert opt.loss is not None, 'loss is mandatory.'

    # default training options
    opt += tf.sg_opt(optim='MaxProp',
                     lr=0.001,
                     beta1=0.9,
                     beta2=0.99,
                     category='')

    # get optimizer
    train_op = sg_optim(opt.loss,
                        optim=opt.optim,
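
The opt += tf.sg_opt(...) line merges in defaults without overwriting options the caller already supplied. A plain-dict stand-in for that merge behavior, for illustration only (the helper name with_defaults is hypothetical, not sugartensor API):

# Hypothetical stand-in for sugartensor's option merging.
def with_defaults(opts, **defaults):
    merged = dict(defaults)
    merged.update(opts)  # caller-supplied values win over defaults
    return merged

opt = with_defaults({'loss': 'my_loss', 'lr': 0.01},
                    optim='MaxProp', lr=0.001, beta1=0.9, beta2=0.99, category='')
assert opt['lr'] == 0.01 and opt['optim'] == 'MaxProp'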
Example #5
File: train.py Project: zikai1/CPD-Net
import glob
import os

import tensorflow as tf
import tqdm


def train():
    tf.reset_default_graph()
    tf.set_random_seed(888)
    print("*****************************************")
    print("Training started with random seed: {}".format(888))

    # Read paired source/target point sets from the tfrecords file.
    x, y = read_from_tfrecords(tfname, ["source", "target"], batSize,
                               [[s1, 2], [s2, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    # Predicted points: network output plus the source points (residual form).
    yp = Net(x, x, y) + x
    Loss = chamfer_loss(yp, y)

    # Learning rate with exponential decay.
    lr = tf.train.exponential_decay(learningRate, global_step, batSize,
                                    learningRateDecay, staircase=False)
    # Optimizer: Adam.
    train_step = tf.train.AdamOptimizer(learning_rate=lr,
                                        beta1=adam_beta1,
                                        beta2=adam_beta2).minimize(
                                            Loss, global_step=global_step)

    saver = tf.train.Saver(max_to_keep=int(maxKeepWeights))
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    
    # Optionally continue training from an existing checkpoint.
    if len(conWeightPath) > 0:
        print("Continue Training...")
        tmp_var_list = {}
        if len(conWeightVar) == 0:
            print("For all variables")
            # Every variable name starts with '', so all variables are restored.
            globals()['conWeightVar'] = {''}
        else:
            print("Training variables: {}".format(conWeightVar))

        # Strip the trailing ':0' from each name to key the Saver dictionary.
        for j in conWeightVar:
            for i in tf.global_variables():
                if i.name.startswith(j):
                    tmp_var_list[i.name[:-2]] = i
        saver1 = tf.train.Saver(tmp_var_list)
    
    # Training session.
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Restore weights when continuing from a checkpoint.
        if len(conWeightPath) > 0:
            print(conWeightPath)
            if stepsContinue == -1:
                # No step given: pick the latest checkpoint step in the directory.
                STEPS = sorted([int(i.split("/")[-1].split(".")[1].split("-")[-1])
                                for i in glob.glob(conWeightPath + "/*meta")])
                print("Available checkpoint steps:", STEPS)
                globals()['stepsContinue'] = STEPS[-1]

            wtt = glob.glob(conWeightPath + "/*{}*meta".format(stepsContinue))[0][:-5]
            print("Reading Weight: {}".format(wtt))
            saver1.restore(sess, wtt)
            print('Weight is successfully updated from: {}'.format(wtt))

        stepst = sess.run(global_step)
        for t in tqdm.tqdm(range(stepst, int(maxStep) + 1)):
            _ = sess.run([train_step])
            if t % saveStep == 0:
                if not os.path.exists(dirSave):
                    os.makedirs(dirSave)
                saver.save(sess, dirSave + '/model.ckpt', global_step=t)
        coord.request_stop()
        coord.join(threads)
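
chamfer_loss(yp, y) is defined elsewhere in the project; CPD-Net trains against the Chamfer distance between the transformed source and the target point sets. Below is a minimal sketch of such a loss, assuming both tensors are shaped [batch, n_points, dim]; it is an illustration, not the repository's exact implementation:

import tensorflow as tf

# Minimal Chamfer-distance sketch; not the repository's exact implementation.
def chamfer_loss_sketch(a, b):
    # a: [batch, n, d], b: [batch, m, d]
    diff = tf.expand_dims(a, 2) - tf.expand_dims(b, 1)   # [batch, n, m, d]
    dist = tf.reduce_sum(tf.square(diff), axis=-1)       # pairwise squared distances
    # Average nearest-neighbour distance in both directions.
    return (tf.reduce_mean(tf.reduce_min(dist, axis=2)) +
            tf.reduce_mean(tf.reduce_min(dist, axis=1)))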
Example #6
File: nn.py Project: jackyzha0/vybe
    return ret


def t_get_indices(batchsize):
    index = np.arange(batchsize)
    np.random.shuffle(index)
    return index


## Network definition: three fully connected tanh layers
sd = 1 / np.sqrt(num_features)  # stddev for weight initialization
with tf.name_scope('input'):
    X = tf.placeholder(tf.float32, [None, num_features], name="x_inp")
    Y = tf.placeholder(tf.float32, [None, num_classes], name="y_inp")

W_1 = tf.Variable(
    tf.random_normal([num_features, n_hidden_units_one], mean=0, stddev=sd))
b_1 = tf.Variable(tf.random_normal([n_hidden_units_one], mean=0, stddev=sd))
h_1 = tf.nn.tanh(tf.matmul(X, W_1) + b_1)

W_2 = tf.Variable(
    tf.random_normal([n_hidden_units_one, n_hidden_units_two],
                     mean=0,
                     stddev=sd))
b_2 = tf.Variable(tf.random_normal([n_hidden_units_two], mean=0, stddev=sd))
h_2 = tf.nn.tanh(tf.matmul(h_1, W_2) + b_2)

W_3 = tf.Variable(
    tf.random_normal([n_hidden_units_two, n_hidden_units_three],
                     mean=0,
                     stddev=sd))
b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean=0, stddev=sd))
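
The excerpt stops after defining the third layer's weights and biases. A classifier built this way usually finishes with the third activation, an output projection, and a cross-entropy loss; the following is a sketch under that assumption (h_3, W_out, b_out, and loss are hypothetical names, not from the excerpt):

# Hypothetical continuation, not part of the excerpt.
h_3 = tf.nn.tanh(tf.matmul(h_2, W_3) + b_3)
W_out = tf.Variable(tf.random_normal([n_hidden_units_three, num_classes],
                                     mean=0, stddev=sd))
b_out = tf.Variable(tf.random_normal([num_classes], mean=0, stddev=sd))
logits = tf.matmul(h_3, W_out) + b_out
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))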
Example #7
        with tf.name_scope('cellStack'):
            stack = tf.contrib.rnn.MultiRNNCell(
                [lstm_cell() for _ in range(num_layers)], state_is_tuple=True)
            outputs, _ = tf.nn.dynamic_rnn(stack,
                                           inputs,
                                           seq_len,
                                           dtype=tf.float32)
        shape = tf.shape(inputs)
        batch_s, TF_max_timesteps = shape[0], shape[1]

        with tf.name_scope('outputs'):
            outputs = tf.reshape(outputs, [-1, num_hidden])

        with tf.name_scope('weights'):
            W = tf.Variable(tf.truncated_normal([num_hidden, num_classes],
                                                stddev=0.1),
                            name='weights')
        with tf.name_scope('biases'):
            b = tf.get_variable("b",
                                initializer=tf.constant(0.,
                                                        shape=[num_classes]))

        with tf.name_scope('logits'):
            logits = tf.matmul(outputs, W) + b
            logits = tf.reshape(logits, [batch_s, -1, num_classes])
            logits = tf.transpose(logits, (1, 0, 2), name="out/logits")
        with tf.name_scope('loss'):
            loss = tf.nn.ctc_loss(targets,
                                  logits,
                                  seq_len,
                                  ctc_merge_repeated=True,
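
The excerpt cuts off inside the ctc_loss call. A typical completion closes the argument list and averages the per-sequence losses; the following sketch shows that common pattern, not necessarily this project's exact code:

            # Sketch of a typical completion of the call above.
            loss = tf.nn.ctc_loss(targets,
                                  logits,
                                  seq_len,
                                  ctc_merge_repeated=True)
            cost = tf.reduce_mean(loss)  # average CTC loss over the batch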