Example #1
def sg_init(sess):
    r""" Initializes session variables.

    Args:
      sess: Session to initialize.
    """
    # initialize variables
    sess.run(tf.group(tf.global_variables_initializer(),
                      tf.local_variables_initializer()))
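Examples #2 and #4 below rely on sugartensor helpers (tf.sg_phase, tf.sg_restore), and this sg_init follows the same pattern, so it is presumably meant to be called right after a session is created. A minimal usage sketch, assuming the sugartensor convention of importing the library as tf:

import sugartensor as tf  # assumption: sugartensor re-exports TensorFlow and exposes sg_init on tf

sess = tf.Session()
tf.sg_init(sess)  # runs the global and local variable initializers in one call
# ... run training or inference ops against sess ...
sess.close()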
Example #2
def genIt(name='bird'):
    z = tf.random_normal((batch_size, rand_dim))
    gen = generator(z)
    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.sg_phase().assign(False)))
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])
        fake_features = []
        for i in range(100):
            fake_features.append(sess.run(gen))
    np.save('../data/fake_' + name + '_negative.npy',
            np.array(fake_features).reshape((-1, 4096)))
Example #3
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[1070, 3], [1070, 3]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    print(x.shape, y.shape)
    x = np.loadtxt('EM.txt', dtype='float32') / 1500
    y = np.loadtxt('FM.txt', dtype='float32')[:, :100] / 1500
    x = tf.convert_to_tensor(np.expand_dims(np.rollaxis(x, axis=0), axis=0))
    y = tf.convert_to_tensor(np.expand_dims(np.rollaxis(y, axis=0), axis=0))

    print(x.shape, y.shape)

    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Example #4
def testIt():
    data = raw
    positive = np.array(data.label_train) > 0
    x = tf.placeholder(tf.float32, [None, 4096])
    y = tf.placeholder(tf.float32)
    disc_real = discriminator(x)
    accuracy = tf.reduce_mean(
        tf.cast(tf.equal(tf.cast(disc_real > 0.5, "float"), y), tf.float32))
    np.set_printoptions(precision=3, suppress=True)
    with tf.Session() as sess:
        sess.run(
            tf.group(tf.global_variables_initializer(),
                     tf.sg_phase().assign(False)))
        # restore parameters
        tf.sg_restore(sess,
                      tf.train.latest_checkpoint('asset/train/gan'),
                      category=['generator', 'discriminator'])
        ans = sess.run(disc_real, feed_dict={x: np.array(data.test)})
        print(np.sum(ans > 0.5))
        np.save('dm_bird.npy', ans)
Example #5
def test(tfname, weightPaths, steps=100000, Var=["NNReg"], lll=2000):
    tf.Graph()
    x, y = read_from_tfrecords(tfname, ["source", "target"], 10,
                               [[91, 2], [91, 2]])
    global_step = tf.Variable(1, trainable=False, name='global_step')
    yp = Net(x, x, y) + x
    tmp_var_list = {}
    for j in Var:
        for i in tf.global_variables():
            if i.name.startswith(j):
                tmp_var_list[i.name[:-2]] = i

    saver = tf.train.Saver(tmp_var_list)
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    path = weightPaths + "model.ckpt-{}".format(steps)

    Sour = []
    Targ = []
    Trans_S = []

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        saver.restore(sess, path)
        for i in tqdm.tqdm(range(lll)):
            S, T, TS = sess.run([x, y, yp])
            Sour.append(S)
            Targ.append(T)
            Trans_S.append(TS)

        coord.request_stop()
        coord.join(threads)

    return Sour, Targ, Trans_S
Example #6
def generate():
    dev = '/cpu:0'
    with tf.device(dev):
        mydir = 'tfrc150char_wrd0704'
        files = [f for f in listdir(mydir) if isfile(join(mydir, f))]
        tfrecords_filename = []
        tfrecords_filename = [join(mydir, 'short_infer3.tfrecords')
                              ]  #[join(mydir, f) for f in tfrecords_filename]
        tfrecords_filename_inf = [join(mydir, '11_3.tfrecords')]

        print(tfrecords_filename)
        filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                        num_epochs=num_epochs,
                                                        shuffle=True,
                                                        capacity=1)
        infer_queue = tf.train.string_input_producer(tfrecords_filename_inf,
                                                     num_epochs=num_epochs,
                                                     shuffle=True,
                                                     capacity=1)

        optim = tf.train.AdamOptimizer(learning_rate=0.0001,
                                       beta1=0.9,
                                       beta2=0.99)

        # Calculate the gradients for each model tower.
        tower_grads = []
        reuse_vars = False
        with tf.variable_scope("dec_lstm") as scp:
            dec_cell = BasicLSTMCell2(Hp.w_emb_size,
                                      Hp.rnn_hd,
                                      state_is_tuple=True)

        with tf.variable_scope("contx_lstm") as scp:
            cell = BasicLSTMCell2(Hp.hd, Hp.rnn_hd, state_is_tuple=True)
            rnn_cell = tf.contrib.rnn.DropoutWrapper(
                cell,
                input_keep_prob=Hp.keep_prob,
                output_keep_prob=Hp.keep_prob)

        (words, chars) = read_and_decode(filename_queue,
                                         Hp.batch_size * Hp.num_gpus)

        words_splits = tf.split(axis=0,
                                num_or_size_splits=Hp.num_gpus,
                                value=words)
        chars_splits = tf.split(axis=0,
                                num_or_size_splits=Hp.num_gpus,
                                value=chars)

        word_emb = np.loadtxt("glove300d_0704.txt")
        Hp.word_vs = word_emb.shape[0]

        # --------------------------------------------------------------------------------
        with tf.name_scope('%s_%d' % ("tower", 0)) as scope:
            rnn_state = tower_infer_enc(chars_splits[0],
                                        scope,
                                        rnn_cell,
                                        dec_cell,
                                        word_emb,
                                        out_reuse_vars=False,
                                        dev='/cpu:0')

            chars_pl = tf.placeholder(tf.int32, shape=(None, Hp.c_maxlen))
            rnn_state_pl1 = [
                tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd)),
                tf.placeholder(tf.float32, shape=(None, Hp.rnn_hd))
            ]
            rnn_state_pl = tf.contrib.rnn.LSTMStateTuple(
                rnn_state_pl1[0], rnn_state_pl1[1])

            final_ids, rnn_state_dec = tower_infer_dec(chars_pl,
                                                       scope,
                                                       rnn_cell,
                                                       dec_cell,
                                                       word_emb,
                                                       rnn_state_pl,
                                                       out_reuse_vars=False,
                                                       dev='/cpu:0')

        # --------------------------------------------------------------------------------

        saver = tf.train.Saver(tf.trainable_variables())
        session_config = tf.ConfigProto(allow_soft_placement=True,
                                        log_device_placement=False)
        session_config.gpu_options.per_process_gpu_memory_fraction = 0.94

        session_config.gpu_options.allow_growth = False

        restore_dir = 'tnsrbrd/hin17d08m_1313g2'  #   lec30d07m_1634g2   lec04d07m_2006g2     lec28d07m_1221g2    lec31d07m_1548g2
        csv_file = join(restore_dir, time.strftime("hin%dd%mm_%H%M.csv"))
        csv_f = open(csv_file, 'a')
        csv_writer = csv.writer(csv_f)

        with tf.Session(config=session_config) as sess:
            sess.run(
                tf.group(tf.global_variables_initializer(),
                         tf.local_variables_initializer()))

            tf.train.start_queue_runners(sess=sess)
            saver.restore(sess,
                          tf.train.latest_checkpoint(
                              join(restore_dir,
                                   'last_chpt')))  #    lec04d07m_2006g2

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            for ep in range(num_epochs):

                tf.sg_set_infer(sess)
                rnn_state_val, w_txt, ch_txt = sess.run(
                    [rnn_state, words_splits[0], chars_splits[0]],
                    feed_dict={Hp.keep_prob: 1.0})

                predictions = []  #[w_txt[:,2,:]]
                for idx in range(3):
                    char_inpt = word2char_ids(
                        ids_val) if idx != 0 else ch_txt[:, 2, :]
                    ids_val, rnn_state_val = sess.run(
                        [final_ids, rnn_state_dec],
                        feed_dict={
                            Hp.keep_prob: 1.0,
                            rnn_state_pl1[0]: rnn_state_val[0],
                            rnn_state_pl1[1]: rnn_state_val[1],
                            chars_pl: char_inpt
                        })
                    temp = np.zeros((Hp.batch_size, Hp.w_maxlen))
                    for b in range(Hp.batch_size):
                        stop_ind = np.where(ids_val[b] == 2)[0]
                        if stop_ind.size > 0:
                            stop_ind = stop_ind[0]
                            ids_val[b, stop_ind +
                                    1:] = ids_val[b, stop_ind + 1:] * 0
                    temp[:, :ids_val.shape[1]] = ids_val
                    predictions.append(temp)

                # predictions are decode_sent x b x w_maxlen
                predictions = np.array(predictions)
                in_batches = [w_txt[b, :, :] for b in range(Hp.batch_size)]
                res_batches = [
                    predictions[:, b, :] for b in range(Hp.batch_size)
                ]

                for b in range(Hp.batch_size):
                    in_paragraph = idxword2txt(in_batches[b])
                    print("\n INPUT SAMPLE \n")
                    print(in_paragraph)

                    res_paragraph = idxword2txt(res_batches[b])
                    print("\n RESULTS \n")
                    print(res_paragraph)

                    csv_writer.writerow([
                        " ".join(in_paragraph[:3]), " ".join(in_paragraph[3:]),
                        " ".join(res_paragraph)
                    ])

            csv_f.close()
Example #7
File: train.py Project: zikai1/CPD-Net
def train():
    tf.Graph()
    tf.set_random_seed(888)
    print("*****************************************")
    print("Training started with random seed: {}".format(111))
    print("Batch started with random seed: {}".format(111))
    
    #read data
    x,y=read_from_tfrecords(tfname,
                                 ["source","target"], batSize, [[s1,2],[s2,2]])
    global_step = tf.Variable(1, trainable=False,name='global_step')
    yp=Net(x,x,y)+x    
    Loss=chamfer_loss(yp,y)    

    # Learning rate
    lr = tf.train.exponential_decay(learningRate, global_step, batSize,
                                    learningRateDecay, staircase=False)
    # Optimization algorithm
    train_step = tf.train.AdamOptimizer(learning_rate=lr,
                                        beta1=adam_beta1,
                                        beta2=adam_beta2).minimize(
                                            Loss, global_step=global_step)

    saver = tf.train.Saver(max_to_keep=int(maxKeepWeights))
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    
    # Continue training from existing weights
    if len(conWeightPath) > 0:
        print("Continue Training...")
        tmp_var_list = {}
        if len(conWeightVar) == 0:
            print("For all variables")
            globals()['conWeightVar'] = {''}
        else:
            print("Training variables: {}".format(conWeightVar))

        for j in conWeightVar:
            for i in tf.global_variables():
                if i.name.startswith(j):
                    tmp_var_list[i.name[:-2]] = i
        saver1 = tf.train.Saver(tmp_var_list)
    
    # Training
    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # Read weights
        if len(conWeightPath) > 0:
            print(conWeightPath)
            if stepsContinue == -1:
                STEPS = sorted([
                    int(i.split("/")[-1].split(".")[1].split("-")[-1])
                    for i in glob.glob(conWeightPath + "/*meta")
                ])
                print("Available checkpoint steps:", STEPS)
                globals()['stepsContinue'] = STEPS[-1]

            wtt = glob.glob(conWeightPath +
                            "/*{}*meta".format(stepsContinue))[0][:-5]
            print("Reading Weight: {}".format(wtt))
            saver1.restore(sess, wtt)
            print('Weight is successfully updated from: {}'.format(wtt))
        stepst = sess.run(global_step)
        for t in tqdm.tqdm(range(stepst, int(maxStep) + 1)):
            _ = sess.run([train_step])
            if t % saveStep == 0:
                if not os.path.exists(dirSave):
                    os.makedirs(dirSave)
                saver.save(sess, dirSave + '/model.ckpt', global_step=t)
        coord.request_stop()
        coord.join(threads)
Example #8
File: nn.py Project: jackyzha0/vybe
h_2 = tf.nn.tanh(tf.matmul(h_1, W_2) + b_2)

W_3 = tf.Variable(
    tf.random_normal([n_hidden_units_two, n_hidden_units_three],
                     mean=0,
                     stddev=sd))
b_3 = tf.Variable(tf.random_normal([n_hidden_units_three], mean=0, stddev=sd))
h_3 = tf.nn.sigmoid(tf.matmul(h_2, W_3) + b_3)

W = tf.Variable(
    tf.random_normal([n_hidden_units_three, num_classes], mean=0, stddev=sd))
b = tf.Variable(tf.random_normal([num_classes], mean=0, stddev=sd))
with tf.name_scope('out'):
    y_ = tf.nn.softmax(tf.matmul(h_3, W) + b, name="out")

init = tf.global_variables_initializer()

cost_function = tf.reduce_mean(
    -tf.reduce_sum(Y * tf.log(y_), reduction_indices=[1]))
#optimizer = tf.train.RMSPropOptimizer(learning_rate,decay=0.9,momentum=0.9,centered=True).minimize(cost_function)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(
    cost_function)

correct_prediction = tf.equal(tf.argmax(y_, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

cost_history = np.empty(shape=[1], dtype=float)
acc_history = np.empty(shape=[1], dtype=float)
t_cost_history = np.empty(shape=[1], dtype=float)
t_acc_history = np.empty(shape=[1], dtype=float)
y_true, y_pred = None, None
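The nn.py fragment above only builds the graph: init, optimizer, and accuracy are TensorFlow ops that do nothing until they are run in a session. A minimal training-loop sketch follows; the input placeholder X (which feeds h_1), the label placeholder Y, the arrays train_x / train_y, and training_epochs are assumptions here, defined elsewhere in nn.py and not part of the excerpt.

# Sketch only: X, Y, train_x, train_y and training_epochs are assumed to be
# defined outside the excerpt above.
with tf.Session() as sess:
    sess.run(init)  # executes tf.global_variables_initializer()
    for epoch in range(training_epochs):
        _, c = sess.run([optimizer, cost_function],
                        feed_dict={X: train_x, Y: train_y})
        cost_history = np.append(cost_history, c)
    print("Train accuracy:",
          sess.run(accuracy, feed_dict={X: train_x, Y: train_y}))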
Example #9
def train_loop():
    with tf.device("/cpu:0"):
        # Launch the graph
        with tf.Session(graph=graph, config=config) as sess:
            print("Starting Tensorboard...")
            initstart = time.time()
            train_writer = tf.summary.FileWriter(logs_path + '/TRAIN',
                                                 graph=sess.graph)
            test_writer = tf.summary.FileWriter(logs_path + '/TEST',
                                                graph=sess.graph)

            run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE,
                                        output_partition_graphs=True)
            run_metadata = tf.RunMetadata()
            tf.global_variables_initializer().run()
            saver = tf.train.Saver()
            #Load paths
            for curr_epoch in range(num_epochs):
                print('>>>', time.strftime('[%H:%M:%S]'), 'Epoch',
                      curr_epoch + 1, '/', num_epochs)
                train_cost = train_ler = 0
                start = t_time = time.time()
                index_list = range(0, datasetsize)
                for batch in range(num_batches_per_epoch):
                    # Getting the index
                    indexes = random.sample(index_list, batchsize)
                    index_list = [x for x in index_list if x not in indexes]
                    train_inputs = next_miniBatch(indexes, dr[0])
                    train_targets = next_target_miniBatch(indexes, dr[1])
                    #train_inputs,train_targets = fake_data(num_examples,num_mfccs,num_classes-1)
                    newindex = [i % num_examples for i in range(batchsize)]
                    random.shuffle(newindex)

                    batch_train_inputs = train_inputs[newindex]
                    # Padding input to max_time_step of this batch
                    batch_train_inputs, batch_train_seq_len = pad_sequences(
                        batch_train_inputs)

                    #for x in range(batchsize):
                    #    print('>>>'+str(x)+':    ',train_targets[newindex][x].size,batch_train_seq_len[x],dr[0][x])
                    #    print(decode_to_chars(train_targets[newindex][x]))
                    #if train_targets[newindex][x].size > batch_train_seq_len[x]:
                    # Converting to sparse representation so as to to feed SparseTensor input
                    batch_train_targets = sparse_tuple_from(
                        train_targets[newindex])
                    #saveImg(batch_train_inputs)
                    feed = {
                        inputs: batch_train_inputs,
                        targets: batch_train_targets,
                        seq_len: batch_train_seq_len
                    }
                    batch_cost, _, l = sess.run(
                        [cost, train_optimizer, ler],
                        feed,
                        options=run_options)  #,run_metadata = run_metadata)
                    train_cost += batch_cost * batchsize
                    train_ler += l * batchsize
                    print('[' + str(curr_epoch) + ']', '  >>>',
                          time.strftime('[%H:%M:%S]'), 'Batch', batch + 1, '/',
                          num_batches_per_epoch, '@Cost', batch_cost,
                          'Time Elapsed',
                          time.time() - t_time, 's')
                    t_time = time.time()
                    if (batch % 16 == 0):
                        summary = sess.run(
                            merged, feed_dict=feed,
                            options=run_options)  #,run_metadata=run_metadata)
                        train_writer.add_summary(
                            summary,
                            int(batch + (curr_epoch * num_batches_per_epoch)))
                        #train_writer.add_run_metadata(run_metadata, 'step%03d' % int(batch+(curr_epoch*num_batches_per_epoch)))
                        train_writer.flush()

                # Metrics mean
                train_cost /= num_examples
                train_ler /= num_examples
                #Testing
                print('>>>', time.strftime('[%H:%M:%S]'),
                      'Evaluating Test Accuracy...')
                t_index = random.sample(range(0, testsetsize), testbatchsize)
                test_inputs = next_miniBatch(t_index, t_dr[0], test=True)
                test_targets = next_target_miniBatch(t_index, t_dr[1])
                newindex = [i % testbatchsize for i in range(testbatchsize)]
                batch_test_inputs = test_inputs[newindex]
                batch_test_inputs, batch_test_seq_len = pad_sequences(
                    batch_test_inputs, test=True)
                batch_test_targets = sparse_tuple_from(test_targets[newindex])
                t_feed = {
                    inputs: batch_test_inputs,
                    targets: batch_test_targets,
                    seq_len: batch_test_seq_len
                }
                test_ler, d = sess.run(
                    (ler, decoded[0]), feed_dict=t_feed,
                    options=run_options)  #,run_metadata = run_metadata)
                dense_decoded = tf.sparse_tensor_to_dense(
                    d, default_value=-1).eval(session=sess)
                for i, seq in enumerate(dense_decoded):
                    seq = [s for s in seq if s != -1]
                    tmp_o = decode_to_chars(test_targets[i])
                    tmp_d = decode_to_chars(seq)
                    print('Sequence %d' % i)
                    print('\t Original:\n%s' % tmp_o)
                    print('\t Decoded:\n%s' % tmp_d)
                    #print('\t Corrected:\n%s' % tmp_corr)
                    print('Done!')
                log = "Epoch {}/{}  |  Batch Cost : {:.3f}  |  Train Accuracy : {:.3f}%  |  Test Accuracy : {:.3f}%  |  Time Elapsed : {:.3f}s"
                print(
                    log.format(curr_epoch + 1, num_epochs, train_cost,
                               100 - (train_ler * 100), 100 - (test_ler * 100),
                               time.time() - start))
                t_summary = sess.run(
                    merged, feed_dict=t_feed,
                    options=run_options)  #, run_metadata=run_metadata)
                test_writer.add_summary(
                    t_summary,
                    int(batch + (curr_epoch * num_batches_per_epoch)))
                #test_writer.add_run_metadata(run_metadata, 'step%03d' % int(batch+(curr_epoch*num_batches_per_epoch)))
                test_writer.flush()
                save_path = saver.save(sess, savepath + '/model')
                print(">>> Model saved succesfully")
            print('Total Training Time: ' + str(time.time() - initstart) + 's')
tfrecords_filename = []
for fl in files:
    if int(fl.split('.')[0].split('_')[1]) in Hp.par_maxlen:
        tfrecords_filename.append(fl)
tfrecords_filename = sorted(tfrecords_filename,
                            key=lambda x: int(x.split('.')[0].split('_')[0]) *
                            100 + int(x.split('.')[0].split('_')[1]))
tfrecords_filename = [join(mydir, 'short_3.tfrecords')]

c = 0
for fn in tfrecords_filename:
    for record in tf.python_io.tf_record_iterator(fn):
        c += 1

print(tfrecords_filename)
num_epochs = 1
filename_queue = tf.train.string_input_producer(tfrecords_filename,
                                                num_epochs=num_epochs,
                                                shuffle=False,
                                                capacity=1)

(words, chars) = read_and_decode(filename_queue, Hp.batch_size * Hp.num_gpus)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(coord=coord)
    wo = sess.run(words)
print(wo)