Example #1
import logging
import unittest

from parameterized import parameterized

# project-specific helpers; the import locations below are assumptions
from api.login_api import TestLoginApi
from utils import test_data, assert_common


class TestIhrmLogin(unittest.TestCase):
    def setUp(self):
        self.login_api = TestLoginApi()

    def tearDown(self):
        pass

    @parameterized.expand(test_data('./data/test_login.json'))
    def test01_login_success(self, data, httpcode, success, code, message):
        response = self.login_api.login(data)
        logging.info('Login result: %s', response.json())
        assert_common(httpcode, success, code, message, response, self)
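The test above feeds parameterized.expand from test_data('./data/test_login.json'), but the helper itself is not shown. A minimal sketch of such a loader, assuming the JSON file holds a list of objects with the five fields the test unpacks (both the file layout and the body below are assumptions, not the project's actual code):

import json


def test_data(json_path):
    # return (data, httpcode, success, code, message) tuples for parameterized.expand
    with open(json_path, encoding='utf-8') as f:
        cases = json.load(f)
    return [(c['data'], c['httpcode'], c['success'], c['code'], c['message'])
            for c in cases]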

Example #2
def main(verbose=False):
    path_train = '../../data/restaurants_train.tsv'
    path_test = '../../data/restaurants_gold.tsv'

    # let the user override the default data paths
    prompt = 'please input path to SemEval 2014 CoNLL formatted training data: '
    input_path_train = input(prompt)
    if input_path_train:
        path_train = input_path_train

    prompt = 'please input path to SemEval 2014 CoNLL formatted test data: '
    input_path_test = input(prompt)
    if input_path_test:
        path_test = input_path_test

    df_train, df_test = load_test_train(path_train, path_test)

    embed_prompt = "please enter path to 300-dimensional Word2vec Google News vectors: "
    w2v_path = input(embed_prompt)
    xu_embed_prompt = "please enter path to Xu et al.'s 100-d restaurant domain vectors: "
    xuv_path = input(xu_embed_prompt)
    print("Thank you. Please wait while embeddings load")

    # load the general-domain and restaurant-domain embeddings
    w2v, xuv = load_embeddings(w2v_path, xuv_path)

    X_train, X_valid, y_train, y_valid = training_data(w2v, xuv, df_train['word'], df_train['label'])
    X_test, label_index = test_data(w2v, xuv, df_test['word'], df_test['label'])

    # run the DE-CNN model
    batch_size = 128
    embedding_dims = X_train.shape[2]
    kernel_size = 5
    epochs = 200
    model = run_cnn(X_train, y_train, X_valid, y_valid, embedding_dims=embedding_dims,
                    batch_size=batch_size, kernel_size=kernel_size, epochs=epochs,
                    verbose=verbose)

    # predict labels for the test set
    predict(model, X_test, df_test, label_index)
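Since embedding_dims is read from X_train.shape[2], training_data and test_data presumably concatenate the 300-d Word2vec vector and the 100-d domain vector for each token into one representation. A minimal per-token sketch of that concatenation, assuming both embedding objects support gensim-style membership tests and lookup (the helper name and the zero-vector fallback are assumptions):

import numpy as np


def embed_token(w2v, xuv, token):
    # 300-d general vector + 100-d domain vector -> 400-d token representation
    general = w2v[token] if token in w2v else np.zeros(300, dtype=np.float32)
    domain = xuv[token] if token in xuv else np.zeros(100, dtype=np.float32)
    return np.concatenate([general, domain])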
Example #3
import time

import thriftpy
from thriftpy.rpc import make_client

# Optimize, infer and test_data are project helpers not shown in this snippet


def get_client():
    # the opening of this snippet was truncated; the IDL file name and the
    # function name get_client are assumptions
    partition_thrift = thriftpy.load('partition.thrift',
                                     module_name='partition_thrift')
    return make_client(partition_thrift.Partition, '127.0.0.1', 6000)


CLIENT = get_client()  # module-level client used by the __main__ block below


def file_info(filename):
    with open(filename, 'rb') as file:
        file_content = file.read()
    return {filename: file_content}


if __name__ == '__main__':
    # get time threshold
    threshold = float(input('Please input latency threshold: '))

    # get test data
    dataiter = test_data()
    images, labels = next(dataiter)  # built-in next(); .next() is Python 2 only

    start = time.time()

    # get partition point and exit point
    ep, pp = Optimize(threshold)
    print('Branch is %d, and partition point is %d' % (ep, pp))

    # infer left part
    out = infer(CLIENT, ep, pp, images)

    print('Left part of model inference complete.')

    # save intermediate for RPC process
    intermediate = out.detach().numpy()
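The snippet ends after converting the intermediate activation to a NumPy array. One way to ship it over the Thrift connection is to serialize it to raw bytes first; a sketch, in which the service method name partition is hypothetical (the real Partition interface is not shown):

import io

import numpy as np

buf = io.BytesIO()
np.save(buf, intermediate)              # serialize array, dtype and shape included
payload = buf.getvalue()
result = CLIENT.partition(payload, pp)  # hypothetical RPC to run the remaining layers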
Example #4
File: word2vec.py Project: atao99/HYML2020
import os
from gensim.models import word2vec
import numpy as np

from utils import train_data, test_data

DIM = 256

if __name__ == "__main__":
    print("Loading train data ...", flush=True)
    train_x1, _ = train_data("data/training_label.txt", True)
    train_x0 = train_data("data/training_nolabel.txt", False)
    
    print("Loading test data ...", flush=True)
    test_x = test_data("data/testing_data.txt")

    print("Word2Vec ...", flush=True)
    model = word2vec.Word2Vec(train_x1 + train_x0 + test_x, size=256, window=5, min_count=5, workers=12, iter=10, sg=1)
    
    print("Saving model ...", flush=True)
    model.save("w2v_model/w2v.model")
Example #5
# the top of this snippet was truncated; a plausible reconstruction of the
# standard GAN discriminator losses it ends with (fake_predict/real_predict
# are the discriminator logits on generated and real batches):
fake_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_predict,
                                            labels=tf.zeros_like(fake_predict)))
real_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=real_predict,
                                            labels=tf.ones_like(real_predict)))
dis_loss = fake_loss + real_loss

var_list = tf.trainable_variables()
g_var_list = [x for x in var_list if 'g_' in x.name]
d_var_list = [x for x in var_list if 'd_' in x.name]
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
    # separate Adam optimizers (lr=0.0002, beta1=0.5, beta2=0.9) per network
    g_opt = tf.train.AdamOptimizer(0.0002, 0.5, 0.9)
    d_opt = tf.train.AdamOptimizer(0.0002, 0.5, 0.9)
    optim_gen = g_opt.compute_gradients(gen_loss, var_list=g_var_list)
    optim_g = g_opt.apply_gradients(optim_gen)
    optim_dis = d_opt.compute_gradients(dis_loss, var_list=d_var_list)
    optim_d = d_opt.apply_gradients(optim_dis)

# build the sampling graph, reusing the trained generator weights
sample_gen = generator(x_place, reuse=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # restore weights from the latest checkpoint
    ckpt = tf.train.get_checkpoint_state(check_dir)
    saver.restore(sess, ckpt.model_checkpoint_path)

    for i in range(batch_idx):
        _, x = test_data(i)
        feed_dict = {x_place: x}
        sample_imgs = sess.run(sample_gen, feed_dict=feed_dict)
        save_batch_imgs(sample_imgs, i, fig_dir)
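save_batch_imgs is referenced but not defined in the snippet; a possible implementation, assuming the generator output is tanh-scaled to [-1, 1] and one file per image (both the scaling and the file-name pattern are assumptions):

import os

import numpy as np
from PIL import Image


def save_batch_imgs(imgs, batch_index, fig_dir):
    os.makedirs(fig_dir, exist_ok=True)
    for j, img in enumerate(imgs):
        # map tanh output in [-1, 1] to uint8 in [0, 255]
        arr = np.clip((img + 1.0) * 127.5, 0, 255).astype(np.uint8)
        Image.fromarray(arr.squeeze()).save(
            os.path.join(fig_dir, 'batch_%03d_img_%02d.png' % (batch_index, j)))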