Example #1
def train(args):
    logger = logging.getLogger("QANet")
    logger.info("====== training ======")

    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)

    dataloader = DataLoader(args.max_p_len, args.max_q_len, args.save_dir,
                            args.train_files, args.dev_files)
    num_train_steps = int(
        len(dataloader.train_set) / args.batch_size * args.epochs)
    num_warmup_steps = int(num_train_steps * args.warmup_proportion)
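    # num_warmup_steps: the LR warm-up covers warmup_proportion of all training steps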
    logger.info('Converting text into ids...')
    dataloader.convert_to_ids(vocab)

    logger.info('Initialize the model...')
    model = Model(vocab, num_train_steps, num_warmup_steps, args)
    del vocab

    logger.info('Training the model...')
    model.train(dataloader,
                args.epochs,
                args.batch_size,
                save_dir=args.model_dir,
                save_prefix=args.algo,
                dropout=args.dropout)

    logger.info('====== Done with model training! ======')
Example #2
def main():
    # Dataset path
    test_thermal_list = '../idx/test_thermal_1.txt'
    test_color_list = '../idx/test_color_1.txt'
    dataset_path = '../Dataset/'
    model_path = 'log/tone_iter1_900.ckpt'
    save_dir = 'data/'

    test_num = 2060
    # Graph input
    x1 = tf.placeholder(tf.float32, [test_num, 227, 227, 3])
    x2 = tf.placeholder(tf.float32, [test_num, 227, 227, 3])    
    keep_var = tf.placeholder(tf.float32)   
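    # the placeholders are sized to hold the entire test set in one batch;
    # keep_var is the dropout keep probability (feeding 1.0 disables dropout)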
    
    # Model
    predict1 = Model().alexnet_visible(x1, keep_var)
    predict2 = Model().alexnet_thermal(x2, keep_var)
    feat     = Model().share_modal(predict1, predict2, keep_var)
    # load model
    # tf.initialize_all_variables() is deprecated in TF 1.x
    init = tf.global_variables_initializer()
    saver = tf.train.Saver()
       
    with tf.Session() as sess:
        sess.run(init)
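        # restore overwrites the freshly initialized variables with checkpoint weights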
        saver.restore(sess, model_path)

        print('Load Testing Data')
        test_color_imgs,   test_color_labels   = get_test_data(test_color_list)
        test_thermal_imgs, test_thermal_labels = get_test_data(test_thermal_list)

        print('Extracting Feature')
        feature1 = sess.run(predict1, feed_dict={ x1:test_color_imgs, keep_var:  1. })          
        feature2 = sess.run(predict2, feed_dict={ x2:test_thermal_imgs, keep_var:  1.})
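        # the next run feeds the precomputed activations back in through predict1/predict2:
        # TF allows feed_dict keys that are intermediate graph tensors, not just placeholders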
        feature  = sess.run(feat, feed_dict={ predict1:feature1, predict2:feature2, keep_var:  1.})       
        # tf.split(dim, num, value) is the pre-1.0 signature; TF 1.x expects:
        feature1, feature2 = tf.split(feature, num_or_size_splits=2, axis=0)
        
        print('Evaluate Performance')
        query_t_norm = tf.nn.l2_normalize(feature1, axis=1)  # 'dim' arg is deprecated in favor of 'axis'
        test_t_norm  = tf.nn.l2_normalize(feature2, axis=1)

        distmat = tf.matmul(query_t_norm, test_t_norm, transpose_a=False, transpose_b=True)
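        # since the rows are L2-normalized, distmat holds cosine similarities;
        # it is negated below so that larger similarity means smaller distance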

        cmc, mAP = compute_accuracy(-distmat, test_color_labels[:test_num],
                                    test_thermal_labels[:test_num], topk=20)
            
        print('top-1: {:.2%} | top-5: {:.2%} | top-10: {:.2%}| top-20: {:.2%}'.format(cmc[0], cmc[4], cmc[9], cmc[19]))
        print('mAP: {:.2%}'.format(mAP))
        
        # save features
        print('Save Feature')
        feature = query_t_norm.eval()
        f = h5py.File(save_dir + 'train_color_iter_1.mat','w')
        f.create_dataset('feature',data=feature)
        f.close()
        
        feature = test_t_norm.eval()
        f = h5py.File(save_dir + 'train_thermal_iter_1.mat','w')
        f.create_dataset('feature',data=feature)
        f.close()
Example #3
def test(config):

	gpu_options = tf.GPUOptions(visible_device_list="2")
	sess_config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
	sess_config.gpu_options.allow_growth = True

	with open(config.word_emb_file, "r") as fh:
		word_mat = np.array(json.load(fh), dtype=np.float32)
	with open(config.char_emb_file, "r") as fh:
		char_mat = np.array(json.load(fh), dtype=np.float32)
	with open(config.test_eval_file, "r") as fh:
		eval_file = json.load(fh)
	with open(config.test_meta, "r") as fh:
		meta = json.load(fh)

	total = meta["total"]

	print("Loading model...")
	test_batch = get_dataset(config.test_record_file, get_record_parser(
		config, is_test=True), config).make_one_shot_iterator()

	model = Model(config, test_batch, word_mat, char_mat, trainable=False)

	with tf.Session(config=sess_config) as sess:
		sess.run(tf.global_variables_initializer())
		saver = tf.train.Saver()
		saver.restore(sess, tf.train.latest_checkpoint(config.save_dir))
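		# put the model into inference mode (e.g. disables dropout)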
		sess.run(tf.assign(model.is_train, tf.constant(False, dtype=tf.bool)))
		losses = []
		answer_dict = {}
		remapped_dict = {}

		# iterate over the whole test set one batch at a time
		for step in tqdm(range(total // config.batch_size + 1)):
			qa_id, loss, yp1, yp2 = sess.run(
				[model.qa_id, model.loss, model.yp1, model.yp2])
			answer_dict_, remapped_dict_, outlier = convert_tokens(
				eval_file, qa_id.tolist(), yp1.tolist(), yp2.tolist())
			answer_dict.update(answer_dict_)
			remapped_dict.update(remapped_dict_)
			losses.append(loss)
			print("\n", loss)
			if loss > 50:  # inspect predictions for unusually high-loss batches
				for i, j, k in zip(qa_id.tolist(), yp1.tolist(), yp2.tolist()):
					print(answer_dict[str(i)], j, k)
				#print("IDs: {} Losses: {} Yp1: {} Yp2: {}".format(qa_id.tolist(),\
				#	loss.tolist(), yp1.tolist(), yp2.tolist()))
		loss = np.mean(losses)

		# evaluate with answer_dict here; evaluate-v1.1.py evaluates with remapped_dict,
		# since only that one is saved. The two dicts differ slightly; see evaluate-v1.1.py.
		metrics = evaluate(eval_file, answer_dict)
		with open(config.answer_file, "w") as fh:
			json.dump(remapped_dict, fh)
		print("Exact Match: {}, F1: {} Rouge-l-f: {} Rouge-l-p: {} Rouge-l-r: {}".format(\
			metrics['exact_match'], metrics['f1'], metrics['rouge-l-f'], metrics['rouge-l-p'],\
			metrics['rouge-l-r']))
Example #4
def predict(args):
    logger = logging.getLogger("QANet")

    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)

    assert len(args.test_files) > 0, 'No test files are provided.'
    dataloader = DataLoader(args.max_p_num,
                            args.max_p_len,
                            args.max_q_len,
                            args.save_dir,
                            test_files=args.test_files)
    num_train_steps = int(
        len(dataloader.train_set) / args.batch_size * args.epochs)
    num_warmup_steps = int(num_train_steps * args.warmup_proportion)
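    # note: only test_files were loaded here, so these step counts (which parameterize
    # the LR schedule) presumably do not matter when restoring for prediction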
    logger.info('Converting text into ids...')
    dataloader.convert_to_ids(vocab)
    logger.info('Restoring the model...')

    model = Model(vocab, num_train_steps, num_warmup_steps, args)
    model.restore(args.model_dir, 'qanet_64000')
    logger.info('Predicting answers for test set...')
    test_batches = dataloader.next_batch('test',
                                         48,
                                         vocab.get_word_id(vocab.pad_token),
                                         shuffle=False)

    model.evaluate(test_batches,
                   result_dir=args.result_dir,
                   result_prefix='test.predicted')
Example #5
def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char + '_' + FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if not os.path.exists(model_path):
        os.makedirs(model_path)

    # choose the model variant from the trailing digit of the file name
    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path,save_path = 'data','process_data1'

    converter = TextConverter(word_char, data_path, save_path,  FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        train_pkl = 'train_word.pkl'
        val_pkl = 'val_word.pkl'
    elif word_char == 'char':
        train_pkl = 'train_char.pkl'
        val_pkl = 'val_char.pkl'

    train_samples = converter.load_obj(os.path.join(save_path, train_pkl))
    train_g = batch_generator(train_samples, FLAGS.batch_size)

    val_samples = converter.load_obj(os.path.join(save_path, val_pkl))
    val_g = val_samples_generator(val_samples)


    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)


    model = Model(converter.vocab_size, FLAGS, test=False, embeddings=embeddings)

    # resume from the previous training run, if a checkpoint exists
    FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)
    if FLAGS.checkpoint_path:
        model.load(FLAGS.checkpoint_path)

    model.train(train_g,
                FLAGS.max_steps,
                model_path,
                FLAGS.save_every_n,
                FLAGS.log_every_n,
                val_g
                )
Example #6
def evaluate(args):
    logger = logging.getLogger("QANet")
    logger.info("====== evaluating ======")
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)

    assert len(args.dev_files) > 0, 'No dev files are provided.'
    dataloader = DataLoader(args.max_p_num,
                            args.max_p_len,
                            args.max_q_len,
                            args.save_dir,
                            dev_files=args.dev_files)

    num_train_steps = int(
        len(dataloader.train_set) / args.batch_size * args.epochs)
    num_warmup_steps = int(num_train_steps * args.warmup_proportion)
    logger.info('Converting text into ids...')
    dataloader.convert_to_ids(vocab)

    logger.info('Restoring the model...')
    model = Model(vocab, num_train_steps, num_warmup_steps, args)
    model.restore(args.model_dir, "averaged.ckpt-0")
    logger.info('Evaluating the model on dev set...')
    dev_batches = dataloader.next_batch('dev',
                                        16,
                                        vocab.get_word_id(vocab.pad_token),
                                        shuffle=False)

    dev_loss, dev_bleu_rouge = model.evaluate(dev_batches,
                                              result_dir=args.result_dir,
                                              result_prefix='dev.predicted')

    logger.info('Loss on dev set: {}'.format(dev_loss))
    logger.info('Result on dev set: {}'.format(dev_bleu_rouge))
    logger.info('Predicted answers are saved to {}'.format(args.result_dir))
Example #7
def ComputePrecisionK(modelfile, testfile, K_list):

    CURRENT_DIR = os.path.dirname(os.path.abspath("./WikiCategoryLabelling/"))
    sys.path.append(os.path.dirname(CURRENT_DIR + "/WikiCategoryLabelling/"))

    maxParagraphLength = 250
    maxParagraphs = 10
    labels = 1001
    vocabularySize = 76390
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)
    print("data loading done")
    print("no of test examples: " + str(testing.totalPages))

    model.load(modelfile)

    print("model loading done")

    batchSize = 10

    testing.restore()
    truePre = []
    pred = []
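    # full batches first, then one final partial batch with the remainder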
    for i in range(math.ceil(testing.totalPages / batchSize)):
        if i < testing.totalPages / batchSize:
            data = testing.nextBatch(batchSize)
        else:
            data = testing.nextBatch(testing.totalPages % batchSize)
        truePre.extend(data[0])
        pre = model.predict(data)
        pred.extend(pre[0].tolist())

    avgPrecK = [0] * len(K_list)
    for i, p in enumerate(pred):
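        # label indices sorted by predicted score, highest first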
        sortedL = sorted(range(len(p)), key=p.__getitem__, reverse=True)
        for k, K in enumerate(K_list):
            labelK = sortedL[:K]
            precK = 0
            for l in labelK:
                if truePre[i][l] == 1:
                    precK += 1
            avgPrecK[k] += precK / float(K)
    avgPrecK = [float(a) / len(pred) for a in avgPrecK]

    for p in avgPrecK:
        print(p)
Example #8
def main(_):
    word_char = 'word'  # 'word' or 'char'
    print('use word or char:', word_char)

    FLAGS.file_name = word_char + '_' + FLAGS.file_name
    print('model_path:', FLAGS.file_name)

    model_path = os.path.join('models', FLAGS.file_name)
    if os.path.isdir(model_path):
        FLAGS.checkpoint_path = tf.train.latest_checkpoint(model_path)

    if FLAGS.file_name[-1] == '2':
        from model2 import Model
    elif FLAGS.file_name[-1] == '3':
        from model3 import Model
    elif FLAGS.file_name[-1] == '4':
        from model4 import Model
    elif FLAGS.file_name[-1] == '5':
        from model5 import Model
    else:
        from model1 import Model

    data_path, save_path = 'data', 'process_data1'

    converter = TextConverter(word_char, data_path, save_path, FLAGS.num_steps)
    embeddings = converter.embeddings

    if word_char == 'word':
        test_pkl = 'test_word.pkl'
    elif word_char == 'char':
        test_pkl = 'test_char.pkl'

    test_samples = converter.load_obj(os.path.join(save_path, test_pkl))

    print('use embedding:', FLAGS.use_embedding)
    print('vocab size:', converter.vocab_size)

    with open(model_path + '/submission.csv', 'w') as file:
        file.write(str('y_pre') + '\n')
    for i in range(0, len(test_samples), 5000):  # test in chunks to avoid running out of memory
        print('>>>>:', i, '/', len(test_samples))
        test_g = test_samples_generator(test_samples[i:i + 5000])

        model = Model(converter.vocab_size,
                      FLAGS,
                      test=False,
                      embeddings=embeddings)
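        # note: the model is rebuilt and the checkpoint reloaded for every chunk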

        model.load(FLAGS.checkpoint_path)

        model.test(test_g, model_path)
    print('finished!')
Example #9
def main():
    # Load vocabulary wrapper.
    with open(vocab_path, 'rb') as f:  # pickle files must be opened in binary mode
        vocab = pickle.load(f)

    encoder = EncoderCNN(4096, embed_dim)
    decoder = DecoderRNN(embed_dim, hidden_size, len(vocab), num_layers_rnn)
    model = Model(4096, embed_dim, hidden_size, len(vocab), num_layers_rnn)
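    # note: encoder and decoder are never used below; Model presumably builds its own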
    if torch.cuda.is_available():
        model = model.cuda()

    # Loss and Optimizer
    params = list(model.parameters())
    optimizer = torch.optim.Adam(params, lr=0.001)

    #load data
    with open(image_data_file, 'rb') as f:  # pickle files must be opened in binary mode
        image_data = pickle.load(f)
    image_features = si.loadmat(image_feature_file)

    img_features = image_features['fc7'][0]
    img_features = np.concatenate(img_features)

    print('here')
    iteration = 0
    save_loss = []
    for i in range(10):  # epochs
        use_caption = i % 5  # presumably cycles through the five captions per image
        print('Epoch', i)
        for x, y in make_mini_batch(img_features,
                                    image_data,
                                    use_caption=use_caption):
            word_padding, lengths = make_word_padding(y, vocab)
            x = Variable(torch.from_numpy(x).cuda())
            word_index = Variable(torch.from_numpy(word_padding).cuda())

            model.zero_grad()
            loss = model(x, word_index, lengths)
            loss.backward()
            optimizer.step()

            if iteration % 100 == 0:
                # loss.data[0] is pre-0.4 PyTorch; current versions use loss.item()
                print('loss', loss.data[0])
                save_loss.append(loss.data[0])

            iteration += 1

        torch.save(model, 'model.pkl')
        with open('losses.txt', 'w') as f:
            print(save_loss, file=f)  # was `print >> f, losses`; `losses` is undefined
Example #10
def train(config):

	gpu_options = tf.GPUOptions(visible_device_list=config.gpu_id)
	sess_config = tf.ConfigProto(allow_soft_placement=True, gpu_options=gpu_options)
	sess_config.gpu_options.allow_growth = True

	with open(config.word_emb_file, "r") as fh:
		word_mat = np.array(json.load(fh), dtype=np.float32)
	with open(config.char_emb_file, "r") as fh:
		char_mat = np.array(json.load(fh), dtype=np.float32)
	with open(config.train_eval_file, "r") as fh:
		train_eval_file = json.load(fh)
	with open(config.dev_eval_file, "r") as fh:
		dev_eval_file = json.load(fh)
	with open(config.dev_meta, "r") as fh:
		meta = json.load(fh)

	dev_total = meta["total"]

	print("Building model...")
	parser = get_record_parser(config)
	train_dataset = get_batch_dataset(config.train_record_file, parser, config)
	dev_dataset = get_dataset(config.dev_record_file, parser, config)
	handle = tf.placeholder(tf.string, shape=[])
	iterator = tf.data.Iterator.from_string_handle(
		handle, train_dataset.output_types, train_dataset.output_shapes)
	train_iterator = train_dataset.make_one_shot_iterator()
	dev_iterator = dev_dataset.make_one_shot_iterator()

	model = Model(config, iterator, word_mat, char_mat)

	loss_save = 100.0
	patience = 0
	lr = config.init_lr
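	# plateau schedule: the loop below halves the LR when the dev ee_loss fails to
	# improve for `patience` consecutive evaluations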

	with tf.Session(config=sess_config) as sess:
		writer = tf.summary.FileWriter(config.log_dir, graph=tf.get_default_graph())
		
		sess.run(tf.global_variables_initializer())
		saver = tf.train.Saver(max_to_keep=config.max_checkpoint_to_keep,
			save_relative_paths=True)
		#print(config.save_dir_temp)
		if config.restore_checkpoint:
			saver.restore(sess, tf.train.latest_checkpoint(config.save_dir_temp))
		#saver.restore(sess, tf.train.latest_checkpoint(config.save_dir))
		train_handle = sess.run(train_iterator.string_handle())
		dev_handle = sess.run(dev_iterator.string_handle())
		sess.run(tf.assign(model.is_train, tf.constant(True, dtype=tf.bool)))
		sess.run(tf.assign(model.lr, tf.constant(lr, dtype=tf.float32)))
		print("Started training")
		for _ in tqdm(range(1, config.num_steps + 1)):
			global_step = sess.run(model.global_step) + 1
			loss, train_op, ee_loss = sess.run(
				[model.loss, model.train_op, model.ee_loss],
				feed_dict={handle: train_handle})
			if global_step % config.period == 0:
				loss_sum1 = tf.Summary(value=[tf.Summary.Value(
					tag="model/esp_loss", simple_value=loss),])
				loss_sum2 = tf.Summary(value=[tf.Summary.Value(
					tag="model/ee_loss", simple_value=ee_loss),])
				writer.add_summary(loss_sum1, global_step)
				writer.add_summary(loss_sum2, global_step)
			#print(global_step)
			if global_step % config.checkpoint == 0 or global_step in [500]:
				sess.run(tf.assign(model.is_train,
								   tf.constant(False, dtype=tf.bool)))
				_, summ = evaluate_batch(
					model, config.val_num_batches, train_eval_file, sess, "train", handle, train_handle)
				for s in summ:
					writer.add_summary(s, global_step)
				metrics, summ = evaluate_batch(
					model, dev_total // config.batch_size + 1, dev_eval_file, sess, "dev", handle, dev_handle)
				sess.run(tf.assign(model.is_train,
								   tf.constant(True, dtype=tf.bool)))

				dev_loss = metrics["ee_loss"]
				if dev_loss < loss_save:
					loss_save = dev_loss
					patience = 0
				else:
					patience += 1
				if patience >= config.patience:
					lr /= 2.0
					loss_save = dev_loss
					patience = 0
				sess.run(tf.assign(model.lr, tf.constant(lr, dtype=tf.float32)))
				for s in summ:
					writer.add_summary(s, global_step)
				writer.flush()
				filename = os.path.join(
					config.save_dir, "model_{}.ckpt".format(global_step))
				saver.save(sess, filename)
Example #11
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import sqlite3
import reset
import export
import vote
import rank
import label
import burger
import sys
import pandas  # needed for read_hdf below; missing from the original imports

sys.path.insert(0, '../model')
from model2 import Model

model = Model()

train_burgers = pandas.read_hdf('../data/split.h5', 'train')
test_burgers = pandas.read_hdf('../data/split.h5', 'test')
connection = sqlite3.connect('../data/server.db')
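# combine train and test burgers into a single lookup table
# (DataFrame.append was removed in pandas 2.x; pd.concat is the modern equivalent)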
burgers = train_burgers.append(test_burgers)

class IndexHandler(tornado.web.RequestHandler):
    def get(self):
        self.redirect("/static/index.html")


urls = [
    (r"/", IndexHandler),
    (r"/reset", reset.ResetHandler, dict(connection=connection, model=model)),
    (r"/vote", vote.VoteHandler, dict(connection=connection, burgers=burgers, model=model)),
Example #12
from DataParser import DataParser
from model2 import Model2 as Model


maxParagraphLength = 250
maxParagraphs = 10
labels = 1000
vocabularySize = 15000
model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)
training = DataParser(maxParagraphLength, maxParagraphs, labels,
                      vocabularySize)
training.getDataFromfile("data/vocab_3L_l1000_sampled_10000_red_train.txt")

batchSize = 50

epoch = 0
epochEnd = 10
for e in range(epoch, epochEnd):
    print('Epoch: ' + str(e))
    cost = 0
    for itr in range(int(training.totalPages / batchSize)):
        cost += model.train(training.nextBatch(batchSize))
    print(cost)

    if e % 10 == 0:
        model.save("model2_l1000_" + str(e))
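        # note: with epochEnd = 10, only epoch 0 satisfies e % 10 == 0, so a single save occurs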
Example #13
File: run_me2.py · Project: hannakerek/KEX
from model2 import Model
from qTable2 import QTable
import time 
import pandas
import numpy as np

m = Model()
Qt = QTable(m.actions)

lives = 500000  # number of times we run the simulation
AgentNumber = 2

# save state/action in case of collision
state = [0, 0]
action = [0, 0]
reward = [0, 0]
step = [0, 0]
live = [0, 0]
totalLoop = True
total_reward = [0, 0]

values = ['step', 'total reward', 'collison', 'collision_obst']
info_lista1 = pandas.DataFrame(columns=values, dtype=np.float64)
info_lista2 = pandas.DataFrame(columns=values, dtype=np.float64)


while totalLoop:

	loopBoth = True
	loop = [True,True]
	myLoop = True
Example #14
def ComputeFscore(modelfile, testfile, outputfile):

    CURRENT_DIR = os.path.dirname(os.path.abspath("./WikiCategoryLabelling/"))
    sys.path.append(os.path.dirname(CURRENT_DIR + "/WikiCategoryLabelling/"))

    maxParagraphLength = 250
    maxParagraphs = 10
    labels = 1000
    vocabularySize = 150000
    model = Model(maxParagraphLength, maxParagraphs, labels, vocabularySize)

    testing = DataParser(maxParagraphLength, maxParagraphs, labels,
                         vocabularySize)
    testing.getDataFromfile(testfile)

    model.load(modelfile)

    print("loading done")

    testing.restore()
    truePre = []
    pred = []
    for itr in range(testing.totalPages):
        data = testing.nextBatch()
        truePre.append(data[0])
        pre = model.predict(data)
        pred.append(pre[0])

    thres = 0.5  # unused default; per-label thresholds are tuned below
    valid = int(len(truePre) * 0.35)  # first 35% of examples are held out for threshold tuning
    labelsCount = {}
    ConfusionMa = {}
    fScr = {}
    thresLab = {}
    for la in range(1000):
        if la % 25 == 0:
            print("Current label", la)
        t = []
        p = []
        for i in range(valid):
            t.append(truePre[i][la])
            p.append(pred[i][la])
        bestF, bestThre = thresholdTuning(t, p)

        t = []
        p = []
        for i in range(valid, len(truePre)):
            t.append(truePre[i][la])
            p.append(pred[i][la])

        p = np.array(p)
        fScr[la] = f1_score(t, p >= bestThre)
        ConfusionMa[la] = confusion_matrix(t, p > bestThre)
        thresLab[la] = bestThre
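        # note the asymmetry above: f1_score thresholds with >=, confusion_matrix with >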

    with open(outputfile, "w") as f:
        for i in range(1000):
            f.write(str(i) + "," + str(thresLab[i]) + "," + str(fScr[i]) + "\n")
Example #15
    sess = tf.Session(config=config)
    print(n_items)
    sampler = WarpSampler(train,
                          n_users,
                          n_items,
                          id2user,
                          user2idmap2,
                          num_neg=num_neg,
                          batch_size=batch_size,
                          maxlen=args.maxlen,
                          n_workers=3)

    model = Model(num_users2,
                  n_items,
                  args,
                  emb,
                  num_neg,
                  dec_step=num_batch * 25,
                  emb_usr=usr_emb)

    sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated
    sess.run(tf.assign(model.item_emb_table, model.emb_item))
    # sess.run(tf.assign(model.user_emb_table, model.usr_emb))

    user, user_array, seqs_array, label_array = generate_vail_date(
        train, valid, id2user, user2idmap2)
    valid_array = [user, user_array, seqs_array, label_array]
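    # randomly sample 5,000 users for a cheaper validation pass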
    idx = np.random.choice(len(user), 5000, replace=False)
    user2, user_array2, seqs_array2, label_array2 = [], [], [], []

    for i in range(len(idx)):
Example #16
File: main3.py · Project: yhu9/visor
# imports below are used further down but were missing from the snippet
import argparse
import time
from collections import deque
from itertools import count

from model2 import Model

################################################################################################
parser = argparse.ArgumentParser()
parser.add_argument('--load', type=str, default=False, help='model to load')
parser.add_argument('--out', type=str, default='DQN.pth', help='output file')
parser.add_argument('--test', action='store_const', const=True, default=False, help='testing flag')
opt = parser.parse_args()
################################################################################################
from logger import Logger
env = World()  # World (the environment) is defined elsewhere in the original file
agent = Model(opt.load, mode='DDQN')

##########################################################################
#TRAIN THE VISOR
def train(n_episodes=50000, max_t=10, print_every=1, save_every=20):

    logger = Logger('./logs')
    scores_deque = deque(maxlen=200)
    solved_deque = deque(maxlen=200)
    scores = []
    best = 0

    for i_episode in count():
        state = env.reset2_4(manual_pose=(i_episode % 200) + 1)
        score = 0
        timestep = time.time()
Example #17
# imports below are used in this snippet but were missing from it
import glob
import os

import numpy as np
import torch
import torch.nn as nn
import torch.utils.data

# note: MyDataset, Model, batch_size, USE_CUDA, epoch and init_lr are assumed to be
# defined elsewhere in the original file
root_dir = './data/pts_no_rot/'
fn_list = glob.glob(root_dir + '*.csv')
fn_list.sort()
ids = [os.path.basename(fn).split('.')[0] for fn in fn_list]

my_dataset = MyDataset(root_dir=root_dir, ids=ids)

# sample = my_dataset[0]
# print(sample['pts_xyz'], sample['pts_label'], sample['pts_bbox'])

train_loader = torch.utils.data.DataLoader(my_dataset,
                                           batch_size=batch_size,
                                           shuffle=True)

# network
model = Model().train()
if USE_CUDA:
    model = model.cuda()

# criterion
smooth_l1 = nn.SmoothL1Loss()
cross_entropy = nn.CrossEntropyLoss()
nllloss = nn.NLLLoss()

# training
loss_all = []
for e in range(epoch):

    lr = init_lr / np.power(2, (e // 8))
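    # the schedule above halves the learning rate every 8 epochs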

    # optimizer