Example #1
def main(train_model, model_type):
    print(train_model)
    if train_model:
        if model_type == 'base_model':
            train.train()
        elif model_type == 'mobile_net':
            train_mobile_net.train()
    else:
        evaluate.eval(model=model_type)
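A minimal sketch of how this dispatcher might be driven from the command line; the --train and --model-type flags are assumptions for illustration, not the project's actual interface:

import argparse

if __name__ == '__main__':
    # hypothetical CLI wiring for main(train_model, model_type)
    parser = argparse.ArgumentParser()
    parser.add_argument('--train', action='store_true',
                        help='train the selected model instead of evaluating it')
    parser.add_argument('--model-type', default='base_model',
                        choices=['base_model', 'mobile_net'])
    args = parser.parse_args()
    main(args.train, args.model_type)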
Example #2
def main():
    # data
    train_dataloader, test_dataloader = data.mnist_dataloader()

    # model
    model = models.CNN_Net().to(cfg.device)

    # train
    trainer.train(model, train_dataloader, test_dataloader)

    # test
    model.load_state_dict(torch.load(cfg.best_model_path))
    tester.test(model, test_dataloader)

    # metrics
    evaluate.eval()
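The snippet relies on a shared cfg module for the device and checkpoint path. A minimal sketch of what it plausibly contains, limited to the two attributes the code above actually reads; the concrete path is a placeholder, not the project's real value:

# config.py -- illustrative sketch of the cfg module used above
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
best_model_path = 'checkpoints/best_model.pt'  # hypothetical path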
Example #3
def analyze_function():
    try:
        analyze_creds = request.get_json()
        result = dict()
        result["audio"] = "none"
        result["class"] = "none"
        path = "data/" + analyze_creds["email"]
        if (analyze_creds["audio"] != ""):
            audioString = analyze_creds["audio"]
            audioString = bytes(audioString, encoding="utf-8")
            if not os.path.exists(path):
                os.makedirs(path)
            file = path + "/temp.wav"
            with open(file, "wb+") as f:
                f.write(base64.decodebytes(audioString))
            result["audio"] = eval(file, 1)

        print(analyze_creds["link"])
        if (analyze_creds["link"] != ""):
            user_scraping(analyze_creds["link"], path)

        file = path + "/twitter_output.txt"
        print(analyze_creds["messages"])
        if (analyze_creds["messages"] != ""):
            if not os.path.exists(path):
                os.makedirs(path)
            lines = analyze_creds["messages"].split(";")
            with open(file, "a+") as f:
                for i in lines:
                    f.write(i)
                    f.write("\n")
        if os.path.exists(file):
            result["class"] = eval(file, 2)
            result["class"] = round(result["class"], 2)
        print(result)
        result_creds = jsonify(result)
        shutil.rmtree(path)
        return result_creds
    except Exception as e:
        print(e)
        return "NF"
Example #4
def main():

    model_ids = range(12)
    scores = []

    for i, id in enumerate(model_ids):
        print(f"== Evaluating model rq1/model-{id}.pt ==")
        scores.append(
            evaluate.eval(f"rq1/model-{id}.pt", "output/test_dps.txt",
                          "output/test_ids.txt"))

    pickle.dump(scores, open('output/scores.pickle', 'wb'))
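Reading the scores back is symmetric; a small usage sketch using a context manager, which closes the file deterministically (the snippet above leaves that to the garbage collector):

import pickle

# load the per-model scores written by main() above
with open('output/scores.pickle', 'rb') as f:
    scores = pickle.load(f)
print(scores)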
Example #5
def main():
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    batch_size = 100

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data',
                       train=True,
                       download=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307, ), (0.3081, ))
                       ])),
        batch_size=batch_size,
        shuffle=True)

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data',
                       train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307, ), (0.3081, ))
                       ])),
        batch_size=batch_size,
        shuffle=True)

    large_model = large().to(device)
    optimizer = optim.SGD(large_model.parameters(), lr=0.01, momentum=0.9)
    epochs = 20
    train_large(large_model, train_loader, optimizer, epochs, device)
    eval(large_model, test_loader)

    distil_model = distil().to(device)
    # the optimizer must track the student's parameters, not the teacher's
    distil_optimizer = optim.SGD(distil_model.parameters(), lr=0.01, momentum=0.9)
    loss_fn = nn.MSELoss()
    distil_weight = 0.7
    temp = 20
    train_distil(large_model, distil_model, train_loader, distil_optimizer, loss_fn,
                 epochs, temp, distil_weight)
    eval(distil_model, test_loader)
Example #6
    def __init__(self):

        ## window initialisation
        self.new_window = QtWidgets.QMainWindow()
        self.new_screen = new()
        self.new_screen.setupUi(self.new_window)

        self.open_window = QtWidgets.QMainWindow()
        self.open_screen = open()
        self.open_screen.setupUi(self.open_window)

        self.eval_window = QtWidgets.QMainWindow()
        self.eval_screen = eval()
        self.eval_screen.setupUi(self.eval_window)
Example #7
File: run.py Project: MattX/toylisp
def tokEval(tok, environment, menv, silent=False):
	while tok != []:
		(obj, tok) = lparse.parse(tok)
#		newobj = evaluate.macroeval(obj, menv)
		try:
			res = evaluate.eval(obj, environment)
		except env.UndefinedError as e:
			print('* Undefined variable: ' + e.args[0])
		except objects.NotAFunctionError as e:
			print('* Not a function: ' + e.args[0])
		except misc.ExecutionError as e:
			print('* Error: ' + e.args[0])
		else:
			if not silent:
				print(res)
Example #8
def main():
	# create the parser
	parser = Parser()

	# look for the history file
	hist_file = path.join(path.expanduser('~'), '.logix_history')

	# init history file
	try:
		readline.read_history_file(hist_file)
	except FileNotFoundError:
		pass

	# save the history file at the end of execution
	atexit.register(readline.write_history_file, hist_file)

	while True:
		# Get input
		try:
			expr = input("> ")
		except EOFError:
			print('quit')
			sys.exit()

		# If input 'quit' or 'q', exit the program
		if expr.lower() in {'quit', 'q'}:
			sys.exit()

		# Create the ast 
		ast = tree.create(parser.parse(expr))

		# Get the variables used in the expression 
		# and get a printable repr of them 
		expression_vars = evaluate.find_vars(ast)
		variables = ''
		for var in expression_vars:
			variables += var

		# Print the variables used and the results
		# from the truth table 
		print('vars: %s' % str(variables))
		results = evaluate.eval(ast, variables)
		pprint.pprint(results)

	sys.exit()
Example #10
def train(data, args):
    """
    Train the model specified by the parameters in args on the dataset given by data.

    :param - data: torch_geometric Data object holding node features, edges, and labels
    :param - args: user-specified arguments detailing GNN architecture and training
    return: DataLoader, trained model, and list of validation accuracies during training
    """
    num_classes = len(set([int(x) for x in data.y]))
    print('CUDA availability:', torch.cuda.is_available())
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    loader = DataLoader([data.to(device)], shuffle=True)
    model = models.GNN(data.num_node_features, args.hidden_dim, num_classes, args).to(device)
    scheduler, optimizer = gnn_utils.build_optimizer(args, model.parameters())

    validation_accuracies = []
    for epoch in range(args.epochs):
        total_loss = 0
        model.train()
        for batch in loader:
            optimizer.zero_grad()
            pred = model(batch)[batch.train_mask]
            label = batch.y[batch.train_mask]
            loss = model.loss(pred, label)
            loss.backward()
            optimizer.step()
            total_loss += loss.item()
        
        val_acc = evaluate.eval(loader, model, is_test=False)
        if not len(validation_accuracies) or val_acc > max(validation_accuracies):
            # Save the model each time it achieves a new max val
            # accuracy. Previously saved models are overwritten.
            print('New max accuracy', val_acc, '- saving model...')
            gnn.save_model(args, model)
        validation_accuracies.append(val_acc)
        if epoch % 10 == 0:
            print('val:', val_acc)
            print('loss:', total_loss)
    
    return loader, model, validation_accuracies
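A plausible call site for this trainer; the args fields follow those referenced above (hidden_dim, epochs, and whatever build_optimizer consumes), and is_test=True mirrors the is_test=False call inside the loop:

loader, model, val_accs = train(data, args)
test_acc = evaluate.eval(loader, model, is_test=True)
print('best val acc:', max(val_accs), 'test acc:', test_acc)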
Example #11
def run_eval(test=False,
             test_score=False,
             score=0.0,
             min_for_score=0,
             multi=False,
             test_k=False,
             k_=11):
    return eval(
        gtFramesSingle,
        convert_eval_format(score_=score,
                            test_score=test_score,
                            min_for_score=min_for_score,
                            multi=False,
                            test_k=test_k,
                            k_=k_), gtFramesMulti,
        convert_eval_format(score_=score,
                            test_score=test_score,
                            min_for_score=min_for_score,
                            multi=True,
                            test_k=test_k,
                            k_=k_))
Example #12
# coding=utf-8
# Author:fan hongtao
# Date:2020-11-22

import segment_dic
import segment_hmm
import pos_tag
import evaluate
import ner

if __name__ == '__main__':
    # load the word segmentation, POS tagging, and named entity recognition models
    Segment_dic = segment_dic.segment()
    Segment_hmm = segment_hmm.segment()
    Pos_tag = pos_tag.pos()
    Evaluator = evaluate.eval()
    Recognition_entity = ner.recognition_entity()
    # dispatch on the user's command
    print("Type help for a list of commands")
    while True:
        request = input("request:")
        if request == "help":
            print("dictionary segmentation -- segment dic")
            print("add user vocabulary -- add")
            print("statistical segmentation -- segment hmm")
            print("sequence tagging -- pos")
            print("accuracy evaluation -- eval")
            print("named entity recognition -- ner")
        elif request == "segment dic":
            sent = input("Enter the sentence to segment: ")
            ans = Segment_dic.bi_segment(sent)
Example #13
                mini_clses_t = [
                    clses_t[i_clses_t] for i_clses_t in index_test_minibatch
                ]
                mini_feas_t = [
                    feas_t[i_clses_t] for i_clses_t in index_test_minibatch
                ]
                mini_imgs_t = [
                    imgs_t[i_clses_t] for i_clses_t in index_test_minibatch
                ]
                mini_gtes_t = [
                    gtes_t[i_clses_t] for i_clses_t in index_test_minibatch
                ]

                hit_cnt, detect_hit_cnt, sup_cnt, test_cnt, avg_step_of_agent = \
                                        evaluate.eval(model, testoutput, \
                                                      [mini_clses_t, mini_feas_t, mini_imgs_t, mini_gtes_t], \
                                                       para, \
                                                       '\n\n===Epoch:{:d}===\n\n'.format(i_epoch))
                reStr = 'Test RL:{:.3f}\nTest Det:{:.3f}\nRL is superior to Det:{:.2f}\nAvg steps:{:.3f}\n'\
                        .format(float(hit_cnt)/test_cnt, float(detect_hit_cnt)/test_cnt, \
                                float(sup_cnt)/test_cnt,avg_step_of_agent)
                print(reStr)
                logging.info(reStr)

    if epsilon > 0.1:
        epsilon -= 0.05
    if i_epoch % 5 == 0:
        string = outputModelPath + '/' + para.keyAttr + '_epoch_' + str(
            i_epoch) + '.h5'
        model.save_weights(string, overwrite=True)
testoutput.close()
Example #14
import evaluate
import csv
import cv2
import sys
import detector  # assumed: local module providing detect(); not shown in the snippet



if __name__ == "__main__":
	
	with open('labels.csv', newline='') as csvfile:
		reader = csv.reader(csvfile, delimiter = ',')
		for data in reader:
			filename = data[0]
			detect_coords = detector.detect(filename)
			actual_coords = [data[1],data[2],data[3],data[4]]
			score = evaluate.eval(actual_coords, detect_coords)
		
			actual_x1 = int(actual_coords[0])
			actual_y1 = int(actual_coords[1])
			actual_x2 = int(actual_coords[2])
			actual_y2 = int(actual_coords[3])

			detect_x1 = int(detect_coords[0])
			detect_y1 = int(detect_coords[1])
			detect_x2 = int(detect_coords[2])
			detect_y2 = int(detect_coords[3])

			# loop over the cat faces and draw a rectangle surrounding each
			image = cv2.imread(filename)
			cv2.rectangle(image, (actual_x1, actual_y1), (actual_x2, actual_y2), (0, 255, 0), 2) # CORRECT BOUNDING BOX
			cv2.rectangle(image, (detect_x1, detect_y1), (detect_x2, detect_y2), (0, 0, 255), 2) # PREDICTED BOUNDING BOX
Example #15
def main(config_file='config/hrnn_config.json'):
    # batch_size = 2 # mini-batch size
    # embedding_size = 2 # embedding size
    #
    # sentences = ["apple banana fruit", "banana orange fruit", "orange banana fruit",
    #              "dog cat animal", "cat monkey animal", "monkey dog animal"]

    # word_sequence = " ".join(sentences).split()
    # word_list = " ".join(sentences).split()
    # word_list = list(set(word_list))
    # word_dict = {w: i for i, w in enumerate(word_list)}
    # voc_size = len(word_list)

    with open(config_file) as fin:
        config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
    get_path(os.path.join(config.model_path, config.experiment_name))
    get_path(config.log_path)
    # build_vocab(config.train_file_path, os.path.join(config.model_path, 'vocab.txt'), int(config.vocab_size) - 2)

    data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
                model_type=config.model_type,
                config=config)
    train_dataset, collate_fn = data.load_train_and_valid_files(
        train_file=config.train_file_path)
    sampler_train = RandomSampler(train_dataset)
    data_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        sampler=sampler_train,
        collate_fn=collate_fn,
        drop_last=True,
    )

    # Make skip gram of one size window
    # skip_grams = []
    # for i in range(1, len(word_sequence) - 1):
    #     target = word_dict[word_sequence[i]]
    #     context = [word_dict[word_sequence[i - 1]], word_dict[word_sequence[i + 1]]]
    #     for w in context:
    #         skip_grams.append([target, w])
    # pos = range(config.vocab_size)
    # i = torch.LongTensor([pos, pos])
    # elements = [1.0] * config.vocab_size
    # v = torch.LongTensor(elements)
    if torch.cuda.is_available():
        device = torch.device('cuda')
        # onehot = torch.sparse.FloatTensor(i,v,torch.Size([config.vocab_size, config.vocab_size])).cuda()
    else:
        device = torch.device('cpu')
        # onehot = torch.sparse.FloatTensor(i, v, torch.Size([config.vocab_size, config.vocab_size]))

    model = MODEL_MAP[config.model_type](config)
    # load model states.
    if config.trained_weight:
        model = torch.load(config.trained_weight)
    model.to(device)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=config.lr)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.997)

    a_score, s_score = 0, 0
    # Training
    for epoch in range(config.num_epoch):
        tqdm_obj = tqdm(data_loader, ncols=80)

        for step, batch in enumerate(tqdm_obj):
            input_batch, target_batch = batch['input_ids'], batch['labels']
            # input_batch = torch.LongTensor(input_batch)
            # target_batch = torch.LongTensor(target_batch)
            input_batch, target_batch = input_batch.to(
                device), target_batch.to(device)

            ind_iter = range(input_batch.shape[0])
            index = 0
            while index < input_batch.shape[0]:
                # use sparse matrix
                # batch_input = None
                # for i_part in islice(ind_iter, index, index + int(config.max_stem_size)):
                #     i_part_input = onehot[input_batch[i_part]].to_dense().unsqueeze(dim=0).float()
                #     if batch_input is not None:
                #         batch_input = torch.cat([batch_input, i_part_input], dim=0)
                #     else:
                #         batch_input = i_part_input
                # 2:
                batch_range = list(
                    islice(ind_iter, index, index + int(config.max_stem_size)))
                batch_input = torch.zeros(
                    (len(batch_range), config.vocab_size),
                    dtype=float).float().cuda()
                for i in range(len(batch_range)):
                    batch_input[i, input_batch[batch_range[i]]] = 1.0

                batch_target_batch = target_batch[index:index +
                                                  int(config.max_stem_size)]
                index += int(config.max_stem_size)

                optimizer.zero_grad()
                output = model(batch_input)
                # output : [batch_size, voc_size], target_batch : [batch_size] (LongTensor, not one-hot)
                if config.hierarchical_softmax:
                    loss = torch.mean(
                        model.hsoftmax(output, batch_target_batch))
                else:
                    loss = criterion(output, batch_target_batch)

                if a_score:
                    tqdm_obj.set_description(
                        'analogy:{:.6f},sim:{:.6f},loss: {:.6f}'.format(
                            a_score, s_score, loss.item()))
                else:
                    tqdm_obj.set_description('loss: {:.6f}'.format(
                        loss.item()))

                loss.backward()
                optimizer.step()

            if (step + 1) % 100000 == 0:
                print('Epoch:', '%04d' % (epoch + 1), 'cost =',
                      '{:.6f}'.format(loss))
                W, WT = model.parameters()
                weights = W.T.detach().cpu().numpy()
                dic = data.tokenizer.dictionary
                vocab = [
                    key
                    for (key, value) in sorted(dic.items(), key=lambda x: x[1])
                ]
                vocab = numpy.reshape(numpy.array(vocab), (-1, 1))
                w2v = numpy.concatenate((vocab, weights), axis=1)
                pandas.DataFrame(w2v).to_csv("word2vec.txt",
                                             sep=' ',
                                             header=None,
                                             index=False)
                with open("word2vec.txt", 'r+', encoding='utf-8') as file:
                    readcontent = file.read(
                    )  # store the read value of exe.txt into
                    file.seek(0, 0)  # Takes the cursor to top line
                    file.write(
                        str(len(vocab)) + " " + str(weights.shape[1]) +
                        "\n")  # convert int to str since write() deals
                    file.write(readcontent)
                # torch.save(model, os.path.join(config.model_path, config.experiment_name, 'model.bin'))
                a_score, s_score = eval(config.analogy_valid_file_path,
                                        config.similarity_valid_file_path)
                tqdm_obj.set_description(
                    'analogy:{:.6f},sim:{:.6f},loss: {:.6f}'.format(
                        a_score, s_score, loss.item()))

            # drop the learning rate gradually
            scheduler.step()

        if (epoch + 1) % 1 == 0 or epoch == int(config.num_epoch) - 1:
            print('Epoch:', '%04d' % (epoch + 1), 'cost =',
                  '{:.6f}'.format(loss))
            W, WT = model.parameters()
            weights = W.T.detach().cpu().numpy()
            dic = data.tokenizer.dictionary
            vocab = [
                key for (key, value) in sorted(dic.items(), key=lambda x: x[1])
            ]
            vocab = numpy.reshape(numpy.array(vocab), (-1, 1))
            w2v = numpy.concatenate((vocab, weights), axis=1)
            pandas.DataFrame(w2v).to_csv("word2vec.txt",
                                         sep=' ',
                                         header=None,
                                         index=False)
            with open("word2vec.txt", 'r+', encoding='utf-8') as file:
                readcontent = file.read(
                )  # store the read value of exe.txt into
                file.seek(0, 0)  # Takes the cursor to top line
                file.write(
                    str(len(vocab)) + " " + str(weights.shape[1]) +
                    "\n")  # convert int to str since write() deals
                file.write(readcontent)
            torch.save(
                model,
                os.path.join(config.model_path, config.experiment_name,
                             'model.bin'))

            a_score, s_score = eval(config.analogy_valid_file_path,
                                    config.similarity_valid_file_path)
            tqdm_obj.set_description('analogy:{:.6f},sim:{:.6f},loss: {:.6f}'.format(
                a_score, s_score, loss.item()))
Example #16
#coding=utf-8
from document import *
from pmi import pmi_prediction
from evaluate import eval
from bigram import bigram_prediction
from classify import *
import nn
from keras.preprocessing import sequence

train = read_question_corpus(train_file)
test = read_c_and_j_corpus()

train = train[:10000]

results = pmi_prediction(train, test)
eval(test, results)

#results=bigram_prediction(train,test)
#eval(test,results)
test0 = test
train, test = get_question_documents(train), get_question_documents(test0)
V = get_vocabrary(train, k=1000)

# pair wise
train_x, train_y = get_pair_wise_documents_for_train(train, V)
test_x, test_y = get_pair_wise_documents(test, V)

v_len = len(V) * 2
model = nn.lstm_train(train_x, train_y, v_len)

test_x = sequence.pad_sequences(test_x, maxlen=nn.MAX_LEN)
Example #17
def test_pwr_up():
    assert eval('2↑3') == 8
Example #18
def test_unary_divide():
    assert eval('//2') == 2
Example #19
def test_pwr_db_down():
    assert eval('8⇓2') == 3
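Taken together with Example #18 and Example #26 later in this list, these tests pin down the custom operators: ↑ is a power (2↑3 == 8), ↓ an n-th root (8↓3 == 2), and ⇓ a logarithm (8⇓2 == 3). A minimal sketch of an evaluator satisfying those three cases, assuming single 'a<op>b' expressions; the project's real eval() clearly handles full expressions and the unary // case from Example #18, which this sketch does not:

import math
import re

def eval_expr(expr):
    # named eval_expr to avoid shadowing the builtin; illustration only
    a, op, b = re.fullmatch(r'(\d+)([↑↓⇓])(\d+)', expr).groups()
    a, b = int(a), int(b)
    if op == '↑':
        return a ** b                # 2↑3 == 8
    if op == '↓':
        return round(a ** (1 / b))   # 8↓3 == 2 (cube root)
    return round(math.log(a, b))     # 8⇓2 == 3 (log base 2)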
Example #20
    def train_loop_fn(loader):
        tracker = xm.RateTracker()
        model.train()
        a_score, s_score = 0, 0
        for x, batch in enumerate(loader):
            input_batch, target_batch = batch['input_ids'], batch['labels']
            input_batch, target_batch = input_batch.to(
                device), target_batch.to(device)

            ind_iter = range(input_batch.shape[0])
            index = 0
            while index < input_batch.shape[0]:
                # 2:
                batch_range = list(
                    islice(ind_iter, index, index + int(config.max_stem_size)))
                batch_input = torch.zeros(
                    (len(batch_range), config.vocab_size),
                    dtype=float).float().to(device)
                for i in range(len(batch_range)):
                    batch_input[i, input_batch[batch_range[i]]] = 1.0

                batch_target_batch = target_batch[index:index +
                                                  int(config.max_stem_size)]
                index += int(config.max_stem_size)

                optimizer.zero_grad()
                output = model(batch_input)
                # output : [batch_size, voc_size], target_batch : [batch_size] (LongTensor, not one-hot)
                if config.hierarchical_softmax:
                    loss = torch.mean(
                        model.hsoftmax(output, batch_target_batch))
                else:
                    loss = criterion(output, batch_target_batch)

                loss.backward()
                optimizer.step()
                # drop the learning rate gradually

                if xm.get_ordinal() == 0:
                    if (x + 1) % 100000 == 0:
                        print('Epoch:', '%04d' % (epoch + 1), 'cost =',
                              '{:.6f}'.format(loss))
                        W, WT = model.parameters()
                        weights = W.T.detach().cpu().numpy()
                        dic = data.tokenizer.dictionary
                        vocab = [
                            key
                            for (key, value
                                 ) in sorted(dic.items(), key=lambda x: x[1])
                        ]
                        vocab = numpy.reshape(numpy.array(vocab), (-1, 1))
                        w2v = numpy.concatenate((vocab, weights), axis=1)
                        pandas.DataFrame(w2v).to_csv("word2vec.txt",
                                                     sep=' ',
                                                     header=None,
                                                     index=False)
                        with open("word2vec.txt", 'r+',
                                  encoding='utf-8') as file:
                            readcontent = file.read(
                            )  # store the read value of exe.txt into
                            file.seek(0, 0)  # Takes the cursor to top line
                            file.write(
                                str(len(vocab)) + " " + str(weights.shape[1]) +
                                "\n")  # convert int to str since write() deals
                            file.write(readcontent)
                        # torch.save(model, os.path.join(config.model_path, config.experiment_name, 'model.bin'))
                        a_score, s_score = eval(
                            config.analogy_valid_file_path,
                            config.similarity_valid_file_path)
                        print(
                            '[xla:{}]({}) analogy:{:.6f},sim:{:.6f},Loss={:.5f} Rate={:.2f} GlobalRate={:.2f} Time={}'
                            .format(xm.get_ordinal(), x, a_score, s_score,
                                    loss.item(), tracker.rate(),
                                    tracker.global_rate(), time.asctime()),
                            flush=True)
            tracker.add(FLAGS.batch_size)
            scheduler.step()

            if xm.get_ordinal() == 0:
                if (epoch + 1) % 1 == 0 or epoch == int(config.num_epoch) - 1:
                    print('Epoch:', '%04d' % (epoch + 1), 'cost =',
                          '{:.6f}'.format(loss))
                    W, WT = model.parameters()
                    weights = W.T.detach().cpu().numpy()
                    dic = data.tokenizer.dictionary
                    vocab = [
                        key
                        for (key,
                             value) in sorted(dic.items(), key=lambda x: x[1])
                    ]
                    vocab = numpy.reshape(numpy.array(vocab), (-1, 1))
                    w2v = numpy.concatenate((vocab, weights), axis=1)
                    pandas.DataFrame(w2v).to_csv("word2vec.txt",
                                                 sep=' ',
                                                 header=None,
                                                 index=False)
                    with open("word2vec.txt", 'r+', encoding='utf-8') as file:
                        readcontent = file.read(
                        )  # store the read value of exe.txt into
                        file.seek(0, 0)  # Takes the cursor to top line
                        file.write(
                            str(len(vocab)) + " " + str(weights.shape[1]) +
                            "\n")  # convert int to str since write() deals
                        file.write(readcontent)
                    # torch.save(model, os.path.join(config.model_path, config.experiment_name, 'model.bin'))

                    a_score, s_score = eval(config.analogy_valid_file_path,
                                            config.similarity_valid_file_path)
                    print(
                        '[xla:{}]({}) analogy:{:.6f},sim:{:.6f},Loss={:.5f} Rate={:.2f} GlobalRate={:.2f} Time={}'
                        .format(xm.get_ordinal(), x, a_score, s_score,
                                loss.item(), tracker.rate(),
                                tracker.global_rate(), time.asctime()),
                        flush=True)
        # return after the full pass over the loader, not inside the batch loop
        return a_score, s_score
Example #21
def eval_(model_type):
    evaluate.eval(model_type=model_type)
Example #22
def calc():
    global store
    def reset_store():
        global store
        store.display.clear()
        store.eval.clear()

    if store.refresh is True:
        reset_store()
        store.refresh = False

    def pop_store():
        global store
        if len(store.display):
            del store.display[-1]
            del store.eval[-1]
            display_string = ''.join(store.display)
            
            output_field.write(display_string)

    def append_store(item_disp, item_eval=None):
        global store
        store.display.append(item_disp)
        if not item_eval:
            item_eval = item_disp
        
        store.eval.append(item_eval)      
        output_field.write(''.join(store.display))    

    output, info_state = st.beta_columns((3, 3))
    output_field = output.empty()
    info_state.write('click `AC` first')

    answer, delete, ac = st.beta_columns((3, 1, 2))
    answer_field = answer.empty()
    del_button = delete.button('⌫')
    ac_button = ac.button('AC')

    seven, eight, nine, bracket_l, bracket_r, _ = st.beta_columns(6)
    seven_button = seven.button('7')
    eight_button = eight.button('8')
    nine_button = nine.button('9') 
    bracket_l_button = bracket_l.button('(')
    bracket_r_button = bracket_r.button(')')    
        
    four, five, six, pwr_up, pwr_down, pwr_db_down = st.beta_columns(6)
    four_button = four.button('4')
    five_button = five.button('5')
    six_button = six.button('6')
    pwr_up_button = pwr_up.button('↑') 
    pwr_down_button = pwr_down.button('↓')
    pwr_db_down_button = pwr_db_down.button('⇓')   

    one, two, three, multi, divi, _ = st.beta_columns(6)
    one_button = one.button('1')
    two_button = two.button('2')
    three_button = three.button('3')
    multi_button = multi.button('x')   
    divi_button = divi.button('÷')     

    zero, dot, equals, plus, minus, _ = st.beta_columns(6)
    zero_button = zero.button('0')
    dot_button = dot.button('.')
    equals_button = equals.button('=')
    plus_button = plus.button('+')
    minus_button = minus.button('-')   

    if ac_button:
        output_field.write('')
        answer_field.write(' ')
        reset_store()

    if del_button:
        pop_store()

    if one_button:
        append_store('1')

    if two_button:
        append_store('2')

    if three_button:
        append_store('3')

    if four_button:
        append_store('4')

    if five_button:
        append_store('5')

    if six_button:
        append_store('6')

    if seven_button:
        append_store('7')

    if eight_button:
        append_store('8')

    if nine_button:
        append_store('9')

    if zero_button:
        append_store('0')

    if dot_button:
        append_store('.') 

    if plus_button:
        append_store('+')

    if minus_button:
        append_store('-') 

    if multi_button:
        append_store('x', '*') 

    if divi_button:
        append_store('÷', '/')

    if pwr_up_button:
        append_store('↑')

    if pwr_down_button:
        append_store('↓')

    if pwr_db_down_button:
        append_store('⇓')

    if bracket_l_button:
        append_store('(')

    if bracket_r_button:
        append_store(')')        

    if equals_button:
        answer_field.write(eval(''.join(store.eval)))
        output_field.write(''.join(store.display))
    
# addition
# subtraction
# multiplication
# division
# power up
# power down
# double power down
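Note that the display-to-eval mapping converts x to * and ÷ to /, but the ↑, ↓, and ⇓ tokens are stored verbatim, so the eval() invoked on the equals button is presumably the project's custom evaluator exercised by the tests in Examples #17-#19 and #26 rather than Python's builtin.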
Example #23
def train(model, train_loader, test_loader, outer, inner):
    '''
    Implement the training process.
    '''

    # Save the total training time
    total_time = 0

    # Save the initial accuracy
    '''
    print("Begin Evaluating...")
    test_clean_acc, test_final_acc = evaluate.eval(model, test_loader)
    record = {'epoch': 0, 'clean_acc': test_clean_acc, 'final_acc': test_final_acc, 'time': total_time}
    with open('result.txt', 'a') as f:
        f.write(json.dumps(record) + '\n')
    '''

    # begin training
    for epoch in range(config.Parameter_setting['max_epoch']):
        print("Now Begin Epoch: " + str(epoch))

        # change to training phase
        model.train()

        trades_criterion = torch.nn.KLDivLoss(reduction='sum')  # size_average=False is deprecated

        # train one batch
        for step, (x, label) in enumerate(train_loader):

            # save the start time
            start_time = time.time()

            # To GPU device
            x = x.to(config.Parameter_setting['device'])
            label = label.to(config.Parameter_setting['device'])

            model.eval()

            # define eta and soft_label
            eta = 0.001 * torch.randn(x.shape).detach().to(
                config.Parameter_setting['device'])
            eta.requires_grad_()
            soft_label = F.softmax(model(x), dim=1).detach()

            # Outer loop
            eta, final_acc = outer_loop(model, x, eta, outer, inner, label,
                                        soft_label, trades_criterion)

            model.train()

            # Clear previous gradients
            outer.optimizer.zero_grad()
            inner.optimizer.zero_grad()

            y_pred = model(x)
            clean_acc = torch_accuracy(y_pred, label, (1, ))[0].item()
            clean_loss = outer.criterion(y_pred, label)

            adv_pred = model(torch.clamp(x + eta.detach(), 0.0, 1.0))
            kl_loss = trades_criterion(F.log_softmax(adv_pred, dim=1),
                                       F.softmax(y_pred, dim=1)) / x.shape[0]

            loss = clean_loss + kl_loss
            loss.backward()

            # Update the weights
            outer.optimizer.step()
            inner.optimizer.step()

            # calculate the total time used
            end_time = time.time()
            total_time += end_time - start_time

            # print information
            if step % config.Parameter_setting['print_step'] == 0:
                print("Epoch: " + str(epoch) + " Batch step " + str(step) +
                      " LR: " + str(outer.lr_scheduler.get_lr()[0]) +
                      " Total Loss: " + str(loss.item()) + " Clean_acc: " +
                      str(clean_acc) + " Final_acc: " + str(final_acc) +
                      " Total Time: " + str(total_time))

            # Save information to txt file
            train_record = {
                'epoch': epoch,
                'clean_acc': clean_acc,
                'final_acc': final_acc,
                'time': total_time
            }
            with open('train_log.txt', 'a') as f:
                f.write(json.dumps(train_record) + '\n')

        # Evaluate after each epoch
        print("Begin Evaluating...")
        test_clean_acc, test_final_acc = evaluate.eval(model, test_loader)

        # Save test set information to txt file
        test_record = {
            'epoch': epoch,
            'clean_acc': test_clean_acc,
            'final_acc': test_final_acc
        }
        with open('test_log.txt', 'a') as f:
            f.write(json.dumps(test_record) + '\n')

        # Update learning rate
        outer.lr_scheduler.step()
        inner.lr_scheduler.step()
Example #24
def train(model, optimizer, criterion, path, epochs, epochs_log, model_name):
    start_time = time.time()
    # track the best dev accuracy across all epochs
    best_dev_acc_exact = -1.0
    best_dev_acc_f1 = -1.0
    for epoch in range(epochs):
        # set the model in training mode
        model.train()

        epoch_loss = 0
        batch_num = 0.0
        examples_count = 0.0

        for train_data in iter(data_preprocessor.train_iter):
            batch_num += 1.0
            examples_count += train_data.batch_size
            p1, p2 = model(train_data)
            optimizer.zero_grad()
            try:
                batch_loss = criterion(p1, train_data.start_idx) + criterion(p2, train_data.end_idx)
            except Exception as e:
                print(e)
                return (p1, p2, train_data)

            epoch_loss += batch_loss.item()
            batch_loss.backward()

            optimizer.step()

            time_delta = datetime.timedelta(seconds=np.round(time.time() - start_time, 0))
            sys.stdout.write(f'\rEpoch:{epoch} | Batch:{batch_num} | Time Running: {time_delta}')
            break  # stops after a single batch each epoch; likely a debug leftover

        if epoch % epochs_log == 0:

            train_loss = epoch_loss / examples_count
            dev_accuracy, dev_loss = eval(data_preprocessor.dev_iter,
                                              model,
                                              criterion,
                                              data_preprocessor.WORDS.vocab,
                                              calculate_loss=True,
                                              calculate_accuracy=True)
            dev_accuracy_exact = dev_accuracy.groupby('id')['Exact'].max().mean()
            dev_accuracy_f1 = dev_accuracy.groupby('id')['F1'].max().mean()

            train_accuracy, _ = eval(data_preprocessor.train_iter,
                                         model,
                                         criterion,
                                         data_preprocessor.WORDS.vocab,
                                         calculate_loss=False,
                                         calculate_accuracy=True)

            train_accuracy_exact = train_accuracy.groupby('id')['Exact'].max().mean()
            train_accuracy_f1 = train_accuracy.groupby('id')['F1'].max().mean()

            print(
                f'\nTrain Loss:{train_loss:.4f} Train Acc Exact:{train_accuracy_exact:.4f} Train Acc F1:{train_accuracy_f1:.4f}')
            print(
                f'Validation Loss :{dev_loss:.4f} Dev Acc Exact:{dev_accuracy_exact:.4f} Dev Acc F1:{dev_accuracy_f1:.4f}')

            print('Test Prediction Results')
            predict_context = "He was speaking after figures showed that the country's economy shrank by 20.4% in April - " \
                              "the largest monthly contraction on record - as the country spent its first full month in lockdown."
            predict_ques = "By how much did the country's economy shrank"

            print(get_prediction(predict_context,
                                 predict_ques,
                                 model,
                                 data_preprocessor.WORDS.vocab,
                                 data_preprocessor.CHAR.vocab))

            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': epoch_loss,
            }, path + '/' + model_name + '.torch')

            if dev_accuracy_f1 > best_dev_acc_f1:
                best_dev_acc_f1 = dev_accuracy_f1
                best_dev_acc_exact = dev_accuracy_exact
                torch.save(model, path + '/' + 'best_' + model_name + '.torch')

    print(f'Best Validation Results '
          f'Dev Acc Exact:{best_dev_acc_exact:.4f} '
          f'Dev Acc F1:{best_dev_acc_f1:.4f}')
Example #25
import pandas as pd
from tqdm import tqdm
from post_process import post_process
from evaluate import eval

tqdm.pandas()

train_en = pd.read_csv("./data/train_en.csv")
train_tcn = pd.read_csv("./data/train_tcn.csv")

dev_en = pd.read_csv("./data/dev_en.csv")
dev_tcn = pd.read_csv("./data/dev_tcn.csv")
dev_data = pd.read_csv("./data/dev.csv")
translated_test = pd.read_csv("./data/translated_test.csv")
test = pd.read_csv("./data/test_tcn.csv")

texts = dev_data.text.tolist()
preds = dev_data.translated.progress_apply(post_process)
refs = dev_data.ground_truth
score, preds, refs = eval(preds, refs)

# validation score
print(score)
Example #26
def test_pwr_down():
    assert eval('8↓3') == 2
Example #27
# model.load(type=train_type, version='lstm_1583287513.h5') # fixed_models
# model.load(type=train_type, version='lstm.h5')  # fixed_models
# model.load(type=train_type, version='lstm_1583488295.h5')
# model.load(type=train_type)
# train the model
model.train()
# save the model
model.save(type=train_type)
# load the model
# model.load(type=train_type)
# predict

predict = model.predict(test_x)
predict = predict.reshape(-1, output_size)
# evaluation function
eval(predict, df)

predict_df = pd.DataFrame(predict)
predict_df.columns = list(df.columns[1:].values.astype(str))
dim_date = x_date[-predict_time_interval - empty_time:]

predict_df['date'] = dim_date
predict_df.set_index('date', drop=True, inplace=True)
print(predict_df)

# line for a 5-point rise
test_1 = np.copy(test_x)
test_1[-1][-1, :] = test_1[-1][-1, :] + 0.02

predict_1 = model.predict(test_1)
predict_1 = predict_1.reshape(-1, output_size)
Example #28
import argparse
from evaluate import eval

parser = argparse.ArgumentParser()

parser.add_argument('-u',
                    '--analogy_file',
                    default=None,
                    help='model validation file')

parser.add_argument('-v',
                    '--similarity_file',
                    default=None,
                    help='model validation file')

args = parser.parse_args()

if args.analogy_file and args.similarity_file:
    eval(args.analogy_file.split(":"), args.similarity_file.split(":"))

print('FIN')
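Invocation then looks something like the following; the script name and file names are placeholders, with multiple validation files joined by ':' as the split above implies:

python eval_args.py -u analogy_dev.txt:analogy_test.txt -v sim_dev.txt:sim_test.txt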
Example #29
    def run_eval(self, data_loader, test_num=1000000):
        return eval(data_loader, self.my_model, test_num)
Example #30
def main(arguments):
    random.seed(arguments.seed)
    np.random.seed(arguments.seed)
    torch.manual_seed(arguments.seed)
    torch.cuda.manual_seed(arguments.seed)
    torch.cuda.set_device(arguments.gpu_id)

    splits = build_mnli_split(arguments, reverse=False)
    iters = build_nli_iterator_all(splits, arguments)
    multinli_train_iter = iters[0]
    multinli_dev_match_iter, multinli_dev_umatch_iter = iters[1], iters[2]

    print("initiate NLI model...")
    if not "vocab_size" in arguments:
        raise Exception("vocab size has not been determined")

    if arguments.model == "stacked":
        multinli_model = ResidualEncoder(arguments)
        # multinli_model = StackedEncoder(arguments)
    elif arguments.model == "infersent":
        multinli_model = InferSent(arguments)
    elif arguments.model == "decomposable":
        multinli_model = DecompAttention(arguments)
    else:
        raise Exception("invalid model")

    if arguments.resume:
        print("resuming previous training...")
        checkpoint = torch.load(arguments.resume, map_location=lambda storage, loc: storage)
        state_dict = checkpoint["state_dict"]
        # update to last learning rate
        arguments.learning_rate = checkpoint["lr"]
        # load model state dict
        multinli_model.load_state_dict(state_dict)
    else:
        multinli_model.load_pretrained_emb(arguments)

    multinli_model = to_cuda(multinli_model)
    multinli_model.display()

    optimizer = build_optimizer(multinli_model, arguments)
    multinli_model.train()

    # prepare to save model
    save_dir = datetime.now().strftime("experiment_D%d-%m_H%H-%M")
    save_dir = "{}_{}".format(arguments.model, save_dir)
    os.mkdir(os.path.join(arguments.checkpoint_dir, save_dir))

    step_i = 0
    for epoch in range(arguments.epoch):
        decay_i = epoch // arguments.decay_every
        lr = arguments.learning_rate * (arguments.decay_rate ** decay_i)
        adjust_learning_rate(optimizer, lr)
        print("learning rate is decayed to:", lr)

        multinli_train_iter.init_epoch()
        trainbar = tqdm(multinli_train_iter)
        for batch_i, batch in enumerate(trainbar):
            step_i += 1
            s1, s1_len = batch.premise
            s2, s2_len = batch.hypothesis
            target_y = batch.label
            s1, s1_len, s2, s2_len, target_y = to_cuda(s1, s1_len, s2, s2_len, target_y)

            class_scores = multinli_model(s1, s1_len, s2, s2_len)
            loss = multinli_model.compute_loss(class_scores, target_y)
            acc = (torch.max(class_scores, 1)[1] == target_y).sum().item() / float(len(batch))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if step_i % arguments.eval_step == 0:
                avg_acc, avg_loss = eval(multinli_model, multinli_dev_match_iter)
                print("training validation. step-%d. "
                      "average acc: %.3f. average loss: %.3f" %
                      (batch_i + 1, avg_acc, avg_loss))
                multinli_model.train()
                # save current model to ckpt file
                save_file = "%s_%s_model_epoch_%d_step_%d_acc_%.3f.pt.tar" % \
                            (arguments.model, arguments.nli_dataset,
                             (epoch + 1), (batch_i + 1), avg_acc)
                save_path = os.path.join(arguments.checkpoint_dir,
                                         save_dir,
                                         save_file)
                print("saving the model to checkpoint file", save_path)
                torch.save({"state_dict": multinli_model.state_dict(),
                            "train_args": arguments, "lr": lr},
                           save_path)

            trainbar.set_description("Epoch-%d, current acc: %.3f, loss: %.3f" %
                                     ((epoch+1), acc, loss.item()))
Example #31
                            mask)
                        total_loss_val += loss.item()
                        total_heat_val += heat_loss.item()
                        total_off_val += off_loss.item()
                        total_size_val += size_loss.item()
                        c_val += 1
                    except Exception as e:
                        # count_err_file+=1
                        # sys.stdout.write('\r'+'In epoch {0}, {1} error file(s) has found!'.format(epoch, count_err_file))
                        pass
            writer.add_scalars('total_loss', {
                'train': total_loss_train / c_train,
                'val': total_loss_val / c_val
            }, epoch)
            writer.add_scalars('heat_loss', {
                'train': total_heat_train / c_train,
                'val': total_heat_val / c_val
            }, epoch)
            # writer.add_scalars('off_loss', {'train':total_off_train/c_train, 'val':total_off_val/c_val}, epoch)
            # writer.add_scalars('size_loss', {'train':total_size_train/c_train, 'val':total_size_val/c_val}, epoch)
            iou_obj, thr = eval(opt.eval_path, model_path=model)
            writer.add_scalar('acc_det', iou_obj, epoch)
            # print(iou_obj, thr)
            if iou_obj > best:
                best = iou_obj
                torch.save(
                    model, os.path.join(saved_path,
                                        'model{}.pth'.format(epoch)))
                print('Save model in epoch {0} with accuracy detection {1}.'.
                      format(epoch, best))