Example #1
def evaluate_forecast():
    """
    Evaluate a forecast
    :return:
    """
    evaluator = Evaluator(web.get_db())
    return evaluator.api_evaluate_lat_lon(web.get_parameters())
Example #2
 def eval(self, x_test, kb_words, y_test):
     if self.model:
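         # Evaluate only if a model has been trained or loaded.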
         evaluator = Evaluator(self.model,
                               self.kb_miner,
                               preprocessor=self.p)
         evaluator.eval(x_test, kb_words, y_test)
     else:
         raise OSError('Could not find a model. Call load(dir_path).')
Example #3
 def run(self):
     """В функции создаётся объект 
     модели. Данные передаются в модель.
     Модель возвращает результат
     
     """
     evaluator = Evaluator()
     evaluator.fit(self.list1, self.list2)
     results = evaluator.evaluate()
     self.throw_resalts.emit(results)
Example #4
    def _attempt(self):

        missing_numbers = self._debut_board.get_missing_numbers()
        empty_cells = self._debut_board.get_empty_cell_locations()
        number_combinations = permutations(missing_numbers,
                                           len(missing_numbers))

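        # Brute force: try each ordering of the missing numbers over the empty cells.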
        for combo in number_combinations:

            cloned_board = self._debut_board.clone()
            cell_index = 0

            for empty_cell in empty_cells:
                try:
                    cloned_board.set_cell_value(empty_cell.get_x(),
                                                empty_cell.get_y(),
                                                combo[cell_index])
                    cell_index += 1
                except NumberAssignmentError:
                    break

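            # A failed assignment leaves empty cells behind, so move on to the next permutation.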
            if cloned_board.has_empty_cell():
                continue

            evaluator = Evaluator()
            evaluator.evaluate(cloned_board)

            if evaluator.is_complete() and evaluator.is_valid():
                self._result.complete = True
                self._result.success = True
                self._result.solution = cloned_board
                return
Example #5
clf = xgb.XGBClassifier(**param)
if not conf['use_previous_model']:
    clf.fit(train_x, train_y)
    clf.save_model('xgboost.m')
else:
    clf.load_model('xgboost.m')

train_pred = clf.predict(train_x)
test_pred = clf.predict(test_x)

show_feature_importance(clf, conf['feature_name'])
# plot_scatter(test_y, test_pred)
plot_classification(test_y, test_pred)

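# Score directional (trend) accuracy on the train and test splits.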
evaluator = Evaluator()

print('evaluate trend')
acc = evaluator.evaluate_trend(train_y, train_pred)
print(acc)
acc = evaluator.evaluate_trend(test_y, test_pred)
print(acc)

print('evaluate trend without stay')
acc = evaluator.evaluate_trend_2(train_y, train_pred)
print(acc)
acc = evaluator.evaluate_trend_2(test_y, test_pred)
print(acc)

print('simple evaluate')
acc = evaluator.evaluate_trend_simple(train_y, train_pred)
Example #6
import argparse
import datetime
import logging
import sys


if __name__ == '__main__':
    # Connect to the db.
    from feva import db

    # Send debug logging to stderr.
    log_handler = logging.StreamHandler(sys.stderr)
    log_handler.setLevel(logging.DEBUG)
    log_fmt = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    log_handler.setFormatter(log_fmt)
    logging.getLogger().setLevel(logging.DEBUG)
    logging.getLogger().addHandler(log_handler)

    # Parse CLI parameters.
    parser = argparse.ArgumentParser()
    parser.add_argument('lat', type=float)
    parser.add_argument('lon', type=float)
    args = parser.parse_args()
    print('Location: {}, {}'.format(args.lat, args.lon))

    # Evaluate and print result.
    eva = Evaluator(db)
    print(eva.evaluate_lat_lon(
        args.lon,
        args.lat,
        time=datetime.datetime.utcnow(),
        max_distance=10000))
Example #7
        points = points.to(device)
    return points, coords, feats, labels.astype(np.int64)  # np.long was removed from NumPy; use a fixed-width int


if __name__ == '__main__':
    config_path = os.path.join(os.path.abspath("../"), "config.json")
    with open(config_path) as config_file:
        config = json.load(config_file)
    voxel_size = config["val_voxel_size"]
    num_class = config["class"]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Define a model and load the weights
    model = ResUNet.Res16UNet34C(3, num_class).to(device)
    evaluator = Evaluator(num_class)
    model_dict = torch.load(os.path.join(config['resume_path'], 'parameters.pth'),
                            map_location=device)["model"]
    model.load_state_dict(model_dict)
    model.eval()

    test_data = read_plyfile("/home/gaoqiyu/文档/Stanford3dDataset_v1.2_Aligned_Version/ply/val/Area_1_hallway_2.ply")
    sinput, coords, feats, labels = data_preprocess(test_data, voxel_size)

    # Feed-forward pass and get the prediction
    soutput = model(sinput.to(device))
    _, pred = soutput.F.max(1)
    pred = pred.cpu().numpy()

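    # Compare predictions against labels to get per-class IoU and mean IoU.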
    evaluator.generate(pred, labels)
    IOU, mIOU = evaluator.mIOU()
Example #8
    iterator = tf.compat.v1.data.Iterator.from_structure(
        train_dataset.output_types, train_dataset.output_shapes)
    xs, ys = iterator.get_next()

    train_init_op = iterator.make_initializer(train_dataset)
    eval_init_op = iterator.make_initializer(eval_dataset)

nadst = NADST()  # Make NADST model class
total_loss, train_op, global_step, train_summaries, losses,\
nb_tokens, state_out, evaluation_variable = nadst.model(xs=xs, ys=ys, src_lang=src_lang,
                                                        domain_lang=domain_lang, slot_lang=slot_lang,
                                                        len_val=max_len_val, args=args, training=True)

logging.info("# Load model complete")
#
evaluator = Evaluator(SLOTS_LIST)

#start training
logging.info("# Open Tensor Session")
saver = tf.compat.v1.train.Saver(max_to_keep=5)

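# Create fresh CSV logs for training and validation metrics.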
with open(args['path'] + '/train_log.csv', 'w') as f:
    f.write('epoch,step,gate_loss,lenval_loss,state_loss\n')
with open(args['path'] + '/val_log.csv', 'w') as f:
    f.write(
        'epoch,split,gate_loss,lenval_loss,state_loss,joint_gate_acc,joint_lenval_acc,joint_acc,f1,turn_acc\n'
    )
with open(args['path'] + '/params.json', 'w') as f:
    json.dump(args, f)

with tf.compat.v1.Session() as sess:
    ckpt = tf.compat.v1.train.latest_checkpoint(args['save_path'])
Example #10
class Trainer(object):
    def __init__(self, config_):
        self.config = config_
        self.best_pred = -math.inf
        self.train_iter_number = 0
        self.val_iter_number = 0
        self.epoch = 0
        self.class_name = self.config["class_label"]
        if self.config["multi_gpu"]:
            self.device_list = list(range(torch.cuda.device_count()))
            self.device = self.device_list[0]
        else:
            self.device = torch.device('cuda')
        self.loss_value = torch.tensor(0.0, requires_grad=True).to(self.device)
        self.point_number = self.config["point_num"]
        self.batch_size = self.config["batch_size"]
        self.model = ResUNet.Res16UNet34C(3, self.config["class"])
        if self.config["fine_tune"]:
            model_dict = torch.load(
                os.path.join(self.config["resume_path"], 'weights_14.pth'),
                map_location=lambda storage, loc: storage.cuda(self.device))
            self.model.load_state_dict(model_dict)
            self.optimizer = torch.optim.SGD(
                [{
                    'params': self.model.convtr7p2s2.parameters(),
                    'lr': self.config["lr"] / 1e2
                }, {
                    'params': self.model.bntr7.parameters(),
                    'lr': self.config["lr"] / 1e2
                }, {
                    'params': self.model.block8.parameters(),
                    'lr': self.config["lr"] / 1e1
                }, {
                    'params': self.model.final.parameters(),
                    'lr': self.config["lr"]
                }],
                lr=self.config["lr"] / 1e4,
                momentum=self.config["momentum"],
                weight_decay=1e-4)
        if self.config["use_cuda"]:
            self.model = self.model.to(self.device)
        if self.config["multi_gpu"]:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.device_list)

        self.loss = torch.nn.CrossEntropyLoss(
            ignore_index=self.config['ignore_label'])

        self.train_data = initialize_data_loader(S3DISDataset,
                                                 self.config,
                                                 phase='TRAIN',
                                                 threads=1,
                                                 augment_data=True,
                                                 shuffle=True,
                                                 repeat=True,
                                                 batch_size=1,
                                                 limit_numpoints=False)
        self.val_data = initialize_data_loader(S3DISDataset,
                                               self.config,
                                               threads=1,
                                               phase='VAL',
                                               augment_data=False,
                                               shuffle=True,
                                               repeat=False,
                                               batch_size=1,
                                               limit_numpoints=False)

        if not self.config["fine_tune"]:
            # Keep the per-layer fine-tuning optimizer created above.
            self.optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=self.config['lr'],
                momentum=self.config['momentum'],
                weight_decay=self.config['weight_decay'])
            # self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'])
        self.lr_scheduler = PolyLR(self.optimizer,
                                   max_iter=60000,
                                   power=self.config['poly_power'],
                                   last_step=-1)

        log_path = os.path.join(self.config["log_path"], str(time.time()))
        if not os.path.exists(log_path):
            os.mkdir(log_path)
        self.summary = SummaryWriter(log_path)
        self.evaluator = Evaluator(self.config["class"])

        self.load()

    def train(self, epoch_):
        epoch_loss = []
        self.model.train()
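        # Accumulate the loss over batch_size iterations, then take one optimizer step.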
        for ith, data_dict in enumerate(self.train_data):
            point, labels = self.data_preprocess(data_dict, 'train')
            output_sparse = self.model(point)
            pred = output_sparse.F
            self.reset_loss(labels)
            self.loss_value = self.loss(pred, labels) + self.loss_value
            epoch_loss.append(
                self.loss(pred, labels).item() /
                self.config["accumulate_gradient"])
            self.train_iter_number += 1
            if self.train_iter_number % self.batch_size == 0:
                self.loss_value /= (self.config["accumulate_gradient"] *
                                    self.batch_size)
                self.loss_value.backward()
                lr_value = self.optimizer.state_dict()['param_groups'][0]['lr']
                self.optimizer.step()
                self.lr_scheduler.step(self.train_iter_number)
                self.optimizer.zero_grad()
                torch.cuda.empty_cache()
                self.summary.add_scalar('train/loss', self.loss_value,
                                        self.train_iter_number)
                self.summary.add_scalar('train/lr', lr_value,
                                        self.train_iter_number)
                print(
                    "train epoch:  {}/{}, ith:  {}/{}, loss:  {:.4f}, lr:  {:.6f}"
                    .format(epoch_, self.config['epoch'],
                            self.train_iter_number, len(self.train_data),
                            self.loss_value.item(), lr_value))
                self.loss_value = 0
            if ith == len(self.train_data) - 1:
                break
        average_loss = np.nanmean(epoch_loss)
        self.summary.add_scalar('train/loss_epoch', average_loss, epoch_)
        print("epoch:    {}/{}, average_loss:    {:.4f}".format(
            epoch_, self.config['epoch'], average_loss))
        print(
            '------------------------------------------------------------------'
        )

    def eval(self, epoch_):
        self.model.eval()
        torch.cuda.empty_cache()
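        # Collect per-iteration metrics; epoch-level averages are logged below.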
        IOU_epoch = []
        mIOU_epoch = []
        Acc_epoch = []
        precision_epoch = []
        recall_epoch = []
        epoch_loss = []
        for ith, data_dict in enumerate(self.val_data):
            point, labels = self.data_preprocess(data_dict, 'val')
            with torch.no_grad():
                output = self.model(point)
            pred = output.F
            loss_eval = self.loss(pred,
                                  labels) / self.config["accumulate_gradient"]
            epoch_loss.append(loss_eval.item())
            mIOU, Acc, precision, recall = self.evaluator.generate(
                pred.max(1)[1].cpu(), labels.cpu())
            IOU, _ = self.evaluator.mIOU()

            IOU_epoch.append(IOU)
            mIOU_epoch.append(mIOU)
            Acc_epoch.append(Acc)
            precision_epoch.append(precision)
            recall_epoch.append(recall)

            self.val_iter_number += 1
            self.summary.add_scalar('val/mIOU', mIOU, self.val_iter_number)
            self.summary.add_scalar('val/accuracy', Acc, self.val_iter_number)
            self.summary.add_scalar('val/precision', precision,
                                    self.val_iter_number)
            self.summary.add_scalar('val/recall', recall, self.val_iter_number)
            self.summary.add_scalar('val/loss', loss_eval,
                                    self.val_iter_number)

            print(
                "val epoch:  {}/{}, ith:  {}/{}, loss:  {:.4f}, mIOU:  {:.2%}, accuracy:  {:.2%}, precision:  {:.2%}, recall:  {:.2%}"
                .format(epoch_, self.config['epoch'], ith, len(self.val_data),
                        loss_eval, mIOU, Acc, precision, recall))
        average_loss = np.nanmean(epoch_loss)
        average_IOU = np.nanmean(IOU_epoch, axis=0)
        average_mIOU = np.nanmean(mIOU_epoch)
        average_Acc = np.nanmean(Acc_epoch)
        average_precision = np.nanmean(precision_epoch)
        average_recall = np.nanmean(recall_epoch)

        self.summary.add_scalar('val/acc_epoch', average_Acc, epoch_)
        self.summary.add_scalar('val/loss_epoch', average_loss, epoch_)
        self.summary.add_scalar('val/mIOU_epoch', average_mIOU, epoch_)
        self.summary.add_scalar('val/precision_epoch', average_precision,
                                epoch_)
        self.summary.add_scalar('val/recall_epoch', average_recall, epoch_)

        print(
            "epoch:  {}/{}, average_loss:  {:.4f}, average_mIOU:  {:.2%}, average_accuracy:  {:.2%}, average_precision:  {:.2%}, average_recall:  {:.2%}"
            .format(epoch_, self.config['epoch'], average_loss, average_mIOU,
                    average_Acc, average_precision, average_recall))
        print("class name  ", self.class_name)
        print("IOU/class:  ", average_IOU)
        print(
            '------------------------------------------------------------------'
        )
        if average_mIOU > self.best_pred:
            self.best_pred = average_mIOU
            self.save(epoch_)

    def load(self):
        load_path = os.path.join(self.config["resume_path"], 'parameters.pth')
        if os.path.isfile(load_path):
            load_parameters = torch.load(
                load_path,
                map_location=lambda storage, loc: storage.cuda(self.device))
            self.optimizer.load_state_dict(load_parameters['optimizer'])
            self.lr_scheduler.load_state_dict(load_parameters['lr_scheduler'])
            self.model.load_state_dict(load_parameters['model'])
            self.best_pred = load_parameters['best_prediction']
            self.epoch = load_parameters['epoch']
            self.train_iter_number = load_parameters['train_iter_number']
            self.val_iter_number = load_parameters['val_iter_number']
            self.loss_value = load_parameters['loss_value']
            self.model = self.model.to(self.device)
            self.lr_scheduler.step(self.train_iter_number)

    def save(self, epoch_):
        if not os.path.exists(self.config["resume_path"]):
            os.mkdir(self.config["resume_path"])
        torch.save(
            {
                'best_prediction': self.best_pred,
                'model': self.model.state_dict(),
                'optimizer': self.optimizer.state_dict(),
                'lr_scheduler': self.lr_scheduler.state_dict(),
                'epoch': epoch_,
                'train_iter_number': self.train_iter_number,
                'val_iter_number': self.val_iter_number,
                'loss_value': self.loss_value
            }, os.path.join(self.config["resume_path"], 'parameters.pth'))

    def data_preprocess(self, data_dict, data_type='train'):
        coords = data_dict[0]
        feats = data_dict[1] / 255 - 0.5
        labels = data_dict[2]
        length = coords.shape[0]

        if length > self.point_number and data_type == 'train':
            inds = np.random.choice(np.arange(length),
                                    self.point_number,
                                    replace=False)
            coords, feats, labels = coords[inds], feats[inds], labels[inds]

        if data_type == 'train':
            # For some networks, making the network invariant to even, odd coords is important
            coords[:, :3] += (torch.rand(3) * 100).type_as(coords)

        points = ME.SparseTensor(feats, coords.int())

        if self.config["use_cuda"]:
            points, labels = points.to(self.device), labels.to(self.device)
        return points, labels.long()

    def reset_loss(self, label):
        # Re-weight the loss by inverse class frequency in the current batch.
        data_ = label.cpu().numpy()
        number = data_.shape[0]
        # Cast to float: bincount returns integers, which would truncate number / i.
        counts = np.bincount(data_, minlength=self.config["class"]).astype(np.float64)
        for ith, i in enumerate(counts):
            if i != 0:
                counts[ith] = number / i
        counts = counts / np.sum(counts)
        self.loss = torch.nn.CrossEntropyLoss(
            weight=torch.from_numpy(counts).cuda().float(),
            ignore_index=self.config['ignore_label'])
Example #11
train_x, train_y, train_price, train_mean_price = data_transform_lstm_mv(
    train, lstm_conf['time_step'])
test_x, test_y, test_price, test_mean_price = data_transform_lstm_mv(
    test, lstm_conf['time_step'])

# step 4: Create and train model_weight
network = LSTM_MV(lstm_conf)
if lstm_conf['use_previous_model']:
    network.load(lstm_conf['load_file_name'])
else:
    network.train(train_x, train_y)
    network.save(lstm_conf['save_file_name'])

# step 5: Predict
train_pred = network.predict(train_x)
test_pred = network.predict(test_x)

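# Shift each prediction by its corresponding reference price.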
for i in range(len(train_pred[0])):
    train_pred[0][i] += train_price[i]
for i in range(len(test_pred[0])):
    test_pred[0][i] += test_price[i]

# step 6: Evaluate
evaluator = Evaluator()
train_acc = evaluator.evaluate_mean_and_variance(train_mean_price, train_pred)
print('train=', train_acc)
test_acc = evaluator.evaluate_mean_and_variance(test_mean_price, test_pred)
print('test=', test_acc)

plot_confidence_interval(test_mean_price, test_price, test_pred[0],
                         test_pred[1], 3000)
Example #12
        odom_weight=args.odom_weight,
        learn_smooth_term=False
    )

elif args.model == 'vlocnet_lstm':
    train_criterion = criterion_lstm.Criterion(
        sx=args.sx,
        sq=args.sq,
        abs_weight=args.abs_weight,
        rel_weight=args.rel_weight,
        odom_weight=args.odom_weight,
        learn_smooth_term=False
    )
# optimizer
# param_list = [{'params': model.parameters()}]
# param_list.append({'params': [train_criterion.sx, train_criterion.sq]})


# config_name = args.config_file.split('/')[-1]
# config_name = config_name.split('.')[0]

# exp_name
experiment_name = '{:s}_{:s}'.format(args.dataset, args.scene)


# trainer
evaluator = Evaluator(model, train_criterion,
                      test_dataset=test_set, config=args, resume_optim=False)

evaluator.test()
Example #13
        crit_lm.cuda()
        ner_model.cuda()
        packer = CRFRepack_WC(len(tag2idx), True)
    else:
        packer = CRFRepack_WC(len(tag2idx), False)

    if args.start_epoch != 0:
        args.start_epoch += 1
        args.epoch = args.start_epoch + args.epoch
        epoch_list = range(args.start_epoch, args.epoch)
    else:
        args.epoch += 1
        epoch_list = range(1, args.epoch)

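    # Build the predictor and evaluator that drive training and evaluation.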
    predictor = Predictor(tag2idx, packer, label_seq=True, batch_size=50)
    evaluator = Evaluator(predictor, packer, tag2idx, args.eva_matrix,
                          args.pred_method)

    trainer = Trainer(ner_model, packer, crit_ner, crit_lm, optimizer,
                      evaluator, crf2corpus, args.plateau)
    trainer.train(crf2train_dataloader, crf2dev_dataloader, dev_dataset_loader,
                  epoch_list, args)

    trainer.eval_batch_corpus(dev_dataset_loader, args.dev_file,
                              args.corpus2crf)

    try:
        print("Load from PICKLE")
        single_testset = pickle.load(
            open(args.pickle + "/temp_single_test.p", "rb"))
        test_dataset_loader = []
        for datasets_tuple in single_testset:
Example #14
        pos_emb[i, i] = 1.0
    
    if args.eval_on_dev:
 
        ner_model = BiLSTM(word_emb, pos_emb, args.word_hid_dim,
                           max(list(label2idx.values())) + 1, args.dropout,
                           args.batch, trainable_emb=args.trainable_emb)
        ner_model.rand_init()
        criterion = nn.NLLLoss()
        if args.cuda:
            ner_model.cuda()
        if args.opt == 'adam':
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, ner_model.parameters()), lr=args.lr)
        else:
            optimizer = optim.SGD(filter(lambda p: p.requires_grad, ner_model.parameters()), lr=args.lr, momentum=args.momentum)
        
        predictor = Predictor()
        evaluator = Evaluator(predictor, label2idx, word2idx, pos2idx, args)

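        # Bookkeeping for the best dev scores and early-stopping patience.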
        best_scores = []
        best_dev_f1_sum = 0
        patience = 0

        print('\n'*2)
        print('='*10 + 'Phase1, train on train_data, epoch=args.epochs' + '='*10)
        print('\n'*2)
        for epoch in range(1, args.epochs+1):
            
            loss = train_epoch(train_data, ner_model, optimizer, criterion, args)
            print("*"*10 + "epoch:{}, loss:{}".format(epoch, loss) + "*"*10)
            eval_result_train = evaluator.evaluate(train_data, ner_model, args, cuda=args.cuda)
            print("On train_data: ")
            print_scores(eval_result_train, args)
Example #15
    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
        packer = CRFRepack_WC(len(tag2idx), True)
    else:
        if_cuda = False
        packer = CRFRepack_WC(len(tag2idx), False)


    # Initialize the predictor and evaluator.
    predictor = Predictor(tag2idx, packer, label_seq=True, batch_size=50)

    evaluator = Evaluator(predictor, packer, tag2idx, args.eva_matrix, args.pred_method)

    agent = Trainer(ner_model, packer, crit_ner, crit_lm, optimizer, evaluator, crf2corpus)
    
    # Perform the evaluation on the dev and test sets of the training corpus
    if args.local_eval:
        # assert len(train_args['dev_file']) == len(train_args['test_file'])
        num_corpus = len(train_args['dev_file'])


        # construct the pred and eval dataloader
        dev_tokens = []
        dev_labels = []

        test_tokens = []
        test_labels = []
Example #16
    'max_keypoints': args.max_keypoints,
    'pre_train': args.pre_train,
}

# dataset
data_loader = CRDataset_train(args.poses_path, args.data_dir)

# model
config = {"num_GNN_layers": args.num_GNN_layers}
model = MainModel(config)

# criterion
criterion = PoseNetCriterion(args.sx, args.sq, args.learn_sxsq)

# eval
# target = pd.read_csv(args.poses_path, header = None, sep =" ")
# predict = pd.read_csv(args.prediction_result, header = None, sep =" ")
# target = target.iloc[:,1:].to_numpy()
# predict = predict.iloc[:,1:].to_numpy()

# plot_result(predict, target, data_loader)

# get_errors(target, predict)

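# Load the ground-truth poses, dropping the first column before converting to NumPy.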
test_target = pd.read_csv(args.poses_path, header=None, sep=" ")
test_target = test_target.iloc[:, 1:].to_numpy()

eval_ = Evaluator(model, data_loader, criterion, args, superPoint_config,
                  test_target)
eval_.evaler()
Example #17
if lstm_conf['use_previous_model'] == 1:
    network.load(lstm_conf['load_file_name'])
elif lstm_conf['use_previous_model'] == 2:
    network.load(lstm_conf['save_file_name'])
    network.strong_train(train_x, train_y)
    network.save('strongtrain_test.h5')
else:
    network.train(train_x, train_y)
    network.save(lstm_conf['save_file_name'])

# step 5: Predict
train_pred = network.predict(train_x)
test_pred = network.predict(test_x)

# step 6: Evaluate
evaluator = Evaluator()
print('simple evaluation')

# method1
acc = evaluator.evaluate_trend_simple(y_true=train_y, y_pred=train_pred)
print(acc)
acc = evaluator.evaluate_trend_simple(y_true=test_y, y_pred=test_pred)
print(acc)

# method 2
acc_train_list = evaluator.evaluate_divided_trend(train_y, train_pred)
acc_test_list = evaluator.evaluate_divided_trend(test_y, test_pred)
print('acc_train_list = ' + str(acc_train_list))
print('acc_test_list = ' + str(acc_test_list))

# step 7: Plot
Example #18
test_y = one_hot_encode(test_y, 3)

# step 4: Create and train model_weight
network = CNN(cnn_conf)
if cnn_conf['use_previous_model']:
    network.load(cnn_conf['file_name'])
else:
    network.train(train_x, train_y)
    network.save(cnn_conf['file_name'])

# step 5: Predict
train_pred = network.predict(train_x)
test_pred = network.predict(test_x)

# step 6: Evaluate
evaluator = Evaluator()

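# Convert probability vectors back into class labels before scoring.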
train_y = one_hot_decode(batch_labelize_prob_vector(train_y))
train_pred = one_hot_decode(batch_labelize_prob_vector(train_pred))
test_y = one_hot_decode(batch_labelize_prob_vector(test_y))
test_pred = one_hot_decode(batch_labelize_prob_vector(test_pred))
# plot_scatter(test_y, test_pred)

# method 1
print('evaluate trend')
total_acc, stay_acc, rise_dec_acc = evaluator.evaluate_trend(y_true=train_y,
                                                             y_pred=train_pred)
print(total_acc, stay_acc, rise_dec_acc)
total_acc, stay_acc, rise_dec_acc = evaluator.evaluate_trend(y_true=test_y,
                                                             y_pred=test_pred)
print(total_acc, stay_acc, rise_dec_acc)