Ejemplo n.º 1
0
    def _attempt(self):
        """Brute-force search for a solution.

        Tries every ordering of the board's missing numbers over its empty
        cells; the first arrangement that yields a complete, valid board is
        stored on ``self._result`` and the search stops.
        """
        missing = self._debut_board.get_missing_numbers()
        open_cells = self._debut_board.get_empty_cell_locations()

        for candidate in permutations(missing, len(missing)):

            board = self._debut_board.clone()

            for position, cell in enumerate(open_cells):
                try:
                    board.set_cell_value(cell.get_x(),
                                         cell.get_y(),
                                         candidate[position])
                except NumberAssignmentError:
                    # This ordering is inconsistent; abandon it early.
                    break

            # An aborted fill leaves empty cells — move on to the next ordering.
            if board.has_empty_cell():
                continue

            checker = Evaluator()
            checker.evaluate(board)

            if checker.is_complete() and checker.is_valid():
                self._result.complete = True
                self._result.success = True
                self._result.solution = board
                return
Ejemplo n.º 2
0
 def eval(self, x_test, kb_words, y_test):
     """Score the trained model on the given test data.

     Raises:
         OSError: if no model has been loaded yet.
     """
     # Guard clause: a model must be present before evaluation.
     if not self.model:
         raise OSError('Could not find a model. Call load(dir_path).')
     scorer = Evaluator(self.model,
                        self.kb_miner,
                        preprocessor=self.p)
     scorer.eval(x_test, kb_words, y_test)
Ejemplo n.º 3
0
 def run(self):
     """Create an Evaluator, fit it on the two stored lists and emit the
     evaluation results through the ``throw_resalts`` signal.
     """
     scorer = Evaluator()
     scorer.fit(self.list1, self.list2)
     self.throw_resalts.emit(scorer.evaluate())
# Load or train the network, driven by the config flag:
#   use_previous_model == 1 -> just reload a previously saved model
#   use_previous_model == 2 -> reload, run an intensive "strong" training
#                              pass, and save a separate copy
#   anything else           -> train from scratch and save
if lstm_conf['use_previous_model'] == 1:
    network.load(lstm_conf['load_file_name'])
elif lstm_conf['use_previous_model'] == 2:
    network.load(lstm_conf['save_file_name'])
    network.strong_train(train_x, train_y)
    network.save('strongtrain_test.h5')
else:
    network.train(train_x, train_y)
    network.save(lstm_conf['save_file_name'])

# step 5: Predict on both splits
train_pred = network.predict(train_x)
test_pred = network.predict(test_x)

# step 6: Evaluate
evaluator = Evaluator()
print('simple evaluation')

# method 1: overall trend accuracy on train and test predictions
acc = evaluator.evaluate_trend_simple(y_true=train_y, y_pred=train_pred)
print(acc)
acc = evaluator.evaluate_trend_simple(y_true=test_y, y_pred=test_pred)
print(acc)

# method 2: per-segment ("divided") trend accuracy lists
acc_train_list = evaluator.evaluate_divided_trend(train_y, train_pred)
acc_test_list = evaluator.evaluate_divided_trend(test_y, test_pred)
print('acc_train_list = ' + str(acc_train_list))
print('acc_test_list = ' + str(acc_test_list))

# step 7: Plot
Ejemplo n.º 5
0
    # Select device: move the model to the requested GPU, or stay on CPU.
    # The second CRFRepack_WC argument mirrors the CUDA flag.
    if args.gpu >= 0:
        if_cuda = True
        torch.cuda.set_device(args.gpu)
        ner_model.cuda()
        packer = CRFRepack_WC(len(tag2idx), True)
    else:
        if_cuda = False
        packer = CRFRepack_WC(len(tag2idx), False)


    # init the predictor and evaluator
    # predictor: decodes label sequences in batches of 50
    predictor = Predictor(tag2idx, packer, label_seq = True, batch_size = 50)
    
    # evaluator: scores predictions with the configured metric/method
    evaluator = Evaluator(predictor, packer, tag2idx, args.eva_matrix, args.pred_method)

    agent = Trainer(ner_model, packer, crit_ner, crit_lm, optimizer, evaluator, crf2corpus)
    
    # perform the evaluation for dev and test set of training corpus
    if args.local_eval:
        # assert len(train_args['dev_file']) == len(train_args['test_file'])
        num_corpus = len(train_args['dev_file'])


        # construct the pred and eval dataloader
        # (population of these lists continues past this excerpt)
        dev_tokens = []
        dev_labels = []

        test_tokens = []
        test_labels = []
Ejemplo n.º 6
0
        points = points.to(device)
    # NOTE(review): np.long is a deprecated alias removed in NumPy 1.24+;
    # this line will raise AttributeError on modern NumPy — confirm the
    # pinned NumPy version or switch to np.int64.
    return points, coords, feats, labels.astype(np.long)


if __name__ == '__main__':
    # Load evaluation settings (voxel size, class count, paths) from
    # ../config.json.
    config_path = os.path.join(os.path.abspath("../"), "config.json")
    with open(config_path) as config_file:
        config = json.load(config_file)
    config_file.close()  # redundant: the with-block already closed the file
    voxel_size = config["val_voxel_size"]
    num_class = config["class"]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Define a model and load the weights from the resume checkpoint
    model = ResUNet.Res16UNet34C(3, num_class).to(device)
    evaluator = Evaluator(num_class)
    model_dict = torch.load(os.path.join(config['resume_path'], 'parameters.pth'))["model"]
    model.load_state_dict(model_dict)
    model.eval()

    # NOTE(review): hard-coded absolute path to a single validation scan.
    test_data = read_plyfile("/home/gaoqiyu/文档/Stanford3dDataset_v1.2_Aligned_Version/ply/val/Area_1_hallway_2.ply")
    sinput, coords, feats, labels = data_preprocess(test_data, voxel_size)

    soutput = model(sinput.to(device))

    # Feed-forward pass and get the per-point argmax prediction
    _, pred = soutput.F.max(1)
    pred = pred.cpu().numpy()

    # Accumulate confusion statistics, then compute per-class and mean IoU.
    evaluator.generate(pred, labels)
    IOU, mIOU = evaluator.mIOU()
Ejemplo n.º 7
0
    # Reinitializable iterator: train and eval datasets share the same
    # graph handles (xs, ys) and are switched via the two init ops below.
    iter = tf.compat.v1.data.Iterator.from_structure(
        train_dataset.output_types, train_dataset.output_shapes)
    xs, ys = iter.get_next()

    train_init_op = iter.make_initializer(train_dataset)
    eval_init_op = iter.make_initializer(eval_dataset)

# Build the NADST training graph and collect its outputs/ops.
nadst = NADST()  # Make NADST model class
total_loss, train_op, global_step, train_summaries, losses,\
nb_tokens, state_out, evaluation_variable = nadst.model(xs=xs, ys=ys, src_lang=src_lang,
                                                        domain_lang=domain_lang, slot_lang=slot_lang,
                                                        len_val=max_len_val, args=args, training=True)

logging.info("# Load model complete")
#
evaluator = Evaluator(SLOTS_LIST)

# start training: prepare checkpoint saver and CSV log files
logging.info("# Open Tensor Session")
saver = tf.compat.v1.train.Saver(max_to_keep=5)

# Write CSV headers for the train/validation logs and dump the run params.
with open(args['path'] + '/train_log.csv', 'w') as f:
    f.write('epoch,step,gate_loss,lenval_loss,state_loss\n')
with open(args['path'] + '/val_log.csv', 'w') as f:
    f.write(
        'epoch,split,gate_loss,lenval_loss,state_loss,joint_gate_acc,joint_lenval_acc,joint_acc,f1,turn_acc\n'
    )
json.dump(args, open(args['path'] + '/params.json', 'w'))

with tf.compat.v1.Session() as sess:
    # Resume from the latest checkpoint in save_path if one exists.
    ckpt = tf.compat.v1.train.latest_checkpoint(args['save_path'])
Ejemplo n.º 8
0
    'max_keypoints': args.max_keypoints,
    'pre_train': args.pre_train,
}

# dataset: poses file + image directory
data_loader = CRDataset_train(args.poses_path, args.data_dir)

# model: GNN-based pose regressor
config = {"num_GNN_layers": args.num_GNN_layers}
model = MainModel(config)

# criterion: learnable-weight pose loss
criterion = PoseNetCriterion(args.sx, args.sq, args.learn_sxsq)

# eval
# target = pd.read_csv(args.poses_path, header = None, sep =" ")
# predict = pd.read_csv(args.prediction_result, header = None, sep =" ")
# target = target.iloc[:,1:].to_numpy()
# predict = predict.iloc[:,1:].to_numpy()

# plot_result(predict, target, data_loader)

# get_errors(target, predict)

# Ground-truth poses: drop the first column (presumably the image/frame
# identifier — verify against the poses file format).
test_target = pd.read_csv(args.poses_path, header=None, sep=" ")
test_target = test_target.iloc[:, 1:].to_numpy()

# Run the evaluator over the dataset with the loaded model.
eval_ = Evaluator(model, data_loader, criterion, args, superPoint_config,
                  test_target)
eval_.evaler()
Ejemplo n.º 9
0
    def __init__(self, config_):
        """Set up model, data loaders, optimizer, LR scheduler and logging.

        Args:
            config_: dict-like training configuration. Keys read here:
                class_label, multi_gpu, point_num, batch_size, class,
                fine_tune, resume_path, use_cuda, lr, momentum,
                weight_decay, ignore_label, poly_power, log_path.
        """
        self.config = config_
        self.best_pred = -math.inf
        self.train_iter_number = 0
        self.val_iter_number = 0
        self.epoch = 0
        self.class_name = self.config["class_label"]
        if self.config["multi_gpu"]:
            self.device_list = list(range(torch.cuda.device_count()))
            self.device = self.device_list[0]
        else:
            # NOTE(review): assumes CUDA is available; there is no CPU
            # fallback on this path.
            self.device = torch.device('cuda')
        self.loss_value = torch.tensor(0.0, requires_grad=True).to(self.device)
        self.point_number = self.config["point_num"]
        self.batch_size = self.config["batch_size"]
        self.model = ResUNet.Res16UNet34C(3, self.config["class"])
        if self.config["fine_tune"]:
            # Resume from a checkpoint and fine-tune with per-layer
            # learning rates: earlier layers get progressively smaller lr.
            model_dict = torch.load(
                os.path.join(self.config["resume_path"], 'weights_14.pth'),
                map_location=lambda storage, loc: storage.cuda(self.device))
            self.model.load_state_dict(model_dict)
            self.optimizer = torch.optim.SGD(
                [{
                    'params': self.model.convtr7p2s2.parameters(),
                    'lr': self.config["lr"] / 1e2
                }, {
                    'params': self.model.bntr7.parameters(),
                    'lr': self.config["lr"] / 1e2
                }, {
                    'params': self.model.block8.parameters(),
                    'lr': self.config["lr"] / 1e1
                }, {
                    'params': self.model.final.parameters(),
                    'lr': self.config["lr"]
                }],
                lr=self.config["lr"] / 1e4,
                momentum=self.config["momentum"],
                weight_decay=1e-4)
        if self.config["use_cuda"]:
            self.model = self.model.to(self.device)
        if self.config["multi_gpu"]:
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=self.device_list)

        self.loss = torch.nn.CrossEntropyLoss(
            ignore_index=self.config['ignore_label'])

        self.train_data = initialize_data_loader(S3DISDataset,
                                                 self.config,
                                                 phase='TRAIN',
                                                 threads=1,
                                                 augment_data=True,
                                                 shuffle=True,
                                                 repeat=True,
                                                 batch_size=1,
                                                 limit_numpoints=False)
        self.val_data = initialize_data_loader(S3DISDataset,
                                               self.config,
                                               threads=1,
                                               phase='VAL',
                                               augment_data=False,
                                               shuffle=True,
                                               repeat=False,
                                               batch_size=1,
                                               limit_numpoints=False)

        # BUG FIX: this default optimizer used to be created
        # unconditionally, silently overwriting the per-layer
        # fine-tuning optimizer built above. Only build it when we are
        # NOT fine-tuning.
        if not self.config["fine_tune"]:
            self.optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=self.config['lr'],
                momentum=self.config['momentum'],
                weight_decay=self.config['weight_decay'])
        # self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.config['lr'], weight_decay=self.config['weight_decay'])
        self.lr_scheduler = PolyLR(self.optimizer,
                                   max_iter=60000,
                                   power=self.config['poly_power'],
                                   last_step=-1)

        # One log directory per run, keyed by wall-clock time.
        log_path = os.path.join(self.config["log_path"], str(time.time()))
        os.makedirs(log_path, exist_ok=True)
        self.summary = SummaryWriter(log_path)
        self.evaluator = Evaluator(self.config["class"])

        self.load()
Ejemplo n.º 10
0
        odom_weight=args.odom_weight,
        learn_smooth_term=False
    )

# NOTE(review): this elif continues a model-selection chain whose first
# branch lies above this excerpt.
elif(args.model == 'vlocnet_lstm'):
    # LSTM variant: same loss weighting, smoothing terms kept fixed.
    train_criterion = criterion_lstm.Criterion(
        sx=args.sx,
        sq=args.sq,
        abs_weight=args.abs_weight,
        rel_weight=args.rel_weight,
        odom_weight=args.odom_weight,
        learn_smooth_term=False
    )
# optimizer
# param_list = [{'params': model.parameters()}]
# param_list.append({'params': [train_criterion.sx, train_criterion.sq]})


# config_name = args.config_file.split('/')[-1]
# config_name = config_name.split('.')[0]

# exp_name: "<dataset>_<scene>" identifier for this run
experiment_name = '{:s}_{:s}'.format(args.dataset, args.scene)


# Evaluator wraps the model + criterion over the test set; optimizer
# state is not resumed for evaluation.
evaluator = Evaluator(model, train_criterion,
                      test_dataset=test_set, config=args, resume_optim=False)

evaluator.test()
Ejemplo n.º 11
0
        pos_emb[i, i] = 1.0
    
    # Train and evaluate on the dev split when requested.
    if args.eval_on_dev:
 
        # BiLSTM tagger; output size = number of labels (max index + 1).
        ner_model = BiLSTM(word_emb, pos_emb, args.word_hid_dim, max(list(label2idx.values()))+1, args.dropout, args.batch, trainable_emb = args.trainable_emb)
        ner_model.rand_init()
        criterion = nn.NLLLoss()
        if args.cuda:
            ner_model.cuda()
        # Optimize trainable parameters only (frozen embeddings excluded).
        if args.opt == 'adam':
            optimizer = optim.Adam(filter(lambda p: p.requires_grad, ner_model.parameters()), lr=args.lr)
        else:
            optimizer = optim.SGD(filter(lambda p: p.requires_grad, ner_model.parameters()), lr=args.lr, momentum=args.momentum)
        
        predictor = Predictor()
        evaluator = Evaluator(predictor, label2idx, word2idx, pos2idx, args)

        # Book-keeping for best scores / early-stopping patience.
        best_scores = []
        best_dev_f1_sum = 0
        patience = 0

        print('\n'*2)
        print('='*10 + 'Phase1, train on train_data, epoch=args.epochs' + '='*10)
        print('\n'*2)
        # NOTE(review): the epoch loop body continues past this excerpt.
        for epoch in range(1, args.epochs+1):
            
            loss = train_epoch(train_data, ner_model, optimizer, criterion, args)
            print("*"*10 + "epoch:{}, loss:{}".format(epoch, loss) + "*"*10)
            eval_result_train = evaluator.evaluate(train_data, ner_model, args, cuda = args.cuda)
            print("On train_data: ")
            print_scores(eval_result_train, args)